Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-07 17:41:52 +08:00)
[Backend] TRT: cast GPU input from int64 to int32 and output from int32 to int64; support building CUDA files on Windows (#426)
* TRT cast int64 to int32
* windows cmake build cuda src
* fix windows cmake error when build cuda src
* add a notice in windows gpu build doc
* cmake add cuda std=11
* TRT cast output from int32 to int64
* nits
* trt get original input output dtype
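
For reference, the device-side dtype conversion this commit introduces is just an element-wise copy with a static_cast, launched on the backend's CUDA stream so it stays ordered with the enqueue and memcpy calls in the diff below (apparently fastdeploy/backends/tensorrt/trt_backend.cc). The sketch that follows only illustrates that idea under assumed names (CastKernel, CastInt64ToInt32); the actual helper lives in fastdeploy/function/cuda_cast.h and its CudaCast signature may differ.

#include <cstdint>
#include <cuda_runtime.h>

// Illustrative element-wise cast kernel: each thread converts one element,
// e.g. int64 -> int32 before the buffer is bound to a TensorRT input.
template <typename InT, typename OutT>
__global__ void CastKernel(const InT* in, OutT* out, int n) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    out[idx] = static_cast<OutT>(in[idx]);
  }
}

// Launch on the same stream the backend uses, so the cast is ordered with
// the subsequent enqueue / cudaMemcpyAsync calls on that stream.
void CastInt64ToInt32(const int64_t* in, int32_t* out, int n,
                      cudaStream_t stream) {
  constexpr int kBlock = 256;
  int grid = (n + kBlock - 1) / kBlock;
  CastKernel<int64_t, int32_t><<<grid, kBlock, 0, stream>>>(in, out, n);
}
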
@@ -13,8 +13,10 @@
 // limitations under the License.
 
 #include "fastdeploy/backends/tensorrt/trt_backend.h"
+#include "fastdeploy/function/cuda_cast.h"
 
 #include <cstring>
+#include <unordered_map>
 
 #include "NvInferRuntime.h"
 #include "fastdeploy/utils/utils.h"
@@ -234,6 +236,7 @@ bool TrtBackend::InitFromOnnx(const std::string& model_file,
     inputs_desc_[i].name = name;
     inputs_desc_[i].shape.assign(shape.begin(), shape.end());
     inputs_desc_[i].dtype = ReaderDtypeToTrtDtype(onnx_reader.inputs[i].dtype);
+    inputs_desc_[i].original_dtype = ReaderDtypeToFDDtype(onnx_reader.inputs[i].dtype);
     auto info = ShapeRangeInfo(shape);
     info.name = name;
     auto iter_min = option.min_shape.find(name);
@@ -256,6 +259,8 @@ bool TrtBackend::InitFromOnnx(const std::string& model_file,
     outputs_desc_[i].shape.assign(shape.begin(), shape.end());
     outputs_desc_[i].dtype =
         ReaderDtypeToTrtDtype(onnx_reader.outputs[i].dtype);
+    outputs_desc_[i].original_dtype =
+        ReaderDtypeToFDDtype(onnx_reader.outputs[i].dtype);
   }
 
   if (option_.external_stream_) {
@@ -315,9 +320,29 @@ bool TrtBackend::Infer(std::vector<FDTensor>& inputs,
     FDERROR << "Failed to Infer with TensorRT." << std::endl;
     return false;
   }
+  for (size_t i = 0; i < outputs->size(); ++i) {
+    // if the final output tensor's dtype is different from the model output tensor's dtype,
+    // then we need cast the data to the final output's dtype
+    auto model_output_dtype = GetFDDataType(outputs_device_buffer_[(*outputs)[i].name].dtype());
+    if ((*outputs)[i].dtype != model_output_dtype) {
+      FDTensor output_tensor;
+      output_tensor.SetExternalData((*outputs)[i].shape, model_output_dtype,
+                                    outputs_device_buffer_[(*outputs)[i].name].data(),
+                                    Device::GPU);
+
+      casted_output_tensors_[(*outputs)[i].name].Resize((*outputs)[i].shape, (*outputs)[i].dtype,
+                                                        (*outputs)[i].name, Device::GPU);
+      CudaCast(output_tensor, &casted_output_tensors_[(*outputs)[i].name], stream_);
+    } else {
+      casted_output_tensors_[(*outputs)[i].name].SetExternalData(
+          (*outputs)[i].shape, model_output_dtype,
+          outputs_device_buffer_[(*outputs)[i].name].data(),
+          Device::GPU);
+    }
+  }
   for (size_t i = 0; i < outputs->size(); ++i) {
     FDASSERT(cudaMemcpyAsync((*outputs)[i].Data(),
-                             outputs_device_buffer_[(*outputs)[i].name].data(),
+                             casted_output_tensors_[(*outputs)[i].name].Data(),
                              (*outputs)[i].Nbytes(), cudaMemcpyDeviceToHost,
                              stream_) == 0,
              "[ERROR] Error occurs while copy memory from GPU to CPU.");
@@ -329,6 +354,17 @@ bool TrtBackend::Infer(std::vector<FDTensor>& inputs,
 }
 
 void TrtBackend::GetInputOutputInfo() {
+  // Read the original dtypes from inputs_desc_ and outputs_desc_
+  std::unordered_map<std::string, FDDataType> inputs_original_dtype_map;
+  std::unordered_map<std::string, FDDataType> outputs_original_dtype_map;
+  for (size_t i = 0; i < inputs_desc_.size(); ++i) {
+    inputs_original_dtype_map[inputs_desc_[i].name] = inputs_desc_[i].original_dtype;
+  }
+  for (size_t i = 0; i < outputs_desc_.size(); ++i) {
+    outputs_original_dtype_map[outputs_desc_[i].name] = outputs_desc_[i].original_dtype;
+  }
+
+  // Re-read the tensor infos from TRT model and write into inputs_desc_ and outputs_desc_
   std::vector<TrtValueInfo>().swap(inputs_desc_);
   std::vector<TrtValueInfo>().swap(outputs_desc_);
   inputs_desc_.clear();
@@ -339,11 +375,14 @@ void TrtBackend::GetInputOutputInfo() {
     auto shape = ToVec(engine_->getBindingDimensions(i));
     auto dtype = engine_->getBindingDataType(i);
     if (engine_->bindingIsInput(i)) {
-      inputs_desc_.emplace_back(TrtValueInfo{name, shape, dtype});
+      auto original_dtype = inputs_original_dtype_map.count(name) ? inputs_original_dtype_map[name] : GetFDDataType(dtype);
+      inputs_desc_.emplace_back(TrtValueInfo{name, shape, dtype, original_dtype});
       inputs_device_buffer_[name] = FDDeviceBuffer(dtype);
     } else {
-      outputs_desc_.emplace_back(TrtValueInfo{name, shape, dtype});
+      auto original_dtype = outputs_original_dtype_map.count(name) ? outputs_original_dtype_map[name] : GetFDDataType(dtype);
+      outputs_desc_.emplace_back(TrtValueInfo{name, shape, dtype, original_dtype});
       outputs_device_buffer_[name] = FDDeviceBuffer(dtype);
+      casted_output_tensors_[name] = FDTensor();
     }
   }
   bindings_.resize(num_binds);
@@ -358,11 +397,12 @@ void TrtBackend::SetInputs(const std::vector<FDTensor>& inputs) {
 
     if (item.device == Device::GPU) {
       if (item.dtype == FDDataType::INT64) {
-        // TODO(liqi): cast int64 to int32
-        // TRT don't support INT64
-        FDASSERT(false,
-                 "TRT don't support INT64 input on GPU, "
-                 "please use INT32 input");
+        inputs_device_buffer_[item.name].resize(dims);
+        FDTensor input_tensor;
+        input_tensor.SetExternalData(item.shape, FDDataType::INT32,
+                                     inputs_device_buffer_[item.name].data(),
+                                     Device::GPU);
+        CudaCast(item, &input_tensor, stream_);
       } else {
         // no copy
         inputs_device_buffer_[item.name].SetExternalData(dims, item.Data());
@@ -413,7 +453,7 @@ void TrtBackend::AllocateOutputsBuffer(std::vector<FDTensor>* outputs) {
     std::vector<int64_t> shape(output_dims.d,
                                output_dims.d + output_dims.nbDims);
     (*outputs)[ori_idx].is_pinned_memory = option_.enable_pinned_memory;
-    (*outputs)[ori_idx].Resize(shape, GetFDDataType(outputs_desc_[i].dtype),
+    (*outputs)[ori_idx].Resize(shape, outputs_desc_[i].original_dtype,
                                outputs_desc_[i].name);
 
     // Allocate output buffer memory
@@ -629,7 +669,7 @@ TensorInfo TrtBackend::GetInputInfo(int index) {
   info.name = inputs_desc_[index].name;
   info.shape.assign(inputs_desc_[index].shape.begin(),
                     inputs_desc_[index].shape.end());
-  info.dtype = GetFDDataType(inputs_desc_[index].dtype);
+  info.dtype = inputs_desc_[index].original_dtype;
   return info;
 }
 
@@ -649,7 +689,7 @@ TensorInfo TrtBackend::GetOutputInfo(int index) {
   info.name = outputs_desc_[index].name;
   info.shape.assign(outputs_desc_[index].shape.begin(),
                     outputs_desc_[index].shape.end());
-  info.dtype = GetFDDataType(outputs_desc_[index].dtype);
+  info.dtype = outputs_desc_[index].original_dtype;
   return info;
 }
 