Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 16:48:03 +08:00)
Commit: code refine
@@ -51,7 +51,7 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
       config_.SetOptimCacheDir(opt_cache_dir);
     }
     config_.EnableTensorRtEngine(option.trt_option.max_workspace_size,
-                                 option.trt_option.max_batch_size, 20,
+                                 option.trt_option.max_batch_size, 3,
                                  precision, use_static);
     SetTRTDynamicShapeToConfig(option);
   }
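
In this hunk (and in the kInt8 hunk below) the third positional argument to config_.EnableTensorRtEngine drops from 20 to 3. Assuming the upstream paddle_infer::Config::EnableTensorRtEngine signature (workspace_size, max_batch_size, min_subgraph_size, precision, use_static, use_calib_mode), that argument is min_subgraph_size, so the change lets Paddle-TRT offload much smaller subgraphs to TensorRT. A minimal standalone sketch of the call with the new value; everything outside the diff (model paths, sizes, header path) is illustrative only:

    #include "paddle_inference_api.h"  // header path depends on the Paddle Inference package layout

    int main() {
      paddle_infer::Config config;
      config.SetModel("model.pdmodel", "model.pdiparams");
      config.EnableUseGpu(/*memory_pool_init_size_mb=*/512, /*device_id=*/0);
      // Third argument is min_subgraph_size: subgraphs with fewer ops than this
      // stay on the native Paddle kernels, so lowering it from 20 to 3 lets
      // TensorRT take over much smaller subgraphs.
      config.EnableTensorRtEngine(/*workspace_size=*/1 << 30,
                                  /*max_batch_size=*/32,
                                  /*min_subgraph_size=*/3,
                                  paddle_infer::PrecisionType::kFloat32,
                                  /*use_static=*/false,
                                  /*use_calib_mode=*/false);
      auto predictor = paddle_infer::CreatePredictor(config);
      return predictor != nullptr ? 0 : 1;
    }
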
@@ -128,10 +128,9 @@ bool PaddleBackend::InitFromPaddle(const std::string& model_buffer,
                  "file will save to the directory where paddle model saved."
               << std::endl;
       use_static = true;
-      config_.SetOptimCacheDir(option.trt_option.serialize_file);
     }
     config_.EnableTensorRtEngine(option.trt_option.max_workspace_size,
-                                 option.trt_option.max_batch_size, 20,
+                                 option.trt_option.max_batch_size, 3,
                                  paddle_infer::PrecisionType::kInt8,
                                  use_static, false);
     SetTRTDynamicShapeToConfig(option);
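
Both hunks above live in FastDeploy's Paddle Inference backend, i.e. the Paddle-TRT path. For context, a minimal caller-side sketch of how that path is typically selected through the public RuntimeOption; the option names used here (UseGpu, UseTrtBackend, EnablePaddleToTrt, the trt_option fields) are assumptions based on FastDeploy's public API and are not part of this diff:

    #include "fastdeploy/runtime.h"

    int main() {
      fastdeploy::RuntimeOption option;
      option.SetModelPath("model.pdmodel", "model.pdiparams");  // Paddle-format model
      option.UseGpu(0);
      option.UseTrtBackend();      // ask for TensorRT ...
      option.EnablePaddleToTrt();  // ... but run it inside Paddle Inference (the backend patched here)

      option.trt_option.max_batch_size = 32;
      option.trt_option.max_workspace_size = 1 << 30;
      option.trt_option.serialize_file = "trt_cache/model.trt";  // engine / optimization cache file

      fastdeploy::Runtime runtime;
      return runtime.Init(option) ? 0 : 1;
    }
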
@@ -202,7 +202,7 @@ void Runtime::BindOutputTensor(const std::string& name, FDTensor& output) {
   bool is_exist = false;
   for (auto& t : output_tensors_) {
     if (t.name == name) {
-      // FDWARNING << "The output name [" << name << "] is exist." << std::endl;
+      FDINFO << "The output name [" << name << "] is exist." << std::endl;
       is_exist = true;
       t.SetExternalData(output.shape, output.dtype, output.MutableData(),
                         output.device, output.device_id);
@@ -210,8 +210,7 @@ void Runtime::BindOutputTensor(const std::string& name, FDTensor& output) {
     }
   }
   if (!is_exist) {
-    // FDWARNING << "The output name [" << name << "] don't exist." <<
-    // std::endl;
+    FDINFO << "The output name [" << name << "] is prebinded added into output tensor list." << std::endl;
     FDTensor new_tensor(name);
     new_tensor.SetExternalData(output.shape, output.dtype, output.MutableData(),
                                output.device, output.device_id);
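
The last two hunks only change the log level and wording in Runtime::BindOutputTensor, which lets a caller pre-bind an externally owned FDTensor so the backend writes results into it through SetExternalData instead of copying them out. A rough usage sketch, assuming the bind-then-Infer() workflow exposed by FastDeploy's Runtime (BindInputTensor and the zero-argument Infer overload); tensor names and shapes are illustrative:

    #include "fastdeploy/runtime.h"

    // Pre-bind externally owned tensors so Runtime::Infer() reads and writes them
    // in place; binding an output goes through the BindOutputTensor path patched above.
    void BindAndRun(fastdeploy::Runtime& runtime, fastdeploy::FDTensor& input) {
      fastdeploy::FDTensor output("output_0");                  // must match a model output name
      output.Resize({1, 1000}, fastdeploy::FDDataType::FP32);   // illustrative shape / dtype

      runtime.BindInputTensor("input_0", input);                // assumed counterpart of BindOutputTensor
      runtime.BindOutputTensor("output_0", output);             // logs "... is prebinded ..." after this patch

      runtime.Infer();   // zero-argument overload runs on the bound tensors;
                         // results land in `output` via SetExternalData
    }
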