[Backend] Add collect shape for pp-trt backend (#372)

* Add collect_shape attr

* add EnableTunedTensorRtDynamicShape

* Add collect shape python api

* Fix quant model not set trt dynamic shape

* Add shape info print

* Fix shape print

* Use CopyFromCpu instead of ShareExternalData

* Add ENABLE_TRT_BACKEND macro

* Add shared data with
This commit is contained in:
Jack Zhou
2022-10-20 17:02:56 +08:00
committed by GitHub
parent c28f4d6019
commit dccb737d8d
9 changed files with 251 additions and 20 deletions

View File

@@ -13,6 +13,8 @@
// limitations under the License.
#include "fastdeploy/backends/paddle/paddle_backend.h"
#include "fastdeploy/utils/path.h"
#include <sstream>
namespace fastdeploy {
@@ -31,21 +33,7 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
use_static = true;
}
config_.EnableTensorRtEngine(option.trt_option.max_workspace_size, 32, 3, precision, use_static);
std::map<std::string, std::vector<int>> max_shape;
std::map<std::string, std::vector<int>> min_shape;
std::map<std::string, std::vector<int>> opt_shape;
for (const auto& item : option.trt_option.min_shape) {
auto max_iter = option.trt_option.max_shape.find(item.first);
auto opt_iter = option.trt_option.opt_shape.find(item.first);
FDASSERT(max_iter != option.trt_option.max_shape.end(), "Cannot find %s in TrtBackendOption::min_shape.", item.first.c_str());
FDASSERT(opt_iter != option.trt_option.opt_shape.end(), "Cannot find %s in TrtBackendOption::opt_shape.", item.first.c_str());
max_shape[item.first].assign(max_iter->second.begin(), max_iter->second.end());
opt_shape[item.first].assign(opt_iter->second.begin(), opt_iter->second.end());
min_shape[item.first].assign(item.second.begin(), item.second.end());
}
if (min_shape.size() > 0) {
config_.SetTRTDynamicShapeInfo(min_shape, max_shape, opt_shape);
}
SetTRTDynamicShapeToConfig(option);
#else
FDWARNING << "The FastDeploy is not compiled with TensorRT backend, so will fallback to GPU with Paddle Inference Backend." << std::endl;
#endif
@@ -97,6 +85,17 @@ bool PaddleBackend::InitFromPaddle(const std::string& model_file,
if (reader.is_quantize_model) {
if (option.use_gpu) {
FDWARNING << "The loaded model is a quantized model, while inference on GPU, please use TensorRT backend to get better performance." << std::endl;
if (option.enable_trt) {
#ifdef ENABLE_TRT_BACKEND
bool use_static = false;
if (option.trt_option.serialize_file != "") {
FDWARNING << "Detect that tensorrt cache file has been set to " << option.trt_option.serialize_file << ", but while enable paddle2trt, please notice that the cache file will save to the directory where paddle model saved." << std::endl;
use_static = true;
}
config_.EnableTensorRtEngine(option.trt_option.max_workspace_size, 32, 3, paddle_infer::PrecisionType::kInt8, use_static, false);
SetTRTDynamicShapeToConfig(option);
#endif
}
}
if (option.enable_mkldnn) {
config_.EnableMkldnnInt8();
@@ -123,7 +122,31 @@ bool PaddleBackend::InitFromPaddle(const std::string& model_file,
outputs_desc_[i].shape.assign(shape.begin(), shape.end());
outputs_desc_[i].dtype = ReaderDataTypeToFD(reader.outputs[i].dtype);
}
#ifdef ENABLE_TRT_BACKEND
if (option.collect_shape) {
// Set the shape info file.
auto curr_model_dir = GetDirFromPath(model_file);
std::string shape_range_info = PathJoin(curr_model_dir, "shape_range_info.pbtxt");
if (!CheckFileExists(shape_range_info)) {
FDINFO << "Start generating shape range info file." << std::endl;
paddle_infer::Config analysis_config;
analysis_config.SetModel(model_file, params_file);
analysis_config.CollectShapeRangeInfo(shape_range_info);
auto predictor_tmp = paddle_infer::CreatePredictor(analysis_config);
std::map<std::string, std::vector<int>> max_shape;
std::map<std::string, std::vector<int>> min_shape;
std::map<std::string, std::vector<int>> opt_shape;
GetDynamicShapeFromOption(option, &max_shape, &min_shape, &opt_shape);
// Need to run once to get the shape range info file.
CollectShapeRun(predictor_tmp.get(), max_shape);
CollectShapeRun(predictor_tmp.get(), min_shape);
CollectShapeRun(predictor_tmp.get(), opt_shape);
FDINFO << "Finish generating shape range info file." << std::endl;
}
FDINFO << "Start loading shape range info file "<< shape_range_info << " to set TensorRT dynamic shape." << std::endl;
config_.EnableTunedTensorRtDynamicShape(shape_range_info, false);
}
#endif
predictor_ = paddle_infer::CreatePredictor(config_);
initialized_ = true;
return true;
@@ -172,4 +195,87 @@ bool PaddleBackend::Infer(std::vector<FDTensor>& inputs,
return true;
}
#ifdef ENABLE_TRT_BACKEND
// Pulls the min/max/opt dynamic-shape maps out of `option` and, when any
// shapes were actually configured, registers them with the Paddle Inference
// config so the TensorRT engine is built with dynamic shape support.
void PaddleBackend::SetTRTDynamicShapeToConfig(const PaddleBackendOption& option) {
std::map<std::string, std::vector<int>> maximum_shapes;
std::map<std::string, std::vector<int>> minimum_shapes;
std::map<std::string, std::vector<int>> optimal_shapes;
GetDynamicShapeFromOption(option, &maximum_shapes, &minimum_shapes, &optimal_shapes);
FDINFO << "Start setting trt dynamic shape." << std::endl;
// An empty min-shape map means the user configured no dynamic shapes at all.
if (!minimum_shapes.empty()) {
config_.SetTRTDynamicShapeInfo(minimum_shapes, maximum_shapes, optimal_shapes);
}
FDINFO << "Finish setting trt dynamic shape." << std::endl;
}
void PaddleBackend::GetDynamicShapeFromOption(const PaddleBackendOption& option,
std::map<std::string, std::vector<int>>* max_shape,
std::map<std::string, std::vector<int>>* min_shape,
std::map<std::string, std::vector<int>>* opt_shape) const {
auto print_shape = [](const std::vector<int>& shape) -> std::string {
std::ostringstream oss;
oss << "[";
for (int i = 0; i < shape.size(); ++i) {
oss << shape[i];
if (i < shape.size() - 1) {
oss << ", ";
}
}
oss << "]";
return oss.str();
};
for (const auto& item : option.trt_option.min_shape) {
auto max_iter = option.trt_option.max_shape.find(item.first);
auto opt_iter = option.trt_option.opt_shape.find(item.first);
FDASSERT(max_iter != option.trt_option.max_shape.end(), "Cannot find %s in TrtBackendOption::min_shape.", item.first.c_str());
FDASSERT(opt_iter != option.trt_option.opt_shape.end(), "Cannot find %s in TrtBackendOption::opt_shape.", item.first.c_str());
(*max_shape)[item.first].assign(max_iter->second.begin(), max_iter->second.end());
(*opt_shape)[item.first].assign(opt_iter->second.begin(), opt_iter->second.end());
(*min_shape)[item.first].assign(item.second.begin(), item.second.end());
FDINFO << item.first << ": the max shape = " << print_shape(max_iter->second)
<< ", the min shape = " << print_shape(item.second)
<< ", the opt shape = " << print_shape(opt_iter->second) << std::endl;
}
}
// Runs `predictor` once with dummy inputs of the given per-input shapes so
// that Paddle Inference can record shape range info (the predictor is expected
// to have been configured with CollectShapeRangeInfo by the caller). Every
// predictor input must have an entry in `shape`; tensors are filled with
// constant 1 / 1.0 values since only the shapes matter for collection.
// Aborts via FDASSERT on an input name missing from `shape`, or on an input
// dtype other than FP32/INT32/INT64.
void PaddleBackend::CollectShapeRun(paddle_infer::Predictor* predictor,
const std::map<std::string, std::vector<int>>& shape) const {
auto input_names = predictor->GetInputNames();
auto input_type = predictor->GetInputTypes();
for (const auto& name : input_names) {  // const& — avoid copying each name
FDASSERT(shape.find(name) != shape.end() && input_type.find(name) != input_type.end(),
"Paddle Input name [%s] is not one of the trt dynamic shape.", name.c_str());
auto tensor = predictor->GetInputHandle(name);
const auto& shape_value = shape.at(name);  // const& — no vector copy
// Total element count for the dummy buffer.
int shape_num = std::accumulate(shape_value.begin(), shape_value.end(), 1,
std::multiplies<int>());
tensor->Reshape(shape_value);
// at() instead of operator[]: presence was asserted above, and this avoids
// accidentally default-inserting into the map.
auto dtype = input_type.at(name);
switch (dtype) {
case paddle_infer::DataType::FLOAT32: {
std::vector<float> input_data(shape_num, 1.0);
tensor->CopyFromCpu(input_data.data());
break;
}
case paddle_infer::DataType::INT32: {
std::vector<int> input_data(shape_num, 1);
tensor->CopyFromCpu(input_data.data());
break;
}
case paddle_infer::DataType::INT64: {
std::vector<int64_t> input_data(shape_num, 1);
tensor->CopyFromCpu(input_data.data());
break;
}
default: {
FDASSERT(false, "Input data Paddle backend only supports FP32/INT32/INT64 currently.");
break;
}
}
}
predictor->Run();
}
#endif
} // namespace fastdeploy