mirror of https://github.com/PaddlePaddle/FastDeploy.git
Support removing multiclass_nms to enable PP-YOLOE on TensorRT (#40)
* Add custom operator for onnxruntime and fix paddle backend
* Polish cmake files and runtime apis
* Remove copy libraries
* fix some issue
* fix bug
* fix bug
* Support remove multiclass_nms to enable paddledetection run tensorrt
* Support remove multiclass_nms to enable paddledetection run tensorrt
* Support remove multiclass_nms to enable paddledetection run tensorrt
* Support remove multiclass_nms to enable paddledetection run tensorrt
* add common operator multiclassnms
* fix compile problem

Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com>
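For orientation, here is a minimal usage sketch of the two TrtBackendOption fields this commit introduces. The header path, the fastdeploy namespace, and the exact InitFromPaddle call signature are assumptions, and the op-name mapping is purely illustrative; only remove_multiclass_nms_ and custom_op_info_ come from the diff below.

    // Sketch: configure the new options before building the TensorRT backend
    // from a Paddle detection model (e.g. PP-YOLOE).
    #include "fastdeploy/backends/tensorrt/trt_backend.h"  // assumed header path

    int main() {
      fastdeploy::TrtBackendOption option;
      // New in this commit: strip MultiClassNMS during Paddle->ONNX export so
      // the rest of the graph can be converted to TensorRT.
      option.remove_multiclass_nms_ = true;
      // New in this commit: map a Paddle op name to the exported custom op
      // name. The concrete names here are illustrative only.
      option.custom_op_info_["multiclass_nms3"] = "MultiClassNMS";

      fastdeploy::TrtBackend backend;
      // Assumed call signature: model file, params file, backend option.
      if (!backend.InitFromPaddle("model.pdmodel", "model.pdiparams", option)) {
        return -1;
      }
      return 0;
    }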
@@ -162,18 +162,41 @@ bool TrtBackend::InitFromPaddle(const std::string& model_file,
   }
 
 #ifdef ENABLE_PADDLE_FRONTEND
+  std::vector<paddle2onnx::CustomOp> custom_ops;
+  for (auto& item : option.custom_op_info_) {
+    paddle2onnx::CustomOp op;
+    std::strcpy(op.op_name, item.first.c_str());
+    std::strcpy(op.export_op_name, item.second.c_str());
+    custom_ops.emplace_back(op);
+  }
   char* model_content_ptr;
   int model_content_size = 0;
   if (!paddle2onnx::Export(model_file.c_str(), params_file.c_str(),
                            &model_content_ptr, &model_content_size, 11, true,
-                           verbose, true, true, true)) {
+                           verbose, true, true, true, custom_ops.data(),
+                           custom_ops.size())) {
     FDERROR << "Error occured while export PaddlePaddle to ONNX format."
             << std::endl;
     return false;
   }
+
+  if (option.remove_multiclass_nms_) {
+    char* new_model = nullptr;
+    int new_model_size = 0;
+    if (!paddle2onnx::RemoveMultiClassNMS(model_content_ptr, model_content_size,
+                                          &new_model, &new_model_size)) {
+      FDERROR << "Try to remove MultiClassNMS failed." << std::endl;
+      return false;
+    }
+    delete[] model_content_ptr;
+    std::string onnx_model_proto(new_model, new_model + new_model_size);
+    delete[] new_model;
+    return InitFromOnnx(onnx_model_proto, option, true);
+  }
+
   std::string onnx_model_proto(model_content_ptr,
                                model_content_ptr + model_content_size);
-  delete model_content_ptr;
+  delete[] model_content_ptr;
   model_content_ptr = nullptr;
   return InitFromOnnx(onnx_model_proto, option, true);
 #else
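Worth noting in the hunk above: the buffer returned by paddle2onnx::Export is now released with delete[] instead of delete, which implies it is allocated with new[]. Below is a self-contained sketch of the same ownership pattern using std::unique_ptr<char[]>, so the array delete happens on every exit path; the exporter here is a stand-in, not the paddle2onnx API.

    #include <cstring>
    #include <memory>
    #include <string>

    // Stand-in for an exporter that hands back a new[]-allocated buffer plus
    // its size, mirroring how the code above consumes paddle2onnx::Export.
    bool ExportModel(char** data, int* size) {
      static const char kProto[] = {'O', 'N', 'N', 'X'};
      *size = static_cast<int>(sizeof(kProto));
      *data = new char[*size];
      std::memcpy(*data, kProto, *size);
      return true;
    }

    int main() {
      char* ptr = nullptr;
      int size = 0;
      if (!ExportModel(&ptr, &size)) return 1;
      // unique_ptr<char[]> guarantees delete[] on every return path, the same
      // invariant the delete -> delete[] change restores manually.
      std::unique_ptr<char[]> guard(ptr);
      std::string onnx_model_proto(ptr, ptr + size);
      return onnx_model_proto.size() == 4 ? 0 : 1;
    }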
@@ -50,6 +50,10 @@ struct TrtBackendOption {
   std::map<std::string, std::vector<int32_t>> min_shape;
   std::map<std::string, std::vector<int32_t>> opt_shape;
   std::string serialize_file = "";
+
+  // inside parameter, maybe remove next version
+  bool remove_multiclass_nms_ = false;
+  std::map<std::string, std::string> custom_op_info_;
 };
 
 std::vector<int> toVec(const nvinfer1::Dims& dim);
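The commit message also mentions adding a common multiclassnms operator: once MultiClassNMS is removed from the exported graph, suppression has to run outside the TensorRT engine on the raw boxes and scores. The sketch below shows the kind of greedy, per-class NMS such an operator performs; the box layout, thresholds, and suppression policy are assumptions, not the implementation added by this commit.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Box { float x1, y1, x2, y2, score; int label; };

    // Intersection-over-union of two axis-aligned boxes.
    static float IoU(const Box& a, const Box& b) {
      float ix1 = std::max(a.x1, b.x1), iy1 = std::max(a.y1, b.y1);
      float ix2 = std::min(a.x2, b.x2), iy2 = std::min(a.y2, b.y2);
      float inter = std::max(0.0f, ix2 - ix1) * std::max(0.0f, iy2 - iy1);
      float area_a = (a.x2 - a.x1) * (a.y2 - a.y1);
      float area_b = (b.x2 - b.x1) * (b.y2 - b.y1);
      return inter / (area_a + area_b - inter + 1e-6f);
    }

    // Greedy per-class NMS: keep the highest-scoring box, drop boxes of the
    // same class that overlap it too much, repeat.
    std::vector<Box> MultiClassNMS(std::vector<Box> boxes, float score_thresh,
                                   float iou_thresh) {
      std::sort(boxes.begin(), boxes.end(),
                [](const Box& a, const Box& b) { return a.score > b.score; });
      std::vector<Box> keep;
      for (const Box& cand : boxes) {
        if (cand.score < score_thresh) break;  // boxes are sorted by score
        bool suppressed = false;
        for (const Box& k : keep) {
          if (k.label == cand.label && IoU(cand, k) > iou_thresh) {
            suppressed = true;
            break;
          }
        }
        if (!suppressed) keep.push_back(cand);
      }
      return keep;
    }

    int main() {
      std::vector<Box> boxes = {{0, 0, 10, 10, 0.9f, 0},
                                {1, 1, 10, 10, 0.8f, 0},   // overlaps the first
                                {20, 20, 30, 30, 0.7f, 1}};
      std::vector<Box> kept = MultiClassNMS(boxes, /*score_thresh=*/0.3f,
                                            /*iou_thresh=*/0.5f);
      std::printf("kept %zu boxes\n", kept.size());  // expect 2
      return 0;
    }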