Support removing multiclass_nms to enable PP-YOLOE on TensorRT (#40)

* Add custom operator for onnxruntime and fix Paddle backend

* Polish cmake files and runtime apis

* Remove copy libraries

* fix some issues

* fix bug

* fix bug

* Support removing multiclass_nms to enable PaddleDetection to run on TensorRT

* Support removing multiclass_nms to enable PaddleDetection to run on TensorRT

* Support removing multiclass_nms to enable PaddleDetection to run on TensorRT

* Support removing multiclass_nms to enable PaddleDetection to run on TensorRT

* add common operator multiclassnms (a class-wise NMS sketch follows the commit metadata below)

* fix compile problem

Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com>
Authored by Jason on 2022-07-26 11:16:01 +08:00, committed by GitHub
commit 17e4dc6b5e (parent fc71d79e58)
18 changed files with 460 additions and 56 deletions
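
The "add common operator multiclassnms" commit above supplies the suppression step that no longer lives in the graph: once MultiClassNMS is stripped so TensorRT only has to build operators it supports, NMS runs outside the engine. Below is a minimal, self-contained sketch of such a class-wise NMS. It is not the operator added by this commit; the Detection layout (x1, y1, x2, y2), the thresholds, and the function names are illustrative assumptions.

// Minimal sketch of a host-side class-wise NMS (NOT the code added by this commit).
#include <algorithm>
#include <array>
#include <vector>

struct Detection {
  std::array<float, 4> box;  // x1, y1, x2, y2 (assumed layout)
  float score;
  int label;
};

static float IoU(const std::array<float, 4>& a, const std::array<float, 4>& b) {
  float x1 = std::max(a[0], b[0]), y1 = std::max(a[1], b[1]);
  float x2 = std::min(a[2], b[2]), y2 = std::min(a[3], b[3]);
  float inter = std::max(0.0f, x2 - x1) * std::max(0.0f, y2 - y1);
  float area_a = (a[2] - a[0]) * (a[3] - a[1]);
  float area_b = (b[2] - b[0]) * (b[3] - b[1]);
  return inter / (area_a + area_b - inter + 1e-9f);
}

// Greedy suppression: keep the highest-scoring boxes and drop any later box
// of the same class whose IoU with a kept box exceeds iou_threshold.
std::vector<Detection> MultiClassNMS(std::vector<Detection> dets,
                                     float score_threshold = 0.25f,
                                     float iou_threshold = 0.5f) {
  std::sort(dets.begin(), dets.end(),
            [](const Detection& a, const Detection& b) { return a.score > b.score; });
  std::vector<Detection> kept;
  for (const auto& d : dets) {
    if (d.score < score_threshold) continue;
    bool suppressed = false;
    for (const auto& k : kept) {
      if (k.label == d.label && IoU(k.box, d.box) > iou_threshold) {
        suppressed = true;
        break;
      }
    }
    if (!suppressed) kept.push_back(d);
  }
  return kept;
}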

@@ -162,18 +162,41 @@ bool TrtBackend::InitFromPaddle(const std::string& model_file,
   }
 #ifdef ENABLE_PADDLE_FRONTEND
+  std::vector<paddle2onnx::CustomOp> custom_ops;
+  for (auto& item : option.custom_op_info_) {
+    paddle2onnx::CustomOp op;
+    std::strcpy(op.op_name, item.first.c_str());
+    std::strcpy(op.export_op_name, item.second.c_str());
+    custom_ops.emplace_back(op);
+  }
   char* model_content_ptr;
   int model_content_size = 0;
   if (!paddle2onnx::Export(model_file.c_str(), params_file.c_str(),
                            &model_content_ptr, &model_content_size, 11, true,
-                           verbose, true, true, true)) {
+                           verbose, true, true, true, custom_ops.data(),
+                           custom_ops.size())) {
     FDERROR << "Error occured while export PaddlePaddle to ONNX format."
             << std::endl;
     return false;
   }
+
+  if (option.remove_multiclass_nms_) {
+    char* new_model = nullptr;
+    int new_model_size = 0;
+    if (!paddle2onnx::RemoveMultiClassNMS(model_content_ptr, model_content_size,
+                                          &new_model, &new_model_size)) {
+      FDERROR << "Try to remove MultiClassNMS failed." << std::endl;
+      return false;
+    }
+    delete[] model_content_ptr;
+    std::string onnx_model_proto(new_model, new_model + new_model_size);
+    delete[] new_model;
+    return InitFromOnnx(onnx_model_proto, option, true);
+  }
   std::string onnx_model_proto(model_content_ptr,
                                model_content_ptr + model_content_size);
-  delete model_content_ptr;
+  delete[] model_content_ptr;
+  model_content_ptr = nullptr;
   return InitFromOnnx(onnx_model_proto, option, true);
 #else
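
With these fields in place, a caller would fill in the custom-op mapping and the removal flag before handing the Paddle model to the TensorRT backend. The sketch below is hypothetical usage: custom_op_info_ and remove_multiclass_nms_ come from the diff above, while the TrtBackendOption type name, the header path, the map-style access, the operator names, and the exact InitFromPaddle parameter list are assumptions.

// Hypothetical caller-side sketch; see the assumptions listed above.
#include <string>

#include "fastdeploy/backends/tensorrt/trt_backend.h"  // assumed header path

bool BuildDetectionEngine(const std::string& model_file,
                          const std::string& params_file) {
  fastdeploy::TrtBackendOption option;  // assumed option type name
  // Tell the exporter to emit this Paddle operator as a custom ONNX op
  // (Paddle/ONNX op names here are assumed for illustration).
  option.custom_op_info_["multiclass_nms3"] = "MultiClassNMS";
  // Strip the MultiClassNMS node so TensorRT only builds operators it
  // supports; suppression then runs outside the engine (see sketch above).
  option.remove_multiclass_nms_ = true;

  fastdeploy::TrtBackend backend;
  return backend.InitFromPaddle(model_file, params_file, option);
}

The design choice the diff makes is to do the surgery on the exported ONNX blob before engine construction: when remove_multiclass_nms_ is set, the original buffer is released with delete[] and the NMS-free model is handed to InitFromOnnx, so the rest of the TensorRT path is unchanged.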