[Backend] Enable TensorRT BatchedNMSDynamic_TRT plugin (#449)

* Enable TensorRT EfficientNMS plugin

* remove some temporary code

* Update trt_backend.cc

* Update utils.h
Author: Jason
Date:   2022-11-04 11:46:29 +08:00
Committed by: GitHub
Parent: 7150e6405c
Commit: 9fa612c24b

8 changed files with 36 additions and 94 deletions

@@ -80,21 +80,18 @@ bool OrtBackend::InitFromPaddle(const std::string& model_file,
             << std::endl;
     return false;
   }
-#ifdef ENABLE_PADDLE_FRONTEND
   char* model_content_ptr;
   int model_content_size = 0;
-  std::vector<paddle2onnx::CustomOp> custom_ops;
-  for (auto& item : option.custom_op_info_) {
-    paddle2onnx::CustomOp op;
-    strcpy(op.op_name, item.first.c_str());
-    strcpy(op.export_op_name, item.second.c_str());
-    custom_ops.emplace_back(op);
-  }
+#ifdef ENABLE_PADDLE_FRONTEND
+  paddle2onnx::CustomOp op;
+  strcpy(op.op_name, "multiclass_nms3");
+  strcpy(op.export_op_name, "MultiClassNMS");
   if (!paddle2onnx::Export(model_file.c_str(), params_file.c_str(),
                            &model_content_ptr, &model_content_size, 11, true,
-                           verbose, true, true, true, custom_ops.data(),
-                           custom_ops.size())) {
+                           verbose, true, true, true, &op,
+                           1)) {
     FDERROR << "Error occured while export PaddlePaddle to ONNX format."
             << std::endl;
     return false;
@@ -106,7 +103,7 @@ bool OrtBackend::InitFromPaddle(const std::string& model_file,
   model_content_ptr = nullptr;
   return InitFromOnnx(onnx_model_proto, option, true);
 #else
-  FDERROR << "Didn't compile with PaddlePaddle frontend, you can try to "
+  FDERROR << "Didn't compile with PaddlePaddle Frontend, you can try to "
              "call `InitFromOnnx` instead."
           << std::endl;
 #endif
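
For context, a minimal standalone sketch of the export call the new code path settles on (hard-coding the single multiclass_nms3 -> MultiClassNMS mapping instead of building a custom-op vector from option.custom_op_info_) could look like the following. The wrapper name ExportDetectionModel, the included header path, and the error handling are illustrative assumptions rather than repository code; only the paddle2onnx::CustomOp fields and the paddle2onnx::Export argument order come from the diff above.

#include <cstring>
#include <iostream>
#include <string>

#include "paddle2onnx/converter.h"  // assumed location of paddle2onnx::Export

// Hypothetical helper: export a Paddle detection model to an in-memory ONNX
// proto, mapping the Paddle operator multiclass_nms3 to the custom ONNX op
// MultiClassNMS, as the updated OrtBackend::InitFromPaddle does.
bool ExportDetectionModel(const std::string& model_file,
                          const std::string& params_file,
                          char** model_content_ptr, int* model_content_size,
                          bool verbose) {
  paddle2onnx::CustomOp op;
  std::strcpy(op.op_name, "multiclass_nms3");
  std::strcpy(op.export_op_name, "MultiClassNMS");
  // Same argument order as the call in the diff: opset 11 plus the boolean
  // switches passed there, ending with a pointer to one CustomOp and a count
  // of 1 instead of custom_ops.data() / custom_ops.size().
  if (!paddle2onnx::Export(model_file.c_str(), params_file.c_str(),
                           model_content_ptr, model_content_size, 11, true,
                           verbose, true, true, true, &op, 1)) {
    std::cerr << "Export from Paddle to ONNX failed." << std::endl;
    return false;
  }
  return true;
}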