[Backend] Support deployment of models larger than 2 GB in the ORT and TRT backends (#514)

* support models larger than 2 GB

* update code

* update code

* fix bug

* update code
yeliang2258 committed 2022-11-09 11:14:15 +08:00 (committed by GitHub)
parent d259952224, commit 8d31fe59e8
4 changed files with 35 additions and 5 deletions
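The change addresses a hard limit in the ONNX format: a model is serialized as a single protobuf message, and protobuf caps one message at 2 GB. paddle2onnx therefore writes the weights of larger models to external data files beside the .onnx file, and such a model can only be loaded from a file path, since the runtime needs that path to resolve the external files. As a minimal sketch of the idea (not code from this commit; the helper name and the fixed "model.onnx" path are illustrative), the ORT side amounts to choosing between the two Ort::Session constructors:

#include <string>
#include <onnxruntime_cxx_api.h>

// Create a session either from an in-memory proto (small model) or from
// a path on disk (model whose weights live in external data files).
Ort::Session CreateSession(Ort::Env& env, const std::string& onnx_proto,
                           bool saved_to_disk) {
  Ort::SessionOptions opts;
  if (saved_to_disk) {
    // Loading by path lets onnxruntime locate the external weight files
    // written next to model.onnx. (On Windows this overload takes a
    // wchar_t* path.)
    return Ort::Session(env, "model.onnx", opts);
  }
  // The whole model fits in one protobuf message; parse it from memory.
  return Ort::Session(env, onnx_proto.data(), onnx_proto.size(), opts);
}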


@@ -43,7 +43,7 @@ else()
 endif(WIN32)
 set(PADDLE2ONNX_URL_BASE "https://bj.bcebos.com/fastdeploy/third_libs/")
-set(PADDLE2ONNX_VERSION "1.0.2rc")
+set(PADDLE2ONNX_VERSION "1.0.2")
 if(WIN32)
   set(PADDLE2ONNX_FILE "paddle2onnx-win-x64-${PADDLE2ONNX_VERSION}.zip")
   if(NOT CMAKE_CL_64)
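The version bump matters here: the extra Export() arguments used below, in particular the save_external out-parameter both backends rely on, presumably first appear in the paddle2onnx 1.0.2 release this line pins.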

fastdeploy/backends/ort/ort_backend.cc (Normal file → Executable file)

@@ -82,7 +82,7 @@ bool OrtBackend::InitFromPaddle(const std::string& model_file,
   }
   char* model_content_ptr;
   int model_content_size = 0;
+  bool save_external = false;
 #ifdef ENABLE_PADDLE_FRONTEND
   paddle2onnx::CustomOp op;
   strcpy(op.op_name, "multiclass_nms3");
@@ -91,7 +91,7 @@ bool OrtBackend::InitFromPaddle(const std::string& model_file,
   if (!paddle2onnx::Export(model_file.c_str(), params_file.c_str(),
                            &model_content_ptr, &model_content_size, 11, true,
                            verbose, true, true, true, &op,
-                           1)) {
+                           1, "onnxruntime", nullptr, 0, "", &save_external)) {
     FDERROR << "Error occured while export PaddlePaddle to ONNX format."
             << std::endl;
     return false;
@@ -101,6 +101,15 @@ bool OrtBackend::InitFromPaddle(const std::string& model_file,
                                model_content_ptr + model_content_size);
   delete[] model_content_ptr;
   model_content_ptr = nullptr;
+  if (save_external) {
+    std::string model_file_name = "model.onnx";
+    std::fstream f(model_file_name, std::ios::out);
+    FDASSERT(f.is_open(), "Can not open file: %s to save model.",
+             model_file_name.c_str());
+    f << onnx_model_proto;
+    f.close();
+    return InitFromOnnx(model_file_name, option, false);
+  }
   return InitFromOnnx(onnx_model_proto, option, true);
 #else
   FDERROR << "Didn't compile with PaddlePaddle Frontend, you can try to "


@@ -132,7 +132,7 @@ bool TrtBackend::InitFromPaddle(const std::string& model_file,
                            &model_content_ptr, &model_content_size, 11, true,
                            verbose, true, true, true, nullptr,
                            0, "tensorrt",
-                           &calibration_cache_ptr, &calibration_cache_size)) {
+                           &calibration_cache_ptr, &calibration_cache_size, "", &save_external_)) {
     FDERROR << "Error occured while export PaddlePaddle to ONNX format."
             << std::endl;
     return false;
@@ -148,6 +148,15 @@ bool TrtBackend::InitFromPaddle(const std::string& model_file,
     calibration_str_ = calibration_str;
     delete[] calibration_cache_ptr;
   }
+  if (save_external_) {
+    model_file_name_ = "model.onnx";
+    std::fstream f(model_file_name_, std::ios::out);
+    FDASSERT(f.is_open(), "Can not open file: %s to save model.",
+             model_file_name_.c_str());
+    f << onnx_model_proto;
+    f.close();
+    return InitFromOnnx(model_file_name_, option, false);
+  }
   return InitFromOnnx(onnx_model_proto, option, true);
 #else
   FDERROR << "Didn't compile with PaddlePaddle frontend, you can try to "
@@ -242,6 +251,10 @@ bool TrtBackend::InitFromOnnx(const std::string& model_file,
                "[ERROR] Error occurs while calling cudaStreamCreate().");
   }
+  if (save_external_) {
+    onnx_content.clear();
+    onnx_content = model_file_name_;
+  }
   if (!CreateTrtEngineFromOnnx(onnx_content)) {
     FDERROR << "Failed to create tensorrt engine." << std::endl;
     return false;
@@ -593,7 +606,13 @@ bool TrtBackend::CreateTrtEngineFromOnnx(const std::string& onnx_model_buffer) {
     FDERROR << "Failed to call createParser()." << std::endl;
     return false;
   }
-  if (!parser_->parse(onnx_model_buffer.data(), onnx_model_buffer.size())) {
+  bool model_parser;
+  if (save_external_) {
+    model_parser = !parser_->parseFromFile(onnx_model_buffer.c_str(), 0);
+  } else {
+    model_parser = !parser_->parse(onnx_model_buffer.data(), onnx_model_buffer.size());
+  }
+  if (model_parser) {
     FDERROR << "Failed to parse ONNX model by TensorRT." << std::endl;
     return false;
   }
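This hunk is why InitFromOnnx() above swaps the buffer contents for model_file_name_: when the model was saved externally, onnx_model_buffer actually carries a path, and parsing must go through parseFromFile(), which resolves external weight files relative to that path; parse() only ever sees the in-memory bytes. A hedged sketch of the two entry points (helper name illustrative):

#include <string>
#include <NvOnnxParser.h>

// Parse an ONNX model that is either raw bytes in memory or a path to a
// file whose weights may live in external data files next to it.
bool ParseOnnx(nvonnxparser::IParser* parser, const std::string& model,
               bool is_file_path) {
  if (is_file_path) {
    // The second argument is a verbosity level for parser diagnostics.
    return parser->parseFromFile(model.c_str(), 0);
  }
  return parser->parse(model.data(), model.size());
}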


@@ -130,6 +130,8 @@ class TrtBackend : public BaseBackend {
   std::map<std::string, int> io_name_index_;
   std::string calibration_str_;
+  bool save_external_ = false;
+  std::string model_file_name_ = "";
   // Sometimes while the number of outputs > 1
   // the output order of tensorrt may not be same