[Backend] Support deployment of models larger than 2 GB in the ORT and TRT backends (#514)

* support models larger than 2GB

* update code

* update code

* fix bug

* update code
Author: yeliang2258 (committed by GitHub)
Date: 2022-11-09 11:14:15 +08:00
Parent: d259952224
Commit: 8d31fe59e8
4 changed files with 35 additions and 5 deletions
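
Background: protobuf refuses to serialize a single message larger than 2 GB, so an ONNX model whose weights push it past that limit cannot be handed to the backends as one in-memory string. With this change, Paddle2ONNX reports through the new save_external_ flag that it stored the weights as external data; the TensorRT backend then writes the graph to model.onnx on disk and gives the parser the file path instead of the serialized buffer. The hunks below cover the three touch points: the export call, the save-and-reload branch, and the parser dispatch.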

@@ -132,7 +132,7 @@ bool TrtBackend::InitFromPaddle(const std::string& model_file,
           &model_content_ptr, &model_content_size, 11, true,
           verbose, true, true, true, nullptr,
           0, "tensorrt",
-          &calibration_cache_ptr, &calibration_cache_size)) {
+          &calibration_cache_ptr, &calibration_cache_size, "", &save_external_)) {
     FDERROR << "Error occurred while exporting PaddlePaddle to ONNX format."
             << std::endl;
     return false;
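
The export call now passes two extra trailing arguments. Judging from the names in this diff, the empty string is an optional file name for externally saved weights (left empty so Paddle2ONNX takes its default) and &save_external_ is an out-parameter through which the exporter reports whether the weights had to be written as external data; only that flag is consumed in the next hunk.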
@@ -148,6 +148,15 @@ bool TrtBackend::InitFromPaddle(const std::string& model_file,
     calibration_str_ = calibration_str;
     delete[] calibration_cache_ptr;
   }
+  if (save_external_) {
+    model_file_name_ = "model.onnx";
+    std::ofstream f(model_file_name_, std::ios::out | std::ios::binary);
+    FDASSERT(f.is_open(), "Cannot open file %s to save the model.",
+             model_file_name_.c_str());
+    f << onnx_model_proto;
+    f.close();
+    return InitFromOnnx(model_file_name_, option, false);
+  }
   return InitFromOnnx(onnx_model_proto, option, true);
 #else
   FDERROR << "Didn't compile with PaddlePaddle frontend, you can try to "
@@ -242,6 +251,10 @@ bool TrtBackend::InitFromOnnx(const std::string& model_file,
         "[ERROR] Error occurs while calling cudaStreamCreate().");
   }
+  if (save_external_) {
+    onnx_content.clear();
+    onnx_content = model_file_name_;
+  }
   if (!CreateTrtEngineFromOnnx(onnx_content)) {
     FDERROR << "Failed to create tensorrt engine." << std::endl;
     return false;
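
Design note: rather than adding a second parameter, the commit reuses the single onnx_content string for two meanings, serialized model bytes on the normal path and a file path when save_external_ is set, and lets CreateTrtEngineFromOnnx disambiguate with the same flag. The clear() before the assignment is redundant (operator= already replaces the contents) but harmless.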
@@ -593,7 +606,13 @@ bool TrtBackend::CreateTrtEngineFromOnnx(const std::string& onnx_model_buffer) {
     FDERROR << "Failed to call createParser()." << std::endl;
     return false;
   }
-  if (!parser_->parse(onnx_model_buffer.data(), onnx_model_buffer.size())) {
+  bool parse_failed;
+  if (save_external_) {
+    parse_failed = !parser_->parseFromFile(onnx_model_buffer.c_str(), 0);
+  } else {
+    parse_failed = !parser_->parse(onnx_model_buffer.data(), onnx_model_buffer.size());
+  }
+  if (parse_failed) {
     FDERROR << "Failed to parse ONNX model by TensorRT." << std::endl;
     return false;
   }
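
parseFromFile() is what actually makes the >2GB path work: unlike the in-memory parse() overload, it knows the model's directory and can resolve the external weight files the ONNX graph references. A self-contained sketch of that call outside FastDeploy (assumes TensorRT 8.x with the bundled ONNX parser; logging and object lifetimes simplified):

    #include <NvInfer.h>
    #include <NvOnnxParser.h>
    #include <cstdint>
    #include <iostream>
    #include <memory>

    // Minimal logger required by the TensorRT builder and parser factories.
    class SketchLogger : public nvinfer1::ILogger {
      void log(Severity severity, const char* msg) noexcept override {
        if (severity <= Severity::kWARNING) std::cerr << msg << std::endl;
      }
    };

    // Parse an ONNX model by path; external weight files are resolved
    // relative to the model's directory, so >2GB models load correctly.
    bool ParseOnnxByPath(const char* path) {
      static SketchLogger logger;
      auto builder = std::unique_ptr<nvinfer1::IBuilder>(
          nvinfer1::createInferBuilder(logger));
      const auto flags = 1U << static_cast<uint32_t>(
          nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
      auto network = std::unique_ptr<nvinfer1::INetworkDefinition>(
          builder->createNetworkV2(flags));
      auto parser = std::unique_ptr<nvonnxparser::IParser>(
          nvonnxparser::createParser(*network, logger));
      // The second argument is a verbosity level; the diff above passes 0.
      return parser->parseFromFile(
          path, static_cast<int>(nvinfer1::ILogger::Severity::kWARNING));
    }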