[Other] Unify initialize api for lite/trt backend (#1249)

* Unify initialize api for lite/trt backend
Author: Jason
Date: 2023-02-08 11:16:39 +08:00, committed by GitHub
Commit: c5b414a774 (parent 9712f250a5)
5 changed files with 94 additions and 75 deletions
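Before this change, Runtime had to know each backend's loading details: it cast backend_ to the concrete type and called a format-specific InitFromPaddle() or InitFromOnnx() itself. The commit moves that knowledge behind a single Init(option) call on the backend. The snippet below is a minimal, self-contained sketch of the kind of interface this implies; names mirror the diff, and the real FastDeploy headers carry more fields and methods.

// Sketch only: a unified backend interface in the spirit of this commit.
// The actual BaseBackend/RuntimeOption declarations in the repository are
// richer; everything here is trimmed to the initialization path.
#include <string>

enum class Device { CPU, GPU };
enum class ModelFormat { PADDLE, ONNX };

struct RuntimeOption {  // everything a backend needs to load a model
  std::string model_file;
  std::string params_file;
  Device device = Device::CPU;
  ModelFormat model_format = ModelFormat::PADDLE;
  bool model_from_memory_ = false;
};

class BaseBackend {
 public:
  virtual ~BaseBackend() = default;
  // Unified entry point: each backend validates the option and loads the
  // model itself, so Runtime no longer needs format-specific calls.
  virtual bool Init(const RuntimeOption& option) = 0;
};

With such an interface, every Runtime::CreateXxxBackend() reduces to constructing the backend, copying the benchmark option, and asserting on backend_->Init(option), which is exactly what the hunks below do.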


@@ -324,12 +324,6 @@ void Runtime::CreateOrtBackend() {
 }
 void Runtime::CreateTrtBackend() {
-  FDASSERT(option.device == Device::GPU,
-           "Backend::TRT only supports Device::GPU.");
-  FDASSERT(option.model_format == ModelFormat::PADDLE ||
-               option.model_format == ModelFormat::ONNX,
-           "TrtBackend only support model format of ModelFormat::PADDLE / "
-           "ModelFormat::ONNX.");
 #ifdef ENABLE_TRT_BACKEND
   option.trt_option.model_file = option.model_file;
   option.trt_option.params_file = option.params_file;
@@ -338,40 +332,8 @@ void Runtime::CreateTrtBackend() {
   option.trt_option.enable_pinned_memory = option.enable_pinned_memory;
   option.trt_option.external_stream_ = option.external_stream_;
   backend_ = utils::make_unique<TrtBackend>();
-  auto casted_backend = dynamic_cast<TrtBackend*>(backend_.get());
-  casted_backend->benchmark_option_ = option.benchmark_option;
-  if (option.model_format == ModelFormat::ONNX) {
-    if (option.model_from_memory_) {
-      FDASSERT(
-          casted_backend->InitFromOnnx(option.model_file, option.trt_option),
-          "Load model from ONNX failed while initliazing TrtBackend.");
-      ReleaseModelMemoryBuffer();
-    } else {
-      std::string model_buffer = "";
-      FDASSERT(ReadBinaryFromFile(option.model_file, &model_buffer),
-               "Fail to read binary from model file");
-      FDASSERT(casted_backend->InitFromOnnx(model_buffer, option.trt_option),
-               "Load model from ONNX failed while initliazing TrtBackend.");
-    }
-  } else {
-    if (option.model_from_memory_) {
-      FDASSERT(casted_backend->InitFromPaddle(
-                   option.model_file, option.params_file, option.trt_option),
-               "Load model from Paddle failed while initliazing TrtBackend.");
-      ReleaseModelMemoryBuffer();
-    } else {
-      std::string model_buffer = "";
-      std::string params_buffer = "";
-      FDASSERT(ReadBinaryFromFile(option.model_file, &model_buffer),
-               "Fail to read binary from model file");
-      FDASSERT(ReadBinaryFromFile(option.params_file, &params_buffer),
-               "Fail to read binary from parameter file");
-      FDASSERT(casted_backend->InitFromPaddle(model_buffer, params_buffer,
-                                              option.trt_option),
-               "Load model from Paddle failed while initliazing TrtBackend.");
-    }
-  }
+  backend_->benchmark_option_ = option.benchmark_option;
+  FDASSERT(backend_->Init(option), "Failed to initialize TensorRT backend.");
 #else
   FDASSERT(false,
            "TrtBackend is not available, please compiled with "
@@ -383,29 +345,18 @@ void Runtime::CreateTrtBackend() {
 void Runtime::CreateLiteBackend() {
 #ifdef ENABLE_LITE_BACKEND
-  FDASSERT(option.model_from_memory_ == false,
-           "LiteBackend don't support to load model from memory");
-  FDASSERT(option.device == Device::CPU || option.device == Device::TIMVX ||
-               option.device == Device::KUNLUNXIN ||
-               option.device == Device::ASCEND,
-           "Backend::LITE only supports "
-           "Device::CPU/Device::TIMVX/Device::KUNLUNXIN/Device::ASCEND.");
-  FDASSERT(option.model_format == ModelFormat::PADDLE,
-           "LiteBackend only support model format of ModelFormat::PADDLE");
   backend_ = utils::make_unique<LiteBackend>();
-  auto casted_backend = dynamic_cast<LiteBackend*>(backend_.get());
-  casted_backend->benchmark_option_ = option.benchmark_option;
-  FDASSERT(casted_backend->InitFromPaddle(option.model_file, option.params_file,
-                                          option.paddle_lite_option),
+  backend_->benchmark_option_ = option.benchmark_option;
+  FDASSERT(backend_->Init(option),
            "Load model from nb file failed while initializing LiteBackend.");
 #else
   FDASSERT(false,
            "LiteBackend is not available, please compiled with "
            "ENABLE_LITE_BACKEND=ON.");
 #endif
-  FDINFO << "Runtime initialized with Backend::LITE in " << option.device << "."
-         << std::endl;
+  FDINFO << "Runtime initialized with Backend::PDLITE in " << option.device
+         << "." << std::endl;
 }
 void Runtime::CreateRKNPU2Backend() {
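The Lite path follows the same pattern: the constraints that Runtime::CreateLiteBackend() used to assert (no in-memory models, a limited device list, Paddle format only) would now be enforced inside the backend. A matching hypothetical sketch, again not the repository code:

// Hypothetical sketch of LiteBackend::Init(); identifiers follow the diff.
bool LiteBackend::Init(const RuntimeOption& option) {
  if (option.model_from_memory_) {
    FDERROR << "LiteBackend does not support loading a model from memory."
            << std::endl;
    return false;
  }
  if (option.device != Device::CPU && option.device != Device::TIMVX &&
      option.device != Device::KUNLUNXIN && option.device != Device::ASCEND) {
    FDERROR << "LiteBackend only supports CPU/TIMVX/KUNLUNXIN/ASCEND."
            << std::endl;
    return false;
  }
  if (option.model_format != ModelFormat::PADDLE) {
    FDERROR << "LiteBackend only supports ModelFormat::PADDLE." << std::endl;
    return false;
  }
  // Delegate to the existing Paddle Lite loader with the Lite-specific option.
  return InitFromPaddle(option.model_file, option.params_file,
                        option.paddle_lite_option);
}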