Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 16:48:03 +08:00)
Delete redundant code (#1222)
Update paddle_backend.cc: delete redundant code.

Co-authored-by: Jason <jiangjiajun@baidu.com>
paddle_backend.cc
@@ -92,23 +92,19 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
 bool PaddleBackend::InitFromPaddle(const std::string& model_buffer,
                                    const std::string& params_buffer,
                                    const PaddleBackendOption& option) {
-// bool PaddleBackend::InitFromPaddle(const std::string& contents) {
   if (initialized_) {
     FDERROR << "PaddleBackend is already initlized, cannot initialize again."
             << std::endl;
     return false;
   }
+  config_.SetModelBuffer(model_buffer.c_str(), model_buffer.size(),
+                         params_buffer.c_str(), params_buffer.size());
+  config_.EnableMemoryOptim();
+  BuildOption(option);
+
   // The input/output information get from predictor is not right, use
   // PaddleReader instead now
-  std::string contents;
-
-  config_.SetModelBuffer(model_buffer.c_str(), model_buffer.size(),
-                         params_buffer.c_str(), params_buffer.size());
-  contents = model_buffer;
-
-  config_.EnableMemoryOptim();
-  BuildOption(option);
-  auto reader = paddle2onnx::PaddleReader(contents.c_str(), contents.size());
+  auto reader = paddle2onnx::PaddleReader(model_buffer.c_str(), model_buffer.size());
   // If it's a quantized model, and use cpu with mkldnn, automaticaly switch to
   // int8 mode
   if (reader.is_quantize_model) {
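
For context, a minimal sketch of how InitFromPaddle reads after this commit, reconstructed from the new (right-hand) side of the hunk above. The redundancy removed is the local std::string contents copy of the model buffer, which existed only to feed paddle2onnx::PaddleReader; the reader is now built from model_buffer directly. Everything outside this hunk (including the body of the quantized-model branch) is elided, not part of the commit:

// Sketch only: reconstructed from the new side of the hunk above;
// code outside the hunk is elided with "...".
bool PaddleBackend::InitFromPaddle(const std::string& model_buffer,
                                   const std::string& params_buffer,
                                   const PaddleBackendOption& option) {
  if (initialized_) {
    FDERROR << "PaddleBackend is already initlized, cannot initialize again."
            << std::endl;
    return false;
  }
  // Configure the predictor once, straight from the in-memory buffers.
  config_.SetModelBuffer(model_buffer.c_str(), model_buffer.size(),
                         params_buffer.c_str(), params_buffer.size());
  config_.EnableMemoryOptim();
  BuildOption(option);

  // The input/output information get from predictor is not right, use
  // PaddleReader instead now
  auto reader = paddle2onnx::PaddleReader(model_buffer.c_str(), model_buffer.size());
  // If it's a quantized model, and use cpu with mkldnn, automaticaly switch to
  // int8 mode
  if (reader.is_quantize_model) {
    // ... (outside this hunk, unchanged by this commit)
  }
  // ... (rest of the function, outside this hunk)
}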