mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-12 20:11:20 +08:00
Add paddlelite backend support (#260)
* Add paddlelite backend support
* Update CMakeLists.txt
* Update __init__.py
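For orientation, the new backend is selected through RuntimeOption like the existing ones. A minimal sketch of the intended call path, using only the APIs added or touched in this diff (the include path and the model file names are placeholders, not confirmed by the commit):

#include "fastdeploy/fastdeploy_runtime.h"  // assumed include path

int main() {
  fastdeploy::RuntimeOption option;
  option.model_file = "model.pdmodel";      // placeholder paths
  option.params_file = "model.pdiparams";
  option.model_format = fastdeploy::Frontend::PADDLE;  // LiteBackend requires PADDLE
  option.UseLiteBackend();  // added in this PR; Backend::LITE is CPU-only

  fastdeploy::Runtime runtime;
  if (!runtime.Init(option)) {
    return -1;  // fails/asserts if not built with ENABLE_LITE_BACKEND=ON
  }
  return 0;
}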
This commit is contained in:
141 fastdeploy/backends/lite/lite_backend.cc Normal file
@@ -0,0 +1,141 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/backends/lite/lite_backend.h"

#include <cstring>

namespace fastdeploy {

// Convert data type from paddle lite to fastdeploy
FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype) {
  if (dtype == paddle::lite_api::PrecisionType::kFloat) {
    return FDDataType::FP32;
  } else if (dtype == paddle::lite_api::PrecisionType::kInt8) {
    return FDDataType::INT8;
  } else if (dtype == paddle::lite_api::PrecisionType::kInt32) {
    return FDDataType::INT32;
  } else if (dtype == paddle::lite_api::PrecisionType::kInt64) {
    return FDDataType::INT64;
  } else if (dtype == paddle::lite_api::PrecisionType::kInt16) {
    return FDDataType::INT16;
  } else if (dtype == paddle::lite_api::PrecisionType::kUInt8) {
    return FDDataType::UINT8;
  } else if (dtype == paddle::lite_api::PrecisionType::kFP64) {
    return FDDataType::FP64;
  }
  FDASSERT(false, "Unexpected data type of %d.", static_cast<int>(dtype));
  return FDDataType::FP32;
}

void LiteBackend::BuildOption(const LiteBackendOption& option) {
  std::vector<paddle::lite_api::Place> valid_places;
  valid_places.push_back(paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
  config_.set_valid_places(valid_places);
}

bool LiteBackend::InitFromPaddle(const std::string& model_file,
                                 const std::string& params_file,
                                 const LiteBackendOption& option) {
  if (initialized_) {
    FDERROR << "LiteBackend is already initialized, cannot initialize again."
            << std::endl;
    return false;
  }

  config_.set_model_file(model_file);
  config_.set_param_file(params_file);
  BuildOption(option);
  predictor_ = paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::CxxConfig>(config_);

  inputs_desc_.clear();
  outputs_desc_.clear();
  inputs_order_.clear();
  std::vector<std::string> input_names = predictor_->GetInputNames();
  std::vector<std::string> output_names = predictor_->GetOutputNames();
  for (size_t i = 0; i < input_names.size(); ++i) {
    inputs_order_[input_names[i]] = i;
    TensorInfo info;
    auto tensor = predictor_->GetInput(i);
    auto shape = tensor->shape();
    info.shape.assign(shape.begin(), shape.end());
    info.name = input_names[i];
    info.dtype = LiteDataTypeToFD(tensor->precision());
    inputs_desc_.emplace_back(info);
  }
  for (size_t i = 0; i < output_names.size(); ++i) {
    TensorInfo info;
    auto tensor = predictor_->GetOutput(i);
    auto shape = tensor->shape();
    info.shape.assign(shape.begin(), shape.end());
    info.name = output_names[i];
    info.dtype = LiteDataTypeToFD(tensor->precision());
    outputs_desc_.emplace_back(info);
  }

  initialized_ = true;
  return true;
}

TensorInfo LiteBackend::GetInputInfo(int index) {
  FDASSERT(index < NumInputs(),
           "The index: %d should be less than the number of inputs: %d.",
           index, NumInputs());
  return inputs_desc_[index];
}

std::vector<TensorInfo> LiteBackend::GetInputInfos() { return inputs_desc_; }

TensorInfo LiteBackend::GetOutputInfo(int index) {
  FDASSERT(index < NumOutputs(),
           "The index: %d should be less than the number of outputs: %d.",
           index, NumOutputs());
  return outputs_desc_[index];
}

std::vector<TensorInfo> LiteBackend::GetOutputInfos() {
  return outputs_desc_;
}

bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
                        std::vector<FDTensor>* outputs) {
  if (inputs.size() != inputs_desc_.size()) {
    FDERROR << "[LiteBackend] Size of inputs(" << inputs.size()
            << ") should match the number of inputs of this model("
            << inputs_desc_.size() << ")." << std::endl;
    return false;
  }

  for (size_t i = 0; i < inputs.size(); ++i) {
    auto iter = inputs_order_.find(inputs[i].name);
    if (iter == inputs_order_.end()) {
      FDERROR << "Cannot find input with name: " << inputs[i].name
              << " in loaded model." << std::endl;
      return false;
    }
    auto tensor = predictor_->GetInput(iter->second);
    tensor->Resize(inputs[i].shape);
    tensor->ShareExternalMemory(const_cast<void*>(inputs[i].CpuData()),
                                inputs[i].Nbytes(),
                                paddle::lite_api::TargetType::kARM);
  }

  predictor_->Run();

  outputs->resize(outputs_desc_.size());
  for (size_t i = 0; i < outputs_desc_.size(); ++i) {
    auto tensor = predictor_->GetOutput(i);
    (*outputs)[i].Resize(tensor->shape(), outputs_desc_[i].dtype,
                         outputs_desc_[i].name);
    memcpy((*outputs)[i].MutableData(), tensor->data<void>(),
           (*outputs)[i].Nbytes());
  }
  return true;
}

}  // namespace fastdeploy
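As a quick illustration of the surface above (a sketch, not part of the commit): once InitFromPaddle succeeds, the TensorInfo descriptors cached during initialization can be queried before running inference. This fragment uses only calls that appear in this file, plus std::cout from <iostream>; the model paths are placeholders.

fastdeploy::LiteBackend backend;
if (backend.InitFromPaddle("model.pdmodel", "model.pdiparams")) {
  for (int i = 0; i < backend.NumInputs(); ++i) {
    auto info = backend.GetInputInfo(i);  // name/shape/dtype recorded at init
    std::cout << "input " << i << ": " << info.name << std::endl;
  }
}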
61 fastdeploy/backends/lite/lite_backend.h Normal file
@@ -0,0 +1,61 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <iostream>
#include <map>  // for inputs_order_
#include <memory>
#include <string>
#include <vector>

#include "fastdeploy/backends/backend.h"
#include "paddle_api.h"  // NOLINT

namespace fastdeploy {

struct LiteBackendOption {
};

// Convert data type from paddle lite to fastdeploy
FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype);

class LiteBackend : public BaseBackend {
 public:
  LiteBackend() {}
  virtual ~LiteBackend() = default;
  void BuildOption(const LiteBackendOption& option);

  bool InitFromPaddle(const std::string& model_file,
                      const std::string& params_file,
                      const LiteBackendOption& option = LiteBackendOption());

  bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs);

  int NumInputs() const { return inputs_desc_.size(); }

  int NumOutputs() const { return outputs_desc_.size(); }

  TensorInfo GetInputInfo(int index);
  TensorInfo GetOutputInfo(int index);
  std::vector<TensorInfo> GetInputInfos() override;
  std::vector<TensorInfo> GetOutputInfos() override;

 private:
  paddle::lite_api::CxxConfig config_;
  std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor_;
  std::vector<TensorInfo> inputs_desc_;
  std::vector<TensorInfo> outputs_desc_;
  std::map<std::string, int> inputs_order_;
};
}  // namespace fastdeploy
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#ifdef WITH_GPU
#include <cuda_runtime_api.h>
@@ -166,7 +166,7 @@ std::map<std::string, float> FastDeployModel::PrintStatisInfoOfRuntime() {
  std::map<std::string, float> statis_info_of_runtime_dict;

  if (time_of_runtime_.size() < 10) {
-    std::cout << "[FastDeploy] [WARNING] PrintStatisInfoOfRuntime require the runtime ran 10 times at "
+    FDWARNING << "PrintStatisInfoOfRuntime require the runtime ran 10 times at "
                 "least, but now you only ran "
              << time_of_runtime_.size() << " times." << std::endl;
  }
@@ -75,7 +75,7 @@ class FASTDEPLOY_DECL FastDeployModel {
#define TIMERECORD_END(id, prefix)                                        \
  if (DebugEnabled()) {                                                   \
    tc_##id.End();                                                        \
-    FDINFO << __FILE__ << "(" << __LINE__ << "):" << __FUNCTION__ << " " \
+    FDLogger() << __FILE__ << "(" << __LINE__ << "):" << __FUNCTION__ << " " \
           << prefix << " duration = " << tc_##id.Duration() << "s."      \
           << std::endl;                                                  \
  }
@@ -33,6 +33,10 @@
#include "fastdeploy/backends/openvino/ov_backend.h"
#endif

+#ifdef ENABLE_LITE_BACKEND
+#include "fastdeploy/backends/lite/lite_backend.h"
+#endif
+
namespace fastdeploy {

std::vector<Backend> GetAvailableBackends() {
@@ -48,6 +52,9 @@ std::vector<Backend> GetAvailableBackends() {
#endif
#ifdef ENABLE_OPENVINO_BACKEND
  backends.push_back(Backend::OPENVINO);
#endif
+#ifdef ENABLE_LITE_BACKEND
+  backends.push_back(Backend::LITE);
+#endif
  return backends;
}
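A hedged sketch (not part of the commit) of how a caller could use GetAvailableBackends() to fall back gracefully when the Lite backend was not compiled in; std::find comes from <algorithm>, and option continues the RuntimeOption sketch above:

auto backends = fastdeploy::GetAvailableBackends();
bool has_lite = std::find(backends.begin(), backends.end(),
                          fastdeploy::Backend::LITE) != backends.end();
// Prefer LITE when compiled in, otherwise fall back to ONNX Runtime.
option.backend = has_lite ? fastdeploy::Backend::LITE : fastdeploy::Backend::ORT;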
@@ -71,6 +78,8 @@ std::string Str(const Backend& b) {
    return "Backend::PDINFER";
  } else if (b == Backend::OPENVINO) {
    return "Backend::OPENVINO";
+  } else if (b == Backend::LITE) {
+    return "Backend::LITE";
  }
  return "UNKNOWN-Backend";
}
@@ -194,6 +203,15 @@ void RuntimeOption::UseOpenVINOBackend() {
  FDASSERT(false, "The FastDeploy didn't compile with OpenVINO.");
#endif
}

+void RuntimeOption::UseLiteBackend() {
+#ifdef ENABLE_LITE_BACKEND
+  backend = Backend::LITE;
+#else
+  FDASSERT(false, "The FastDeploy didn't compile with Paddle Lite.");
+#endif
+}
+
void RuntimeOption::EnablePaddleMKLDNN() { pd_enable_mkldnn = true; }

void RuntimeOption::DisablePaddleMKLDNN() { pd_enable_mkldnn = false; }
@@ -262,12 +280,12 @@ bool Runtime::Init(const RuntimeOption& _option) {
    FDASSERT(option.device == Device::CPU || option.device == Device::GPU,
             "Backend::ORT only supports Device::CPU/Device::GPU.");
    CreateOrtBackend();
-    FDINFO << "Runtime initialized with Backend::ORT in device " << Str(option.device) << "." << std::endl;
+    FDINFO << "Runtime initialized with Backend::ORT in " << Str(option.device) << "." << std::endl;
  } else if (option.backend == Backend::TRT) {
    FDASSERT(option.device == Device::GPU,
             "Backend::TRT only supports Device::GPU.");
    CreateTrtBackend();
-    FDINFO << "Runtime initialized with Backend::TRT in device " << Str(option.device) << "." << std::endl;
+    FDINFO << "Runtime initialized with Backend::TRT in " << Str(option.device) << "." << std::endl;
  } else if (option.backend == Backend::PDINFER) {
    FDASSERT(option.device == Device::CPU || option.device == Device::GPU,
             "Backend::PDINFER only supports Device::CPU/Device::GPU.");
@@ -275,12 +293,16 @@ bool Runtime::Init(const RuntimeOption& _option) {
             option.model_format == Frontend::PADDLE,
             "Backend::PDINFER only supports model format of Frontend::PADDLE.");
    CreatePaddleBackend();
-    FDINFO << "Runtime initialized with Backend::PDINFER in device " << Str(option.device) << "." << std::endl;
+    FDINFO << "Runtime initialized with Backend::PDINFER in " << Str(option.device) << "." << std::endl;
  } else if (option.backend == Backend::OPENVINO) {
    FDASSERT(option.device == Device::CPU,
             "Backend::OPENVINO only supports Device::CPU");
    CreateOpenVINOBackend();
-    FDINFO << "Runtime initialized with Backend::OPENVINO in device " << Str(option.device) << "." << std::endl;
+    FDINFO << "Runtime initialized with Backend::OPENVINO in " << Str(option.device) << "." << std::endl;
+  } else if (option.backend == Backend::LITE) {
+    FDASSERT(option.device == Device::CPU, "Backend::LITE only supports Device::CPU");
+    CreateLiteBackend();
+    FDINFO << "Runtime initialized with Backend::LITE in " << Str(option.device) << "." << std::endl;
  } else {
    FDERROR << "Runtime only support "
               "Backend::ORT/Backend::TRT/Backend::PDINFER as backend now."
@@ -433,4 +455,21 @@ void Runtime::CreateTrtBackend() {
           "ENABLE_TRT_BACKEND=ON.");
#endif
}

+void Runtime::CreateLiteBackend() {
+#ifdef ENABLE_LITE_BACKEND
+  auto lite_option = LiteBackendOption();
+  FDASSERT(option.model_format == Frontend::PADDLE,
+           "LiteBackend only supports model format of Frontend::PADDLE");
+  backend_ = utils::make_unique<LiteBackend>();
+  auto casted_backend = dynamic_cast<LiteBackend*>(backend_.get());
+  FDASSERT(casted_backend->InitFromPaddle(option.model_file, option.params_file, lite_option),
+           "Load model from nb file failed while initializing LiteBackend.");
+#else
+  FDASSERT(false,
+           "LiteBackend is not available, please compile with "
+           "ENABLE_LITE_BACKEND=ON.");
+#endif
+}
+
}  // namespace fastdeploy
@@ -21,7 +21,7 @@

namespace fastdeploy {

-enum FASTDEPLOY_DECL Backend { UNKNOWN, ORT, TRT, PDINFER, OPENVINO };
+enum FASTDEPLOY_DECL Backend { UNKNOWN, ORT, TRT, PDINFER, OPENVINO, LITE };
// AUTOREC decides which Frontend to use according to the model file name
enum FASTDEPLOY_DECL Frontend { AUTOREC, PADDLE, ONNX };
@@ -66,6 +66,9 @@ struct FASTDEPLOY_DECL RuntimeOption {
  // use openvino backend
  void UseOpenVINOBackend();

+  // use paddle lite backend
+  void UseLiteBackend();
+
  // enable mkldnn while using paddle inference on CPU
  void EnablePaddleMKLDNN();
  // disable mkldnn while using paddle inference on CPU
@@ -161,6 +164,8 @@ struct FASTDEPLOY_DECL Runtime {

  void CreateOpenVINOBackend();

+  void CreateLiteBackend();
+
  int NumInputs() { return backend_->NumInputs(); }
  int NumOutputs() { return backend_->NumOutputs(); }
  TensorInfo GetInputInfo(int index);
@@ -27,6 +27,7 @@ void BindRuntime(pybind11::module& m) {
      .def("use_ort_backend", &RuntimeOption::UseOrtBackend)
      .def("use_trt_backend", &RuntimeOption::UseTrtBackend)
      .def("use_openvino_backend", &RuntimeOption::UseOpenVINOBackend)
+      .def("use_lite_backend", &RuntimeOption::UseLiteBackend)
      .def("enable_paddle_mkldnn", &RuntimeOption::EnablePaddleMKLDNN)
      .def("disable_paddle_mkldnn", &RuntimeOption::DisablePaddleMKLDNN)
      .def("enable_paddle_log_info", &RuntimeOption::EnablePaddleLogInfo)
@@ -109,7 +110,8 @@ void BindRuntime(pybind11::module& m) {
      .value("UNKOWN", Backend::UNKNOWN)
      .value("ORT", Backend::ORT)
      .value("TRT", Backend::TRT)
-      .value("PDINFER", Backend::PDINFER);
+      .value("PDINFER", Backend::PDINFER)
+      .value("LITE", Backend::LITE);
  pybind11::enum_<Frontend>(m, "Frontend", pybind11::arithmetic(),
                            "Frontend for inference.")
      .value("PADDLE", Frontend::PADDLE)
@@ -137,10 +137,6 @@ PYBIND11_MODULE(@PY_LIBRARY_NAME@, m) {
      "Make programer easier to deploy deeplearning model, save time to save "
      "the world!";

-  pybind11::class_<FDLogger>(m, "FDLogger")
-      .def_readwrite_static("disable_info", &FDLogger::disable_info)
-      .def_readwrite_static("disable_warning", &FDLogger::disable_warning);
-
  BindRuntime(m);
  BindFDModel(m);
#ifdef ENABLE_VISION
@@ -38,7 +38,7 @@ class FASTDEPLOY_DECL TimeCounter {
    if (!print_out) {
      return;
    }
-    std::cout << prefix << " duration = " << Duration() << "s." << std::endl;
+    FDLogger() << prefix << " duration = " << Duration() << "s." << std::endl;
  }

 private:
@@ -46,4 +46,4 @@ class FASTDEPLOY_DECL TimeCounter {
  std::chrono::time_point<std::chrono::system_clock> end_;
};

-} // namespace fastdeploy
+}  // namespace fastdeploy
@@ -16,17 +16,14 @@

namespace fastdeploy {

-bool FDLogger::disable_info = false;
-bool FDLogger::disable_warning = false;
-
-FDLogger::FDLogger(int level, const std::string& prefix) {
+FDLogger::FDLogger(bool verbose, const std::string& prefix) {
+  verbose_ = verbose;
  line_ = "";
-  level_ = level;
  prefix_ = prefix;
}

FDLogger& FDLogger::operator<<(std::ostream& (*os)(std::ostream&)) {
-  if (!verbose()) {
+  if (!verbose_) {
    return *this;
  }
  std::cout << prefix_ << " " << line_ << std::endl;
@@ -37,26 +37,16 @@ namespace fastdeploy {

class FASTDEPLOY_DECL FDLogger {
 public:
-  static bool disable_info;
-  static bool disable_warning;
-  // 0: INFO
-  // 1: WARNING
-  // 2: ERROR
-  explicit FDLogger(int level = 0, const std::string& prefix = "[FastDeploy]");
-
-  bool verbose() {
-    if (disable_info && level_ == 0) {
-      return false;
-    }
-    if (disable_warning && level_ == 1) {
-      return false;
-    }
-    return true;
+  FDLogger() {
+    line_ = "";
+    prefix_ = "[FastDeploy]";
+    verbose_ = true;
  }
+  explicit FDLogger(bool verbose, const std::string& prefix = "[FastDeploy]");

  template <typename T>
  FDLogger& operator<<(const T& val) {
-    if (!verbose()) {
+    if (!verbose_) {
      return *this;
    }
    std::stringstream ss;
@@ -66,7 +56,7 @@ class FASTDEPLOY_DECL FDLogger {
  }
  FDLogger& operator<<(std::ostream& (*os)(std::ostream&));
  ~FDLogger() {
-    if (!verbose() && line_ != "") {
+    if (!verbose_ && line_ != "") {
      std::cout << line_ << std::endl;
    }
  }
@@ -74,7 +64,7 @@ class FASTDEPLOY_DECL FDLogger {
 private:
  std::string line_;
  std::string prefix_;
-  int level_ = 0;
+  bool verbose_ = true;
};

FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
@@ -85,15 +75,15 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
#endif

#define FDERROR                                                  \
-  FDLogger(2, "[ERROR]") << __REL_FILE__ << "(" << __LINE__     \
+  FDLogger(true, "[ERROR]") << __REL_FILE__ << "(" << __LINE__  \
                          << ")::" << __FUNCTION__ << "\t"

#define FDWARNING                                                  \
-  FDLogger(1, "[WARNING]") << __REL_FILE__ << "(" << __LINE__     \
+  FDLogger(true, "[WARNING]") << __REL_FILE__ << "(" << __LINE__  \
                            << ")::" << __FUNCTION__ << "\t"

#define FDINFO                                                  \
-  FDLogger(0, "[INFO]") << __REL_FILE__ << "(" << __LINE__     \
+  FDLogger(true, "[INFO]") << __REL_FILE__ << "(" << __LINE__  \
                        << ")::" << __FUNCTION__ << "\t"

#define FDASSERT(condition, format, ...)                     \
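The logger rework above replaces the level-based constructor and the static disable flags with a per-instance verbose bool; all three macros now pass true. A small sketch of the resulting behavior, assuming the class as declared in this diff:

// Printed with its prefix; std::endl flushes the accumulated line.
fastdeploy::FDLogger(true, "[INFO]") << "model loaded" << std::endl;
// Suppressed entirely: operator<< returns early when verbose is false.
fastdeploy::FDLogger(false, "[DEBUG]") << "never shown" << std::endl;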
@@ -26,7 +26,7 @@ PaddleClasModel::PaddleClasModel(const std::string& model_file,
                                 const RuntimeOption& custom_option,
                                 const Frontend& model_format) {
  config_file_ = config_file;
-  valid_cpu_backends = {Backend::ORT, Backend::OPENVINO, Backend::PDINFER};
+  valid_cpu_backends = {Backend::ORT, Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
  valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
  runtime_option = custom_option;
  runtime_option.model_format = model_format;
@@ -24,7 +24,7 @@ PicoDet::PicoDet(const std::string& model_file, const std::string& params_file,
                 const RuntimeOption& custom_option,
                 const Frontend& model_format) {
  config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
+  valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
  valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
  runtime_option = custom_option;
  runtime_option.model_format = model_format;
@@ -24,6 +24,7 @@ PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file,
}

void PPYOLOE::GetNmsInfo() {
+#ifdef ENABLE_PADDLE_FRONTEND
  if (runtime_option.model_format == Frontend::PADDLE) {
    std::string contents;
    if (!ReadBinaryFromFile(runtime_option.model_file, &contents)) {
@@ -41,6 +42,7 @@ void PPYOLOE::GetNmsInfo() {
    normalized = reader.nms_params.normalized;
  }
}
+#endif
}

bool PPYOLOE::Initialize() {
@@ -56,13 +56,13 @@ class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel {
  float score_threshold = 0.01;
  int64_t nms_top_k = 10000;
  bool normalized = true;
-  bool has_nms_ = false;
+  bool has_nms_ = true;

  // This function will be used to check if this model contains multiclass_nms
  // and get parameters from the operator
  void GetNmsInfo();
};
-} // namespace detection
+}  // namespace detection
} // namespace vision
} // namespace fastdeploy
@@ -26,12 +26,11 @@ Classifier::Classifier(const std::string& model_file,
                       const RuntimeOption& custom_option,
                       const Frontend& model_format) {
  if (model_format == Frontend::ONNX) {
-    valid_cpu_backends = {Backend::ORT,
-                          Backend::OPENVINO};  // specify the available CPU backends
+    valid_cpu_backends = {Backend::ORT, Backend::OPENVINO};  // specify the available CPU backends
    valid_gpu_backends = {Backend::ORT, Backend::TRT};  // specify the available GPU backends
  } else {
    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
-    valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+    valid_gpu_backends = {Backend::PDINFER, Backend::TRT, Backend::ORT};
  }
  runtime_option = custom_option;
  runtime_option.model_format = model_format;
@@ -44,12 +44,12 @@ Recognizer::Recognizer(const std::string& model_file,
                       const RuntimeOption& custom_option,
                       const Frontend& model_format) {
  if (model_format == Frontend::ONNX) {
-    valid_cpu_backends = {Backend::ORT,
-                          Backend::OPENVINO};  // specify the available CPU backends
+    valid_cpu_backends = {Backend::ORT, Backend::OPENVINO};  // specify the available CPU backends
    valid_gpu_backends = {Backend::ORT, Backend::TRT};  // specify the available GPU backends
  } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
-    valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+    // NOTE: this model does not support paddle-inference GPU inference yet
+    valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::OPENVINO};
+    valid_gpu_backends = {Backend::ORT, Backend::TRT};
  }

  runtime_option = custom_option;