Add paddlelite backend support (#260)

* Add paddlelite backend support

* Update CMakeLists.txt

* Update __init__.py
Author: Jason
Date: 2022-09-21 13:22:34 +08:00 (committed by GitHub)
Parent: 32596b3b89
Commit: addce837bc
22 changed files with 370 additions and 56 deletions


@@ -42,7 +42,8 @@ option(WITH_GPU "Whether WITH_GPU=ON, will enable onnxruntime-gpu/paddle-infernc
option(ENABLE_ORT_BACKEND "Whether to enable onnxruntime backend." OFF)
option(ENABLE_TRT_BACKEND "Whether to enable tensorrt backend." OFF)
option(ENABLE_PADDLE_BACKEND "Whether to enable paddle backend." OFF)
-option(ENABLE_OPENVINO_BACKEND "Whether to enable paddle backend." OFF)
+option(ENABLE_OPENVINO_BACKEND "Whether to enable openvino backend." OFF)
+option(ENABLE_LITE_BACKEND "Whether to enable paddle lite backend." OFF)
option(CUDA_DIRECTORY "If build tensorrt backend, need to define path of cuda library.")
option(TRT_DIRECTORY "If build tensorrt backend, need to define path of tensorrt library.")
option(ENABLE_VISION "Whether to enable vision models usage." OFF)
@@ -120,10 +121,11 @@ file(GLOB_RECURSE DEPLOY_ORT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastde
file(GLOB_RECURSE DEPLOY_PADDLE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/paddle/*.cc)
file(GLOB_RECURSE DEPLOY_TRT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/tensorrt/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/tensorrt/*.cpp)
file(GLOB_RECURSE DEPLOY_OPENVINO_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/openvino/*.cc)
+file(GLOB_RECURSE DEPLOY_LITE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/lite/*.cc)
file(GLOB_RECURSE DEPLOY_VISION_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/vision/*.cc)
file(GLOB_RECURSE DEPLOY_TEXT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/text/*.cc)
file(GLOB_RECURSE DEPLOY_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/pybind/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/*_pybind.cc)
-list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS} ${DEPLOY_PADDLE_SRCS} ${DEPLOY_TRT_SRCS} ${DEPLOY_OPENVINO_SRCS} ${DEPLOY_VISION_SRCS} ${DEPLOY_TEXT_SRCS})
+list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS} ${DEPLOY_PADDLE_SRCS} ${DEPLOY_TRT_SRCS} ${DEPLOY_OPENVINO_SRCS} ${DEPLOY_LITE_SRCS} ${DEPLOY_VISION_SRCS} ${DEPLOY_TEXT_SRCS})
set(DEPEND_LIBS "")
@@ -147,6 +149,12 @@ if(ENABLE_ORT_BACKEND)
list(APPEND DEPEND_LIBS external_onnxruntime)
endif()
+if(ENABLE_LITE_BACKEND)
+add_definitions(-DENABLE_LITE_BACKEND)
+include(${PROJECT_SOURCE_DIR}/cmake/paddlelite.cmake)
+list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_LITE_SRCS})
+endif()
if(ENABLE_PADDLE_BACKEND)
add_definitions(-DENABLE_PADDLE_BACKEND)
list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_PADDLE_SRCS})

cmake/paddlelite.cmake (new file, 72 lines)

@@ -0,0 +1,72 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include(ExternalProject)
set(PADDLELITE_PROJECT "extern_paddlelite")
set(PADDLELITE_PREFIX_DIR ${THIRD_PARTY_PATH}/paddlelite)
set(PADDLELITE_SOURCE_DIR
${THIRD_PARTY_PATH}/paddlelite/src/${PADDLELITE_PROJECT})
set(PADDLELITE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/paddlelite)
set(PADDLELITE_INC_DIR
"${PADDLELITE_INSTALL_DIR}/include"
CACHE PATH "paddlelite include directory." FORCE)
set(PADDLELITE_LIB_DIR
"${PADDLELITE_INSTALL_DIR}/lib"
CACHE PATH "paddlelite lib directory." FORCE)
set(CMAKE_BUILD_RPATH "${CMAKE_BUILD_RPATH}" "${PADDLELITE_LIB_DIR}")
#set(PADDLELITE_URL "https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.11/inference_lite_lib.armlinux.armv8.gcc.with_extra.with_cv.tar.gz")
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/third_libs/lite-linux-arm64-20220920.tgz")
if(WIN32)
message(FATAL_ERROR "Doesn't support windows platform with backend Paddle-Lite.")
elseif(APPLE)
message(FATAL_ERROR "Doesn't support Mac OSX platform with backend Paddle-Lite.")
else()
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/third_libs/lite-linux-arm64-20220920.tgz")
else()
message(FATAL_ERROR "Only support Linux aarch64 now, x64 is not supported with backend Paddle-Lite.")
endif()
endif()
include_directories(${PADDLELITE_INC_DIR})
if(WIN32)
elseif(APPLE)
else()
set(PADDLELITE_LIB "${PADDLELITE_INSTALL_DIR}/lib/libpaddle_full_api_shared.so")
endif()
ExternalProject_Add(
${PADDLELITE_PROJECT}
${EXTERNAL_PROJECT_LOG_ARGS}
URL ${PADDLELITE_URL}
PREFIX ${PADDLELITE_PREFIX_DIR}
DOWNLOAD_NO_PROGRESS 1
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
UPDATE_COMMAND ""
INSTALL_COMMAND
${CMAKE_COMMAND} -E remove_directory ${PADDLELITE_INSTALL_DIR} &&
${CMAKE_COMMAND} -E make_directory ${PADDLELITE_INSTALL_DIR} &&
${CMAKE_COMMAND} -E rename ${PADDLELITE_SOURCE_DIR}/lib/ ${PADDLELITE_INSTALL_DIR}/lib &&
${CMAKE_COMMAND} -E copy_directory ${PADDLELITE_SOURCE_DIR}/include
${PADDLELITE_INC_DIR}
BUILD_BYPRODUCTS ${PADDLELITE_LIB})
add_library(external_paddle_lite STATIC IMPORTED GLOBAL)
set_property(TARGET external_paddle_lite PROPERTY IMPORTED_LOCATION ${PADDLELITE_LIB})
add_dependencies(external_paddle_lite ${PADDLELITE_PROJECT})


@@ -0,0 +1,141 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/backends/lite/lite_backend.h"
#include <cstring>
namespace fastdeploy {
// Convert data type from paddle lite to fastdeploy
FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype) {
if (dtype == paddle::lite_api::PrecisionType::kFloat) {
return FDDataType::FP32;
} else if (dtype == paddle::lite_api::PrecisionType::kInt8) {
return FDDataType::INT8;
} else if (dtype == paddle::lite_api::PrecisionType::kInt32) {
return FDDataType::INT32;
} else if (dtype == paddle::lite_api::PrecisionType::kInt64) {
return FDDataType::INT64;
} else if (dtype == paddle::lite_api::PrecisionType::kInt16) {
return FDDataType::INT16;
} else if (dtype == paddle::lite_api::PrecisionType::kUInt8) {
return FDDataType::UINT8;
} else if (dtype == paddle::lite_api::PrecisionType::kFP64) {
return FDDataType::FP64;
}
FDASSERT(false, "Unexpected data type of %d.", dtype);
return FDDataType::FP32;
}
void LiteBackend::BuildOption(const LiteBackendOption& option) {
std::vector<paddle::lite_api::Place> valid_places;
valid_places.push_back(paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
config_.set_valid_places(valid_places);
}
bool LiteBackend::InitFromPaddle(const std::string& model_file,
const std::string& params_file,
const LiteBackendOption& option) {
if (initialized_) {
FDERROR << "LiteBackend is already initialized, cannot initialize again."
<< std::endl;
return false;
}
config_.set_model_file(model_file);
config_.set_param_file(params_file);
BuildOption(option);
predictor_ = paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::CxxConfig>(config_);
inputs_desc_.clear();
outputs_desc_.clear();
inputs_order_.clear();
std::vector<std::string> input_names = predictor_->GetInputNames();
std::vector<std::string> output_names = predictor_->GetOutputNames();
for (size_t i = 0; i < input_names.size(); ++i) {
inputs_order_[input_names[i]] = i;
TensorInfo info;
auto tensor = predictor_->GetInput(i);
auto shape = tensor->shape();
info.shape.assign(shape.begin(), shape.end());
info.name = input_names[i];
info.dtype = LiteDataTypeToFD(tensor->precision());
inputs_desc_.emplace_back(info);
}
for (size_t i = 0; i < output_names.size(); ++i) {
TensorInfo info;
auto tensor = predictor_->GetOutput(i);
auto shape = tensor->shape();
info.shape.assign(shape.begin(), shape.end());
info.name = output_names[i];
info.dtype = LiteDataTypeToFD(tensor->precision());
outputs_desc_.emplace_back(info);
}
initialized_ = true;
return true;
}
TensorInfo LiteBackend::GetInputInfo(int index) {
FDASSERT(index < NumInputs(),
"The index: %d should less than the number of inputs: %d.", index,
NumInputs());
return inputs_desc_[index];
}
std::vector<TensorInfo> LiteBackend::GetInputInfos() { return inputs_desc_; }
TensorInfo LiteBackend::GetOutputInfo(int index) {
FDASSERT(index < NumOutputs(),
"The index: %d should less than the number of outputs %d.", index,
NumOutputs());
return outputs_desc_[index];
}
std::vector<TensorInfo> LiteBackend::GetOutputInfos() {
return outputs_desc_;
}
bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
std::vector<FDTensor>* outputs) {
if (inputs.size() != inputs_desc_.size()) {
FDERROR << "[LiteBackend] Size of inputs(" << inputs.size()
<< ") should keep same with the inputs of this model("
<< inputs_desc_.size() << ")." << std::endl;
return false;
}
for (size_t i = 0; i < inputs.size(); ++i) {
auto iter = inputs_order_.find(inputs[i].name);
if (iter == inputs_order_.end()) {
FDERROR << "Cannot find input with name:" << inputs[i].name << " in loaded model." << std::endl;
return false;
}
auto tensor = predictor_->GetInput(iter->second);
tensor->Resize(inputs[i].shape);
tensor->ShareExternalMemory(const_cast<void*>(inputs[i].CpuData()), inputs[i].Nbytes(), paddle::lite_api::TargetType::kARM);
}
predictor_->Run();
outputs->resize(outputs_desc_.size());
for (size_t i = 0; i < outputs_desc_.size(); ++i) {
auto tensor = predictor_->GetOutput(i);
(*outputs)[i].Resize(tensor->shape(), outputs_desc_[i].dtype, outputs_desc_[i].name);
memcpy((*outputs)[i].MutableData(), tensor->data<void>(), (*outputs)[i].Nbytes());
}
return true;
}
} // namespace fastdeploy
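
For orientation, a minimal sketch of driving the new LiteBackend class directly (illustrative only, not part of this commit). It sticks to calls that appear in the sources above (InitFromPaddle, GetInputInfo, FDTensor::Resize/MutableData/Nbytes, Infer); the model paths are placeholders and the first input is assumed to have a fully static shape:

// Illustrative sketch only; paths, shapes and the zero-filled input are assumptions.
#include <algorithm>
#include <cstdint>
#include <vector>
#include "fastdeploy/backends/lite/lite_backend.h"

int main() {
  fastdeploy::LiteBackend backend;
  if (!backend.InitFromPaddle("model.pdmodel", "model.pdiparams")) {
    return -1;
  }
  // Describe the first input and allocate a matching FDTensor.
  fastdeploy::TensorInfo info = backend.GetInputInfo(0);
  std::vector<fastdeploy::FDTensor> inputs(1), outputs;
  inputs[0].Resize(std::vector<int64_t>(info.shape.begin(), info.shape.end()),
                   info.dtype, info.name);
  // Fill the input buffer with zeros just to have valid data.
  std::fill_n(static_cast<char*>(inputs[0].MutableData()), inputs[0].Nbytes(), 0);
  return backend.Infer(inputs, &outputs) ? 0 : -1;
}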


@@ -0,0 +1,61 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include "fastdeploy/backends/backend.h"
#include "paddle_api.h" // NOLINT
namespace fastdeploy {
struct LiteBackendOption {
};
// Convert data type from paddle lite to fastdeploy
FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype);
class LiteBackend : public BaseBackend {
public:
LiteBackend() {}
virtual ~LiteBackend() = default;
void BuildOption(const LiteBackendOption& option);
bool InitFromPaddle(const std::string& model_file,
const std::string& params_file,
const LiteBackendOption& option = LiteBackendOption());
bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs);
int NumInputs() const { return inputs_desc_.size(); }
int NumOutputs() const { return outputs_desc_.size(); }
TensorInfo GetInputInfo(int index);
TensorInfo GetOutputInfo(int index);
std::vector<TensorInfo> GetInputInfos() override;
std::vector<TensorInfo> GetOutputInfos() override;
private:
paddle::lite_api::CxxConfig config_;
std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor_;
std::vector<TensorInfo> inputs_desc_;
std::vector<TensorInfo> outputs_desc_;
std::map<std::string, int> inputs_order_;
};
} // namespace fastdeploy


@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef WITH_GPU
#include <cuda_runtime_api.h>


@@ -166,7 +166,7 @@ std::map<std::string, float> FastDeployModel::PrintStatisInfoOfRuntime() {
std::map<std::string, float> statis_info_of_runtime_dict;
if (time_of_runtime_.size() < 10) {
std::cout << "[FastDeploy] [WARNING] PrintStatisInfoOfRuntime require the runtime ran 10 times at "
FDWARNING << "PrintStatisInfoOfRuntime require the runtime ran 10 times at "
"least, but now you only ran "
<< time_of_runtime_.size() << " times." << std::endl;
}


@@ -75,7 +75,7 @@ class FASTDEPLOY_DECL FastDeployModel {
#define TIMERECORD_END(id, prefix) \
if (DebugEnabled()) { \
tc_##id.End(); \
-FDINFO << __FILE__ << "(" << __LINE__ << "):" << __FUNCTION__ << " " \
+FDLogger() << __FILE__ << "(" << __LINE__ << "):" << __FUNCTION__ << " " \
<< prefix << " duration = " << tc_##id.Duration() << "s." \
<< std::endl; \
}


@@ -33,6 +33,10 @@
#include "fastdeploy/backends/openvino/ov_backend.h"
#endif
+#ifdef ENABLE_LITE_BACKEND
+#include "fastdeploy/backends/lite/lite_backend.h"
+#endif
namespace fastdeploy {
std::vector<Backend> GetAvailableBackends() {
@@ -48,6 +52,9 @@ std::vector<Backend> GetAvailableBackends() {
#endif
#ifdef ENABLE_OPENVINO_BACKEND
backends.push_back(Backend::OPENVINO);
#endif
+#ifdef ENABLE_LITE_BACKEND
+backends.push_back(Backend::LITE);
+#endif
return backends;
}
@@ -71,6 +78,8 @@ std::string Str(const Backend& b) {
return "Backend::PDINFER";
} else if (b == Backend::OPENVINO) {
return "Backend::OPENVINO";
+} else if (b == Backend::LITE) {
+return "Backend::LITE";
}
return "UNKNOWN-Backend";
}
@@ -194,6 +203,15 @@ void RuntimeOption::UseOpenVINOBackend() {
FDASSERT(false, "The FastDeploy didn't compile with OpenVINO.");
#endif
}
+void RuntimeOption::UseLiteBackend() {
+#ifdef ENABLE_LITE_BACKEND
+backend = Backend::LITE;
+#else
+FDASSERT(false, "The FastDeploy didn't compile with Paddle Lite.");
+#endif
+}
void RuntimeOption::EnablePaddleMKLDNN() { pd_enable_mkldnn = true; }
void RuntimeOption::DisablePaddleMKLDNN() { pd_enable_mkldnn = false; }
@@ -262,12 +280,12 @@ bool Runtime::Init(const RuntimeOption& _option) {
FDASSERT(option.device == Device::CPU || option.device == Device::GPU,
"Backend::ORT only supports Device::CPU/Device::GPU.");
CreateOrtBackend();
FDINFO << "Runtime initialized with Backend::ORT in device " << Str(option.device) << "." << std::endl;
FDINFO << "Runtime initialized with Backend::ORT in " << Str(option.device) << "." << std::endl;
} else if (option.backend == Backend::TRT) {
FDASSERT(option.device == Device::GPU,
"Backend::TRT only supports Device::GPU.");
CreateTrtBackend();
FDINFO << "Runtime initialized with Backend::TRT in device " << Str(option.device) << "." << std::endl;
FDINFO << "Runtime initialized with Backend::TRT in " << Str(option.device) << "." << std::endl;
} else if (option.backend == Backend::PDINFER) {
FDASSERT(option.device == Device::CPU || option.device == Device::GPU,
"Backend::TRT only supports Device::CPU/Device::GPU.");
@@ -275,12 +293,16 @@ bool Runtime::Init(const RuntimeOption& _option) {
option.model_format == Frontend::PADDLE,
"Backend::PDINFER only supports model format of Frontend::PADDLE.");
CreatePaddleBackend();
FDINFO << "Runtime initialized with Backend::PDINFER in device " << Str(option.device) << "." << std::endl;
FDINFO << "Runtime initialized with Backend::PDINFER in " << Str(option.device) << "." << std::endl;
} else if (option.backend == Backend::OPENVINO) {
FDASSERT(option.device == Device::CPU,
"Backend::OPENVINO only supports Device::CPU");
CreateOpenVINOBackend();
FDINFO << "Runtime initialized with Backend::OPENVINO in device " << Str(option.device) << "." << std::endl;
FDINFO << "Runtime initialized with Backend::OPENVINO in " << Str(option.device) << "." << std::endl;
} else if (option.backend == Backend::LITE) {
FDASSERT(option.device == Device::CPU, "Backend::LITE only supports Device::CPU");
CreateLiteBackend();
FDINFO << "Runtime initialized with Backend::LITE in " << Str(option.device) << "." << std::endl;
} else {
FDERROR << "Runtime only support "
"Backend::ORT/Backend::TRT/Backend::PDINFER as backend now."
@@ -433,4 +455,21 @@ void Runtime::CreateTrtBackend() {
"ENABLE_TRT_BACKEND=ON.");
#endif
}
+void Runtime::CreateLiteBackend() {
+#ifdef ENABLE_LITE_BACKEND
+auto lite_option = LiteBackendOption();
+FDASSERT(option.model_format == Frontend::PADDLE,
+"LiteBackend only support model format of Frontend::PADDLE");
+backend_ = utils::make_unique<LiteBackend>();
+auto casted_backend = dynamic_cast<LiteBackend*>(backend_.get());
+FDASSERT(casted_backend->InitFromPaddle(option.model_file, option.params_file, lite_option),
+"Load model from nb file failed while initializing LiteBackend.");
+#else
+FDASSERT(false,
+"LiteBackend is not available, please compiled with "
+"ENABLE_LITE_BACKEND=ON.");
+#endif
+}
} // namespace fastdeploy
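
In normal use the backend is reached through the Runtime API extended above. A hedged sketch, not part of this commit, assuming RuntimeOption exposes model_file/params_file/model_format/device as public members (as the accesses inside CreateLiteBackend suggest) and that the public header is fastdeploy/runtime.h:

#include "fastdeploy/runtime.h"  // assumed header path

int main() {
  fastdeploy::RuntimeOption option;
  option.model_file = "model.pdmodel";      // placeholder paths; Backend::LITE
  option.params_file = "model.pdiparams";   // requires Frontend::PADDLE models
  option.model_format = fastdeploy::Frontend::PADDLE;
  option.device = fastdeploy::Device::CPU;  // Backend::LITE asserts Device::CPU
  option.UseLiteBackend();                  // needs -DENABLE_LITE_BACKEND=ON at build time

  fastdeploy::Runtime runtime;
  if (!runtime.Init(option)) {
    return -1;
  }
  // The runtime is now backed by LiteBackend; feed FDTensor inputs via Infer().
  return 0;
}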


@@ -21,7 +21,7 @@
namespace fastdeploy {
-enum FASTDEPLOY_DECL Backend { UNKNOWN, ORT, TRT, PDINFER, OPENVINO };
+enum FASTDEPLOY_DECL Backend { UNKNOWN, ORT, TRT, PDINFER, OPENVINO, LITE };
// AUTOREC will according to the name of model file
// to decide which Frontend is
enum FASTDEPLOY_DECL Frontend { AUTOREC, PADDLE, ONNX };
@@ -66,6 +66,9 @@ struct FASTDEPLOY_DECL RuntimeOption {
// use openvino backend
void UseOpenVINOBackend();
+// use paddle lite backend
+void UseLiteBackend();
// enable mkldnn while use paddle inference in CPU
void EnablePaddleMKLDNN();
// disable mkldnn while use paddle inference in CPU
@@ -161,6 +164,8 @@ struct FASTDEPLOY_DECL Runtime {
void CreateOpenVINOBackend();
+void CreateLiteBackend();
int NumInputs() { return backend_->NumInputs(); }
int NumOutputs() { return backend_->NumOutputs(); }
TensorInfo GetInputInfo(int index);


@@ -27,6 +27,7 @@ void BindRuntime(pybind11::module& m) {
.def("use_ort_backend", &RuntimeOption::UseOrtBackend)
.def("use_trt_backend", &RuntimeOption::UseTrtBackend)
.def("use_openvino_backend", &RuntimeOption::UseOpenVINOBackend)
.def("use_lite_backend", &RuntimeOption::UseLiteBackend)
.def("enable_paddle_mkldnn", &RuntimeOption::EnablePaddleMKLDNN)
.def("disable_paddle_mkldnn", &RuntimeOption::DisablePaddleMKLDNN)
.def("enable_paddle_log_info", &RuntimeOption::EnablePaddleLogInfo)
@@ -109,7 +110,8 @@ void BindRuntime(pybind11::module& m) {
.value("UNKOWN", Backend::UNKNOWN)
.value("ORT", Backend::ORT)
.value("TRT", Backend::TRT)
.value("PDINFER", Backend::PDINFER);
.value("PDINFER", Backend::PDINFER)
.value("LITE", Backend::LITE);
pybind11::enum_<Frontend>(m, "Frontend", pybind11::arithmetic(),
"Frontend for inference.")
.value("PADDLE", Frontend::PADDLE)


@@ -137,10 +137,6 @@ PYBIND11_MODULE(@PY_LIBRARY_NAME@, m) {
"Make programer easier to deploy deeplearning model, save time to save "
"the world!";
pybind11::class_<FDLogger>(m, "FDLogger")
.def_readwrite_static("disable_info", &FDLogger::disable_info)
.def_readwrite_static("disable_warning", &FDLogger::disable_warning);
BindRuntime(m);
BindFDModel(m);
#ifdef ENABLE_VISION


@@ -38,7 +38,7 @@ class FASTDEPLOY_DECL TimeCounter {
if (!print_out) {
return;
}
-std::cout << prefix << " duration = " << Duration() << "s." << std::endl;
+FDLogger() << prefix << " duration = " << Duration() << "s." << std::endl;
}
private:


@@ -16,17 +16,14 @@
namespace fastdeploy {
-bool FDLogger::disable_info = false;
-bool FDLogger::disable_warning = false;
-FDLogger::FDLogger(int level, const std::string& prefix) {
+FDLogger::FDLogger(bool verbose, const std::string& prefix) {
+verbose_ = verbose;
line_ = "";
-level_ = level;
prefix_ = prefix;
}
FDLogger& FDLogger::operator<<(std::ostream& (*os)(std::ostream&)) {
-if (!verbose()) {
+if (!verbose_) {
return *this;
}
std::cout << prefix_ << " " << line_ << std::endl;


@@ -37,26 +37,16 @@ namespace fastdeploy {
class FASTDEPLOY_DECL FDLogger {
public:
-static bool disable_info;
-static bool disable_warning;
-// 0: INFO
-// 1: WARNING
-// 2: ERROR
-explicit FDLogger(int level = 0, const std::string& prefix = "[FastDeploy]");
-bool verbose() {
-if (disable_info && level_ == 0) {
-return false;
-}
-if (disable_warning && level_ == 1) {
-return false;
-}
-return true;
+FDLogger() {
+line_ = "";
+prefix_ = "[FastDeploy]";
+verbose_ = true;
}
+explicit FDLogger(bool verbose, const std::string& prefix = "[FastDeploy]");
template <typename T>
FDLogger& operator<<(const T& val) {
-if (!verbose()) {
+if (!verbose_) {
return *this;
}
std::stringstream ss;
@@ -66,7 +56,7 @@ class FASTDEPLOY_DECL FDLogger {
}
FDLogger& operator<<(std::ostream& (*os)(std::ostream&));
~FDLogger() {
if (!verbose() && line_ != "") {
if (!verbose_ && line_ != "") {
std::cout << line_ << std::endl;
}
}
@@ -74,7 +64,7 @@ class FASTDEPLOY_DECL FDLogger {
private:
std::string line_;
std::string prefix_;
-int level_ = 0;
+bool verbose_ = true;
};
FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
@@ -85,15 +75,15 @@ FASTDEPLOY_DECL bool ReadBinaryFromFile(const std::string& file,
#endif
#define FDERROR \
FDLogger(2, "[ERROR]") << __REL_FILE__ << "(" << __LINE__ \
FDLogger(true, "[ERROR]") << __REL_FILE__ << "(" << __LINE__ \
<< ")::" << __FUNCTION__ << "\t"
#define FDWARNING \
FDLogger(1, "[WARNING]") << __REL_FILE__ << "(" << __LINE__ \
FDLogger(true, "[WARNING]") << __REL_FILE__ << "(" << __LINE__ \
<< ")::" << __FUNCTION__ << "\t"
#define FDINFO \
FDLogger(0, "[INFO]") << __REL_FILE__ << "(" << __LINE__ \
FDLogger(true, "[INFO]") << __REL_FILE__ << "(" << __LINE__ \
<< ")::" << __FUNCTION__ << "\t"
#define FDASSERT(condition, format, ...) \

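To summarize the logger rework: FDLogger drops the level and disable_info/disable_warning machinery in favor of a single verbose flag, the FDERROR/FDWARNING/FDINFO macros now construct it with verbose=true plus a prefix, and a bare FDLogger() (as now used in TimeCounter::End and TIMERECORD_END) prints with the default "[FastDeploy]" prefix. A small hedged illustration, assuming FDLogger and the macros live in fastdeploy/utils/utils.h:

#include <iostream>
#include "fastdeploy/utils/utils.h"  // assumed location of FDLogger and the macros

namespace fastdeploy {
void LogExamples() {
  // Default construction: verbose_ = true, prefix "[FastDeploy]".
  FDLogger() << "plain line" << std::endl;
  // Explicit flag and prefix, which is what FDWARNING now boils down to.
  FDLogger(true, "[WARNING]") << "something looks off" << std::endl;
  // The convenience macros still prepend file/line/function information.
  FDINFO << "runtime ready" << std::endl;
}
}  // namespace fastdeploy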

@@ -26,7 +26,7 @@ PaddleClasModel::PaddleClasModel(const std::string& model_file,
const RuntimeOption& custom_option,
const Frontend& model_format) {
config_file_ = config_file;
-valid_cpu_backends = {Backend::ORT, Backend::OPENVINO, Backend::PDINFER};
+valid_cpu_backends = {Backend::ORT, Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
runtime_option = custom_option;
runtime_option.model_format = model_format;


@@ -24,7 +24,7 @@ PicoDet::PicoDet(const std::string& model_file, const std::string& params_file,
const RuntimeOption& custom_option,
const Frontend& model_format) {
config_file_ = config_file;
-valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
+valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
runtime_option = custom_option;
runtime_option.model_format = model_format;


@@ -24,6 +24,7 @@ PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file,
}
void PPYOLOE::GetNmsInfo() {
+#ifdef ENABLE_PADDLE_FRONTEND
if (runtime_option.model_format == Frontend::PADDLE) {
std::string contents;
if (!ReadBinaryFromFile(runtime_option.model_file, &contents)) {
@@ -41,6 +42,7 @@ void PPYOLOE::GetNmsInfo() {
normalized = reader.nms_params.normalized;
}
}
+#endif
}
bool PPYOLOE::Initialize() {


@@ -56,7 +56,7 @@ class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel {
float score_threshold = 0.01;
int64_t nms_top_k = 10000;
bool normalized = true;
-bool has_nms_ = false;
+bool has_nms_ = true;
// This function will used to check if this model contains multiclass_nms
// and get parameters from the operator


@@ -26,12 +26,11 @@ Classifier::Classifier(const std::string& model_file,
const RuntimeOption& custom_option,
const Frontend& model_format) {
if (model_format == Frontend::ONNX) {
-valid_cpu_backends = {Backend::ORT,
-Backend::OPENVINO}; // specify the usable CPU backends
+valid_cpu_backends = {Backend::ORT, Backend::OPENVINO}; // specify the usable CPU backends
valid_gpu_backends = {Backend::ORT, Backend::TRT}; // specify the usable GPU backends
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
-valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+valid_gpu_backends = {Backend::PDINFER, Backend::TRT, Backend::ORT};
}
runtime_option = custom_option;
runtime_option.model_format = model_format;


@@ -44,12 +44,12 @@ Recognizer::Recognizer(const std::string& model_file,
const RuntimeOption& custom_option,
const Frontend& model_format) {
if (model_format == Frontend::ONNX) {
-valid_cpu_backends = {Backend::ORT,
-Backend::OPENVINO}; // specify the usable CPU backends
+valid_cpu_backends = {Backend::ORT, Backend::OPENVINO}; // specify the usable CPU backends
valid_gpu_backends = {Backend::ORT, Backend::TRT}; // specify the usable GPU backends
} else {
-valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
-valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+// NOTE: this model does not support Paddle Inference GPU inference yet
+valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::OPENVINO};
+valid_gpu_backends = {Backend::ORT, Backend::TRT};
}
runtime_option = custom_option;


@@ -17,7 +17,7 @@ import os
import sys
from .c_lib_wrap import (Frontend, Backend, FDDataType, TensorInfo, Device,
-FDLogger, is_built_with_gpu, is_built_with_ort,
+is_built_with_gpu, is_built_with_ort,
is_built_with_paddle, is_built_with_trt,
get_default_cuda_directory)
from .runtime import Runtime, RuntimeOption


@@ -79,6 +79,9 @@ class RuntimeOption:
def use_openvino_backend(self):
return self._option.use_openvino_backend()
+def use_lite_backend(self):
+return self._option.use_lite_backend()
def enable_paddle_mkldnn(self):
return self._option.enable_paddle_mkldnn()