Merge branch 'develop' into matting

This commit is contained in:
huangjianhui
2023-02-15 11:18:36 +08:00
committed by GitHub
8 changed files with 79 additions and 60 deletions

View File

@@ -34,4 +34,19 @@ typedef enum _rknpu2_core_mask {
RKNN_NPU_CORE_UNDEFINED,
} CoreMask;
} // namespace rknpu2
// Options for configuring the RKNPU2 backend: the target Rockchip SoC and
// which NPU core(s) the model runs on.
struct RKNPU2BackendOption {
// Target Rockchip SoC; defaults to RK3588.
rknpu2::CpuName cpu_name = rknpu2::CpuName::RK3588;
// The specification of NPU core setting. It has the following choices:
// RKNN_NPU_CORE_AUTO : Referring to automatic mode, meaning that it will
// select the idle core inside the NPU.
// RKNN_NPU_CORE_0 : Running on the NPU0 core.
// RKNN_NPU_CORE_1 : Running on the NPU1 core.
// RKNN_NPU_CORE_2 : Running on the NPU2 core.
// RKNN_NPU_CORE_0_1 : Running on both NPU0 and NPU1 cores simultaneously.
// RKNN_NPU_CORE_0_1_2 : Running on NPU0, NPU1 and NPU2 cores simultaneously.
// NOTE(review): multi-core masks presumably apply only to SoCs with several
// NPU cores (e.g. RK3588) — confirm against the RKNN runtime docs.
rknpu2::CoreMask core_mask = rknpu2::CoreMask::RKNN_NPU_CORE_AUTO;
};
} // namespace fastdeploy

View File

@@ -76,17 +76,24 @@ void RKNPU2Backend::BuildOption(const RKNPU2BackendOption& option) {
}
/***************************************************************
* @name InitFromRKNN
* @name Init
* @brief Initialize RKNN model
* @param model_file: Binary data for the RKNN model or the path of RKNN
*model. params_file: None option: config
* @return bool
* @note None
***************************************************************/
bool RKNPU2Backend::InitFromRKNN(const std::string& model_file,
const RKNPU2BackendOption& option) {
bool RKNPU2Backend::Init(const RuntimeOption& runtime_option) {
if (!(Supported(runtime_option.model_format, Backend::RKNPU2) && Supported(runtime_option.device, Backend::RKNPU2))) {
return false;
}
if (runtime_option.model_from_memory_) {
FDERROR << "RKNPU2 backend doesn't support load model from memory, please load model from disk." << std::endl;
return false;
}
// LoadModel
if (!this->LoadModel((char*)model_file.data())) {
if (!this->LoadModel((char*)runtime_option.model_file.data())) {
FDERROR << "load model failed" << std::endl;
return false;
}
@@ -98,7 +105,7 @@ bool RKNPU2Backend::InitFromRKNN(const std::string& model_file,
}
// BuildOption
this->BuildOption(option);
this->BuildOption(runtime_option.rknpu2_option);
// SetCoreMask if RK3588
if (this->option_.cpu_name == rknpu2::CpuName::RK3588) {
@@ -124,7 +131,7 @@ bool RKNPU2Backend::InitFromRKNN(const std::string& model_file,
* @return bool
* @note Only support RK3588
***************************************************************/
bool RKNPU2Backend::SetCoreMask(const rknpu2::CoreMask& core_mask) {
bool RKNPU2Backend::SetCoreMask(const rknpu2::CoreMask& core_mask) const {
int ret = rknn_set_core_mask(ctx, static_cast<rknn_core_mask>(core_mask));
if (ret != RKNN_SUCC) {
FDERROR << "rknn_set_core_mask fail! ret=" << ret << std::endl;

View File

@@ -24,40 +24,13 @@
#include <vector>
namespace fastdeploy {
// Options for configuring the RKNPU2 backend: the target Rockchip SoC and
// which NPU core(s) the model runs on.
struct RKNPU2BackendOption {
// Target Rockchip SoC; defaults to RK356X here.
rknpu2::CpuName cpu_name = rknpu2::CpuName::RK356X;
// The specification of NPU core setting. It has the following choices:
// RKNN_NPU_CORE_AUTO : Referring to automatic mode, meaning that it will
// select the idle core inside the NPU.
// RKNN_NPU_CORE_0 : Running on the NPU0 core.
// RKNN_NPU_CORE_1 : Running on the NPU1 core.
// RKNN_NPU_CORE_2 : Running on the NPU2 core.
// RKNN_NPU_CORE_0_1 : Running on both NPU0 and NPU1 cores simultaneously.
// RKNN_NPU_CORE_0_1_2 : Running on NPU0, NPU1 and NPU2 cores simultaneously.
rknpu2::CoreMask core_mask = rknpu2::CoreMask::RKNN_NPU_CORE_AUTO;
};
class RKNPU2Backend : public BaseBackend {
public:
RKNPU2Backend() = default;
virtual ~RKNPU2Backend();
// RKNN API
bool LoadModel(void* model);
bool GetSDKAndDeviceVersion();
bool SetCoreMask(const rknpu2::CoreMask& core_mask);
bool GetModelInputOutputInfos();
// BaseBackend API
void BuildOption(const RKNPU2BackendOption& option);
bool InitFromRKNN(const std::string& model_file,
const RKNPU2BackendOption& option = RKNPU2BackendOption());
bool Init(const RuntimeOption& runtime_option);
int NumInputs() const override {
return static_cast<int>(inputs_desc_.size());
@@ -75,6 +48,18 @@ class RKNPU2Backend : public BaseBackend {
bool copy_to_fd = true) override;
private:
// BaseBackend API
void BuildOption(const RKNPU2BackendOption& option);
// RKNN API
bool LoadModel(void* model);
bool GetSDKAndDeviceVersion();
bool SetCoreMask(const rknpu2::CoreMask& core_mask) const;
bool GetModelInputOutputInfos();
// The object of rknn context.
rknn_context ctx{};
// The structure rknn_sdk_version is used to indicate the version

View File

@@ -96,6 +96,37 @@ static std::map<Device, std::vector<Backend>>
{Device::SOPHGOTPUD, {Backend::SOPHGOTPU}}
};
// Returns true when `backend` is registered as a valid backend for the given
// model `format` in s_default_backends_by_format; logs an error and returns
// false otherwise (unknown format, or backend not in the format's list).
inline bool Supported(ModelFormat format, Backend backend) {
  const auto entry = s_default_backends_by_format.find(format);
  if (entry == s_default_backends_by_format.end()) {
    FDERROR << "Didn't find format is registered in s_default_backends_by_format." << std::endl;
    return false;
  }
  // Linear scan is fine: the per-format backend lists are tiny.
  for (const auto& candidate : entry->second) {
    if (candidate == backend) {
      return true;
    }
  }
  FDERROR << backend << " only supports " << Str(entry->second) << ", but now it's " << format << "." << std::endl;
  return false;
}
// Returns true when `backend` is registered as a valid backend for the given
// `device` in s_default_backends_by_device; logs an error and returns false
// otherwise (unknown device, or backend not in the device's list).
inline bool Supported(Device device, Backend backend) {
  const auto entry = s_default_backends_by_device.find(device);
  if (entry == s_default_backends_by_device.end()) {
    FDERROR << "Didn't find device is registered in s_default_backends_by_device." << std::endl;
    return false;
  }
  // Linear scan is fine: the per-device backend lists are tiny.
  for (const auto& candidate : entry->second) {
    if (candidate == backend) {
      return true;
    }
  }
  FDERROR << backend << " only supports " << Str(entry->second) << ", but now it's " << device << "." << std::endl;
  return false;
}
FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& o, const Backend& b);
FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& o, const Device& d);

View File

@@ -169,6 +169,7 @@ bool Runtime::Init(const RuntimeOption& _option) {
<< std::endl;
return false;
}
backend_->benchmark_option_ = option.benchmark_option;
return true;
}
@@ -282,7 +283,6 @@ void Runtime::CreatePaddleBackend() {
option.paddle_infer_option.trt_option.gpu_id = option.device_id;
backend_ = utils::make_unique<PaddleBackend>();
auto casted_backend = dynamic_cast<PaddleBackend*>(backend_.get());
casted_backend->benchmark_option_ = option.benchmark_option;
if (option.model_from_memory_) {
FDASSERT(
@@ -313,7 +313,6 @@ void Runtime::CreatePaddleBackend() {
void Runtime::CreateOpenVINOBackend() {
#ifdef ENABLE_OPENVINO_BACKEND
backend_ = utils::make_unique<OpenVINOBackend>();
backend_->benchmark_option_ = option.benchmark_option;
FDASSERT(backend_->Init(option), "Failed to initialize OpenVINOBackend.");
#else
FDASSERT(false,
@@ -327,7 +326,6 @@ void Runtime::CreateOpenVINOBackend() {
void Runtime::CreateOrtBackend() {
#ifdef ENABLE_ORT_BACKEND
backend_ = utils::make_unique<OrtBackend>();
backend_->benchmark_option_ = option.benchmark_option;
FDASSERT(backend_->Init(option), "Failed to initialize Backend::ORT.");
#else
@@ -348,7 +346,6 @@ void Runtime::CreateTrtBackend() {
option.trt_option.enable_pinned_memory = option.enable_pinned_memory;
option.trt_option.external_stream_ = option.external_stream_;
backend_ = utils::make_unique<TrtBackend>();
backend_->benchmark_option_ = option.benchmark_option;
FDASSERT(backend_->Init(option), "Failed to initialize TensorRT backend.");
#else
FDASSERT(false,
@@ -362,7 +359,6 @@ void Runtime::CreateTrtBackend() {
void Runtime::CreateLiteBackend() {
#ifdef ENABLE_LITE_BACKEND
backend_ = utils::make_unique<LiteBackend>();
backend_->benchmark_option_ = option.benchmark_option;
FDASSERT(backend_->Init(option),
"Load model from nb file failed while initializing LiteBackend.");
@@ -376,20 +372,9 @@ void Runtime::CreateLiteBackend() {
}
void Runtime::CreateRKNPU2Backend() {
FDASSERT(option.model_from_memory_ == false,
"RKNPU2Backend don't support to load model from memory");
FDASSERT(option.device == Device::RKNPU,
"Backend::RKNPU2 only supports Device::RKNPU2");
FDASSERT(option.model_format == ModelFormat::RKNN,
"RKNPU2Backend only support model format of ModelFormat::RKNN");
#ifdef ENABLE_RKNPU2_BACKEND
auto rknpu2_option = RKNPU2BackendOption();
rknpu2_option.cpu_name = option.rknpu2_cpu_name_;
rknpu2_option.core_mask = option.rknpu2_core_mask_;
backend_ = utils::make_unique<RKNPU2Backend>();
auto casted_backend = dynamic_cast<RKNPU2Backend*>(backend_.get());
FDASSERT(casted_backend->InitFromRKNN(option.model_file, rknpu2_option),
"Load model from nb file failed while initializing LiteBackend.");
FDASSERT(backend_->Init(option), "Failed to initialize RKNPU2 backend.");
#else
FDASSERT(false,
"RKNPU2Backend is not available, please compiled with "

View File

@@ -60,8 +60,8 @@ void RuntimeOption::UseCpu() { device = Device::CPU; }
// Select the Rockchip NPU (RKNPU2 backend) as the inference device, recording
// the target SoC and NPU core mask.
// NOTE(review): this span is a rendered diff without +/- markers — it shows
// both the removed assignments to the deprecated members
// (rknpu2_cpu_name_/rknpu2_core_mask_) and the added assignments to
// rknpu2_option; only the rknpu2_option pair should exist after this commit.
// Verify against the post-merge file.
void RuntimeOption::UseRKNPU2(fastdeploy::rknpu2::CpuName rknpu2_name,
fastdeploy::rknpu2::CoreMask rknpu2_core) {
rknpu2_cpu_name_ = rknpu2_name;
rknpu2_core_mask_ = rknpu2_core;
rknpu2_option.cpu_name = rknpu2_name;
rknpu2_option.core_mask = rknpu2_core;
device = Device::RKNPU;
}

View File

@@ -151,6 +151,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
OpenVINOBackendOption openvino_option;
/// Option to configure Paddle Lite backend
LiteBackendOption paddle_lite_option;
/// Option to configure RKNPU2 backend
RKNPU2BackendOption rknpu2_option;
/** \brief Set the profile mode as 'true'.
*
@@ -199,12 +201,6 @@ struct FASTDEPLOY_DECL RuntimeOption {
bool enable_pinned_memory = false;
// ======Only for RKNPU2 Backend=======
fastdeploy::rknpu2::CpuName rknpu2_cpu_name_ =
fastdeploy::rknpu2::CpuName::RK3588;
fastdeploy::rknpu2::CoreMask rknpu2_core_mask_ =
fastdeploy::rknpu2::CoreMask::RKNN_NPU_CORE_AUTO;
// *** The belowing api are deprecated, will be removed in v1.2.0
// *** Do not use it anymore
void SetPaddleMKLDNN(bool pd_mkldnn = true);

View File

@@ -207,7 +207,7 @@ template <typename T>
std::string Str(const std::vector<T>& shape) {
std::ostringstream oss;
oss << "[ " << shape[0];
for (int i = 1; i < shape.size(); ++i) {
for (size_t i = 1; i < shape.size(); ++i) {
oss << " ," << shape[i];
}
oss << " ]";