mirror of
https://github.com/PaddlePaddle/FastDeploy.git
@@ -15,10 +15,10 @@
 
 namespace fastdeploy {
 RKNPU2Backend::~RKNPU2Backend() {
-  if(input_attrs != nullptr){
+  if (input_attrs != nullptr) {
     free(input_attrs);
   }
-  if(output_attrs != nullptr){
+  if (output_attrs != nullptr) {
     free(output_attrs);
   }
 }
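The destructor change above is a pure style fix; the null checks themselves matter because input_attrs and output_attrs are raw malloc'd buffers that may never be allocated if model loading fails early. For comparison, a minimal sketch of the same ownership handled with std::vector, which needs no explicit free at all (the holder type and field names are illustrative, not FastDeploy's actual members):

    #include <vector>

    #include "rknn_api.h"

    // Illustrative holder: letting std::vector own the attribute storage removes
    // the manual free() path entirely, so a destructor like the one above would
    // need no null checks. Not FastDeploy's actual members.
    struct TensorAttrHolder {
      std::vector<rknn_tensor_attr> input_attrs;   // sized once io_num is known
      std::vector<rknn_tensor_attr> output_attrs;  // sized once io_num is known
    };  // implicit destructor releases both buffers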
@@ -150,7 +150,8 @@ bool RKNPU2Backend::GetModelInputOutputInfos() {
   }
 
   // Get detailed input parameters
-  input_attrs = (rknn_tensor_attr*)malloc(sizeof(rknn_tensor_attr) * io_num.n_input);
+  input_attrs =
+      (rknn_tensor_attr*)malloc(sizeof(rknn_tensor_attr) * io_num.n_input);
   memset(input_attrs, 0, io_num.n_input * sizeof(rknn_tensor_attr));
   inputs_desc_.resize(io_num.n_input);
   for (uint32_t i = 0; i < io_num.n_input; i++) {
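For context on how a malloc'd attribute array like this gets filled: the RKNN runtime describes each input through rknn_query with the RKNN_QUERY_INPUT_ATTR command. A minimal sketch of that standard pattern, assuming the context was already created with rknn_init (this is generic RKNN API usage, not a copy of the code in this diff):

    #include <cstdint>

    #include "rknn_api.h"

    // Sketch: fill one rknn_tensor_attr per model input via rknn_query.
    // Assumes ctx was created by rknn_init and attrs points to at least
    // n_input zeroed elements (as the memset above prepares).
    static bool QueryInputAttrs(rknn_context ctx, uint32_t n_input,
                                rknn_tensor_attr* attrs) {
      for (uint32_t i = 0; i < n_input; i++) {
        attrs[i].index = i;  // tell the runtime which input to describe
        int ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &attrs[i],
                             sizeof(rknn_tensor_attr));
        if (ret != RKNN_SUCC) {
          return false;  // attrs[i] is not valid on failure
        }
      }
      return true;
    }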
@@ -348,7 +349,8 @@ bool RKNPU2Backend::Infer(std::vector<FDTensor>& inputs,
     }
     (*outputs)[i].Resize(temp_shape, outputs_desc_[i].dtype,
                          outputs_desc_[i].name);
-    memcpy((*outputs)[i].MutableData(), (float*)output_mems[i]->virt_addr, (*outputs)[i].Nbytes());
+    memcpy((*outputs)[i].MutableData(), (float*)output_mems[i]->virt_addr,
+           (*outputs)[i].Nbytes());
     rknn_destroy_mem(ctx, output_mems[i]);
   }
 
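The rewrapped memcpy above copies a zero-copy output buffer (rknn_tensor_mem) into the FDTensor before rknn_destroy_mem releases it, with the copy size taken from the tensor's Nbytes(). A reduced sketch of that drain-then-destroy step outside FastDeploy's types, assuming a float32 output of known element count:

    #include <cstddef>
    #include <cstring>
    #include <vector>

    #include "rknn_api.h"

    // Sketch: copy one zero-copy output buffer into caller-owned storage and
    // release it. Assumes a float32 output with num_elems elements; the real
    // backend sizes the copy with FDTensor::Nbytes() instead.
    static std::vector<float> DrainOutput(rknn_context ctx, rknn_tensor_mem* mem,
                                          std::size_t num_elems) {
      std::vector<float> host(num_elems);
      std::memcpy(host.data(), mem->virt_addr, num_elems * sizeof(float));
      rknn_destroy_mem(ctx, mem);  // the buffer must not be read after this
      return host;
    }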
@@ -16,8 +16,8 @@
 #include "fastdeploy/backends/backend.h"
 #include "fastdeploy/core/fd_tensor.h"
 #include "rknn_api.h"  // NOLINT
-#include "rknpu2_config.h"
-#include <cstring>  // for memset
+#include "fastdeploy/backends/rknpu/rknpu2/rknpu2_config.h"
+#include <cstring>
 #include <iostream>
 #include <memory>
 #include <string>
@@ -27,7 +27,7 @@ namespace fastdeploy {
 struct RKNPU2BackendOption {
   rknpu2::CpuName cpu_name = rknpu2::CpuName::RK3588;
 
-  //The specification of NPU core setting.It has the following choices :
+  // The specification of NPU core setting.It has the following choices :
   // RKNN_NPU_CORE_AUTO : Referring to automatic mode, meaning that it will
   // select the idle core inside the NPU.
   // RKNN_NPU_CORE_0 : Running on the NPU0 core
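The core-mask option documented above is ultimately applied through the RKNN runtime, which exposes rknn_set_core_mask for multi-core chips such as the RK3588. A small sketch of that call, assuming an already-initialized context (how FastDeploy wires the option through is not shown in this diff):

    #include "rknn_api.h"

    // Sketch: pin an initialized context to NPU core 0. RKNN_NPU_CORE_AUTO would
    // instead let the driver pick an idle core; the call can fail on chips that
    // do not have multiple NPU cores.
    static bool PinToCore0(rknn_context ctx) {
      return rknn_set_core_mask(ctx, RKNN_NPU_CORE_0) == RKNN_SUCC;
    }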
@@ -77,9 +77,11 @@ class RKNPU2Backend : public BaseBackend {
  private:
   // The object of rknn context.
   rknn_context ctx{};
-  // The structure rknn_sdk_version is used to indicate the version information of the RKNN SDK.
+  // The structure rknn_sdk_version is used to indicate the version
+  // information of the RKNN SDK.
   rknn_sdk_version sdk_ver{};
-  // The structure rknn_input_output_num represents the number of input and output Tensor
+  // The structure rknn_input_output_num represents the number of
+  // input and output Tensor
   rknn_input_output_num io_num{};
   std::vector<TensorInfo> inputs_desc_;
   std::vector<TensorInfo> outputs_desc_;
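The sdk_ver and io_num members above are conventionally populated with two further rknn_query commands. A minimal sketch, assuming the context is already initialized (struct names match the header above, error handling reduced to booleans):

    #include "rknn_api.h"

    // Sketch: fetch the SDK/driver version strings and the input/output tensor
    // counts for an already-initialized context.
    static bool QueryBasics(rknn_context ctx, rknn_sdk_version* sdk_ver,
                            rknn_input_output_num* io_num) {
      if (rknn_query(ctx, RKNN_QUERY_SDK_VERSION, sdk_ver, sizeof(*sdk_ver)) !=
          RKNN_SUCC) {
        return false;
      }
      // sdk_ver->api_version and sdk_ver->drv_version are printable strings here.
      return rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, io_num, sizeof(*io_num)) ==
             RKNN_SUCC;
    }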
@@ -93,4 +95,4 @@ class RKNPU2Backend : public BaseBackend {
   static FDDataType RknnTensorTypeToFDDataType(rknn_tensor_type type);
   static rknn_tensor_type FDDataTypeToRknnTensorType(FDDataType type);
 };
-} // namespace fastdeploy
+}  // namespace fastdeploy
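The two static converters declared above map between rknn_tensor_type and FastDeploy's FDDataType. A sketch of one direction covering only the overlapping types; the FDDataType member names are assumptions based on FastDeploy's public enum, and the fallback branch is illustrative rather than the backend's real error handling:

    #include "fastdeploy/core/fd_tensor.h"
    #include "rknn_api.h"

    // Sketch of one conversion direction; the default case is an assumed
    // fallback, not the backend's actual behavior.
    static fastdeploy::FDDataType ToFDDataType(rknn_tensor_type type) {
      switch (type) {
        case RKNN_TENSOR_FLOAT32:
          return fastdeploy::FDDataType::FP32;
        case RKNN_TENSOR_INT8:
          return fastdeploy::FDDataType::INT8;
        case RKNN_TENSOR_UINT8:
          return fastdeploy::FDDataType::UINT8;
        case RKNN_TENSOR_INT32:
          return fastdeploy::FDDataType::INT32;
        case RKNN_TENSOR_INT64:
          return fastdeploy::FDDataType::INT64;
        default:
          return fastdeploy::FDDataType::FP32;  // assumed fallback
      }
    }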
@@ -11,9 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-
-#ifndef RKNPU2_CONFIG_H
-#define RKNPU2_CONFIG_H
+#pragma once
 
 namespace fastdeploy {
 namespace rknpu2 {
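Swapping the RKNPU2_CONFIG_H guard for #pragma once in the hunk above is behavior-preserving: both constructs stop the header from being included twice in a translation unit. For reference, the classic guard pattern being removed looks like this in general form (generic example, not FastDeploy code):

    // some_header.h -- generic include-guard pattern. The macro name must stay
    // unique across the project, which is the bookkeeping #pragma once avoids.
    #ifndef SOME_HEADER_H
    #define SOME_HEADER_H

    struct Example {
      int value;
    };

    #endif  // SOME_HEADER_H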
@@ -25,16 +23,15 @@ typedef enum _rknpu2_cpu_name {
 
 /*! RKNPU2 core mask for mobile device. */
 typedef enum _rknpu2_core_mask {
-  RKNN_NPU_CORE_AUTO = 0, ///< default, run on NPU core randomly.
-  RKNN_NPU_CORE_0 = 1, ///< run on NPU core 0.
-  RKNN_NPU_CORE_1 = 2, ///< run on NPU core 1.
-  RKNN_NPU_CORE_2 = 4, ///< run on NPU core 2.
+  RKNN_NPU_CORE_AUTO = 0, //< default, run on NPU core randomly.
+  RKNN_NPU_CORE_0 = 1, //< run on NPU core 0.
+  RKNN_NPU_CORE_1 = 2, //< run on NPU core 1.
+  RKNN_NPU_CORE_2 = 4, //< run on NPU core 2.
   RKNN_NPU_CORE_0_1 =
-      RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, ///< run on NPU core 1 and core 2.
+      RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, //< run on NPU core 1 and core 2.
   RKNN_NPU_CORE_0_1_2 =
-      RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, ///< run on NPU core 1 and core 2.
+      RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, //< run on NPU core 1 and core 2.
   RKNN_NPU_CORE_UNDEFINED,
 } CoreMask;
-} // namespace RKNN
-} // namespace fastdeploy
-#endif //RKNPU2_CONFIG_H
+}  // namespace rknpu2
+}  // namespace fastdeploy
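The CoreMask values are one-hot bits, so the combined entries are plain bitwise ORs: core 0 plus core 1 is 1 | 2 = 3, and all three cores is 3 | 4 = 7 (the "core 1 and core 2" wording in those comments is carried over unchanged from the source file). A standalone mirror of that arithmetic, using illustrative names rather than the real enum:

    // Standalone mirror of the mask arithmetic; the real constants live in
    // fastdeploy::rknpu2::CoreMask.
    enum DemoCoreMask {
      kCore0 = 1,                            // 0b001
      kCore1 = 2,                            // 0b010
      kCore2 = 4,                            // 0b100
      kCore0And1 = kCore0 | kCore1,          // 0b011 == 3
      kCore0And1And2 = kCore0And1 | kCore2,  // 0b111 == 7
    };

    static_assert(kCore0And1 == 3, "core 0 combined with core 1");
    static_assert(kCore0And1And2 == 7, "all three cores");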