[RKNPU2] Add Quantized PPHumanSeg (#905)

* Update rknpu2 backend core code

* Update model export core code

* Remove unused config files

* Add config files and update docs

* Model conversion and docs

* Update docs

* Update config files

* Update PPHumanSeg full quantization

* Update docs

* Update docs

* Update docs
Author: Zheng_Bicheng
Date: 2022-12-19 20:07:32 +08:00
Committed via GitHub
parent 4ac0e33b71
commit 218f33f8b1
8 changed files with 129 additions and 108 deletions


@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 #include "fastdeploy/backends/rknpu/rknpu2/rknpu2_backend.h"
 #include "fastdeploy/utils/perf.h"
 namespace fastdeploy {
 RKNPU2Backend::~RKNPU2Backend() {
   // Release memory uniformly here
@@ -178,9 +178,14 @@ bool RKNPU2Backend::GetModelInputOutputInfos() {
   // get input info and copy to input tensor info
   for (uint32_t i = 0; i < io_num.n_input; i++) {
     input_attrs_[i].index = i;
     // query info
-    ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs_[i]),
+    ret = rknn_query(ctx,
+                     RKNN_QUERY_INPUT_ATTR,
+                     &(input_attrs_[i]),
                      sizeof(rknn_tensor_attr));
+    DumpTensorAttr(input_attrs_[i]);
     if (ret != RKNN_SUCC) {
       printf("rknn_query error! ret=%d\n", ret);
       return false;
@@ -214,8 +219,12 @@ bool RKNPU2Backend::GetModelInputOutputInfos() {
   for (uint32_t i = 0; i < io_num.n_output; i++) {
     output_attrs_[i].index = i;
     // query info
-    ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs_[i]),
+    ret = rknn_query(ctx,
+                     RKNN_QUERY_OUTPUT_ATTR,
+                     &(output_attrs_[i]),
                      sizeof(rknn_tensor_attr));
+    DumpTensorAttr(output_attrs_[i]);
     if (ret != RKNN_SUCC) {
       FDERROR << "rknn_query fail! ret = " << ret << std::endl;
       return false;
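
The split rknn_query calls above follow the standard rknpu2 attribute-query pattern. For reference, a minimal self-contained sketch of that pattern (the helper name and error strings are illustrative, not part of this commit):

// Sketch: query input tensor attributes from an initialized rknn_context.
// Assumes `ctx` was created by rknn_init on a loaded .rknn model.
#include "rknn_api.h"
#include <cstdio>
#include <vector>

bool QueryInputAttrs(rknn_context ctx, std::vector<rknn_tensor_attr>* attrs) {
  rknn_input_output_num io_num;
  int ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
  if (ret != RKNN_SUCC) {
    printf("rknn_query RKNN_QUERY_IN_OUT_NUM error! ret=%d\n", ret);
    return false;
  }
  attrs->resize(io_num.n_input);  // value-initialization zeroes the structs
  for (uint32_t i = 0; i < io_num.n_input; i++) {
    (*attrs)[i].index = i;  // set only the index; rknn_query fills the rest
    ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(*attrs)[i],
                     sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC) {
      printf("rknn_query RKNN_QUERY_INPUT_ATTR error! ret=%d\n", ret);
      return false;
    }
  }
  return true;
}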
@@ -254,11 +263,12 @@ bool RKNPU2Backend::GetModelInputOutputInfos() {
 void RKNPU2Backend::DumpTensorAttr(rknn_tensor_attr& attr) {
   printf("index=%d, name=%s, n_dims=%d, dims=[%d, %d, %d, %d], "
          "n_elems=%d, size=%d, fmt=%s, type=%s, "
-         "qnt_type=%s, zp=%d, scale=%f\n",
+         "qnt_type=%s, zp=%d, scale=%f, pass_through=%d\n",
          attr.index, attr.name, attr.n_dims, attr.dims[0], attr.dims[1],
          attr.dims[2], attr.dims[3], attr.n_elems, attr.size,
          get_format_string(attr.fmt), get_type_string(attr.type),
-         get_qnt_type_string(attr.qnt_type), attr.zp, attr.scale);
+         get_qnt_type_string(attr.qnt_type), attr.zp, attr.scale,
+         attr.pass_through);
 }

 TensorInfo RKNPU2Backend::GetInputInfo(int index) {
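
With the extra pass_through field, a dumped attribute line for a quantized NHWC input looks roughly like this (values illustrative):

index=0, name=x, n_dims=4, dims=[1, 192, 192, 3], n_elems=110592, size=110592, fmt=NHWC, type=INT8, qnt_type=AFFINE, zp=-14, scale=0.020020, pass_through=0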
@@ -309,7 +319,12 @@ bool RKNPU2Backend::Infer(std::vector<FDTensor>& inputs,
       input_attrs_[i].type = input_type;
       input_attrs_[i].size = inputs[0].Nbytes();
       input_attrs_[i].size_with_stride = inputs[0].Nbytes();
       input_attrs_[i].pass_through = 0;
+      if (input_attrs_[i].type == RKNN_TENSOR_FLOAT16 ||
+          input_attrs_[i].type == RKNN_TENSOR_FLOAT32) {
+        FDINFO << "The input model is not a quantized model. "
+                  "Disabling the normalize operation." << std::endl;
+      }
       input_mems_[i] = rknn_create_mem(ctx, inputs[i].Nbytes());
       if (input_mems_[i] == nullptr) {
         FDERROR << "rknn_create_mem input_mems_ error." << std::endl;
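
The new branch keys off the queried input type: a fully quantized model reports an integer input tensor type, while FLOAT16/FLOAT32 means the exported model still takes float data and, per the log message, host-side normalization is skipped. A minimal sketch of that check (the helper name is illustrative):

// Illustrative helper (not part of this commit): decide from a queried
// input attribute whether the loaded .rknn model is fully quantized.
bool IsQuantizedInput(const rknn_tensor_attr& attr) {
  // A float input type means the model was not exported fully quantized.
  return attr.type != RKNN_TENSOR_FLOAT16 && attr.type != RKNN_TENSOR_FLOAT32;
}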
@@ -340,6 +355,7 @@ bool RKNPU2Backend::Infer(std::vector<FDTensor>& inputs,
       // the default output type depends on the model; float32 is required to compute top5
       ret = rknn_set_io_mem(ctx, output_mems_[i], &output_attrs_[i]);
+      // set output memory and attribute
       if (ret != RKNN_SUCC) {
         FDERROR << "output tensor memory rknn_set_io_mem fail! ret=" << ret
@@ -350,7 +366,7 @@ bool RKNPU2Backend::Infer(std::vector<FDTensor>& inputs,
     this->infer_init = true;
   }
   // Copy input data to input tensor memory
   for (uint32_t i = 0; i < io_num.n_input; i++) {
     uint32_t width = input_attrs_[i].dims[2];
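
For context around these hunks, the Infer path uses the rknpu2 zero-copy API: buffers are created once with rknn_create_mem, bound to the queried attributes with rknn_set_io_mem on the first call (guarded by infer_init), and later calls only copy input bytes and invoke rknn_run. A condensed sketch of that flow (names illustrative, cleanup and error paths trimmed):

// Condensed sketch of the zero-copy input path; not the backend's exact code.
// Assumes `ctx` is initialized and `attr` was filled by rknn_query with
// RKNN_QUERY_INPUT_ATTR.
#include "rknn_api.h"
#include <cstdio>
#include <cstring>

bool BindAndRun(rknn_context ctx, rknn_tensor_attr* attr,
                const void* host_data, uint32_t nbytes) {
  rknn_tensor_mem* mem = rknn_create_mem(ctx, nbytes);  // NPU-visible buffer
  if (mem == nullptr) {
    printf("rknn_create_mem error.\n");
    return false;
  }
  attr->pass_through = 0;  // runtime converts type/layout to the model's needs
  int ret = rknn_set_io_mem(ctx, mem, attr);  // bind the buffer to this input
  if (ret != RKNN_SUCC) {
    printf("rknn_set_io_mem error! ret=%d\n", ret);
    return false;
  }
  std::memcpy(mem->virt_addr, host_data, nbytes);  // host -> bound NPU buffer
  ret = rknn_run(ctx, nullptr);  // run inference once
  return ret == RKNN_SUCC;
}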