FastDeploy/examples/vision/segmentation/paddleseg/rknpu2/cpp/infer.cc
Zheng_Bicheng ce828ecb38 [Backend And DOC] Improve the ppseg docs + add multi-input model support to the RKNPU2 backend (#491)
* 11-02/14:35
* Add an error check for the input data format
* Optimize the inference flow to reduce the number of memory allocations
* Support multi-input RKNN models
* When an RKNN model's output shape is 3-D, the output is force-aligned to 4-D. The padded dimension added by RKNN is now stripped, so models whose post-processing inspects the output shape can post-process correctly.

* 11-03/17:25
* Support exporting multi-input RKNN models
* Update the various docs
* ppseg now converts from the model provided in FastDeploy

* 11-03/17:25
* Add the open-source license header

* 11-03/21:48
* Remove unused debug code and add comments
2022-11-04 09:39:23 +08:00

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ctime>
#include <iostream>
#include <string>

#include "fastdeploy/vision.h"

void InferHumanPPHumansegv2Lite(const std::string& device = "cpu");

int main() {
  InferHumanPPHumansegv2Lite("npu");
  return 0;
}
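
// Pick the runtime backend for the requested device: the RKNPU2 backend for
// the Rockchip NPU, otherwise plain CPU inference.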
fastdeploy::RuntimeOption GetOption(const std::string& device) {
  auto option = fastdeploy::RuntimeOption();
  if (device == "npu") {
    option.UseRKNPU2();
  } else {
    option.UseCpu();
  }
  return option;
}
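
// The NPU consumes a converted RKNN model, while every other device keeps the
// original ONNX export.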
fastdeploy::ModelFormat GetFormat(const std::string& device) {
  auto format = fastdeploy::ModelFormat::ONNX;
  if (device == "npu") {
    format = fastdeploy::ModelFormat::RKNN;
  } else {
    format = fastdeploy::ModelFormat::ONNX;
  }
  return format;
}
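
// Append the matching file suffix ("rknn" or "onnx") to the model path prefix
// built in InferHumanPPHumansegv2Lite().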
std::string GetModelPath(std::string& model_path, const std::string& device) {
  if (device == "npu") {
    model_path += "rknn";
  } else {
    model_path += "onnx";
  }
  return model_path;
}
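
// Run PP-HumanSegV2-Lite portrait segmentation end to end: build the model for
// the chosen device, predict on a single image, time the call, and save a
// visualized result.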
void InferHumanPPHumansegv2Lite(const std::string& device) {
  std::string model_file =
      "./model/Portrait_PP_HumanSegV2_Lite_256x144_infer/"
      "Portrait_PP_HumanSegV2_Lite_256x144_infer_rk3588.";
  std::string params_file;
  std::string config_file =
      "./model/Portrait_PP_HumanSegV2_Lite_256x144_infer/deploy.yaml";
  fastdeploy::RuntimeOption option = GetOption(device);
  fastdeploy::ModelFormat format = GetFormat(device);
  model_file = GetModelPath(model_file, device);
  auto model = fastdeploy::vision::segmentation::PaddleSegModel(
      model_file, params_file, config_file, option, format);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }
  auto image_file = "./images/portrait_heng.jpg";
  auto im = cv::imread(image_file);
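  // The RKNN conversion typically folds normalization and the HWC->CHW permute
  // into the model itself, so host-side preprocessing skips them on the NPU.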
  if (device == "npu") {
    model.DisableNormalizeAndPermute();
  }
  fastdeploy::vision::SegmentationResult res;
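  // Time a single Predict() call with the C clock; dividing by CLOCKS_PER_SEC
  // converts clock ticks to seconds.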
  clock_t start = clock();
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }
  clock_t end = clock();
  auto dur = (double)(end - start);
  printf("infer_human_pp_humansegv2_lite_npu use time:%f\n",
         (dur / CLOCKS_PER_SEC));
  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisSegmentation(im, res);
  cv::imwrite("human_pp_humansegv2_lite_npu_result.jpg", vis_im);
  std::cout
      << "Visualized result saved in ./human_pp_humansegv2_lite_npu_result.jpg"
      << std::endl;
}