FastDeploy/examples/vision/segmentation/paddleseg/rknpu2/cpp/infer.cc
huangjianhui 6c31198342 [Other] Update deprecated segmentation apis && segmentation label map resize interpolation (#790)
* Refactor PaddleSeg with preprocessor && postprocessor

* Fix bugs

* Delete redundant code

* Modify by comments

* Refactor according to comments

* Add batch evaluation

* Add single test script

* Add ppliteseg single test script && fix eval(raise) error

* fix bug

* Fix evaluation segmentation.py batch predict

* Fix segmentation evaluation bug

* Fix evaluation segmentation bugs

* Update segmentation result docs

* Update old predict api and DisableNormalizeAndPermute

* Update resize segmentation label map with cv::INTER_NEAREST (see the sketch below)

Co-authored-by: Jason <jiangjiajun@baidu.com>
2022-12-05 09:57:54 +08:00
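As background for the cv::INTER_NEAREST change above: a segmentation label map stores a discrete class id per pixel, so it has to be resized with nearest-neighbor interpolation; bilinear interpolation would blend neighbouring ids into values that belong to no class. The following standalone OpenCV sketch is not part of the FastDeploy sources; the 4x4 label map and the target size are made up for illustration.

#include <iostream>
#include <opencv2/opencv.hpp>

int main() {
  // Hypothetical 4x4 label map with two classes (0 = background, 1 = person).
  cv::Mat label_map = (cv::Mat_<uchar>(4, 4) << 0, 0, 1, 1,
                                                0, 0, 1, 1,
                                                0, 1, 1, 1,
                                                1, 1, 1, 1);

  // Upscale the label map back to the input-image resolution.
  // INTER_NEAREST keeps every output pixel equal to one of the existing
  // class ids; INTER_LINEAR would create blended, meaningless values.
  cv::Mat resized;
  cv::resize(label_map, resized, cv::Size(8, 8), 0, 0, cv::INTER_NEAREST);

  std::cout << "resized label map:\n" << resized << std::endl;
  return 0;
}

This mirrors the interpolation mode the commit above adopts for resizing PaddleSeg label maps.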


// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cstdio>
#include <ctime>
#include <iostream>
#include <string>

#include "fastdeploy/vision.h"

void InferHumanPPHumansegv2Lite(const std::string& device = "cpu");

int main() {
  InferHumanPPHumansegv2Lite("npu");
  return 0;
}

fastdeploy::RuntimeOption GetOption(const std::string& device) {
  auto option = fastdeploy::RuntimeOption();
  if (device == "npu") {
    option.UseRKNPU2();
  } else {
    option.UseCpu();
  }
  return option;
}

fastdeploy::ModelFormat GetFormat(const std::string& device) {
  auto format = fastdeploy::ModelFormat::ONNX;
  if (device == "npu") {
    format = fastdeploy::ModelFormat::RKNN;
  } else {
    format = fastdeploy::ModelFormat::ONNX;
  }
  return format;
}

std::string GetModelPath(std::string& model_path, const std::string& device) {
  if (device == "npu") {
    model_path += "rknn";
  } else {
    model_path += "onnx";
  }
  return model_path;
}

void InferHumanPPHumansegv2Lite(const std::string& device) {
  std::string model_file =
      "./model/Portrait_PP_HumanSegV2_Lite_256x144_infer/"
      "Portrait_PP_HumanSegV2_Lite_256x144_infer_rk3588.";
  std::string params_file;
  std::string config_file =
      "./model/Portrait_PP_HumanSegV2_Lite_256x144_infer/deploy.yaml";

  fastdeploy::RuntimeOption option = GetOption(device);
  fastdeploy::ModelFormat format = GetFormat(device);
  model_file = GetModelPath(model_file, device);
  auto model = fastdeploy::vision::segmentation::PaddleSegModel(
      model_file, params_file, config_file, option, format);

  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto image_file = "./images/portrait_heng.jpg";
  auto im = cv::imread(image_file);

  if (device == "npu") {
    // The converted RKNN model typically folds normalize/permute into the
    // graph itself, so skip them in the host-side preprocessor.
    model.GetPreprocessor().DisableNormalizeAndPermute();
  }

  fastdeploy::vision::SegmentationResult res;
  clock_t start = clock();
  if (!model.Predict(im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }
  clock_t end = clock();
  auto dur = static_cast<double>(end - start);
  printf("infer_human_pp_humansegv2_lite_npu use time:%f\n",
         dur / CLOCKS_PER_SEC);

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisSegmentation(im, res);
  cv::imwrite("human_pp_humansegv2_lite_npu_result.jpg", vis_im);
  std::cout
      << "Visualized result saved in ./human_pp_humansegv2_lite_npu_result.jpg"
      << std::endl;
}