[Docs] Pick seg fastdeploy docs from PaddleSeg (#1482)
* [Docs] Pick seg fastdeploy docs from PaddleSeg
* [Docs] update seg docs
* [Docs] Add c&csharp examples for seg
* [Doc] Update paddleseg README.md
* Update README.md
@@ -0,0 +1,11 @@
CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
PROJECT(infer_demo C CXX)

# Path of the downloaded FastDeploy SDK, passed in at configure time,
# e.g. cmake .. -DFASTDEPLOY_INSTALL_DIR=/path/to/fastdeploy-linux-x64-x.x.x
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")

include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

include_directories(${FASTDEPLOY_INCS})

add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
@@ -0,0 +1,69 @@
[English](README.md) | 简体中文

# PaddleSeg CPU-GPU C++ Deployment Example

This directory provides `infer.cc`, an example for quickly deploying PP-LiteSeg on CPU/GPU, as well as on GPU with Paddle-TensorRT acceleration.

## 1. Description

PaddleSeg supports fast deployment of segmentation models via FastDeploy on NVIDIA GPU, x86 CPU, Phytium CPU, ARM CPU, and Intel GPU (discrete or integrated graphics) hardware.

## 2. Environment Preparation

Before deployment, confirm your hardware and software environment and download the prebuilt deployment library. Refer to the [FastDeploy installation docs](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#FastDeploy预编译库安装) to install the prebuilt FastDeploy library.

## 3. Model Preparation

Before deployment, prepare the inference model you want to run. You can either use a [pre-exported inference model](../README.md) or [export a PaddleSeg deployment model yourself](../README.md). The demo expects the model directory to contain `model.pdmodel`, `model.pdiparams`, and `deploy.yaml` (these are the paths `infer.cc` builds). To deploy **PP-Matting**, **PP-HumanMatting**, or **ModNet**, refer to [Matting model deployment](../../../matting).
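As a minimal sketch (not part of this example; the directory name assumes the model downloaded in section 4 below), the following C++17 snippet checks up front that the three files `infer.cc` reads are present:

```cpp
// Minimal sketch: verify that an exported PaddleSeg model directory contains
// the files the demo builds paths to (model.pdmodel, model.pdiparams, deploy.yaml).
#include <filesystem>
#include <iostream>

int main() {
  namespace fs = std::filesystem;
  // Assumed directory name; matches the model archive downloaded in section 4.
  const fs::path model_dir = "PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer";
  for (const char* name : {"model.pdmodel", "model.pdiparams", "deploy.yaml"}) {
    const auto path = model_dir / name;
    std::cout << path.string()
              << (fs::exists(path) ? " - found" : " - missing") << std::endl;
  }
  return 0;
}
```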
## 4. Run the Deployment Example

Taking inference on Linux as an example, run the following commands in this directory to build and test. Support for this model requires FastDeploy 1.0.0 or newer (x.x.x >= 1.0.0).

```bash
# Download the prebuilt FastDeploy library; pick a suitable version from the
# prebuilt libraries mentioned above
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz

# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/segmentation/semantic_segmentation/cpp-gpu/cpp
# To get the example code from PaddleSeg instead, run
# git clone https://github.com/PaddlePaddle/PaddleSeg.git
# # Note: if the fastdeploy test code below is missing on the current branch, switch to the develop branch
# # git checkout develop
# cd PaddleSeg/deploy/fastdeploy/semantic_segmentation/cpp-gpu/cpp

# Build the deployment example
mkdir build && cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j

# Download the PP-LiteSeg model files and a test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz
tar -xvf PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png

# Run the deployment example
# CPU inference
./infer_demo PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer cityscapes_demo.png 0
# GPU inference
./infer_demo PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer cityscapes_demo.png 1
# Paddle-TensorRT inference on GPU
./infer_demo PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer cityscapes_demo.png 2
```
After running, the visualized result looks like this:

<div align="center">
<img src="https://user-images.githubusercontent.com/16222477/191712880-91ae128d-247a-43e0-b1e3-cafae78431e0.jpg" width="512" height="256" />
</div>
- Note: the commands above only work on Linux and macOS. For how to use the SDK on Windows, refer to [How to use the FastDeploy C++ SDK on Windows](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/use_sdk_on_windows.md).
- For how to use other inference backends and other hardware with FastDeploy, refer to [How to switch the model inference backend](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md). A minimal sketch follows this list.
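As a hedged illustration of that backend switch, the sketch below keeps the model construction from `infer.cc` but routes CPU inference through an alternative backend via `fastdeploy::RuntimeOption` (`UseOrtBackend()` and `UseOpenVINOBackend()` are RuntimeOption methods; the model file names are the ones this example uses):

```cpp
// Minimal sketch: select a different CPU inference backend with RuntimeOption.
#include "fastdeploy/vision.h"

int main() {
  auto option = fastdeploy::RuntimeOption();
  option.UseCpu();
  option.UseOrtBackend();  // or option.UseOpenVINOBackend();

  auto model = fastdeploy::vision::segmentation::PaddleSegModel(
      "model.pdmodel", "model.pdiparams", "deploy.yaml", option);
  return model.Initialized() ? 0 : 1;
}
```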
## 5. More Guides

- [PaddleSeg C++ API documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/namespacefastdeploy_1_1vision_1_1segmentation.html)
- [Overview of deploying PaddleSeg models with FastDeploy](../../)
- [Python deployment](../python)

## 6. FAQ

- [How to switch the model inference backend](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md)
- [Using Intel GPUs (discrete or integrated graphics)](https://github.com/PaddlePaddle/FastDeploy/blob/develop/tutorials/intel_gpu/README.md)
- [Build the CPU deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/cpu.md)
- [Build the GPU deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/gpu.md)
- [Build the Jetson deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/jetson.md)
@@ -0,0 +1,138 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision.h"

#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif

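// The three entry points below differ only in how fastdeploy::RuntimeOption is
// configured (CPU, GPU, or GPU + TensorRT); model loading, prediction, and
// visualization are otherwise identical.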
void CpuInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "deploy.yaml";
  auto option = fastdeploy::RuntimeOption();
  option.UseCpu();
  auto model = fastdeploy::vision::segmentation::PaddleSegModel(
      model_file, params_file, config_file, option);

  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::SegmentationResult res;
  if (!model.Predict(im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  // Blend the segmentation mask over the input image with weight 0.5 and save it.
  auto vis_im = fastdeploy::vision::VisSegmentation(im, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void GpuInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "deploy.yaml";

  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  auto model = fastdeploy::vision::segmentation::PaddleSegModel(
      model_file, params_file, config_file, option);

  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::SegmentationResult res;
  if (!model.Predict(im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisSegmentation(im, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void TrtInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "deploy.yaml";

  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  option.UseTrtBackend();
  // To use the original TensorRT rather than Paddle-TensorRT,
  // comment out the following two lines.
  option.EnablePaddleToTrt();
  option.EnablePaddleTrtCollectShape();
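  // Dynamic-shape configuration for TensorRT: SetTrtInputShape registers the
  // minimum, optimal, and maximum shapes (in that order) allowed for input "x".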
  option.SetTrtInputShape("x", {1, 3, 256, 256}, {1, 3, 1024, 1024},
                          {1, 3, 2048, 2048});

  auto model = fastdeploy::vision::segmentation::PaddleSegModel(
      model_file, params_file, config_file, option);

  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::SegmentationResult res;
  if (!model.Predict(im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisSegmentation(im, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

int main(int argc, char* argv[]) {
  if (argc < 4) {
    std::cout
        << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
           "e.g. ./infer_demo ./ppseg_model_dir ./test.jpeg 0"
        << std::endl;
    std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
                 "with gpu; 2: run with gpu and use tensorrt backend."
              << std::endl;
    return -1;
  }

  if (std::atoi(argv[3]) == 0) {
    CpuInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 1) {
    GpuInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 2) {
    TrtInfer(argv[1], argv[2]);
  }
  return 0;
}