[Backend] add sophgo backend (#1015)
* Add the Sophgo device and Sophgo backend to FastDeploy; add ResNet50, YOLOv5s, and PP-LiteSeg examples
* Replace the Sophgo libraries with download links; fix a bug in model.cc
* Fix code style
* Remove unused files; rename the Sophgo device and Sophgo backend
* Add Python support for the Sophgo backend and add Python examples
* Remove unused lines in CMake per PR review

Co-authored-by: Zilong Xing <zilong.xing@sophgo.com>
examples/vision/segmentation/paddleseg/sophgo/README.md (new file, 89 lines)
@@ -0,0 +1,89 @@
# PaddleSeg SOPHGO Deployment Example
## Supported Models

- The PP-LiteSeg deployment model comes from the [PaddleSeg PP-LiteSeg series](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/pp_liteseg/README.md)

## Preparing and Converting the PP-LiteSeg Deployment Model

Before deploying on SOPHGO-TPU, the Paddle model must be converted to a bmodel. The steps are as follows:

- Download the Paddle model [PP-LiteSeg-B(STDC2)-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz)
- To convert the Paddle model to an ONNX model, refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX)
- To convert the ONNX model to a bmodel, refer to [TPU-MLIR](https://github.com/sophgo/tpu-mlir)

## Model Conversion Example

The following uses [PP-LiteSeg-B(STDC2)-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz) as an example to show how to convert a Paddle model into a SOPHGO-TPU model.

### Download the PP-LiteSeg-B(STDC2)-cityscapes-without-argmax model and convert it to ONNX
```shell
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz
tar xvf PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz
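
# Assumption: paddle_infer_shape.py is the helper script shipped with the
# Paddle2ONNX project (tools/paddle/paddle_infer_shape.py); if it is not already
# available locally, fetch it first, for example:
wget https://raw.githubusercontent.com/PaddlePaddle/Paddle2ONNX/develop/tools/paddle/paddle_infer_shape.py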

# Change the input shape of the PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer
# model from dynamic to fixed
python paddle_infer_shape.py --model_dir PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer \
                             --model_filename model.pdmodel \
                             --params_filename model.pdiparams \
                             --save_dir pp_liteseg_fix \
                             --input_shape_dict="{'x':[1,3,512,512]}"

# Convert the fixed-shape Paddle model to an ONNX model
paddle2onnx --model_dir pp_liteseg_fix \
            --model_filename model.pdmodel \
            --params_filename model.pdiparams \
            --save_file pp_liteseg.onnx \
            --enable_dev_version True
```

### Export the bmodel

Taking conversion to a bmodel for BM1684x as an example, we need to download the [TPU-MLIR](https://github.com/sophgo/tpu-mlir) project; for the installation procedure, see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md).

### 1. Installation
``` shell
docker pull sophgo/tpuc_dev:latest

# myname1234 is just an example; any other container name works as well
docker run --privileged --name myname1234 -v $PWD:/workspace -it sophgo/tpuc_dev:latest

source ./envsetup.sh
./build.sh
```
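
The two scripts above (`envsetup.sh` and `build.sh`) live in the TPU-MLIR source tree, which the steps above assume is already present in the mounted workspace. If you started the container from an empty directory, a minimal sketch of the missing step (run inside the container; see the TPU-MLIR README for the authoritative instructions) might look like this:

```shell
# Assumption: building TPU-MLIR from source inside the tpuc_dev container
git clone https://github.com/sophgo/tpu-mlir.git
cd tpu-mlir
source ./envsetup.sh
./build.sh
```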

### 2. Convert the ONNX model to a bmodel
``` shell
mkdir pp_liteseg && cd pp_liteseg

# Put the test images into this folder, together with the pp_liteseg.onnx
# converted in the previous step
cp -rf ${REGRESSION_PATH}/dataset/COCO2017 .
cp -rf ${REGRESSION_PATH}/image .
# place the ONNX model file pp_liteseg.onnx here

mkdir workspace && cd workspace

# Convert the ONNX model to an MLIR model; the --output_names value can be
# looked up with NETRON
model_transform.py \
    --model_name pp_liteseg \
    --model_def ../pp_liteseg.onnx \
    --input_shapes [[1,3,512,512]] \
    --mean 0.0,0.0,0.0 \
    --scale 0.0039216,0.0039216,0.0039216 \
    --keep_aspect_ratio \
    --pixel_format rgb \
    --output_names bilinear_interp_v2_6.tmp_0 \
    --test_input ../image/dog.jpg \
    --test_result pp_liteseg_top_outputs.npz \
    --mlir pp_liteseg.mlir

# Convert the MLIR model to an F32 bmodel for BM1684x
model_deploy.py \
    --mlir pp_liteseg.mlir \
    --quantize F32 \
    --chip bm1684x \
    --test_input pp_liteseg_in_f32.npz \
    --test_reference pp_liteseg_top_outputs.npz \
    --model pp_liteseg_1684x_f32.bmodel
```
The final result is pp_liteseg_1684x_f32.bmodel, a bmodel that can run on BM1684x. If the model needs to be accelerated further, the ONNX model can be converted to an INT8 bmodel; see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md) for the detailed steps.
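
As a rough sketch only of what that INT8 flow looks like (based on the generic TPU-MLIR calibration example; tool names such as `run_calibration.py`, the exact flags, and the calibration dataset should all be checked against your TPU-MLIR version):

```shell
# Build a calibration table from sample images, then deploy an INT8 bmodel
run_calibration.py pp_liteseg.mlir \
    --dataset ../image \
    --input_num 100 \
    -o pp_liteseg_cali_table

model_deploy.py \
    --mlir pp_liteseg.mlir \
    --quantize INT8 \
    --calibration_table pp_liteseg_cali_table \
    --chip bm1684x \
    --test_input pp_liteseg_in_f32.npz \
    --test_reference pp_liteseg_top_outputs.npz \
    --model pp_liteseg_1684x_int8.bmodel
```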

## Other Links

- [C++ deployment](./cpp)
examples/vision/segmentation/paddleseg/sophgo/cpp/CMakeLists.txt (new file, 17 lines)
@@ -0,0 +1,17 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Path of the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")

set(ENABLE_LITE_BACKEND OFF)
#set(FDLIB ${FASTDEPLOY_INSTALL_DIR})

include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

# Add the FastDeploy dependency headers
include_directories(${FASTDEPLOY_INCS})
include_directories(${FastDeploy_INCLUDE_DIRS})

add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Link against the FastDeploy libraries
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
examples/vision/segmentation/paddleseg/sophgo/cpp/README.md (new file, 56 lines)
@@ -0,0 +1,56 @@
# PaddleSeg C++ Deployment Example

This directory provides `infer.cc`, an example that quickly deploys the pp_liteseg model on a SOPHGO BM1684x board with acceleration.

Before deployment, confirm the following two steps:

1. The hardware and software environment meets the requirements
2. FastDeploy has been built from source for your development environment

For both steps, refer to [Building the SOPHGO deployment library](../../../../../../docs/cn/build_and_install/sophgo.md).
## Basic Directory Layout

This example consists of the following parts:
```text
.
├── CMakeLists.txt
├── build    # build output directory
├── images   # directory holding the test images
├── infer.cc
└── model    # directory holding the model files
```

## Build

### Build and copy the SDK

Refer to [Building the SOPHGO deployment library](../../../../../../docs/cn/build_and_install/sophgo.md) to build the SDK. After the build finishes, a fastdeploy-0.0.3 directory is generated under the build directory.
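
As a hedged sketch only (the option names, in particular `ENABLE_SOPHGO_BACKEND`, are assumptions to be verified against the build document above), the SDK build typically looks something like:

```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
# Assumed CMake options for a SOPHGO-enabled C++ SDK; verify against
# docs/cn/build_and_install/sophgo.md
cmake .. -DENABLE_SOPHGO_BACKEND=ON \
         -DENABLE_VISION=ON \
         -DCMAKE_INSTALL_PREFIX=${PWD}/fastdeploy-0.0.3
make -j$(nproc)
make install
```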

### Copy the model and configuration files into the model directory

Convert the Paddle model to a SOPHGO bmodel; see the [conversion document](../README.md) for the steps.
Copy the converted SOPHGO bmodel file into the model directory, as sketched below.
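
A minimal sketch of this step, assuming the bmodel produced earlier and the `deploy.yaml` shipped with the downloaded Paddle model (adjust the source paths to your setup):

```bash
mkdir -p model
# pp_liteseg_1684x_f32.bmodel comes from the TPU-MLIR workspace created earlier
cp /path/to/pp_liteseg/workspace/pp_liteseg_1684x_f32.bmodel ./model/
# deploy.yaml comes from the downloaded Paddle model directory
cp /path/to/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer/deploy.yaml ./model/
```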

### Prepare a test image in the images directory
```bash
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
cp cityscapes_demo.png ./images
```

### Build the example

```bash
cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-0.0.3
make
```

## Run the Example

```bash
./infer_demo model images/cityscapes_demo.png
```

- [Model description](../../)
- [Model conversion](../)
examples/vision/segmentation/paddleseg/sophgo/cpp/infer.cc (new file, 71 lines)
@@ -0,0 +1,71 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <string>
#include "fastdeploy/vision.h"

void SophgoInfer(const std::string& model_dir, const std::string& image_file) {
  std::string model_file = model_dir + "/pp_liteseg_1684x_f32.bmodel";
  std::string params_file;
  std::string config_file = model_dir + "/deploy.yaml";
  auto option = fastdeploy::RuntimeOption();
  option.UseSophgo();
  auto model_format = fastdeploy::ModelFormat::SOPHGO;

  auto model = fastdeploy::vision::segmentation::PaddleSegModel(
      model_file, params_file, config_file, option, model_format);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }
  // model.GetPreprocessor().DisableNormalizeAndPermute();

  fastdeploy::TimeCounter tc;
  tc.Start();
  auto im_org = cv::imread(image_file);

  // The input shape of the bmodel is fixed, so resize the image first.
  int new_width = 512;
  int new_height = 512;
  cv::Mat im;
  cv::resize(im_org, im, cv::Size(new_width, new_height), 0, 0,
             cv::INTER_LINEAR);

  fastdeploy::vision::SegmentationResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }
  auto vis_im = fastdeploy::vision::VisSegmentation(im, res);
  tc.End();
  tc.PrintInfo("PPSeg in Sophgo");

  cv::imwrite("infer_sophgo.jpg", vis_im);
  std::cout << "Visualized result saved in ./infer_sophgo.jpg" << std::endl;
}

int main(int argc, char* argv[]) {
  if (argc < 3) {
    std::cout << "Usage: infer_demo path/to/model_dir path/to/image, "
                 "e.g. ./infer_demo ./bmodel ./test.jpeg"
              << std::endl;
    return -1;
  }

  SophgoInfer(argv[1], argv[2]);
  return 0;
}
examples/vision/segmentation/paddleseg/sophgo/python/README.md (new file, 26 lines)
@@ -0,0 +1,26 @@
# PaddleSeg Python Deployment Example

Before deployment, confirm the following:

- 1. The hardware and software environment meets the requirements; see the [FastDeploy environment requirements](../../../../../../docs/cn/build_and_install/sophgo.md) (a hedged build sketch follows below)
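
As a rough, non-authoritative sketch of preparing that environment by building the FastDeploy Python package from source with the SOPHGO backend (the variable name `ENABLE_SOPHGO_BACKEND` is an assumption; the referenced document is authoritative):

```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
# Assumed build switch for the SOPHGO backend; check
# docs/cn/build_and_install/sophgo.md for the exact variables
export ENABLE_SOPHGO_BACKEND=ON
python setup.py build
python setup.py bdist_wheel
pip install dist/fastdeploy*.whl
```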

This directory provides `infer.py`, an example that quickly deploys pp_liteseg on a SOPHGO TPU. Run the following to complete the deployment:

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/segmentation/paddleseg/sophgo/python

# Download the test image
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png

# Run inference
python3 infer.py --model ./bmodel/pp_liteseg_1684x_f32.bmodel --config_file ./bmodel/deploy.yaml --image cityscapes_demo.png

# After the run completes, the segmentation result is printed and the
# visualization is saved to sophgo_img.png
```

## Other Documents

- [pp_liteseg C++ deployment](../cpp)
- [Converting pp_liteseg to a SOPHGO model](../README.md)
examples/vision/segmentation/paddleseg/sophgo/python/infer.py (new file, 45 lines)
@@ -0,0 +1,45 @@
import fastdeploy as fd
import cv2
import os


def parse_arguments():
    import argparse
    import ast
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True, help="Path of model.")
    parser.add_argument(
        "--config_file", required=True, help="Path of config file.")
    parser.add_argument(
        "--image", type=str, required=True, help="Path of test image file.")

    return parser.parse_args()


args = parse_arguments()

# Configure the runtime and load the model
runtime_option = fd.RuntimeOption()
runtime_option.use_sophgo()

model_file = args.model
params_file = ""
config_file = args.config_file

model = fd.vision.segmentation.PaddleSegModel(
    model_file,
    params_file,
    config_file,
    runtime_option=runtime_option,
    model_format=fd.ModelFormat.SOPHGO)

# Predict the segmentation result of the image
im_org = cv2.imread(args.image)
# The bmodel is a static model with a fixed input shape, set here to [512, 512]
im = cv2.resize(im_org, [512, 512], interpolation=cv2.INTER_LINEAR)
result = model.predict(im)
print(result)

# Visualize the prediction result
vis_im = fd.vision.vis_segmentation(im, result, weight=0.5)
cv2.imwrite("sophgo_img.png", vis_im)