[Backend] add sophgo backend (#1015)

* Add Sophgo Device

Add the SOPHGO backend to FastDeploy.

Add ResNet50, YOLOv5s, and PP-LiteSeg examples.

* Replace the bundled SOPHGO libraries with download links; fix a bug in model.cc

* Fix code style

* Remove unused files; rename the SOPHGO device and SOPHGO backend

* Add Python support for SOPHGO, with Python examples

* Remove unused lines in CMake per PR review

Co-authored-by: Zilong Xing <zilong.xing@sophgo.com>
Commit 34bea7649d (parent 0c292c0766) by Dantès, committed by GitHub on 2023-01-04 15:49:17 +08:00.
41 changed files with 1583 additions and 9 deletions


@@ -15,6 +15,7 @@
PROJECT(fastdeploy C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
option(CSRCS_DIR_NAME "Name of source code directory")
option(LIBRARY_NAME "Name of build library name")
option(PY_LIBRARY_NAME "Name of build python library name")
@@ -60,6 +61,7 @@ option(ENABLE_PADDLE_BACKEND "Whether to enable paddle backend." OFF)
option(ENABLE_POROS_BACKEND "Whether to enable poros backend." OFF)
option(ENABLE_OPENVINO_BACKEND "Whether to enable openvino backend." OFF)
option(ENABLE_RKNPU2_BACKEND "Whether to enable RKNPU2 backend." OFF)
option(ENABLE_SOPHGO_BACKEND "Whether to enable SOPHGO backend." OFF)
option(ENABLE_LITE_BACKEND "Whether to enable paddle lite backend." OFF)
option(ENABLE_VISION "Whether to enable vision models usage." OFF)
option(ENABLE_TEXT "Whether to enable text models usage." OFF)
@@ -194,6 +196,7 @@ file(GLOB_RECURSE DEPLOY_POROS_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fast
file(GLOB_RECURSE DEPLOY_TRT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/tensorrt/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/tensorrt/*.cpp)
file(GLOB_RECURSE DEPLOY_OPENVINO_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/openvino/*.cc)
file(GLOB_RECURSE DEPLOY_RKNPU2_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/rknpu/rknpu2/*.cc)
file(GLOB_RECURSE DEPLOY_SOPHGO_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/sophgo/*.cc)
file(GLOB_RECURSE DEPLOY_LITE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/lite/*.cc)
file(GLOB_RECURSE DEPLOY_VISION_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/vision/*.cc)
file(GLOB_RECURSE DEPLOY_ENCRYPTION_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/encryption/*.cc)
@@ -201,8 +204,7 @@ file(GLOB_RECURSE DEPLOY_PIPELINE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/f
file(GLOB_RECURSE DEPLOY_VISION_CUDA_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/vision/*.cu)
file(GLOB_RECURSE DEPLOY_TEXT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/text/*.cc)
file(GLOB_RECURSE DEPLOY_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/pybind/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/*_pybind.cc)
list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS} ${DEPLOY_PADDLE_SRCS} ${DEPLOY_POROS_SRCS} ${DEPLOY_TRT_SRCS} ${DEPLOY_OPENVINO_SRCS} ${DEPLOY_LITE_SRCS} ${DEPLOY_VISION_SRCS} ${DEPLOY_TEXT_SRCS} ${DEPLOY_PIPELINE_SRCS} ${DEPLOY_RKNPU2_SRCS} ${DEPLOY_ENCRYPTION_SRCS})
list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS} ${DEPLOY_PADDLE_SRCS} ${DEPLOY_POROS_SRCS} ${DEPLOY_TRT_SRCS} ${DEPLOY_OPENVINO_SRCS} ${DEPLOY_LITE_SRCS} ${DEPLOY_VISION_SRCS} ${DEPLOY_TEXT_SRCS} ${DEPLOY_PIPELINE_SRCS} ${DEPLOY_RKNPU2_SRCS} ${DEPLOY_SOPHGO_SRCS} ${DEPLOY_ENCRYPTION_SRCS})
set(DEPEND_LIBS "")
@@ -266,6 +268,13 @@ if(ENABLE_RKNPU2_BACKEND)
list(APPEND DEPEND_LIBS ${RKNN_RT_LIB})
endif()
if(ENABLE_SOPHGO_BACKEND)
add_definitions(-DENABLE_SOPHGO_BACKEND)
list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_SOPHGO_SRCS})
include(${PROJECT_SOURCE_DIR}/cmake/sophgo.cmake)
list(APPEND DEPEND_LIBS ${SOPHGO_RT_LIB})
endif()
if(ENABLE_POROS_BACKEND)
set(CMAKE_CXX_STANDARD 14)
add_definitions(-DENABLE_POROS_BACKEND)


@@ -3,6 +3,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 3.8)
set(WITH_GPU @WITH_GPU@)
set(ENABLE_ORT_BACKEND @ENABLE_ORT_BACKEND@)
set(ENABLE_RKNPU2_BACKEND @ENABLE_RKNPU2_BACKEND@)
set(ENABLE_SOPHGO_BACKEND @ENABLE_SOPHGO_BACKEND@)
set(ENABLE_LITE_BACKEND @ENABLE_LITE_BACKEND@)
set(ENABLE_PADDLE_BACKEND @ENABLE_PADDLE_BACKEND@)
set(ENABLE_OPENVINO_BACKEND @ENABLE_OPENVINO_BACKEND@)
@@ -271,6 +272,7 @@ message(STATUS " CXX flags : ${CMAKE_CXX_FLAGS}")
message(STATUS " WITH_GPU : ${WITH_GPU}")
message(STATUS " ENABLE_ORT_BACKEND : ${ENABLE_ORT_BACKEND}")
message(STATUS " ENABLE_RKNPU2_BACKEND : ${ENABLE_RKNPU2_BACKEND}")
message(STATUS " ENABLE_SOPHGO_BACKEND : ${ENABLE_SOPHGO_BACKEND}")
message(STATUS " ENABLE_PADDLE_BACKEND : ${ENABLE_PADDLE_BACKEND}")
message(STATUS " ENABLE_POROS_BACKEND : ${ENABLE_POROS_BACKEND}")
message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")

cmake/sophgo.cmake Normal file

@@ -0,0 +1,7 @@
CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
find_package(libsophon REQUIRED)
message(STATUS "libsophon library dirs: ${LIBSOPHON_LIB_DIRS}")
include_directories(${LIBSOPHON_INCLUDE_DIRS})
set(SOPHGO_RT_LIB ${LIBSOPHON_LIB_DIRS}/libbmrt.so)


@@ -32,6 +32,7 @@ function(fastdeploy_summary)
message(STATUS " Paddle2ONNX version : ${PADDLE2ONNX_VERSION}")
message(STATUS " ENABLE_ORT_BACKEND : ${ENABLE_ORT_BACKEND}")
message(STATUS " ENABLE_RKNPU2_BACKEND : ${ENABLE_RKNPU2_BACKEND}")
message(STATUS " ENABLE_SOPHGO_BACKEND : ${ENABLE_SOPHGO_BACKEND}")
message(STATUS " ENABLE_PADDLE_BACKEND : ${ENABLE_PADDLE_BACKEND}")
message(STATUS " ENABLE_LITE_BACKEND : ${ENABLE_LITE_BACKEND}")
message(STATUS " ENABLE_POROS_BACKEND : ${ENABLE_POROS_BACKEND}")


@@ -26,6 +26,7 @@
| ENABLE_PADDLE_BACKEND | Default OFF. Whether to build in the Paddle Inference backend (recommended ON for CPU/GPU) |
| ENABLE_LITE_BACKEND | Default OFF. Whether to build in the Paddle Lite backend (must be ON when building the Android library) |
| ENABLE_RKNPU2_BACKEND | Default OFF. Whether to build in the RKNPU2 backend (recommended ON for RK3588/RK3568/RK3566) |
| ENABLE_SOPHGO_BACKEND | Default OFF. Whether to build in the SOPHGO backend; must be ON when deploying on the SOPHGO TPU |
| WITH_ASCEND | Default OFF. Must be ON when deploying on Huawei Ascend NPU |
| WITH_KUNLUNXIN | Default OFF. Must be ON when deploying on KunlunXin XPU |
| WITH_TIMVX | Default OFF. Must be ON when deploying on RV1126/RV1109/A311D |


@@ -0,0 +1,78 @@
# How to Build the SOPHGO Deployment Library
## SOPHGO Environment Preparation
SOPHGO supports compilation on Linux; the supported systems are Debian/Ubuntu.
The installation package consists of three files:
- [sophon-driver\_0.4.2\_$arch.deb](http://219.142.246.77:65000/sharing/KWqbmEcKp)
- [sophon-libsophon\_0.4.2\_$arch.deb](http://219.142.246.77:65000/sharing/PlvlBXhWY)
- [sophon-libsophon-dev\_0.4.2\_$arch.deb](http://219.142.246.77:65000/sharing/zTErLlpS7)
Here "$arch" is the hardware architecture of the current machine; run the following command to obtain the server's arch:
```shell
uname -m
```
Typically, the architecture of an x86_64 machine is amd64, and that of an arm64 machine is arm64:
```text
- sophon-driver_0.4.2_$arch.deb
- sophon-libsophon_0.4.2_$arch.deb
- sophon-libsophon-dev_0.4.2_$arch.deb
```
sophon-driver contains the PCIe accelerator card driver; sophon-libsophon contains the runtime environment (libraries, tools, etc.); sophon-libsophon-dev contains the development environment (header files, etc.). If you are only installing on a deployment machine, you do not need sophon-libsophon-dev.
Install with the following steps:
```shell
# Install the dependencies (this only needs to be done once):
sudo apt install dkms libncurses5
# Install libsophon:
sudo dpkg -i sophon-*.deb
# Run the following command in the terminal (or log out and back in) to enable bm-smi and other commands:
source /etc/profile
```
The installation location is:
```text
/opt/sophon/
├── driver-0.4.2
├── libsophon-0.4.2
| ├──bin
| ├──data
| ├──include
| └──lib
└── libsophon-current->/opt/sophon/libsophon-0.4.2
```
## Build and Install the C++ SDK
After setting up the build environment, the build commands are as follows:
```bash
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
# CMake configuration for the SOPHGO backend
cmake -DENABLE_SOPHGO_BACKEND=ON \
-DCMAKE_INSTALL_PREFIX=${PWD}/fastdeploy-sophgo \
-DENABLE_VISION=ON \
..
# Build the FastDeploy SOPHGO C++ SDK
make -j8
make install
```
After the build completes, the fastdeploy-sophgo directory is generated under the current build directory.
## Build the FastDeploy Python Library
After setting up the build environment, the build commands are as follows:
```bash
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
export ENABLE_SOPHGO_BACKEND=ON
export ENABLE_VISION=ON
python setup.py build
python setup.py bdist_wheel
# After the build completes, install the whl package from the dist folder in the current directory.
```
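
After installing the wheel, a quick check confirms that the SOPHGO backend was compiled in (a minimal sketch; it assumes the wheel built above has been installed):
```python
import fastdeploy as fd

# use_sophgo() selects the SOPHGO TPU device and the SOPHGOTPU backend;
# it fails if the wheel was built without ENABLE_SOPHGO_BACKEND=ON.
option = fd.RuntimeOption()
option.use_sophgo()
print(fd.ModelFormat.SOPHGO)  # the model format used for .bmodel files
```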


@@ -0,0 +1,77 @@
# How to Build SOPHGO Deployment Environment
## SOPHGO Environment Preparation
SOPHGO supports compilation on Linux; Debian/Ubuntu are the supported systems.
The installation package consists of three files:
- [sophon-driver\_0.4.2\_$arch.deb](http://219.142.246.77:65000/sharing/KWqbmEcKp)
- [sophon-libsophon\_0.4.2\_$arch.deb](http://219.142.246.77:65000/sharing/PlvlBXhWY)
- [sophon-libsophon-dev\_0.4.2\_$arch.deb](http://219.142.246.77:65000/sharing/zTErLlpS7)
$arch indicates the hardware architecture of the current machine. Run the following command to obtain the server's arch:
```shell
uname -m
```
Generally, the hardware architecture of x86_64 machines is amd64, and that of arm64 machines is arm64:
```text
- sophon-driver_0.4.2_$arch.deb
- sophon-libsophon_0.4.2_$arch.deb
- sophon-libsophon-dev_0.4.2_$arch.deb
```
sophon-driver contains the PCIe accelerator card driver; sophon-libsophon contains the runtime environment (libraries, tools, etc.); sophon-libsophon-dev contains the development environment (header files, etc.). If you only install packages on a deployment machine, you do not need to install sophon-libsophon-dev.
You can perform the following steps to install:
```shell
# Install the dependencies (this only needs to be done once):
sudo apt install dkms libncurses5
# Install libsophon:
sudo dpkg -i sophon-*.deb
# Run the following command in the terminal (or log out and back in) to enable bm-smi and other commands:
source /etc/profile
```
The installation location is:
```text
/opt/sophon/
├── driver-0.4.2
├── libsophon-0.4.2
| ├──bin
| ├──data
| ├──include
| └──lib
└── libsophon-current->/opt/sophon/libsophon-0.4.2
```
## How to Build and Install C++ SDK
After setting up the compilation environment, the compilation command is as follows
```bash
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
# CMake configuration for the SOPHGO backend
cmake -DENABLE_SOPHGO_BACKEND=ON \
-DCMAKE_INSTALL_PREFIX=${PWD}/fastdeploy-sophgo \
-DENABLE_VISION=ON \
..
# Build the FastDeploy SOPHGO C++ SDK
make -j8
make install
```
When the compilation is complete, the fastdeploy-sophgo directory is created under the current build directory, and the FastDeploy C++ SDK is ready to use.
## Compiling Python FastDeploy Libraries
After setting up the compilation environment, the compilation command is as follows
```bash
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
export ENABLE_SOPHGO_BACKEND=ON
export ENABLE_VISION=ON
python setup.py build
python setup.py bdist_wheel
# After the compilation is complete, install the whl package from the dist folder of the current directory.
```
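
After installing the wheel, a quick check confirms that the SOPHGO backend was compiled in (a minimal sketch; it assumes the wheel built above has been installed):
```python
import fastdeploy as fd

# use_sophgo() selects the SOPHGO TPU device and the SOPHGOTPU backend;
# it fails if the wheel was built without ENABLE_SOPHGO_BACKEND=ON.
option = fd.RuntimeOption()
option.use_sophgo()
print(fd.ModelFormat.SOPHGO)  # the model format used for .bmodel files
```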


@@ -0,0 +1,84 @@
# PaddleClas SOPHGO Deployment Example
## Supported Model List
FastDeploy currently supports deploying the following models: [ResNet series models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/ResNet_and_vd.md)
## Preparing and Converting the ResNet Deployment Model
Before deploying on the SOPHGO TPU, the Paddle model must be converted to a bmodel. The steps are:
- To convert a Paddle dygraph model to an ONNX model, refer to [Paddle2ONNX model conversion](https://github.com/PaddlePaddle/Paddle2ONNX/tree/develop/model_zoo/classification)
- For converting an ONNX model to a bmodel, refer to [TPU-MLIR](https://github.com/sophgo/tpu-mlir).
## Model Conversion Example
The following uses [ResNet50_vd](https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz) as an example to show how to convert a Paddle model to a SOPHGO-TPU model.
## Exporting the ONNX Model
### Download and extract the Paddle ResNet50_vd static graph model
```shell
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
tar xvf ResNet50_vd_infer.tgz
```
### Convert the static graph model to ONNX; note that save_file here should match the archive name
```shell
paddle2onnx --model_dir ResNet50_vd_infer \
--model_filename inference.pdmodel \
--params_filename inference.pdiparams \
--save_file ResNet50_vd_infer.onnx \
--enable_dev_version True
```
### Exporting the bmodel
Taking a BM1684x bmodel as an example, you need to download the [TPU-MLIR](https://github.com/sophgo/tpu-mlir) project; for installation details, see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md).
### 1. Installation
``` shell
docker pull sophgo/tpuc_dev:latest
# myname1234 is an example container name; you may use another name
docker run --privileged --name myname1234 -v $PWD:/workspace -it sophgo/tpuc_dev:latest
source ./envsetup.sh
./build.sh
```
### 2. Converting the ONNX model to a bmodel
``` shell
mkdir ResNet50_vd_infer && cd ResNet50_vd_infer
# Put test images into this folder, and copy the ResNet50_vd_infer.onnx converted in the previous step into it
cp -rf ${REGRESSION_PATH}/dataset/COCO2017 .
cp -rf ${REGRESSION_PATH}/image .
# Put the ONNX model file ResNet50_vd_infer.onnx here
mkdir workspace && cd workspace
# Convert the ONNX model to an MLIR model; the --output_names argument can be inspected with NETRON
model_transform.py \
--model_name ResNet50_vd_infer \
--model_def ../ResNet50_vd_infer.onnx \
--input_shapes [[1,3,224,224]] \
--mean 0.0,0.0,0.0 \
--scale 0.0039216,0.0039216,0.0039216 \
--keep_aspect_ratio \
--pixel_format rgb \
--output_names save_infer_model/scale_0.tmp_1 \
--test_input ../image/dog.jpg \
--test_result ResNet50_vd_infer_top_outputs.npz \
--mlir ResNet50_vd_infer.mlir
# Convert the MLIR model to a BM1684x F32 bmodel
model_deploy.py \
--mlir ResNet50_vd_infer.mlir \
--quantize F32 \
--chip bm1684x \
--test_input ResNet50_vd_infer_in_f32.npz \
--test_reference ResNet50_vd_infer_top_outputs.npz \
--model ResNet50_vd_infer_1684x_f32.bmodel
```
This finally produces ResNet50_vd_infer_1684x_f32.bmodel, a bmodel that can run on the BM1684x. If you need to accelerate the model further, you can convert the ONNX model to an INT8 bmodel; see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md) for the detailed steps.
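Once the bmodel is ready, deploying it through FastDeploy only requires selecting the SOPHGO runtime option and model format. The following is a minimal sketch based on the Python example under `python/`; the file paths are illustrative:
```python
import fastdeploy as fd
import cv2

# Select the SOPHGO TPU and load the converted bmodel; a bmodel has no
# separate params file, so an empty string is passed for it.
option = fd.RuntimeOption()
option.use_sophgo()
model = fd.vision.classification.PaddleClasModel(
    "ResNet50_vd_infer_1684x_f32.bmodel",
    "",
    "ResNet50_vd_infer/inference_cls.yaml",
    runtime_option=option,
    model_format=fd.ModelFormat.SOPHGO)

# Run classification on a test image.
result = model.predict(cv2.imread("ILSVRC2012_val_00000010.jpeg"))
print(result)
```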
## Other Links
- [C++ deployment](./cpp)


@@ -0,0 +1,17 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Path of the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
set(ENABLE_LITE_BACKEND OFF)
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# Add FastDeploy dependency headers
include_directories(${FASTDEPLOY_INCS})
include_directories(${FastDeploy_INCLUDE_DIRS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Link against the FastDeploy library
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})


@@ -0,0 +1,61 @@
# PaddleClas C++ Deployment Example
This directory provides `infer.cc`, an example that quickly deploys the ResNet50_vd model with acceleration on a SOPHGO BM1684x board.
Before deployment, confirm the following two steps:
1. The hardware and software environment meets the requirements
2. FastDeploy has been built from source for the development environment
Refer to [How to Build the SOPHGO Deployment Library](../../../../../../docs/cn/build_and_install/sophgo.md) for both steps.
## Generating the Basic Directory Layout
This example consists of the following parts:
```text
.
├── CMakeLists.txt
├── build # build folder
├── images # folder for test images
├── infer.cc
├── preprocess_config.yaml # example preprocessing config file
└── model # folder for model files
```
## Building
### Build and copy the SDK
Refer to [How to Build the SOPHGO Deployment Library](../../../../../../docs/cn/build_and_install/sophgo.md) to build the SDK; after the build finishes, the fastdeploy-0.0.3 directory is generated under the build directory.
### Copy the model and config files into the model folder
Convert the Paddle model to a SOPHGO bmodel; the conversion steps are described in [the conversion doc](../README.md).
Copy the converted SOPHGO bmodel file into model.
Copy the preprocessing config file into model as well:
```bash
cp preprocess_config.yaml ./model
```
### Prepare a test image in the images folder
```bash
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
cp ILSVRC2012_val_00000010.jpeg ./images
```
### Build the example
```bash
cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-0.0.3
make
```
## Running the Example
```bash
./infer_demo model images/ILSVRC2012_val_00000010.jpeg
```
- [Model description](../../)
- [Model conversion](../)


@@ -0,0 +1,61 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "resnet50_1684x_f32.bmodel";
auto params_file = model_dir + sep + "";
auto config_file = model_dir + sep + "preprocess_config.yaml";
fastdeploy::RuntimeOption option;
option.UseSophgo();
auto model_format = fastdeploy::ModelFormat::SOPHGO;
auto model = fastdeploy::vision::classification::PaddleClasModel(
model_file, params_file, config_file, option, model_format);
assert(model.Initialized());
auto im = cv::imread(image_file);
fastdeploy::vision::ClassifyResult res;
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout << "Usage: infer_demo path/to/model "
"path/to/image "
"run_option, "
"e.g ./infer_demo ./bmodel ./test.jpeg"
<< std::endl;
return -1;
}
std::string model_dir = argv[1];
std::string test_image = argv[2];
InitAndInfer(model_dir, test_image);
return 0;
}


@@ -0,0 +1,29 @@
# PaddleClas Python Deployment Example
Before deployment, confirm the following step:
- 1. The hardware and software environment meets the requirements; refer to [FastDeploy environment requirements](../../../../../../docs/cn/build_and_install/sophgo.md)
This directory provides `infer.py`, an example that quickly deploys ResNet50_vd on the SOPHGO TPU. Run the following script to complete the deployment:
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/classification/paddleclas/sophgo/python
# Download the test image
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
# Run inference
python3 infer.py --model ./bmodel/resnet50_1684x_f32.bmodel --config_file ResNet50_vd_infer/inference_cls.yaml --image ILSVRC2012_val_00000010.jpeg
# The returned result is as follows
ClassifyResult(
label_ids: 153,
scores: 0.684570,
)
```
## Other Documents
- [ResNet50_vd C++ deployment](../cpp)
- [Converting ResNet50_vd to a SOPHGO model](../README.md)


@@ -0,0 +1,41 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True, help="Path of model.")
parser.add_argument(
"--config_file", required=True, help="Path of config file.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
parser.add_argument(
"--topk", type=int, default=1, help="Return topk results.")
return parser.parse_args()
args = parse_arguments()
# Configure the runtime and load the model
runtime_option = fd.RuntimeOption()
runtime_option.use_sophgo()
model_file = args.model
params_file = ""
config_file = args.config_file
model = fd.vision.classification.PaddleClasModel(
model_file,
params_file,
config_file,
runtime_option=runtime_option,
model_format=fd.ModelFormat.SOPHGO)
# Predict the image classification result
im = cv2.imread(args.image)
result = model.predict(im, args.topk)
print(result)


@@ -0,0 +1,75 @@
# YOLOv5 SOPHGO Deployment Example
## Supported Model List
The YOLOv5 v6.0 deployment is based on [YOLOv5](https://github.com/ultralytics/yolov5/tree/v6.0) and its [COCO pretrained models](https://github.com/ultralytics/yolov5/releases/tag/v6.0)
## Preparing and Converting the YOLOv5 Deployment Model
Before deploying on the SOPHGO TPU, the ONNX model must be converted to a bmodel. The steps are:
- To download the pretrained ONNX model, refer to [YOLOv5 deployment model preparation](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/vision/detection/yolov5)
- For converting an ONNX model to a bmodel, refer to [TPU-MLIR](https://github.com/sophgo/tpu-mlir)
## Model Conversion Example
The following uses YOLOv5s as an example to show how to convert an ONNX model to a SOPHGO-TPU model.
## Downloading the YOLOv5s Model
### Download the ONNX YOLOv5s static graph model
```shell
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
```
### Exporting the bmodel
Taking a BM1684x bmodel as an example, you need to download the [TPU-MLIR](https://github.com/sophgo/tpu-mlir) project; for installation details, see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md).
### 1. Installation
``` shell
docker pull sophgo/tpuc_dev:latest
# myname1234 is an example container name; you may use another name
docker run --privileged --name myname1234 -v $PWD:/workspace -it sophgo/tpuc_dev:latest
source ./envsetup.sh
./build.sh
```
### 2. Converting the ONNX model to a bmodel
``` shell
mkdir YOLOv5s && cd YOLOv5s
# Put test images into this folder, and copy the yolov5s.onnx downloaded in the previous step into it
cp -rf ${REGRESSION_PATH}/dataset/COCO2017 .
cp -rf ${REGRESSION_PATH}/image .
# Put the ONNX model file yolov5s.onnx here
mkdir workspace && cd workspace
# Convert the ONNX model to an MLIR model; the --output_names argument can be inspected with NETRON
model_transform.py \
--model_name yolov5s \
--model_def ../yolov5s.onnx \
--input_shapes [[1,3,640,640]] \
--mean 0.0,0.0,0.0 \
--scale 0.0039216,0.0039216,0.0039216 \
--keep_aspect_ratio \
--pixel_format rgb \
--output_names output,350,498,646 \
--test_input ../image/dog.jpg \
--test_result yolov5s_top_outputs.npz \
--mlir yolov5s.mlir
# Convert the MLIR model to a BM1684x F32 bmodel
model_deploy.py \
--mlir yolov5s.mlir \
--quantize F32 \
--chip bm1684x \
--test_input yolov5s_in_f32.npz \
--test_reference yolov5s_top_outputs.npz \
--model yolov5s_1684x_f32.bmodel
```
This finally produces yolov5s_1684x_f32.bmodel, a bmodel that can run on the BM1684x. If you need to accelerate the model further, you can convert the ONNX model to an INT8 bmodel; see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md) for the detailed steps.
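Once the bmodel is ready, deploying it through FastDeploy only requires selecting the SOPHGO runtime option and model format. The following is a minimal sketch based on the Python example under `python/`; the file paths are illustrative:
```python
import fastdeploy as fd
import cv2

# Select the SOPHGO TPU and load the converted bmodel; a bmodel has no
# separate params file, so an empty string is passed for it.
option = fd.RuntimeOption()
option.use_sophgo()
model = fd.vision.detection.YOLOv5(
    "yolov5s_1684x_f32.bmodel",
    "",
    runtime_option=option,
    model_format=fd.ModelFormat.SOPHGO)

# Run detection and write out a visualization of the result.
im = cv2.imread("000000014439.jpg")
result = model.predict(im)
cv2.imwrite("sophgo_result.jpg", fd.vision.vis_detection(im, result))
```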
## Other Links
- [C++ deployment](./cpp)


@@ -0,0 +1,17 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Path of the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
set(ENABLE_LITE_BACKEND OFF)
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# Add FastDeploy dependency headers
include_directories(${FASTDEPLOY_INCS})
include_directories(${FastDeploy_INCLUDE_DIRS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Link against the FastDeploy library
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})


@@ -0,0 +1,56 @@
# YOLOv5 C++ Deployment Example
This directory provides `infer.cc`, an example that quickly deploys the yolov5s model with acceleration on a SOPHGO BM1684x board.
Before deployment, confirm the following two steps:
1. The hardware and software environment meets the requirements
2. FastDeploy has been built from source for the development environment
Refer to [How to Build the SOPHGO Deployment Library](../../../../../../docs/cn/build_and_install/sophgo.md) for both steps.
## Generating the Basic Directory Layout
This example consists of the following parts:
```text
.
├── CMakeLists.txt
├── build # build folder
├── images # folder for test images
├── infer.cc
└── model # folder for model files
```
## Building
### Build and copy the SDK
Refer to [How to Build the SOPHGO Deployment Library](../../../../../../docs/cn/build_and_install/sophgo.md) to build the SDK; after the build finishes, the fastdeploy-0.0.3 directory is generated under the build directory.
### Copy the model file into the model folder
Convert the model to a SOPHGO bmodel; the conversion steps are described in [the conversion doc](../README.md).
Copy the converted SOPHGO bmodel file into model.
### Prepare a test image in the images folder
```bash
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
cp 000000014439.jpg ./images
```
### Build the example
```bash
cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-0.0.3
make
```
## Running the Example
```bash
./infer_demo model images/000000014439.jpg
```
- [Model description](../../)
- [Model conversion](../)


@@ -0,0 +1,61 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "yolov5s_1684x_f32.bmodel";
auto params_file = model_dir + sep + "";
fastdeploy::RuntimeOption option;
option.UseSophgo();
auto model_format = fastdeploy::ModelFormat::SOPHGO;
auto model = fastdeploy::vision::detection::YOLOv5(
model_file, params_file, option, model_format);
assert(model.Initialized());
auto im = cv::imread(image_file);
fastdeploy::vision::DetectionResult res;
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout << "Usage: infer_demo path/to/model "
"path/to/image "
"run_option, "
"e.g ./infer_demo ./model ./test.jpeg"
<< std::endl;
return -1;
}
std::string model_dir = argv[1];
std::string test_image = argv[2];
InitAndInfer(model_dir, test_image);
return 0;
}


@@ -0,0 +1,46 @@
# YOLOv5 Python Deployment Example
Before deployment, confirm the following step:
- 1. The hardware and software environment meets the requirements; refer to [FastDeploy environment requirements](../../../../../../docs/cn/build_and_install/sophgo.md)
This directory provides `infer.py`, an example that quickly deploys YOLOv5 on the SOPHGO TPU. Run the following script to complete the deployment:
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/detection/yolov5/sophgo/python
# Download the test image
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# Run inference
python3 infer.py --model ./bmodel/yolov5s_1684x_f32.bmodel --image 000000014439.jpg
# The returned result is as follows
DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
268.480255,81.053055, 298.694794, 169.439026, 0.896569, 0
104.731163,45.661972, 127.583824, 93.449387, 0.869531, 0
378.909363,39.750137, 395.608643, 84.243454, 0.868430, 0
158.552979,80.361511, 199.185760, 168.181915, 0.842988, 0
414.375305,90.948090, 506.321899, 280.405182, 0.835842, 0
364.003448,56.608932, 381.978607, 115.968216, 0.815136, 0
351.725128,42.635330, 366.910309, 98.048386, 0.808936, 0
505.888306,114.366791, 593.124878, 275.995270, 0.801361, 0
327.708618,38.363693, 346.849915, 80.893021, 0.794725, 0
583.493408,114.532883, 612.354614, 175.873535, 0.760649, 0
186.470657,44.941360, 199.664505, 61.037643, 0.632591, 0
169.615891,48.014603, 178.141556, 60.888596, 0.613938, 0
25.810200,117.199692, 59.888783, 152.850128, 0.590614, 0
352.145294,46.712723, 381.946075, 106.752151, 0.505329, 0
1.875000,150.734375, 37.968750, 173.781250, 0.404573, 24
464.657288,15.901413, 472.512939, 34.116409, 0.346033, 0
64.625000,135.171875, 84.500000, 154.406250, 0.332831, 24
57.812500,151.234375, 103.000000, 174.156250, 0.332566, 24
165.906250,88.609375, 527.906250, 339.953125, 0.259424, 33
101.406250,152.562500, 118.890625, 169.140625, 0.253891, 24
```
## Other Documents
- [YOLOv5 C++ deployment](../cpp)
- [Converting YOLOv5 to a SOPHGO model](../README.md)


@@ -0,0 +1,40 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True, help="Path of model.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
return parser.parse_args()
args = parse_arguments()
# Configure the runtime and load the model
runtime_option = fd.RuntimeOption()
runtime_option.use_sophgo()
model_file = args.model
params_file = ""
model = fd.vision.detection.YOLOv5(
model_file,
params_file,
runtime_option=runtime_option,
model_format=fd.ModelFormat.SOPHGO)
# Predict the detection result
im = cv2.imread(args.image)
result = model.predict(im)
print(result)
# Visualize the prediction result
vis_im = fd.vision.vis_detection(im, result)
cv2.imwrite("sophgo_result.jpg", vis_im)
print("Visualized result save in ./sophgo_result.jpg")


@@ -0,0 +1,89 @@
# PaddleSeg SOPHGO Deployment Example
## Supported Model List
- The PP-LiteSeg deployment is based on the [PaddleSeg PP-LiteSeg series models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/pp_liteseg/README.md)
## Preparing and Converting the PP-LiteSeg Deployment Model
Before deploying on the SOPHGO TPU, the Paddle model must be converted to a bmodel. The steps are:
- Download the Paddle model [PP-LiteSeg-B(STDC2)-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz)
- To convert a Paddle model to an ONNX model, refer to [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX)
- For converting an ONNX model to a bmodel, refer to [TPU-MLIR](https://github.com/sophgo/tpu-mlir)
## Model Conversion Example
The following uses [PP-LiteSeg-B(STDC2)-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz) as an example to show how to convert a Paddle model to a SOPHGO-TPU model.
### Download the PP-LiteSeg-B(STDC2)-cityscapes-without-argmax model and convert it to ONNX
```shell
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz
tar xvf PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz
# Change the input shape of the PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer model from dynamic to fixed
python paddle_infer_shape.py --model_dir PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer \
--model_filename model.pdmodel \
--params_filename model.pdiparams \
--save_dir pp_liteseg_fix \
--input_shape_dict="{'x':[1,3,512,512]}"
# Convert the fixed-input Paddle model to an ONNX model
paddle2onnx --model_dir pp_liteseg_fix \
--model_filename model.pdmodel \
--params_filename model.pdiparams \
--save_file pp_liteseg.onnx \
--enable_dev_version True
```
### Exporting the bmodel
Taking a BM1684x bmodel as an example, you need to download the [TPU-MLIR](https://github.com/sophgo/tpu-mlir) project; for installation details, see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md).
### 1. Installation
``` shell
docker pull sophgo/tpuc_dev:latest
# myname1234 is an example container name; you may use another name
docker run --privileged --name myname1234 -v $PWD:/workspace -it sophgo/tpuc_dev:latest
source ./envsetup.sh
./build.sh
```
### 2. Converting the ONNX model to a bmodel
``` shell
mkdir pp_liteseg && cd pp_liteseg
# Put test images into this folder, and copy the pp_liteseg.onnx converted in the previous step into it
cp -rf ${REGRESSION_PATH}/dataset/COCO2017 .
cp -rf ${REGRESSION_PATH}/image .
# Put the ONNX model file pp_liteseg.onnx here
mkdir workspace && cd workspace
# Convert the ONNX model to an MLIR model; the --output_names argument can be inspected with NETRON
model_transform.py \
--model_name pp_liteseg \
--model_def ../pp_liteseg.onnx \
--input_shapes [[1,3,512,512]] \
--mean 0.0,0.0,0.0 \
--scale 0.0039216,0.0039216,0.0039216 \
--keep_aspect_ratio \
--pixel_format rgb \
--output_names bilinear_interp_v2_6.tmp_0 \
--test_input ../image/dog.jpg \
--test_result pp_liteseg_top_outputs.npz \
--mlir pp_liteseg.mlir
# Convert the MLIR model to a BM1684x F32 bmodel
model_deploy.py \
--mlir pp_liteseg.mlir \
--quantize F32 \
--chip bm1684x \
--test_input pp_liteseg_in_f32.npz \
--test_reference pp_liteseg_top_outputs.npz \
--model pp_liteseg_1684x_f32.bmodel
```
This finally produces pp_liteseg_1684x_f32.bmodel, a bmodel that can run on the BM1684x. If you need to accelerate the model further, you can convert the ONNX model to an INT8 bmodel; see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md) for the detailed steps.
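Once the bmodel is ready, deploying it through FastDeploy only requires selecting the SOPHGO runtime option and model format. The following is a minimal sketch based on the Python example under `python/`; the file paths are illustrative. Since the bmodel input is fixed at 512x512, the image is resized first:
```python
import fastdeploy as fd
import cv2

# Select the SOPHGO TPU and load the converted bmodel together with the
# PaddleSeg deploy.yaml preprocessing config.
option = fd.RuntimeOption()
option.use_sophgo()
model = fd.vision.segmentation.PaddleSegModel(
    "pp_liteseg_1684x_f32.bmodel",
    "",
    "deploy.yaml",
    runtime_option=option,
    model_format=fd.ModelFormat.SOPHGO)

# The bmodel input is static, so resize to the fixed [512, 512] shape.
im = cv2.resize(cv2.imread("cityscapes_demo.png"), [512, 512],
                interpolation=cv2.INTER_LINEAR)
result = model.predict(im)
vis_im = fd.vision.vis_segmentation(im, result, weight=0.5)
cv2.imwrite("sophgo_img.png", vis_im)
```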
## Other Links
- [C++ deployment](./cpp)


@@ -0,0 +1,17 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Path of the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
set(ENABLE_LITE_BACKEND OFF)
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# Add FastDeploy dependency headers
include_directories(${FASTDEPLOY_INCS})
include_directories(${FastDeploy_INCLUDE_DIRS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Link against the FastDeploy library
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})


@@ -0,0 +1,56 @@
# PaddleSeg C++ Deployment Example
This directory provides `infer.cc`, an example that quickly deploys the pp_liteseg model with acceleration on a SOPHGO BM1684x board.
Before deployment, confirm the following two steps:
1. The hardware and software environment meets the requirements
2. FastDeploy has been built from source for the development environment
Refer to [How to Build the SOPHGO Deployment Library](../../../../../../docs/cn/build_and_install/sophgo.md) for both steps.
## Generating the Basic Directory Layout
This example consists of the following parts:
```text
.
├── CMakeLists.txt
├── build # build folder
├── images # folder for test images
├── infer.cc
└── model # folder for model files
```
## Building
### Build and copy the SDK
Refer to [How to Build the SOPHGO Deployment Library](../../../../../../docs/cn/build_and_install/sophgo.md) to build the SDK; after the build finishes, the fastdeploy-0.0.3 directory is generated under the build directory.
### Copy the model and config files into the model folder
Convert the Paddle model to a SOPHGO bmodel; the conversion steps are described in [the conversion doc](../README.md).
Copy the converted SOPHGO bmodel file into model.
### Prepare a test image in the images folder
```bash
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
cp cityscapes_demo.png ./images
```
### Build the example
```bash
cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-0.0.3
make
```
## Running the Example
```bash
./infer_demo model images/cityscapes_demo.png
```
- [Model description](../../)
- [Model conversion](../)


@@ -0,0 +1,71 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <string>
#include "fastdeploy/vision.h"
void SophgoInfer(const std::string& model_dir, const std::string& image_file) {
std::string model_file = model_dir + "/pp_liteseg_1684x_f32.bmodel";
std::string params_file;
std::string config_file = model_dir + "/deploy.yaml";
auto option = fastdeploy::RuntimeOption();
option.UseSophgo();
auto model_format = fastdeploy::ModelFormat::SOPHGO;
auto model = fastdeploy::vision::segmentation::PaddleSegModel(
model_file, params_file, config_file, option, model_format);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
//model.GetPreprocessor().DisableNormalizeAndPermute();
fastdeploy::TimeCounter tc;
tc.Start();
auto im_org = cv::imread(image_file);
// The input shape of a bmodel is fixed; resize the image to match it
int new_width = 512;
int new_height = 512;
cv::Mat im;
cv::resize(im_org, im, cv::Size(new_width, new_height), cv::INTER_LINEAR);
fastdeploy::vision::SegmentationResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
auto vis_im = fastdeploy::vision::VisSegmentation(im, res);
tc.End();
tc.PrintInfo("PPSeg in Sophgo");
cv::imwrite("infer_sophgo.jpg", vis_im);
std::cout
<< "Visualized result saved in ./infer_sophgo.jpg"
<< std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./bmodel ./test.jpeg"
<< std::endl;
return -1;
}
SophgoInfer(argv[1], argv[2]);
return 0;
}


@@ -0,0 +1,26 @@
# PaddleSeg Python Deployment Example
Before deployment, confirm the following step:
- 1. The hardware and software environment meets the requirements; refer to [FastDeploy environment requirements](../../../../../../docs/cn/build_and_install/sophgo.md)
This directory provides `infer.py`, an example that quickly deploys pp_liteseg on the SOPHGO TPU. Run the following script to complete the deployment:
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/segmentation/paddleseg/sophgo/python
# Download the test image
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
# Run inference
python3 infer.py --model ./bmodel/pp_liteseg_1684x_f32.bmodel --config_file ./bmodel/deploy.yaml --image cityscapes_demo.png
# After running, the visualized result is saved in sophgo_img.png
```
## Other Documents
- [pp_liteseg C++ deployment](../cpp)
- [Converting pp_liteseg to a SOPHGO model](../README.md)


@@ -0,0 +1,45 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True, help="Path of model.")
parser.add_argument(
"--config_file", required=True, help="Path of config file.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
return parser.parse_args()
args = parse_arguments()
# Configure the runtime and load the model
runtime_option = fd.RuntimeOption()
runtime_option.use_sophgo()
model_file = args.model
params_file = ""
config_file = args.config_file
model = fd.vision.segmentation.PaddleSegModel(
model_file,
params_file,
config_file,
runtime_option=runtime_option,
model_format=fd.ModelFormat.SOPHGO)
# Predict the segmentation result
im_org = cv2.imread(args.image)
# The bmodel is static, so its input shape is fixed; here it is [512, 512]
im = cv2.resize(im_org, [512, 512], interpolation=cv2.INTER_LINEAR)
result = model.predict(im)
print(result)
# Visualize the prediction result
vis_im = fd.vision.vis_segmentation(im, result, weight=0.5)
cv2.imwrite("sophgo_img.png", vis_im)


@@ -0,0 +1,290 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/backends/sophgo/sophgo_backend.h"
#include <assert.h>
namespace fastdeploy {
SophgoBackend::~SophgoBackend() {
// Tear down the runtime before releasing the device handle.
if (p_bmrt_ != nullptr) {
bmrt_destroy(p_bmrt_);
p_bmrt_ = nullptr;
}
bm_dev_free(handle_);
}
/***************************************************************
* @name GetSDKAndDeviceVersion
* @brief get Sophgo sdk and device version
* @param None
* @return bool
* @note None
***************************************************************/
bool SophgoBackend::GetSDKAndDeviceVersion() {
// Version query is not implemented yet; always report success.
return true;
}
/***************************************************************
* @name BuildOption
* @brief save option
* @param option: SophgoBackendOption
* @note None
***************************************************************/
void SophgoBackend::BuildOption(const SophgoBackendOption& option) {
// this->option_ = option;
// save cpu_name
// this->option_.cpu_name = option.cpu_name;
}
/***************************************************************
* @name InitFromSophgo
* @brief Initialize Sophgo model
* @param model_file: Path of the SOPHGO bmodel file.
* params_file: None
* option: config
* @return bool
* @note None
***************************************************************/
bool SophgoBackend::InitFromSophgo(const std::string& model_file,
const SophgoBackendOption& option) {
// LoadModel
if (!this->LoadModel((char*)model_file.data())) {
FDERROR << "load model failed" << std::endl;
return false;
}
// GetSDKAndDeviceVersion
if (!this->GetSDKAndDeviceVersion()) {
FDERROR << "get SDK and device version failed" << std::endl;
return false;
}
// BuildOption
this->BuildOption(option);
// GetModelInputOutputInfos
if (!this->GetModelInputOutputInfos()) {
FDERROR << "get model input output infos failed" << std::endl;
return false;
}
return true;
}
/***************************************************************
* @name LoadModel
* @brief read Sophgo bmodel
* @param model: Path of the SOPHGO bmodel file.
* @return bool
* @note None
***************************************************************/
bool SophgoBackend::LoadModel(void* model) {
unsigned int card_num = 0;
bm_status_t status = bm_get_card_num(&card_num);
assert(BM_SUCCESS == status);
status = bm_dev_request(&handle_, 0);  // use device 0
assert(BM_SUCCESS == status);
p_bmrt_ = bmrt_create(handle_);
assert(NULL != p_bmrt_);
bool load_status = bmrt_load_bmodel(p_bmrt_, (char*)model);
assert(load_status);
int network_num = bmrt_get_network_number(p_bmrt_);
const char **net_names = NULL;
bmrt_get_network_names(p_bmrt_, &net_names);
net_name_ = net_names[0];
free(net_names);
net_info_ = bmrt_get_network_info(p_bmrt_, net_name_.c_str());
assert(NULL != net_info_);
return true;
}
/***************************************************************
* @name GetModelInputOutputInfos
* @brief Get the detailed input and output infos of Model
* @param None
* @return bool
* @note None
***************************************************************/
bool SophgoBackend::GetModelInputOutputInfos() {
inputs_desc_.resize(net_info_->input_num);
bm_shape_t* input_shapes = net_info_->stages->input_shapes;
for(int idx=0; idx<net_info_->input_num; idx++){
std::string temp_name = (net_info_->input_names)[idx];
std::vector<int> temp_shape{};
temp_shape.resize(input_shapes[idx].num_dims);
for(int i=0; i<input_shapes[idx].num_dims; i++){
temp_shape[i] = input_shapes[idx].dims[i];
}
bm_data_type_t* input_dtypes = net_info_->input_dtypes;
//SophgoType to FDDataType
FDDataType temp_dtype = SophgoTensorTypeToFDDataType(*input_dtypes);
TensorInfo temp_input_info = {temp_name, temp_shape, temp_dtype};
inputs_desc_[idx] = temp_input_info;
}
outputs_desc_.resize(net_info_->output_num);
bm_shape_t* output_shapes = net_info_->stages->output_shapes;
for(int idx=0; idx<net_info_->output_num; idx++){
std::string temp_name1 = (net_info_->output_names)[idx];
std::vector<int> temp_shape1{};
temp_shape1.resize(output_shapes[idx].num_dims);
for(int i=0; i<output_shapes[idx].num_dims; i++){
temp_shape1[i] = output_shapes[idx].dims[i];
}
bm_data_type_t* output_dtypes = net_info_->output_dtypes;
//SophgoType to FDDataType
FDDataType temp_dtype1 = SophgoTensorTypeToFDDataType(*output_dtypes);
TensorInfo temp_output_info = {temp_name1, temp_shape1, temp_dtype1};
outputs_desc_[idx] = temp_output_info;
}
return true;
}
TensorInfo SophgoBackend::GetInputInfo(int index) {
FDASSERT(index < NumInputs(),
"The index: %d should less than the number of inputs: %d.", index,
NumInputs())
return inputs_desc_[index];
}
std::vector<TensorInfo> SophgoBackend::GetInputInfos() { return inputs_desc_; }
TensorInfo SophgoBackend::GetOutputInfo(int index) {
FDASSERT(index < NumOutputs(),
"The index: %d should less than the number of outputs %d.", index,
NumOutputs())
return outputs_desc_[index];
}
std::vector<TensorInfo> SophgoBackend::GetOutputInfos() { return outputs_desc_; }
bool SophgoBackend::Infer(std::vector<FDTensor>& inputs,
std::vector<FDTensor>* outputs,
bool copy_to_fd) {
int input_size = inputs.size();
assert(input_size != 0);
assert(input_size == NumInputs());
bm_tensor_t input_tensors[input_size];
bm_status_t status = BM_SUCCESS;
bm_data_type_t* input_dtypes = net_info_->input_dtypes;
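// Stage the inputs: allocate device memory for each input, then copy the host data in.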
for(int i=0;i<input_size;i++){
status = bm_malloc_device_byte(handle_,
&input_tensors[i].device_mem,net_info_->max_input_bytes[i]);
assert(BM_SUCCESS == status);
input_tensors[i].dtype = input_dtypes[i];
input_tensors[i].st_mode = BM_STORE_1N;
// Shapes come from stage 0, indexed by input (matching GetModelInputOutputInfos).
input_tensors[i].shape = net_info_->stages[0].input_shapes[i];
unsigned int input_byte = bmrt_tensor_bytesize(&input_tensors[i]);
bm_memcpy_s2d_partial(handle_, input_tensors[i].device_mem,
(void*)inputs[i].Data(), input_byte);
}
int output_size = NumOutputs();
bm_tensor_t output_tensors[output_size];
for(int i=0;i<output_size;i++){
status = bm_malloc_device_byte(handle_, &output_tensors[i].device_mem,
net_info_->max_output_bytes[i]);
assert(BM_SUCCESS == status);
}
bool launch_status = bmrt_launch_tensor_ex(p_bmrt_, net_name_.c_str(), input_tensors, net_info_->input_num,
output_tensors, net_info_->output_num, true, false);
assert(launch_status);
status = bm_thread_sync(handle_);
assert(status == BM_SUCCESS);
outputs->resize(outputs_desc_.size());
bm_data_type_t* output_dtypes = net_info_->output_dtypes;
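// Copy each output back to host memory and wrap it in an FDTensor.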
for(int i=0;i<output_size;i++){
int temp_bytesize = bmrt_tensor_bytesize(&output_tensors[i]); //Byte
float *temp_out = (float *)malloc(temp_bytesize);
bm_memcpy_d2s_partial(handle_, temp_out, output_tensors[i].device_mem, temp_bytesize);
std::vector<int64_t> temp_shape;
temp_shape.resize(outputs_desc_[i].shape.size());
for (int j = 0; j < outputs_desc_[i].shape.size(); ++j) {
temp_shape[j] = outputs_desc_[i].shape[j];
}
(*outputs)[i].Resize(temp_shape, outputs_desc_[i].dtype, outputs_desc_[i].name);
memcpy((*outputs)[i].MutableData(), temp_out, (*outputs)[i].Nbytes());
free(temp_out);
}
// Release the device memory allocated for this inference.
for (int i = 0; i < input_size; i++) {
bm_free_device(handle_, input_tensors[i].device_mem);
}
for (int i = 0; i < output_size; i++) {
bm_free_device(handle_, output_tensors[i].device_mem);
}
return true;
}
/***************************************************************
* @name SophgoTensorTypeToFDDataType
* @brief Change SophgoTensorType To FDDataType
* @param bm_data_type_t
* @return None
* @note None
***************************************************************/
FDDataType SophgoBackend::SophgoTensorTypeToFDDataType(bm_data_type_t type) {
if (type == BM_FLOAT16) {
return FDDataType::FP32;
}
if (type == BM_FLOAT32) {
return FDDataType::FP32;
}
if (type == BM_INT8) {
return FDDataType::INT8;
}
if (type == BM_INT16) {
return FDDataType::INT16;
}
if (type == BM_INT32) {
return FDDataType::INT32;
}
if (type == BM_UINT8) {
return FDDataType::UINT8;
}
FDERROR << "FDDataType don't support this type" << std::endl;
return FDDataType::UNKNOWN1;
}
/***************************************************************
* @name FDDataTypeToSophgoTensorType
* @brief Change FDDataType To SophgoTensorType
* @param FDDataType
* @return None
* @note None
***************************************************************/
// Sophgo_tensor_type
bm_data_type_t SophgoBackend::FDDataTypeToSophgoTensorType(fastdeploy::FDDataType type) {
if (type == FDDataType::FP16) {
return BM_FLOAT16;
}
if (type == FDDataType::FP32) {
return BM_FLOAT32;
}
if (type == FDDataType::INT8) {
return BM_INT8;
}
if (type == FDDataType::INT16) {
return BM_INT16;
}
if (type == FDDataType::INT32) {
return BM_INT32;
}
if (type == FDDataType::UINT8) {
return BM_UINT8;
}
FDERROR << "Sophgo_tensor_type don't support this type" << std::endl;
return BM_FLOAT32;
}
}  // namespace fastdeploy


@@ -0,0 +1,75 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/backends/backend.h"
#include "fastdeploy/core/fd_tensor.h"
#include "bmruntime_interface.h" // NOLINT
#include "bmlib_runtime.h" // NOLINT
#include "fastdeploy/backends/sophgo/sophgo_config.h"
#include <cstring>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
namespace fastdeploy {
struct SophgoBackendOption{
};
class SophgoBackend : public BaseBackend {
public:
SophgoBackend() = default;
virtual ~SophgoBackend();
bool LoadModel(void* model);
bool GetSDKAndDeviceVersion();
bool GetModelInputOutputInfos();
void BuildOption(const SophgoBackendOption& option);
bool InitFromSophgo(const std::string& model_file,
const SophgoBackendOption& option = SophgoBackendOption());
int NumInputs() const override {
return static_cast<int>(inputs_desc_.size());
}
int NumOutputs() const override {
return static_cast<int>(outputs_desc_.size());
}
TensorInfo GetInputInfo(int index) override;
TensorInfo GetOutputInfo(int index) override;
std::vector<TensorInfo> GetInputInfos() override;
std::vector<TensorInfo> GetOutputInfos() override;
bool Infer(std::vector<FDTensor>& inputs,
std::vector<FDTensor>* outputs,
bool copy_to_fd = true) override;
private:
std::vector<TensorInfo> inputs_desc_;
std::vector<TensorInfo> outputs_desc_;
std::string net_name_;
bm_handle_t handle_;
void * p_bmrt_ = nullptr;
bool infer_init = false;
const bm_net_info_t* net_info_ = nullptr;
// SophgoTPU2BackendOption option_;
static FDDataType SophgoTensorTypeToFDDataType(bm_data_type_t type);
static bm_data_type_t FDDataTypeToSophgoTensorType(FDDataType type);
};
} // namespace fastdeploy


@@ -56,6 +56,9 @@ std::string Str(const Device& d) {
case Device::RKNPU:
out = "Device::RKNPU";
break;
case Device::SOPHGOTPUD:
out = "Device::SOPHGOTPUD";
break;
case Device::IPU:
out = "Device::IPU";
break;
@@ -85,6 +88,9 @@ std::ostream& operator<<(std::ostream& out,const Device& d){
case Device::RKNPU:
out << "Device::RKNPU";
break;
case Device::SOPHGOTPUD:
out << "Device::SOPHGOTPUD";
break;
case Device::TIMVX:
out << "Device::TIMVX";
break;
@@ -205,8 +211,10 @@ std::string Str(const ModelFormat& f) {
return "ModelFormat::PADDLE";
} else if (f == ModelFormat::ONNX) {
return "ModelFormat::ONNX";
}else if (f == ModelFormat::RKNN) {
} else if (f == ModelFormat::RKNN) {
return "ModelFormat::RKNN";
} else if (f == ModelFormat::SOPHGO) {
return "ModelFormat::SOPHGO";
} else if (f == ModelFormat::TORCHSCRIPT) {
return "ModelFormat::TORCHSCRIPT";
}
@@ -220,6 +228,8 @@ std::ostream& operator<<(std::ostream& out, const ModelFormat& format) {
out << "ModelFormat::ONNX";
} else if (format == ModelFormat::RKNN) {
out << "ModelFormat::RKNN";
} else if (format == ModelFormat::SOPHGO) {
out << "ModelFormat::SOPHGO";
} else if (format == ModelFormat::TORCHSCRIPT) {
out << "ModelFormat::TORCHSCRIPT";
}


@@ -22,7 +22,8 @@
namespace fastdeploy {
enum FASTDEPLOY_DECL Device { CPU, GPU, RKNPU, IPU, TIMVX, KUNLUNXIN, ASCEND};
enum FASTDEPLOY_DECL Device {CPU, GPU, RKNPU, IPU, TIMVX, KUNLUNXIN, ASCEND,
SOPHGOTPUD};
FASTDEPLOY_DECL std::string Str(const Device& d);
@@ -72,6 +73,7 @@ enum ModelFormat {
ONNX, ///< Model with ONNX format
RKNN, ///< Model with RKNN format
TORCHSCRIPT, ///< Model with TorchScript format
SOPHGO, ///< Model with SOPHGO format
};
FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& out,


@@ -50,6 +50,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
bool use_gpu = (runtime_option.device == Device::GPU);
bool use_ipu = (runtime_option.device == Device::IPU);
bool use_rknpu = (runtime_option.device == Device::RKNPU);
bool use_sophgotpu = (runtime_option.device == Device::SOPHGOTPUD);
bool use_timvx = (runtime_option.device == Device::TIMVX);
bool use_ascend = (runtime_option.device == Device::ASCEND);
bool use_kunlunxin = (runtime_option.device == Device::KUNLUNXIN);
@@ -64,6 +65,11 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
FDERROR << "The valid rknpu backends of model " << ModelName() << " are " << Str(valid_rknpu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false;
}
} else if (use_sophgotpu) {
if (!IsSupported(valid_sophgonpu_backends, runtime_option.backend)) {
FDERROR << "The valid rknpu backends of model " << ModelName() << " are " << Str(valid_rknpu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false;
}
} else if (use_timvx) {
if (!IsSupported(valid_timvx_backends, runtime_option.backend)) {
FDERROR << "The valid timvx backends of model " << ModelName() << " are " << Str(valid_timvx_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
@@ -118,6 +124,8 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
return CreateASCENDBackend();
} else if (runtime_option.device == Device::KUNLUNXIN) {
return CreateKunlunXinBackend();
} else if (runtime_option.device == Device::SOPHGOTPUD) {
return CreateSophgoNPUBackend();
} else if (runtime_option.device == Device::IPU) {
#ifdef WITH_IPU
return CreateIpuBackend();
@@ -218,6 +226,30 @@ bool FastDeployModel::CreateRKNPUBackend() {
return false;
}
bool FastDeployModel::CreateSophgoNPUBackend() {
if (valid_sophgonpu_backends.empty()) {
FDERROR << "There's no valid npu backends for model: " << ModelName()
<< std::endl;
return false;
}
for (size_t i = 0; i < valid_sophgonpu_backends.size(); ++i) {
if (!IsBackendAvailable(valid_sophgonpu_backends[i])) {
continue;
}
runtime_option.backend = valid_sophgonpu_backends[i];
runtime_ = std::unique_ptr<Runtime>(new Runtime());
if (!runtime_->Init(runtime_option)) {
return false;
}
runtime_initialized_ = true;
return true;
}
FDERROR << "Cannot find an available npu backend to load this model."
<< std::endl;
return false;
}
bool FastDeployModel::CreateTimVXBackend() {
if (valid_timvx_backends.size() == 0) {
FDERROR << "There's no valid timvx backends for model: " << ModelName()


@@ -54,6 +54,9 @@ class FASTDEPLOY_DECL FastDeployModel {
/** Model's valid hardware backends. This member defined all the gpu backends have successfully tested for the model
*/
std::vector<Backend> valid_rknpu_backends = {};
/** Model's valid hardware backends. This member defines all the SOPHGO NPU backends that have been successfully tested for the model
*/
std::vector<Backend> valid_sophgonpu_backends = {};
/// Get number of inputs for this model
virtual int NumInputsOfRuntime() { return runtime_->NumInputs(); }
@@ -148,6 +151,7 @@ class FASTDEPLOY_DECL FastDeployModel {
bool CreateGpuBackend();
bool CreateIpuBackend();
bool CreateRKNPUBackend();
bool CreateSophgoNPUBackend();
bool CreateTimVXBackend();
bool CreateKunlunXinBackend();
bool CreateASCENDBackend();


@@ -24,6 +24,7 @@ void BindRuntime(pybind11::module& m) {
.def("use_gpu", &RuntimeOption::UseGpu)
.def("use_cpu", &RuntimeOption::UseCpu)
.def("use_rknpu2", &RuntimeOption::UseRKNPU2)
.def("use_sophgo", &RuntimeOption::UseSophgo)
.def("use_ascend", &RuntimeOption::UseAscend)
.def("use_kunlunxin", &RuntimeOption::UseKunlunXin)
.def("set_external_stream", &RuntimeOption::SetExternalStream)
@@ -241,19 +242,22 @@ void BindRuntime(pybind11::module& m) {
.value("POROS", Backend::POROS)
.value("PDINFER", Backend::PDINFER)
.value("RKNPU2", Backend::RKNPU2)
.value("SOPHGOTPU", Backend::SOPHGOTPU)
.value("LITE", Backend::LITE);
pybind11::enum_<ModelFormat>(m, "ModelFormat", pybind11::arithmetic(),
"ModelFormat for inference.")
.value("PADDLE", ModelFormat::PADDLE)
.value("TORCHSCRIPT", ModelFormat::TORCHSCRIPT)
.value("RKNN", ModelFormat::RKNN)
.value("SOPHGO", ModelFormat::SOPHGO)
.value("ONNX", ModelFormat::ONNX);
pybind11::enum_<Device>(m, "Device", pybind11::arithmetic(),
"Device for inference.")
.value("CPU", Device::CPU)
.value("GPU", Device::GPU)
.value("IPU", Device::IPU)
.value("RKNPU", Device::RKNPU);
.value("RKNPU", Device::RKNPU)
.value("SOPHGOTPU", Device::SOPHGOTPUD);
pybind11::enum_<FDDataType>(m, "FDDataType", pybind11::arithmetic(),
"Data type of FastDeploy.")


@@ -45,6 +45,10 @@
#include "fastdeploy/backends/rknpu/rknpu2/rknpu2_backend.h"
#endif
#ifdef ENABLE_SOPHGO_BACKEND
#include "fastdeploy/backends/sophgo/sophgo_backend.h"
#endif
namespace fastdeploy {
std::vector<Backend> GetAvailableBackends() {
@@ -69,6 +73,9 @@ std::vector<Backend> GetAvailableBackends() {
#endif
#ifdef ENABLE_RKNPU2_BACKEND
backends.push_back(Backend::RKNPU2);
#endif
#ifdef ENABLE_SOPHGO_BACKEND
backends.push_back(Backend::SOPHGOTPU);
#endif
return backends;
}
@@ -94,6 +101,8 @@ std::string Str(const Backend& b) {
return "Backend::POROS";
} else if (b == Backend::RKNPU2) {
return "Backend::RKNPU2";
} else if (b == Backend::SOPHGOTPU) {
return "Backend::SOPHGOTPU";
} else if (b == Backend::OPENVINO) {
return "Backend::OPENVINO";
} else if (b == Backend::LITE) {
@@ -113,6 +122,8 @@ std::ostream& operator<<(std::ostream& out, const Backend& backend) {
out << "Backend::OPENVINO";
} else if (backend == Backend::RKNPU2) {
out << "Backend::RKNPU2";
} else if (backend == Backend::SOPHGOTPU) {
out << "Backend::SOPHGOTPU";
} else if (backend == Backend::POROS) {
out << "Backend::POROS";
} else if (backend == Backend::LITE) {
@@ -158,6 +169,15 @@ bool CheckModelFormat(const std::string& model_file,
<< model_file << std::endl;
return false;
}
} else if (model_format == ModelFormat::SOPHGO) {
if (model_file.size() < 7 ||
model_file.substr(model_file.size() - 7, 7) != ".bmodel") {
FDERROR
<< "With model format of ModelFormat::SOPHGO, the model file "
"should ends with `.bmodel`, but now it's "
<< model_file << std::endl;
return false;
}
} else {
FDERROR
<< "Only support model format with frontend ModelFormat::PADDLE / "
@@ -185,6 +205,10 @@ ModelFormat GuessModelFormat(const std::string& model_file) {
model_file.substr(model_file.size() - 5, 5) == ".rknn") {
FDINFO << "Model Format: RKNN." << std::endl;
return ModelFormat::RKNN;
} else if (model_file.size() > 7 &&
model_file.substr(model_file.size() - 7, 7) == ".bmodel") {
FDINFO << "Model Format: SOPHGO." << std::endl;
return ModelFormat::SOPHGO;
}
FDERROR << "Cannot guess which model format you are using, please set "
@@ -288,6 +312,11 @@ void RuntimeOption::UseAscend(){
device = Device::ASCEND;
}
void RuntimeOption::UseSophgo() {
device = Device::SOPHGOTPUD;
UseSophgoBackend();
}
void RuntimeOption::SetExternalStream(void* external_stream) {
external_stream_ = external_stream;
}
@@ -323,6 +352,15 @@ void RuntimeOption::UseOrtBackend() {
#endif
}
// use sophgo runtime backend
void RuntimeOption::UseSophgoBackend() {
#ifdef ENABLE_SOPHGO_BACKEND
backend = Backend::SOPHGOTPU;
#else
FDASSERT(false, "The FastDeploy didn't compile with SophgoBackend.");
#endif
}
// use poros backend
void RuntimeOption::UsePorosBackend() {
#ifdef ENABLE_POROS_BACKEND
@@ -564,6 +602,8 @@ bool Runtime::Init(const RuntimeOption& _option) {
option.backend = Backend::OPENVINO;
} else if (IsBackendAvailable(Backend::RKNPU2)) {
option.backend = Backend::RKNPU2;
} else if (IsBackendAvailable(Backend::SOPHGOTPU)) {
option.backend = Backend::SOPHGOTPU;
} else {
FDERROR << "Please define backend in RuntimeOption, current it's "
"Backend::UNKNOWN."
@@ -623,7 +663,15 @@ bool Runtime::Init(const RuntimeOption& _option) {
FDINFO << "Runtime initialized with Backend::RKNPU2 in "
<< Str(option.device) << "." << std::endl;
} else {
} else if (option.backend == Backend::SOPHGOTPU) {
FDASSERT(option.device == Device::SOPHGOTPUD,
"Backend::SOPHGOTPU only supports Device::SOPHGOTPUD");
CreateSophgoNPUBackend();
FDINFO << "Runtime initialized with Backend::SOPHGOTPU in "
<< Str(option.device) << "." << std::endl;
} else {
FDERROR << "Runtime only support "
"Backend::ORT/Backend::TRT/Backend::PDINFER/Backend::POROS as "
"backend now."
@@ -926,6 +974,21 @@ void Runtime::CreateRKNPU2Backend() {
#endif
}
void Runtime::CreateSophgoNPUBackend() {
#ifdef ENABLE_SOPHGO_BACKEND
auto sophgo_option = SophgoBackendOption();
FDASSERT(option.model_format == ModelFormat::SOPHGO,
"SophgoBackend only support model format of ModelFormat::SOPHGO");
backend_ = utils::make_unique<SophgoBackend>();
auto casted_backend = dynamic_cast<SophgoBackend*>(backend_.get());
FDASSERT(casted_backend->InitFromSophgo(option.model_file, sophgo_option),
"Load model from nb file failed while initializing LiteBackend.");
#else
FDASSERT(false, "SophgoBackend is not available, please compiled with "
"ENABLE_SOPHGO_BACKEND=ON.");
#endif
}
Runtime* Runtime::Clone(void* stream, int device_id) {
Runtime* runtime = new Runtime();
if (option.backend != Backend::OPENVINO &&


@@ -43,6 +43,7 @@ enum Backend {
OPENVINO, ///< Intel OpenVINO, support Paddle/ONNX format, CPU only
LITE, ///< Paddle Lite, support Paddle format model, ARM CPU only
RKNPU2, ///< RKNPU2, support RKNN format model, Rockchip NPU only
SOPHGOTPU, ///< SOPHGOTPU, support SOPHGO format model, Sophgo TPU only
};
FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& out,
@@ -151,6 +152,9 @@ struct FASTDEPLOY_DECL RuntimeOption {
bool adaptive_seqlen = false,
bool enable_multi_stream = false);
/// Use SOPHGO TPU for inference
void UseSophgo();
void SetExternalStream(void* external_stream);
/*
@@ -170,6 +174,9 @@ struct FASTDEPLOY_DECL RuntimeOption {
/// Set ONNX Runtime as inference backend, support CPU/GPU
void UseOrtBackend();
/// Set SOPHGO Runtime as the inference backend, SOPHGO TPU only
void UseSophgoBackend();
/// Set TensorRT as inference backend, only support GPU
void UseTrtBackend();
@@ -576,6 +583,7 @@ struct FASTDEPLOY_DECL Runtime {
void CreateOpenVINOBackend();
void CreateLiteBackend();
void CreateRKNPU2Backend();
void CreateSophgoNPUBackend();
std::unique_ptr<BaseBackend> backend_;
std::vector<FDTensor> input_tensors_;
std::vector<FDTensor> output_tensors_;


@@ -32,7 +32,10 @@ PaddleClasModel::PaddleClasModel(const std::string& model_file,
valid_ascend_backends = {Backend::LITE};
valid_kunlunxin_backends = {Backend::LITE};
valid_ipu_backends = {Backend::PDINFER};
} else {
} else if (model_format == ModelFormat::SOPHGO) {
valid_sophgonpu_backends = {Backend::SOPHGOTPU};
} else {
valid_cpu_backends = {Backend::ORT, Backend::OPENVINO};
valid_gpu_backends = {Backend::ORT, Backend::TRT};
valid_rknpu_backends = {Backend::RKNPU2};


@@ -24,6 +24,8 @@ YOLOv5::YOLOv5(const std::string& model_file, const std::string& params_file,
if (model_format == ModelFormat::ONNX) {
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT};
valid_gpu_backends = {Backend::ORT, Backend::TRT};
} else if (model_format == ModelFormat::SOPHGO) {
valid_sophgonpu_backends = {Backend::SOPHGOTPU};
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};


@@ -41,6 +41,7 @@ class FASTDEPLOY_DECL PicoDet : public PPDetBase {
valid_rknpu_backends = {Backend::RKNPU2};
valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
valid_sophgonpu_backends = {Backend::SOPHGOTPU};
initialized = Initialize();
}


@@ -25,12 +25,18 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
const RuntimeOption& custom_option,
const ModelFormat& model_format) : preprocessor_(config_file),
postprocessor_(config_file) {
if (model_format == ModelFormat::SOPHGO) {
valid_sophgonpu_backends = {Backend::SOPHGOTPU};
} else {
valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::ORT, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
}
valid_rknpu_backends = {Backend::RKNPU2};
valid_timvx_backends = {Backend::LITE};
valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
runtime_option = custom_option;
runtime_option.model_format = model_format;
runtime_option.model_file = model_file;


@@ -301,6 +301,11 @@ class RuntimeOption:
rknpu2_core=rknpu2.CoreMask.RKNN_NPU_CORE_0):
return self._option.use_rknpu2(rknpu2_name, rknpu2_core)
def use_sophgo(self):
"""Inference with SOPHGO TPU
"""
return self._option.use_sophgo()
def use_ascend(self):
"""Inference with Huawei Ascend NPU
"""


@@ -56,6 +56,8 @@ if os.getenv("BUILD_ON_CPU", "OFF") == "ON":
setup_configs = dict()
setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND",
"OFF")
setup_configs["ENABLE_SOPHGO_BACKEND"] = os.getenv("ENABLE_SOPHGO_BACKEND",
"OFF")
setup_configs["WITH_ASCEND"] = os.getenv("WITH_ASCEND", "OFF")
setup_configs["ENABLE_ORT_BACKEND"] = os.getenv("ENABLE_ORT_BACKEND", "OFF")
setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND",