Merge branch 'develop' of https://github.com/PaddlePaddle/FastDeploy into huawei

yunyaoXYY
2022-12-28 12:57:13 +00:00
84 changed files with 2505 additions and 671 deletions


@@ -0,0 +1,14 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Specifies the path to the fastdeploy library after you have downloaded it
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# Include the FastDeploy dependency header file
include_directories(${FASTDEPLOY_INCS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Add the FastDeploy library dependency
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})


@@ -0,0 +1,87 @@
# FastestDet C++ Deployment Example

This directory provides `infer.cc`, an example that quickly deploys FastestDet on CPU/GPU, as well as on GPU with TensorRT acceleration.

Before deployment, confirm the following two steps:
- 1. The software and hardware environment meets the requirements. Refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the prebuilt deployment library and samples code according to your development environment. Refer to [FastDeploy Prebuilt Libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the compilation test.
```bash
mkdir build
cd build
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-1.0.3.tgz
tar xvf fastdeploy-linux-x64-1.0.3.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-1.0.3
make -j
# Download the officially converted FastestDet model file and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/FastestDet.onnx
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# CPU inference
./infer_demo FastestDet.onnx 000000014439.jpg 0
# GPU inference
./infer_demo FastestDet.onnx 000000014439.jpg 1
# TensorRT inference on GPU
./infer_demo FastestDet.onnx 000000014439.jpg 2
```
The visualized result after running is shown below:

<img width="640" src="https://user-images.githubusercontent.com/44280887/206176291-61eb118b-391b-4431-b79e-a393b9452138.jpg">

The above commands only apply to Linux or macOS. For how to use the FastDeploy C++ SDK on Windows, refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)

## FastestDet C++ Interface
### FastestDet Class
```c++
fastdeploy::vision::detection::FastestDet(
    const string& model_file,
    const string& params_file = "",
    const RuntimeOption& runtime_option = RuntimeOption(),
    const ModelFormat& model_format = ModelFormat::ONNX)
```
Loads and initializes the FastestDet model, where model_file is the exported ONNX model.

**Parameters**
> * **model_file**(str): Path to the model file
> * **params_file**(str): Path to the parameters file. Pass an empty string when the model is in ONNX format
> * **runtime_option**(RuntimeOption): Backend inference configuration. Defaults to None, i.e. the default configuration is used
> * **model_format**(ModelFormat): Model format. Defaults to ONNX

#### Predict Function
> ```c++
> FastestDet::Predict(cv::Mat* im, DetectionResult* result,
>                     float conf_threshold = 0.65,
>                     float nms_iou_threshold = 0.45)
> ```
>
> Model prediction interface: takes an input image and directly outputs the detection result.
>
> **Parameters**
>
> > * **im**: Input image. Note that it must be in HWC, BGR format
> > * **result**: Detection result, including the detection boxes and the confidence of each box. See [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the description of DetectionResult
> > * **conf_threshold**: Confidence threshold for filtering detection boxes
> > * **nms_iou_threshold**: IoU threshold used during NMS

### Class Member Variables
#### Preprocessing Parameters
Users can modify the following preprocessing parameters according to their needs, which affects the final inference and deployment results.
> > * **size**(vector&lt;int&gt;): This parameter changes the resize target during preprocessing. It contains two integers, [width, height]. Default is [352, 352]
- [Model Description](../../)
- [Python Deployment](../python)
- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)


@@ -0,0 +1,105 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
void CpuInfer(const std::string& model_file, const std::string& image_file) {
  auto model = fastdeploy::vision::detection::FastestDet(model_file);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im, res);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void GpuInfer(const std::string& model_file, const std::string& image_file) {
  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  auto model =
      fastdeploy::vision::detection::FastestDet(model_file, "", option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im, res);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void TrtInfer(const std::string& model_file, const std::string& image_file) {
  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  option.UseTrtBackend();
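  // Fix the TensorRT input shape to FastestDet's default 352x352 input.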
  option.SetTrtInputShape("images", {1, 3, 352, 352});
  auto model =
      fastdeploy::vision::detection::FastestDet(model_file, "", option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im, res);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

int main(int argc, char* argv[]) {
  if (argc < 4) {
    std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
                 "e.g. ./infer_demo ./FastestDet.onnx ./test.jpeg 0"
              << std::endl;
    std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
                 "with gpu; 2: run with gpu and use tensorrt backend."
              << std::endl;
    return -1;
  }

  if (std::atoi(argv[3]) == 0) {
    CpuInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 1) {
    GpuInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 2) {
    TrtInfer(argv[1], argv[2]);
  }
  return 0;
}


@@ -0,0 +1,74 @@
# FastestDet Python Deployment Example

Before deployment, confirm the following two steps:
- 1. The software and hardware environment meets the requirements. Refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Install the FastDeploy Python whl package. Refer to [FastDeploy Python Installation](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

This directory provides `infer.py`, an example that quickly deploys FastestDet on CPU/GPU, as well as on GPU with TensorRT acceleration. Run the following script to complete it.
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/detection/fastestdet/python/
# Download the FastestDet model file and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/FastestDet.onnx
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# CPU inference
python infer.py --model FastestDet.onnx --image 000000014439.jpg --device cpu
# GPU inference
python infer.py --model FastestDet.onnx --image 000000014439.jpg --device gpu
# TensorRT inference on GPU
python infer.py --model FastestDet.onnx --image 000000014439.jpg --device gpu --use_trt True
```
The visualized result after running is shown below:

<img width="640" src="https://user-images.githubusercontent.com/44280887/206176291-61eb118b-391b-4431-b79e-a393b9452138.jpg">

## FastestDet Python Interface
```python
fastdeploy.vision.detection.FastestDet(model_file, params_file=None, runtime_option=None, model_format=ModelFormat.ONNX)
```
Loads and initializes the FastestDet model, where model_file is the exported ONNX model.

**Parameters**
> * **model_file**(str): Path to the model file
> * **params_file**(str): Path to the parameters file. No setting is needed when the model is in ONNX format
> * **runtime_option**(RuntimeOption): Backend inference configuration. Defaults to None, i.e. the default configuration is used
> * **model_format**(ModelFormat): Model format. Defaults to ONNX

### predict Function
> ```python
> FastestDet.predict(image_data)
> ```
>
> Model prediction interface: takes an input image and directly outputs the detection result.
>
> **Parameters**
>
> > * **image_data**(np.ndarray): Input data. Note that it must be in HWC, BGR format
>
> **Return**
>
> > Returns a `fastdeploy.vision.DetectionResult` structure. See [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for its description

### Class Member Attributes
#### Preprocessing Parameters
Users can modify the following preprocessing parameters according to their needs, which affects the final inference and deployment results.
> > * **size**(list[int]): This parameter changes the resize target during preprocessing. It contains two integers, [width, height]. Default is [352, 352]
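
The hedged sketch below strings these pieces together: it loads the model, overrides the `size` attribute documented above, and reads the fields of the returned result. The `boxes`/`scores`/`label_ids` field names follow the DetectionResult description in [Vision Model Prediction Results](../../../../../docs/api/vision_results/); treat the exact attribute names as assumptions if your FastDeploy version differs.

```python
import cv2
import fastdeploy as fd

model = fd.vision.detection.FastestDet("FastestDet.onnx")
# Preprocessing parameter documented above, assumed to be exposed as a plain
# attribute on the model object: [width, height]
model.size = [352, 352]

im = cv2.imread("000000014439.jpg")
result = model.predict(im)

# DetectionResult fields per the vision results doc: one box, one score and
# one label id per detected object
for box, score, label_id in zip(result.boxes, result.scores, result.label_ids):
    x1, y1, x2, y2 = box
    print(f"label={label_id} score={score:.2f} box=({x1:.1f}, {y1:.1f}, {x2:.1f}, {y2:.1f})")
```
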
## Other Documents
- [FastestDet Model Description](..)
- [FastestDet C++ Deployment](../cpp)
- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)


@@ -0,0 +1,51 @@
import fastdeploy as fd
import cv2


def parse_arguments():
    import argparse
    import ast
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", required=True, help="Path of FastestDet onnx model.")
    parser.add_argument(
        "--image", required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu' or 'gpu'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use tensorrt.")
    return parser.parse_args()


def build_option(args):
    option = fd.RuntimeOption()
    if args.device.lower() == "gpu":
        option.use_gpu()
    if args.use_trt:
        option.use_trt_backend()
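        # Fix the TensorRT input shape to FastestDet's default 352x352 input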
        option.set_trt_input_shape("images", [1, 3, 352, 352])
    return option

args = parse_arguments()
# Configure runtime and load model
runtime_option = build_option(args)
model = fd.vision.detection.FastestDet(args.model, runtime_option=runtime_option)
# Predict picture detection results
im = cv2.imread(args.image)
result = model.predict(im)
# Visualization of prediction results
vis_im = fd.vision.vis_detection(im, result)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result save in ./visualized_result.jpg")


@@ -17,16 +17,9 @@ cd FastDeploy/examples/vision/detection/paddledetection/python/serving
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
# Install uvicorn
pip install uvicorn
# Launch the service; you can choose whether to use GPU and TensorRT, and configure the IP, port, etc. per 'uvicorn --help'
# CPU
MODEL_DIR=ppyoloe_crn_l_300e_coco DEVICE=cpu uvicorn server:app
# GPU
MODEL_DIR=ppyoloe_crn_l_300e_coco DEVICE=gpu uvicorn server:app
# TensorRT on GPU (note: the first TensorRT run serializes the model, which takes some time; please be patient)
MODEL_DIR=ppyoloe_crn_l_300e_coco DEVICE=gpu USE_TRT=true uvicorn server:app
# Launch the service; modify the configuration items in server.py to select hardware, backend, etc.
# The IP and port can be specified via --host and --port
fastdeploy simple_serving --app server:app
```
Client:


@@ -17,17 +17,9 @@ cd FastDeploy/examples/vision/detection/paddledetection/python/serving
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
# Install uvicorn
pip install uvicorn
# Launch the server; it can be configured to use GPU and TensorRT,
# and run 'uvicorn --help' to check how to specify the IP, port, etc.
# CPU
MODEL_DIR=ppyoloe_crn_l_300e_coco DEVICE=cpu uvicorn server:app
# GPU
MODEL_DIR=ppyoloe_crn_l_300e_coco DEVICE=gpu uvicorn server:app
# GPU and TensorRT
MODEL_DIR=ppyoloe_crn_l_300e_coco DEVICE=gpu USE_TRT=true uvicorn server:app
# Launch the server; change the configurations in server.py to select hardware, backend, etc.,
# and use --host, --port to specify the IP and port
fastdeploy simple_serving --app server:app
```
Client:


@@ -1,20 +1,15 @@
import requests
import json
import cv2
import base64
import fastdeploy as fd
from fastdeploy.serving.utils import cv2_to_base64
if __name__ == '__main__':
    url = "http://127.0.0.1:8000/fd/ppyoloe"
    headers = {"Content-Type": "application/json"}
    im = cv2.imread("000000014439.jpg")
    data = {
        "data": {
            "image": fd.serving.utils.cv2_to_base64(im)
        },
        "parameters": {}
    }
    data = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}
    resp = requests.post(url=url, headers=headers, data=json.dumps(data))
    if resp.status_code == 200:


@@ -1,18 +1,16 @@
import fastdeploy as fd
from fastdeploy.serving.server import SimpleServer
import os
import logging
logging.getLogger().setLevel(logging.INFO)
# Get arguments from environment variables
model_dir = os.environ.get('MODEL_DIR')
device = os.environ.get('DEVICE', 'cpu')
use_trt = os.environ.get('USE_TRT', False)
# Prepare model, download from hub or use local dir
if model_dir is None:
    model_dir = fd.download_model(name='ppyoloe_crn_l_300e_coco')
# Configurations
model_dir = 'ppyoloe_crn_l_300e_coco'
device = 'cpu'
use_trt = False
# Prepare model
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "infer_cfg.yml")
@@ -33,7 +31,7 @@ model_instance = fd.vision.detection.PPYOLOE(
    runtime_option=option)
# Create server, setup REST API
app = fd.serving.SimpleServer()
app = SimpleServer()
app.register(
task_name="fd/ppyoloe",
model_handler=fd.serving.handler.VisionModelHandler,


@@ -20,19 +20,11 @@ install(TARGETS infer_demo DESTINATION ./)
install(DIRECTORY models DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
# install(DIRECTORY run_with_adb.sh DESTINATION ./)
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
file(GLOB_RECURSE FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/lib*.so*)
file(GLOB_RECURSE ALL_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/lib*.so*)
list(APPEND ALL_LIBS ${FASTDEPLOY_LIBS})
install(PROGRAMS ${ALL_LIBS} DESTINATION lib)
file(GLOB ADB_TOOLS run_with_adb.sh)
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)


@@ -24,7 +24,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "infer_cfg.yml";
  auto subgraph_file = model_dir + sep + "subgraph.txt";
  fastdeploy::vision::EnableFlyCV();
  fastdeploy::RuntimeOption option;
  option.UseTimVX();
  option.SetLiteSubgraphPartitionPath(subgraph_file);


@@ -37,13 +37,13 @@ cp models/runtime/ppyoloe_runtime_config.pbtxt models/runtime/config.pbtxt
# Pull the fastdeploy image (x.y.z is the image version number; replace it with the fastdeploy version digits)
# GPU image
docker pull paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
# CPU image
docker pull paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
# Run the container. The container is named fd_serving, and the current directory is mounted as the container's /serving directory
nvidia-docker run -it --net=host --name fd_serving --shm-size="1g" -v `pwd`/:/serving paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
nvidia-docker run -it --net=host --name fd_serving --shm-size="1g" -v `pwd`/:/serving registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
# Launch the service (without setting the CUDA_VISIBLE_DEVICES environment variable, the service has scheduling permission over all GPU cards)
CUDA_VISIBLE_DEVICES=0 fastdeployserver --model-repository=/serving/models


@@ -0,0 +1 @@
README_CN.md


@@ -0,0 +1,36 @@
Simplified Chinese | [English](README_EN.md)

# YOLOv5 Python Simple Serving Demo

Before deployment, confirm the following two steps:
- 1. The software and hardware environment meets the requirements. Refer to [FastDeploy Environment Requirements](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Install the FastDeploy Python whl package. Refer to [FastDeploy Python Installation](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

Server:
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/detection/yolov5/python/serving
# Download the model file
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_infer.tar
tar xvf yolov5s_infer.tar
# Launch the service; modify the configuration items in server.py to select hardware, backend, etc.
# The IP and port can be specified via --host and --port
fastdeploy simple_serving --app server:app
```
Client:
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/detection/yolov5/python/serving
# Download the test image
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# Send a request to the service to get the inference result (modify the IP and port in the script if necessary)
python client.py
```


@@ -0,0 +1,36 @@
English | [简体中文](README_CN.md)
# YOLOv5 Python Simple Serving Demo
## Environment
- 1. Prepare the environment and install the FastDeploy Python whl package; refer to [download_prebuilt_libraries](../../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
Server:
```bash
# Download demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/detection/yolov5/python/serving
# Download model
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_infer.tar
tar xvf yolov5s_infer.tar
# Launch the server; change the configurations in server.py to select hardware, backend, etc.,
# and use --host, --port to specify the IP and port
fastdeploy simple_serving --app server:app
```
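
For reference, the hardware and backend switches live at the top of `server.py` (the full file appears later in this commit). Below is a hedged sketch of that configuration block, edited here to enable GPU plus TensorRT; the defaults in the demo are `device = 'cpu'` and `use_trt = False`.

```python
import fastdeploy as fd

# Configuration block mirrored from server.py in this directory
model_dir = 'yolov5s_infer'
device = 'gpu'   # 'cpu' or 'gpu'
use_trt = True   # enable the TensorRT backend when running on GPU

option = fd.RuntimeOption()
if device.lower() == 'gpu':
    option.use_gpu()
if use_trt:
    option.use_trt_backend()
    # YOLOv5s takes a fixed 1x3x640x640 input; cache the built TensorRT engine
    option.set_trt_input_shape("images", [1, 3, 640, 640])
    option.set_trt_cache_file('yolov5s.trt')
```
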
Client:
```bash
# Download demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/detection/yolov5/python/serving
# Download test image
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# Send request and get inference result (Please adapt the IP and port if necessary)
python client.py
```


@@ -0,0 +1,23 @@
import requests
import json
import cv2
import fastdeploy as fd
from fastdeploy.serving.utils import cv2_to_base64
if __name__ == '__main__':
    url = "http://127.0.0.1:8000/fd/yolov5s"
    headers = {"Content-Type": "application/json"}
    im = cv2.imread("000000014439.jpg")
    data = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}
    resp = requests.post(url=url, headers=headers, data=json.dumps(data))
    if resp.status_code == 200:
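        # The response carries the detection result as a JSON string under
        # "result"; decode it back into a DetectionResult for visualization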
        r_json = json.loads(resp.json()["result"])
        det_result = fd.vision.utils.json_to_detection(r_json)
        vis_im = fd.vision.vis_detection(im, det_result, score_threshold=0.5)
        cv2.imwrite("visualized_result.jpg", vis_im)
        print("Visualized result saved in ./visualized_result.jpg")
    else:
        print("Error code:", resp.status_code)
        print(resp.text)


@@ -0,0 +1,38 @@
import fastdeploy as fd
from fastdeploy.serving.server import SimpleServer
import os
import logging
logging.getLogger().setLevel(logging.INFO)
# Configurations
model_dir = 'yolov5s_infer'
device = 'cpu'
use_trt = False
# Prepare model
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
# Setup runtime option to select hardware, backend, etc.
option = fd.RuntimeOption()
if device.lower() == 'gpu':
    option.use_gpu()
if use_trt:
    option.use_trt_backend()
    option.set_trt_input_shape("images", [1, 3, 640, 640])
    option.set_trt_cache_file('yolov5s.trt')
# Create model instance
model_instance = fd.vision.detection.YOLOv5(
    model_file,
    params_file,
    runtime_option=option,
    model_format=fd.ModelFormat.PADDLE)
# Create server, setup REST API
app = SimpleServer()
app.register(
    task_name="fd/yolov5s",
    model_handler=fd.serving.handler.VisionModelHandler,
    predictor=model_instance)


@@ -21,17 +21,10 @@ install(TARGETS infer_demo DESTINATION ./)
install(DIRECTORY models DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
file(GLOB_RECURSE FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/lib*.so*)
file(GLOB_RECURSE ALL_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/lib*.so*)
list(APPEND ALL_LIBS ${FASTDEPLOY_LIBS})
install(PROGRAMS ${ALL_LIBS} DESTINATION lib)
file(GLOB ADB_TOOLS run_with_adb.sh)
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)


@@ -23,7 +23,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto subgraph_file = model_dir + sep + "subgraph.txt";
  fastdeploy::vision::EnableFlyCV();
  fastdeploy::RuntimeOption option;
  option.UseTimVX();
  option.SetLiteSubgraphPartitionPath(subgraph_file);


@@ -20,12 +20,12 @@ mv yolov5s.onnx models/runtime/1/model.onnx
# Pull the fastdeploy image (x.y.z is the image version number; replace it with digits per the serving docs)
# GPU image
docker pull paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
# CPU image
docker pull paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
# Run the container. The container is named fd_serving, and the current directory is mounted as the container's /yolov5_serving directory
nvidia-docker run -it --net=host --name fd_serving -v `pwd`/:/yolov5_serving paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
nvidia-docker run -it --net=host --name fd_serving -v `pwd`/:/yolov5_serving registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
# Launch the service (without setting the CUDA_VISIBLE_DEVICES environment variable, the service has scheduling permission over all GPU cards)
CUDA_VISIBLE_DEVICES=0 fastdeployserver --model-repository=/yolov5_serving/models --backend-config=python,shm-default-byte-size=10485760