[docs][win] add windows c++ sdk demo to examples (#136)
* [docs] format docs with markdown with language tags
* [docs][win] add windows c++ sdk demo
* [docs][win] add windows c++ sdk demo to examples
* [docs][api] update runtime_option docs
README.md | 18
@@ -19,7 +19,7 @@

## Recent updates

- 🔥 **2022.8.18: FastDeploy [release/v0.2.0](https://github.com/PaddlePaddle/FastDeploy/releases/tag/release%2F0.2.0) released** <br>
    - **Server-side fully upgraded: one SDK covering all models**
    - Released an easy-to-use, high-performance inference SDK for x86 CPU and NVIDIA GPU, with greatly improved inference speed
    - Supports the ONNXRuntime, Paddle Inference and TensorRT inference engines
    - Supports top object-detection models such as YOLOv7, YOLOv6, YOLOv5 and PP-YOLOE, with [demo examples](examples/vision/detection/)

@@ -51,7 +51,7 @@

<div id="fastdeploy-quick-start"></div>

### 1.1 Quick installation of the FastDeploy Python/C++ library

#### Environment requirements

@@ -63,11 +63,11 @@

- python 3.6\~3.9 (3.8\~3.9 on Windows 10)

#### Install the CPU Python package

```bash
pip install numpy opencv-python fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```

#### Install the GPU Python package

```bash
pip install numpy opencv-python fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```

#### Install the C++ library

@@ -93,8 +93,8 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000

import cv2
import fastdeploy.vision as vision

model = vision.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
                                 "ppyoloe_crn_l_300e_coco/model.pdiparams",
                                 "ppyoloe_crn_l_300e_coco/infer_cfg.yml")
im = cv2.imread("000000014439.jpg")
result = model.predict(im.copy())

@@ -114,8 +114,8 @@ cv2.imwrite("vis_image.jpg", vis_im)

int main(int argc, char* argv[]) {
  namespace vision = fastdeploy::vision;
  auto model = vision::detection::PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
                                          "ppyoloe_crn_l_300e_coco/model.pdiparams",
                                          "ppyoloe_crn_l_300e_coco/infer_cfg.yml");
  auto im = cv::imread("000000014439.jpg");

@@ -128,7 +128,7 @@ int main(int argc, char* argv[]) {

```

For more deployment examples, see the [vision model deployment examples](examples/vision).

## 2. Server-side model list 🔥🔥🔥

<div id="fastdeploy-server-models"></div>
@@ -15,13 +15,13 @@ The Runtime in FastDeploy contains multiple inference backends, related as shown below

| GPU | Supported | Supported | Supported | Supported |

For every model, the inference backend and the inference-time parameters are configured through `RuntimeOption`. For example, in Python, after loading a model you can print the inference configuration with the following code

```python
model = fastdeploy.vision.ultralytics.YOLOv5("yolov5s.onnx")
model = fastdeploy.vision.detection.YOLOv5("yolov5s.onnx")
print(model.runtime_option)
```

which produces output like the following

```python
RuntimeOption(
  backend : Backend.ORT     # inference backend: ONNXRuntime
  cpu_thread_num : 8        # number of CPU threads (only effective for CPU inference)

@@ -70,7 +70,7 @@ RuntimeOption(

> * **trt_max_shape**(dict[str : list[int]]): when the model has dynamic shapes and the input shape varies at inference time, this parameter configures the maximum input shape
> * **trt_max_batch_size**(int): maximum batch size for TensorRT inference

```python
import fastdeploy as fd

option = fd.RuntimeOption()

@@ -81,10 +81,11 @@ option.trt_min_shape = {"x": [1, 3, 224, 224]}

option.trt_opt_shape = {"x": [4, 3, 224, 224]}
option.trt_max_shape = {"x": [8, 3, 224, 224]}

model = fd.vision.ppcls.Model("resnet50/inference.pdmodel",
                              "resnet50/inference.pdiparams",
                              "resnet50/inference_cls.yaml",
                              runtime_option=option)
model = fd.vision.classification.PaddleClasModel(
    "resnet50/inference.pdmodel",
    "resnet50/inference.pdiparams",
    "resnet50/inference_cls.yaml",
    runtime_option=option)
```
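To sanity-check a configuration like the one above, a minimal sketch (not part of the original docs) is to build the model with the option, print the effective `model.runtime_option` as shown at the top of this page, and run a prediction. The ResNet50 file paths come from the example above; `test.jpg` is a placeholder image.

```python
# Minimal sketch, assuming the resnet50 inference files from the example above
# exist locally; "test.jpg" is a placeholder for any local image.
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.trt_min_shape = {"x": [1, 3, 224, 224]}
option.trt_opt_shape = {"x": [4, 3, 224, 224]}
option.trt_max_shape = {"x": [8, 3, 224, 224]}

model = fd.vision.classification.PaddleClasModel(
    "resnet50/inference.pdmodel",
    "resnet50/inference.pdiparams",
    "resnet50/inference_cls.yaml",
    runtime_option=option)

# Print the effective inference configuration, as documented above
print(model.runtime_option)

# predict() is documented in the PaddleClas example docs further below
im = cv2.imread("test.jpg")
result = model.predict(im, topk=5)
print(result)
```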
## C++ usage

@@ -112,7 +113,7 @@ model = fd.vision.ppcls.Model("resnet50/inference.pdmodel",

> * **trt_max_shape**(map<string, vector<int>>): when the model has dynamic shapes and the input shape varies at inference time, this parameter configures the maximum input shape
> * **trt_max_batch_size**(int): maximum batch size for TensorRT inference

```c++
#include "fastdeploy/vision.h"

int main() {

@@ -121,11 +122,11 @@ int main() {

  option.trt_opt_shape["x"] = {4, 3, 224, 224};
  option.trt_max_shape["x"] = {8, 3, 224, 224};

  auto model = fastdeploy::vision::ppcls.Model(
  auto model = fastdeploy::vision::classification::PaddleClasModel(
      "resnet50/inference.pdmodel",
      "resnet50/inference.pdiparams",
      "resnet50/inference_cls.yaml",
      option);
  return 0;
}
```
@@ -6,7 +6,7 @@ ClassifyResult is defined in `csrcs/fastdeploy/vision/common/result.h` and is used

`fastdeploy::vision::ClassifyResult`

```c++
struct ClassifyResult {
  std::vector<int32_t> label_ids;
  std::vector<float> scores;

@@ -6,7 +6,7 @@ DetectionResult is defined in `csrcs/fastdeploy/vision/common/result.h` and is used

`fastdeploy::vision::DetectionResult`

```c++
struct DetectionResult {
  std::vector<std::array<float, 4>> boxes;
  std::vector<float> scores;

@@ -6,7 +6,7 @@ FaceDetectionResult is defined in `csrcs/fastdeploy/vision/common/result.h` and is used

`fastdeploy::vision::FaceDetectionResult`

```c++
struct FaceDetectionResult {
  std::vector<std::array<float, 4>> boxes;
  std::vector<std::array<float, 2>> landmarks;

@@ -32,4 +32,3 @@ struct FaceDetectionResult {

- **scores**(list of float): member variable, the confidence scores of all targets detected in a single image
- **landmarks**(list of list(float)): member variable, the landmarks of all faces detected in a single image
- **landmarks_per_face**(int): member variable, the number of landmarks in each face box (see the sketch after this list).
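The sketch below (not from the original docs) shows one way these members could be read from the Python side after calling a face-detection model's `predict`; it assumes the Python result mirrors the C++ member names, and uses SCRFD and the face-detection example files documented later in this commit purely for illustration.

```python
# Hypothetical illustration: iterate a FaceDetectionResult returned by predict().
# Assumes the Python result exposes the same members as the C++ struct:
# boxes, scores, landmarks, landmarks_per_face.
import cv2
import fastdeploy as fd

model = fd.vision.facedet.SCRFD("scrfd_500m_bnkps_shape640x640.onnx")
im = cv2.imread("test_lite_face_detector_3.jpg")
result = model.predict(im)

k = result.landmarks_per_face
for i, (box, score) in enumerate(zip(result.boxes, result.scores)):
    # landmarks is a flat list of [x, y] points, k points per detected face
    face_landmarks = result.landmarks[i * k:(i + 1) * k]
    print(f"face {i}: box={box}, score={score:.3f}, landmarks={face_landmarks}")
```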
@@ -5,7 +5,7 @@ FaceRecognitionResult is defined in `csrcs/fastdeploy/vision/common/result.h` and is used

`fastdeploy::vision::FaceRecognitionResult`

```c++
struct FaceRecognitionResult {
  std::vector<float> embedding;
  void Clear();

@@ -6,7 +6,7 @@ MattingResult is defined in `csrcs/fastdeploy/vision/common/result.h` and is used

`fastdeploy::vision::MattingResult`

```c++
struct MattingResult {
  std::vector<float> alpha;
  std::vector<float> foreground;

@@ -6,7 +6,7 @@ SegmentationResult is defined in `csrcs/fastdeploy/vision/common/result.h` and is used

`fastdeploy::vision::SegmentationResult`

```c++
struct SegmentationResult {
  std::vector<uint8_t> label_map;
  std::vector<float> score_map;
@@ -9,7 +9,7 @@

- TensorRT >= 8.4 (when ENABLE_TRT_BACKEND=ON)

## Build the C++ library

```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
git checkout develop

@@ -23,7 +23,7 @@ make install

The compiled inference library is then located in `fastdeploy-0.0.3` under the current directory

## Build the Python package

```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
git checkout develop

@@ -3,13 +3,13 @@

## Environment requirements

- cmake >= 3.12
- g++ >= 8.2
- Visual Studio 16 2019
- cuda >= 11.2 (when WITH_GPU=ON)
- cudnn >= 11.2 (when WITH_GPU=ON)
- TensorRT >= 8.4 (when ENABLE_TRT_BACKEND=ON)

## Get the code

```bat
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
git checkout develop

@@ -19,10 +19,10 @@ git checkout develop

From the Windows Start menu, open the `x64 Native Tools Command Prompt for VS 2019`. `CMAKE_INSTALL_PREFIX` specifies the install path of the SDK produced by the build

```bat
mkdir build
cd build
cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_INSTALL_PREFIX=D:\Paddle\FastDeploy\build\fastdeploy-win-x64-0.2.0 -DENABLE_ORT_BACKEND=ON -DENABLE_VISION=ON ..
msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=x64
msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64
```

@@ -31,7 +31,7 @@ msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64

## Build the Python wheel package

When building the Python package, the build options are read from environment variables

```bat
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
git checkout develop
docs/compile/how_to_use_sdk_on_windows.md | 63 (new file)

@@ -0,0 +1,63 @@
# Using the FastDeploy C++ SDK on Windows

Using the FastDeploy C++ SDK on Windows differs slightly from Linux. The following uses PPYOLOE as an example to demonstrate deployment on CPU/GPU, and on GPU with TensorRT acceleration.

Before deploying, confirm the following two steps

- 1. The software and hardware environment meets the requirements; see the [FastDeploy environment requirements](../../../../../docs/the%20software%20and%20hardware%20requirements.md)
- 2. Download the precompiled deployment library and samples code for your development environment; see the [FastDeploy precompiled libraries](../../../../../docs/quick_start)

## Environment requirements

- cmake >= 3.12
- Visual Studio 16 2019
- cuda >= 11.2 (when WITH_GPU=ON)
- cudnn >= 11.2 (when WITH_GPU=ON)
- TensorRT >= 8.4 (when ENABLE_TRT_BACKEND=ON)

## Download the FastDeploy Windows 10 C++ SDK

The prebuilt FastDeploy Windows 10 C++ SDK can be downloaded from the link below; the SDK already contains the examples code.
```text
https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.2.0.zip
```

## Prepare the model files and test image

Download the model files and test image from the links below, then extract the archive
```text
https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz  # (extract after downloading)
https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```

## Build PPYOLOE on Windows

From the Windows Start menu, open the `x64 Native Tools Command Prompt for VS 2019`, and cd into the ppyoloe demo directory
```bat
cd fastdeploy-win-x64-gpu-0.2.0\examples\vision\detection\paddledetection\cpp
```
```bat
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.0 -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2"
```
Then run
```bat
msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
```

The compiled exe is saved in the Release directory. Before running the demo, copy the model and test image into that directory. In addition, the DLL search path must be set in the terminal; run the following commands in the build directory.
```bat
set FASTDEPLOY_PATH=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.0
set PATH=%FASTDEPLOY_PATH%\lib;%FASTDEPLOY_PATH%\third_libs\install\onnxruntime\lib;%FASTDEPLOY_PATH%\third_libs\install\opencv-win-x64-3.4.16\build\x64\vc15\bin;%FASTDEPLOY_PATH%\third_libs\install\paddle_inference\paddle\lib;%FASTDEPLOY_PATH%\third_libs\install\paddle_inference\third_party\install\mkldnn\lib;%FASTDEPLOY_PATH%\third_libs\install\paddle_inference\third_party\install\mklml\lib;%FASTDEPLOY_PATH%\third_libs\install\paddle2onnx\lib;%FASTDEPLOY_PATH%\third_libs\install\tensorrt\lib;%FASTDEPLOY_PATH%\third_libs\install\yaml-cpp\lib;%PATH%
```
Note that onnxruntime.dll must be copied into the directory containing the exe.
```bat
copy /Y %FASTDEPLOY_PATH%\third_libs\install\onnxruntime\lib\onnxruntime* Release\
```
Because newer versions of Windows ship an onnxruntime.dll in the System32 system directory, an onnxruntime loading conflict can still occur even when PATH is set. That is why the onnxruntime.dll used by the demo must first be copied into the directory containing the exe.
```bat
where onnxruntime.dll
C:\Windows\System32\onnxruntime.dll  # the onnxruntime.dll shipped with Windows
```
## Run the demo
```bat
cd Release
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0  # CPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1  # GPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 2  # GPU + TensorRT
```
@@ -1,7 +1,7 @@

# Code contribution guidelines

FastDeploy uses clang-format and cpplint to check and format code; install pre-commit before submitting code
```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
git checkout develop

@@ -9,11 +9,11 @@ FastDeploy provides prebuilt Python wheel packages for Windows/Linux/Mac; developers

- Mac supports Python 3.6~3.9

## Install the CPU Python package
```bash
pip install fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
## Install the GPU Python package
```bash
pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
@@ -9,14 +9,14 @@

Taking ResNet50_vd inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
# Download the SDK and build the model examples code (the SDK already contains the examples code)
wget https://bj.bcebos.com/paddlehub/fastdeploy/libs/0.2.0/fastdeploy-linux-x64-gpu-0.2.0.tgz
tar xvf fastdeploy-linux-x64-gpu-0.2.0.tgz
cd fastdeploy-linux-x64-gpu-0.2.0/examples/vision/classification/paddleclas/cpp
mkdir build
cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/../../../../../../../fastdeploy-linux-x64-gpu-0.2.0
make -j

# Download the ResNet50_vd model files and test image

@@ -33,11 +33,14 @@ wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/Ima

./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 2
```

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## PaddleClas C++ API

### PaddleClas class

```c++
fastdeploy::vision::classification::PaddleClasModel(
    const string& model_file,
    const string& params_file,

@@ -58,14 +61,14 @@ PaddleClas model loading and initialization, where model_file and params_file are the trained model

#### Predict function

> ```c++
> PaddleClasModel::Predict(cv::Mat* im, ClassifyResult* result, int topk = 1)
> ```
>
> Model prediction interface: takes an image as input and directly returns the prediction result.
>
> **Parameters**
>
> > * **im**: input image; note it must be in HWC, BGR format
> > * **result**: classification result, including label_id and the corresponding confidence; see [vision model prediction results](../../../../../docs/api/vision_results/) for the ClassifyResult description
> > * **topk**(int): return the top-k classification results with the highest predicted probability, default 1

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy ResNet50_vd on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/classification/paddleclas/python

@@ -26,7 +26,7 @@ python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg -

```

After it finishes, the returned result looks like the following
```bash
ClassifyResult(
label_ids: 153,
scores: 0.686229,

@@ -35,7 +35,7 @@ scores: 0.686229,

## PaddleClasModel Python API

```python
fd.vision.classification.PaddleClasModel(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
```

@@ -51,19 +51,19 @@ PaddleClas model loading and initialization, where model_file and params_file are the trained model

### predict function

> ```python
> PaddleClasModel.predict(input_image, topk=1)
> ```
>
> Model prediction interface: takes an image as input and directly returns the prediction result.
>
> **Parameters**
>
> > * **input_image**(np.ndarray): input data; note it must be in HWC, BGR format
> > * **topk**(int): return the top-k classification results with the highest predicted probability, default 1

> **Return**
>
> > Returns a `fastdeploy.vision.ClassifyResult` structure; see the [vision model prediction results](../../../../../docs/api/vision_results/) documentation for its description
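As a usage illustration (not part of the original docs), the sketch below loads the ResNet50_vd_infer files downloaded above and reads the `label_ids` / `scores` fields of the returned `ClassifyResult`; the file names inside the model directory are an assumption based on the PaddleClas export layout used elsewhere in this commit.

```python
# Hypothetical usage sketch; the file names inside ResNet50_vd_infer are assumed.
import cv2
import fastdeploy as fd

model = fd.vision.classification.PaddleClasModel(
    "ResNet50_vd_infer/inference.pdmodel",
    "ResNet50_vd_infer/inference.pdiparams",
    "ResNet50_vd_infer/inference_cls.yaml")

im = cv2.imread("ILSVRC2012_val_00000010.jpeg")
result = model.predict(im, topk=5)

# ClassifyResult exposes label_ids and scores, as shown in the output above
for label_id, score in zip(result.label_ids, result.scores):
    print(f"label {label_id}: {score:.4f}")
```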
@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301689-87ee5205-2eff-4204-b615-24c400f01323.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## NanoDetPlus C++ API

### NanoDetPlus class

```c++
fastdeploy::vision::detection::NanoDetPlus(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ NanoDetPlus model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> NanoDetPlus::Predict(cv::Mat* im, DetectionResult* result,
>                      float conf_threshold = 0.25,
>                      float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy NanoDetPlus on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/nanodet_plus/python/

@@ -30,7 +30,7 @@ python infer.py --model nanodet-plus-m_320.onnx --image 000000014439.jpg --devic

## NanoDetPlus Python API

```python
fastdeploy.vision.detection.NanoDetPlus(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ NanoDetPlus model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> NanoDetPlus.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>
@@ -9,7 +9,7 @@

Taking inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
# Use ppyoloe as the example for inference deployment

# Download the SDK and build the model examples code (the SDK already contains the examples code)

@@ -34,12 +34,15 @@ tar xvf ppyoloe_crn_l_300e_coco.tgz

./infer_ppyoloe_demo ./ppyoloe_crn_l_300e_coco 000000014439.jpg 2
```

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## PaddleDetection C++ API

### Model classes

PaddleDetection currently supports 6 model families, with class names `PPYOLOE`, `PicoDet`, `PaddleYOLOX`, `YOLOv3`, `PPYOLO` and `FasterRCNN`. The constructors and prediction functions of all these classes take exactly the same parameters; this document uses PPYOLOE as the example to explain the API
```c++
fastdeploy::vision::detection::PPYOLOE(
    const string& model_file,
    const string& params_file,

@@ -60,7 +63,7 @@ PaddleDetection PPYOLOE model loading and initialization, where model_file is the exported

#### Predict function

> ```c++
> PPYOLOE::Predict(cv::Mat* im, DetectionResult* result)
> ```
>

@@ -7,7 +7,7 @@

This directory provides `infer_xxx.py` to quickly deploy PPYOLOE/PicoDet and other models on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/paddledetection/python/

@@ -26,13 +26,13 @@ python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439

```

The visualized result after running is shown below
<div align="center">
<img src="https://user-images.githubusercontent.com/19339784/184326520-7075e907-10ed-4fad-93f8-52d0e35d4964.jpg", width=480px, height=320px />
</div>

## PaddleDetection Python API

```python
fastdeploy.vision.detection.PPYOLOE(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
fastdeploy.vision.detection.PicoDet(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
fastdeploy.vision.detection.PaddleYOLOX(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)

@@ -54,7 +54,7 @@ PaddleDetection model loading and initialization, where model_file and params_file are the exported

### predict function

Every model in PaddleDetection, including PPYOLOE/PicoDet/PaddleYOLOX/YOLOv3/PPYOLO/FasterRCNN, provides the same member function below for image detection
> ```python
> PPYOLOE.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>
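A small consumption sketch (not from the original docs), using the ppyoloe_crn_l_300e_coco package and the 000000014439.jpg test image referenced above, and reading the `boxes` and `scores` members of the returned `DetectionResult`:

```python
# Illustrative sketch; paths follow the files referenced earlier in this commit.
import cv2
import fastdeploy as fd

model = fd.vision.detection.PPYOLOE(
    "ppyoloe_crn_l_300e_coco/model.pdmodel",
    "ppyoloe_crn_l_300e_coco/model.pdiparams",
    "ppyoloe_crn_l_300e_coco/infer_cfg.yml")

im = cv2.imread("000000014439.jpg")
result = model.predict(im)

# DetectionResult exposes boxes (4 floats per box) and scores, as documented in
# the vision results docs above; keep only confident detections here.
for box, score in zip(result.boxes, result.scores):
    if score >= 0.5:
        print(f"box={box}, score={score:.3f}")
```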
@@ -11,7 +11,7 @@

Visit the official [ScaledYOLOv4](https://github.com/WongKinYiu/ScaledYOLOv4) GitHub repository, follow its instructions to download and install it, download the `scaledyolov4.pt` model, and use `models/export.py` to obtain an `onnx` file. If the exported `onnx` model has problems, refer to the workaround in [ScaledYOLOv4#401](https://github.com/WongKinYiu/ScaledYOLOv4/issues/401)

```bash
# Download the ScaledYOLOv4 model file
# Download from Google Drive: https://drive.google.com/file/d/1aXZZE999sHMP1gev60XhNChtHPRMH3Fz/view?usp=sharing

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301908-7027cf41-af51-4485-bd32-87aca0e77336.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## ScaledYOLOv4 C++ API

### ScaledYOLOv4 class

```c++
fastdeploy::vision::detection::ScaledYOLOv4(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ ScaledYOLOv4 model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> ScaledYOLOv4::Predict(cv::Mat* im, DetectionResult* result,
>                       float conf_threshold = 0.25,
>                       float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy ScaledYOLOv4 on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/scaledyolov4/python/

@@ -30,7 +30,7 @@ python infer.py --model scaled_yolov4-p5.onnx --image 000000014439.jpg --device

## ScaledYOLOv4 Python API

```python
fastdeploy.vision.detection.ScaledYOLOv4(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ ScaledYOLOv4 model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> ScaledYOLOv4.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>

@@ -11,7 +11,7 @@

Visit the official [YOLOR](https://github.com/WongKinYiu/yolor) GitHub repository, follow its instructions to download and install it, download the `yolor.pt` model, and use `models/export.py` to obtain an `onnx` file. If the exported `onnx` model has accuracy or tensor-dimension problems, refer to the workaround in [yolor#32](https://github.com/WongKinYiu/yolor/issues/32)

```bash
# Download the yolor model file
wget https://github.com/WongKinYiu/yolor/releases/download/weights/yolor-d6-paper-570.pt

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301926-fa3711bf-5984-4e61-9c98-7fdeacb622e9.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## YOLOR C++ API

### YOLOR class

```c++
fastdeploy::vision::detection::YOLOR(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ YOLOR model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> YOLOR::Predict(cv::Mat* im, DetectionResult* result,
>                float conf_threshold = 0.25,
>                float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy YOLOR on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/yolor/python/

@@ -30,7 +30,7 @@ python infer.py --model yolor-p6-paper-541-640-640.onnx --image 000000014439.jpg

## YOLOR Python API

```python
fastdeploy.vision.detection.YOLOR(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ YOLOR model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> YOLOR.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000

<img width="640" src="https://user-images.githubusercontent.com/67993288/184309358-d803347a-8981-44b6-b589-4608021ad0f4.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## YOLOv5 C++ API

### YOLOv5 class

```c++
fastdeploy::vision::detection::YOLOv5(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ YOLOv5 model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> YOLOv5::Predict(cv::Mat* im, DetectionResult* result,
>                 float conf_threshold = 0.25,
>                 float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy YOLOv5 on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/yolov5/python/

@@ -30,7 +30,7 @@ python infer.py --model yolov5s.onnx --image 000000014439.jpg --device gpu --use

## YOLOv5 Python API

```python
fastdeploy.vision.detection.YOLOv5(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ YOLOv5 model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> YOLOv5.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>
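The same pattern applies to the ONNX-based detectors. A short sketch (illustrative, using the `yolov5s.onnx` and `000000014439.jpg` files referenced above):

```python
# Illustrative sketch using the files referenced in this document.
import cv2
import fastdeploy as fd

model = fd.vision.detection.YOLOv5("yolov5s.onnx")
im = cv2.imread("000000014439.jpg")
result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)

# DetectionResult exposes boxes (4 floats each) and scores
print(f"{len(result.boxes)} objects detected")
for box, score in zip(result.boxes, result.scores):
    print(box, round(score, 3))
```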
@@ -12,7 +12,7 @@

- Automatic download
Visit the official [YOLOv5Lite](https://github.com/ppogg/YOLOv5-Lite) GitHub repository, follow its instructions to download and install it, and download the `yolov5-lite-xx.onnx` model (note: the officially provided ONNX files currently do not contain the decode module)
```bash
# Download the yolov5-lite model file (.onnx)
# Download from https://drive.google.com/file/d/1bJByk9eoS6pv8Z3N4bcLRCV3i7uk24aU/view
# The official repo also supports downloading from Baidu Cloud

@@ -27,7 +27,7 @@

First, modify the code following the workaround in [YOLOv5-Lite#189](https://github.com/ppogg/YOLOv5-Lite/pull/189).

```bash
# Download the yolov5-lite model file (.pt)
# Download from https://drive.google.com/file/d/1oftzqOREGqDCerf7DtD5BZp9YWELlkMe/view
# The official repo also supports downloading from Baidu Cloud

@@ -39,7 +39,7 @@

```
- Export an ONNX file without the decode module (no code changes needed)

```bash
# Download the yolov5-lite model file
# Download from https://drive.google.com/file/d/1oftzqOREGqDCerf7DtD5BZp9YWELlkMe/view
# The official repo also supports downloading from Baidu Cloud

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301943-263c8153-a52a-4533-a7c1-ee86d05d314b.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## YOLOv5Lite C++ API

### YOLOv5Lite class

```c++
fastdeploy::vision::detection::YOLOv5Lite(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ YOLOv5Lite model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> YOLOv5Lite::Predict(cv::Mat* im, DetectionResult* result,
>                     float conf_threshold = 0.25,
>                     float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy YOLOv5Lite on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/yolov5lite/python/

@@ -30,7 +30,7 @@ python infer.py --model v5Lite-g-sim-640.onnx --image 000000014439.jpg --device

## YOLOv5Lite Python API

```python
fastdeploy.vision.detection.YOLOv5Lite(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ YOLOv5Lite model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> YOLOv5Lite.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>
@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301725-390e4abb-db2b-482d-931d-469381322626.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## YOLOv6 C++ API

### YOLOv6 class

```c++
fastdeploy::vision::detection::YOLOv6(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ YOLOv6 model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> YOLOv6::Predict(cv::Mat* im, DetectionResult* result,
>                 float conf_threshold = 0.25,
>                 float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy YOLOv6 on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/yolov6/python/

@@ -31,7 +31,7 @@ python infer.py --model yolov6s.onnx --image 000000014439.jpg --device gpu --use

## YOLOv6 Python API

```python
fastdeploy.vision.detection.YOLOv6(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -46,7 +46,7 @@ YOLOv6 model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> YOLOv6.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>

@@ -10,7 +10,7 @@

## Export the ONNX model

```bash
# Download the yolov7 model file
wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000

<img width="640" src="https://user-images.githubusercontent.com/67993288/183847558-abcd9a57-9cd9-4891-b09a-710963c99b74.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## YOLOv7 C++ API

### YOLOv7 class

```c++
fastdeploy::vision::detection::YOLOv7(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ YOLOv7 model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> YOLOv7::Predict(cv::Mat* im, DetectionResult* result,
>                 float conf_threshold = 0.25,
>                 float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy YOLOv7 on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/yolov7/python/

@@ -30,7 +30,7 @@ python infer.py --model yolov7.onnx --image 000000014439.jpg --device gpu --use_

## YOLOv7 Python API

```python
fastdeploy.vision.detection.YOLOv7(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ YOLOv7 model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> YOLOv7.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>
@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301746-04595d76-454a-4f07-8c7d-6f41418f8ae3.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## YOLOX C++ API

### YOLOX class

```c++
fastdeploy::vision::detection::YOLOX(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ YOLOX model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> YOLOX::Predict(cv::Mat* im, DetectionResult* result,
>                float conf_threshold = 0.25,
>                float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy YOLOX on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/yolox/python/

@@ -30,7 +30,7 @@ python infer.py --model yolox_s.onnx --image 000000014439.jpg --device gpu --use

## YOLOX Python API

```python
fastdeploy.vision.detection.YOLOX(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ YOLOX model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> YOLOX.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,13 @@ wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/li

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301763-1b950047-c17f-4819-b175-c743b699c3b1.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)
## RetinaFace C++ API

### RetinaFace class

```c++
fastdeploy::vision::facedet::RetinaFace(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +59,7 @@ RetinaFace model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> RetinaFace::Predict(cv::Mat* im, FaceDetectionResult* result,
>                     float conf_threshold = 0.25,
>                     float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy RetinaFace on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/facedet/retinaface/python/

@@ -30,7 +30,7 @@ python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image test_

## RetinaFace Python API

```python
fastdeploy.vision.facedet.RetinaFace(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ RetinaFace model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> RetinaFace.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>
@@ -8,7 +8,7 @@

## Export the ONNX model

```bash
# Download the scrfd model file
# e.g. download from https://onedrive.live.com/?authkey=%21ABbFJx2JMhNjhNA&id=4A83B6B633B029CC%215542&cid=4A83B6B633B029CC

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/li

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301789-1981d065-208f-4a6b-857c-9a0f9a63e0b1.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## SCRFD C++ API

### SCRFD class

```c++
fastdeploy::vision::facedet::SCRFD(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ SCRFD model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> SCRFD::Predict(cv::Mat* im, FaceDetectionResult* result,
>                float conf_threshold = 0.25,
>                float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy SCRFD on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/facedet/scrfd/python/

@@ -30,7 +30,7 @@ python infer.py --model scrfd_500m_bnkps_shape640x640.onnx --image test_lite_fac

## SCRFD Python API

```python
fastdeploy.vision.facedet.SCRFD(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ SCRFD model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> SCRFD.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/li

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301821-0788483b-a72b-42b0-a566-b6430f184f6e.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## UltraFace C++ API

### UltraFace class

```c++
fastdeploy::vision::facedet::UltraFace(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ UltraFace model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> UltraFace::Predict(cv::Mat* im, FaceDetectionResult* result,
>                    float conf_threshold = 0.25,
>                    float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy UltraFace on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/facedet/ultraface/python/

@@ -30,7 +30,7 @@ python infer.py --model version-RFB-320.onnx --image test_lite_face_detector_3.j

## UltraFace Python API

```python
fastdeploy.vision.facedet.UltraFace(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ UltraFace model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> UltraFace.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -34,11 +34,14 @@ wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/li

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301839-a29aefae-16c9-4196-bf9d-9c6cf694f02d.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## YOLOv5Face C++ API

### YOLOv5Face class

```c++
fastdeploy::vision::facedet::YOLOv5Face(
    const string& model_file,
    const string& params_file = "",

@@ -57,7 +60,7 @@ YOLOv5Face model loading and initialization, where model_file is the exported ONNX model

#### Predict function

> ```c++
> YOLOv5Face::Predict(cv::Mat* im, FaceDetectionResult* result,
>                     float conf_threshold = 0.25,
>                     float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy YOLOv5Face on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/facedet/yolov5face/python/

@@ -30,7 +30,7 @@ python infer.py --model yolov5s-face.onnx --image test_lite_face_detector_3.jpg

## YOLOv5Face Python API

```python
fastdeploy.vision.facedet.YOLOv5Face(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -45,7 +45,7 @@ YOLOv5Face model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> YOLOv5Face.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>
@@ -18,7 +18,7 @@

Visit the official [ArcFace](https://github.com/deepinsight/insightface/tree/master/recognition/arcface_torch) GitHub repository, follow its instructions to download and install it, download the pt model file, and use `torch2onnx.py` to obtain an `onnx` file.

* Download the ArcFace model file
```bash
# Link: https://pan.baidu.com/share/init?surl=CL-l4zWqsI1oDuEEYVhj-g  code: e8pw
```

@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -40,11 +40,14 @@ wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_2.JPG

<img width="220" float="left" src="https://user-images.githubusercontent.com/67993288/184321622-d9a494c3-72f3-47f1-97c5-8a2372de491f.JPG">
</div>

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## InsightFace C++ API

### ArcFace class

```c++
fastdeploy::vision::faceid::ArcFace(
    const string& model_file,
    const string& params_file = "",

@@ -56,7 +59,7 @@ ArcFace model loading and initialization, where model_file is the exported ONNX model

### CosFace class

```c++
fastdeploy::vision::faceid::CosFace(
    const string& model_file,
    const string& params_file = "",

@@ -68,7 +71,7 @@ CosFace model loading and initialization, where model_file is the exported ONNX model

### PartialFC class

```c++
fastdeploy::vision::faceid::PartialFC(
    const string& model_file,
    const string& params_file = "",

@@ -80,7 +83,7 @@ PartialFC model loading and initialization, where model_file is the exported ONNX model

### VPL class

```c++
fastdeploy::vision::faceid::VPL(
    const string& model_file,
    const string& params_file = "",

@@ -98,7 +101,7 @@ VPL model loading and initialization, where model_file is the exported ONNX model.

#### Predict function

> ```c++
> ArcFace::Predict(cv::Mat* im, FaceRecognitionResult* result,
>                  float conf_threshold = 0.25,
>                  float nms_iou_threshold = 0.5)

@@ -8,7 +8,7 @@

Taking ArcFace as an example, `infer_arcface.py` is provided to quickly deploy ArcFace on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/faceid/insightface/python/

@@ -35,7 +35,7 @@ python infer_arcface.py --model ms1mv3_arcface_r100.onnx --face test_lite_focal_

<img width="220" float="left" src="https://user-images.githubusercontent.com/67993288/184321622-d9a494c3-72f3-47f1-97c5-8a2372de491f.JPG">
</div>

```bash
Prediction Done!
--- [Face 0]:FaceRecognitionResult: [Dim(512), Min(-2.309220), Max(2.372197), Mean(0.016987)]
--- [Face 1]:FaceRecognitionResult: [Dim(512), Min(-2.288258), Max(1.995104), Mean(-0.003400)]

@@ -46,7 +46,7 @@ Detect Done! Cosine 01: 0.814385, Cosine 02:-0.059388

## InsightFace Python API

```python
fastdeploy.vision.faceid.ArcFace(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
fastdeploy.vision.faceid.CosFace(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
fastdeploy.vision.faceid.PartialFC(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)

@@ -64,7 +64,7 @@ ArcFace model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> ArcFace.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>
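To compare two faces, the embeddings in the `FaceRecognitionResult` objects above can be turned into a cosine similarity, as the demo output ("Cosine 01: 0.814385") does. The sketch below is illustrative only: the ONNX file name follows the example command line, while `face_a.jpg` / `face_b.jpg` are hypothetical aligned face crops.

```python
# Hypothetical sketch: cosine similarity between two face embeddings.
import cv2
import numpy as np
import fastdeploy as fd

model = fd.vision.faceid.ArcFace("ms1mv3_arcface_r100.onnx")

# face_a.jpg / face_b.jpg are placeholders for aligned face crops
emb_a = np.asarray(model.predict(cv2.imread("face_a.jpg")).embedding)
emb_b = np.asarray(model.predict(cv2.imread("face_b.jpg")).embedding)

cosine = float(np.dot(emb_a, emb_b) / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b)))
print(f"Cosine similarity: {cosine:.6f}")
```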
@@ -9,7 +9,7 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/cpp/fastdeploy-linux-x64-gpu-0.2.0.tgz

@@ -36,11 +36,14 @@ wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/li

<img width="640" src="https://user-images.githubusercontent.com/67993288/184301892-457f7014-2dc0-4ad1-b688-43b41fac299a.jpg">
<img width="640" src="https://user-images.githubusercontent.com/67993288/184301871-c234dfdf-3b3d-46e4-8886-e1ac156c9e4a.jpg">

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## MODNet C++ API

### MODNet class

```c++
fastdeploy::vision::matting::MODNet(
    const string& model_file,
    const string& params_file = "",

@@ -59,7 +62,7 @@ MODNet model loading and initialization, where model_file is the exported ONNX model.

#### Predict function

> ```c++
> MODNet::Predict(cv::Mat* im, MattingResult* result,
>                 float conf_threshold = 0.25,
>                 float nms_iou_threshold = 0.5)

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy MODNet on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/matting/modnet/python/

@@ -31,7 +31,7 @@ python infer.py --model modnet_photographic_portrait_matting.onnx --image test_l

## MODNet Python API

```python
fastdeploy.vision.matting.MODNet(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
```

@@ -46,7 +46,7 @@ MODNet model loading and initialization, where model_file is the exported ONNX model

### predict function

> ```python
> MODNet.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
> ```
>
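A small consumption sketch (illustrative only; the model name follows the example command above, while `portrait.jpg` is a placeholder image path) that inspects the `alpha` member of the returned `MattingResult` documented earlier:

```python
# Hypothetical sketch: inspect the MattingResult returned by predict().
import cv2
import numpy as np
import fastdeploy as fd

model = fd.vision.matting.MODNet("modnet_photographic_portrait_matting.onnx")
result = model.predict(cv2.imread("portrait.jpg"))  # "portrait.jpg" is a placeholder

# MattingResult exposes alpha (per-pixel alpha values) and foreground,
# as documented in the vision results section above.
alpha = np.asarray(result.alpha, dtype=np.float32)
print(f"{alpha.size} alpha values, range [{alpha.min():.3f}, {alpha.max():.3f}]")
```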
@@ -9,13 +9,13 @@

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test

```bash
mkdir build
cd build
wget https://bj.bcebos.com/paddlehub/fastdeploy/libs/0.2.0/fastdeploy-linux-x64-gpu-0.2.0.tgz
tar xvf fastdeploy-linux-x64-gpu-0.2.0.tgz
cd fastdeploy-linux-x64-gpu-0.2.0/examples/vision/segmentation/paddleseg/cpp/build
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/../../../../../../../fastdeploy-linux-x64-gpu-0.2.0
make -j

# Download the Unet model files and test image

@@ -33,15 +33,18 @@ wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png

```

The visualized result after running is shown below
<div align="center">
<img src="https://user-images.githubusercontent.com/16222477/184588768-45ee673b-ef1f-40f4-9fbd-6b1a9ce17c59.png", width=512px, height=256px />
</div>

The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)

## PaddleSeg C++ API

### PaddleSeg class

```c++
fastdeploy::vision::segmentation::PaddleSegModel(
    const string& model_file,
    const string& params_file = "",

@@ -62,7 +65,7 @@ PaddleSegModel model loading and initialization, where model_file is the exported Paddle model

#### Predict function

> ```c++
> PaddleSegModel::Predict(cv::Mat* im, SegmentationResult* result)
> ```
>

@@ -7,7 +7,7 @@

This directory provides `infer.py` to quickly deploy Unet on CPU/GPU, or on GPU with TensorRT acceleration. Run the following script to try it out

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/segmentation/paddleseg/python

@@ -26,13 +26,13 @@ python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_

```

The visualized result after running is shown below
<div align="center">
<img src="https://user-images.githubusercontent.com/16222477/184588768-45ee673b-ef1f-40f4-9fbd-6b1a9ce17c59.png", width=512px, height=256px />
</div>

## PaddleSegModel Python API

```python
fd.vision.segmentation.PaddleSegModel(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
```

@@ -48,7 +48,7 @@ PaddleSeg model loading and initialization, where model_file, params_file and config_file are the exported

### predict function

> ```python
> PaddleSegModel.predict(input_image)
> ```
>
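An illustrative sketch (not from the original docs): the file names inside `Unet_cityscapes_without_argmax_infer` are assumed to follow the usual PaddleSeg export layout, and `label_map` / `score_map` are the `SegmentationResult` members documented earlier in this commit.

```python
# Hypothetical sketch; the model/params/config file names inside the exported
# directory are assumptions based on the usual PaddleSeg export layout.
import cv2
import numpy as np
import fastdeploy as fd

model = fd.vision.segmentation.PaddleSegModel(
    "Unet_cityscapes_without_argmax_infer/model.pdmodel",
    "Unet_cityscapes_without_argmax_infer/model.pdiparams",
    "Unet_cityscapes_without_argmax_infer/deploy.yaml")

im = cv2.imread("cityscapes_demo.png")
result = model.predict(im)

# SegmentationResult exposes label_map (per-pixel class ids) and score_map
labels = np.asarray(result.label_map, dtype=np.uint8)
print(f"{labels.size} labelled pixels, {len(np.unique(labels))} distinct classes")
```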
@@ -1,3 +1,4 @@
wheel
requests
tqdm
numpy