Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 08:37:06 +08:00)
modify2
@@ -1,4 +1,4 @@
-[English](README.md) | 简体中文
+[English](README_EN.md) | 简体中文
 
 
@@ -18,13 +18,13 @@
 
 **⚡️FastDeploy** is an **easy-to-use and efficient** inference deployment toolkit. It covers the industry's 🔥**popular CV, NLP, and Speech AI models** and provides an 📦**out-of-the-box** deployment experience, including image classification, object detection, image segmentation, face detection, face recognition, human keypoint detection, OCR, semantic understanding, and other tasks, to meet developers' **multi-scenario**, **multi-hardware**, and **multi-platform** production deployment needs.
 
-| [Object Detection](examples/vision) | [3D Object Detection](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Portrait Segmentation](examples/vision/segmentation/paddleseg) |
+| [Object Detection](examples/vision/detection) | [3D Object Detection](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Portrait Segmentation](examples/vision/segmentation/paddleseg) |
 |:---:|:---:|:---:|:---:|
 | <img src='https://user-images.githubusercontent.com/54695910/188054680-2f8d1952-c120-4b67-88fc-7d2d7d2378b4.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188270227-1a4671b3-0123-46ab-8d0f-0e4132ae8ec0.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054711-6119f0e7-d741-43b1-b273-9493d103d49f.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054718-6395321c-8937-4fa0-881c-5b20deb92aaa.gif' height="126px" width="190px"> |
-| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) |[**Face Alignment**](examples/vision/ocr)
+| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) |[**Face Alignment**](examples/vision/facealign)
 | <img src='https://user-images.githubusercontent.com/54695910/188058231-a5fe1ce1-0a38-460f-9582-e0b881514908.gif' height="126px" width="190px"> |<img src='https://user-images.githubusercontent.com/54695910/188054691-e4cb1a70-09fe-4691-bc62-5552d50bd853.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054669-a85996ba-f7f3-4646-ae1f-3b7e3e353e7d.gif' height="126px" width="190px" > |<img src='https://user-images.githubusercontent.com/54695910/188059460-9845e717-c30a-4252-bd80-b7f6d4cf30cb.png' height="126px" width="190px"> |
 | [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) |[**Speech**](examples/audio/pp-tts)
-| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input** :早上好,今天是2020<br>/10/29,最低温度是-3°C。<br><br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="170" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)</p>|
+| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input** :早上好今天是2020<br>/10/29,最低温度是-3°C。<br><br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="170" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)</p>|
 
 ## Recent Updates
@@ -40,7 +40,7 @@
 - **🖥️ Server-side deployment: faster inference backends and more supported models**
   - Integrated the Paddle Inference TensorRT backend while keeping its usage consistent with the development experience of Paddle Inference, TensorRT, OpenVINO, ONNX Runtime, Paddle Lite, etc.;
   - Added and tested Graphcore IPU support through the Paddle Inference backend;
-  - Optimized the [one-click model quantization tool](tools/quantization), supporting vision models such as YOLOv7, YOLOv6, and YOLOv5, with 1.5x-2x faster inference on CPU and GPU;
+  - Optimized the [one-click model auto-compression tool](./tools/auto_compression), supporting vision models such as YOLOv7, YOLOv6, and YOLOv5, with 1.5x-2x faster inference on CPU and GPU;
   - Added models such as [PP-Tracking](./examples/vision/tracking/pptracking) and [RobustVideoMatting](./examples/vision/matting);
 
 - 🔥 **2022.10.24: Released FastDeploy [release v0.4.0](https://github.com/PaddlePaddle/FastDeploy/tree/release/0.4.0)**
@@ -20,13 +20,13 @@ English | [简体中文](README_CN.md)
 
 **⚡️FastDeploy** is an **accessible and efficient** deployment development toolkit. It covers 🔥**critical CV, NLP, and Speech AI models** in the industry and provides an 📦**out-of-the-box** deployment experience. It covers image classification, object detection, image segmentation, face detection, face recognition, human keypoint detection, OCR, semantic understanding, and other tasks to meet developers' industrial deployment needs for **multi-scenario**, **multi-hardware**, and **multi-platform** situations.
 
-| [Object Detection](examples/vision) | [3D Object Detection](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Portrait Segmentation](examples/vision/segmentation/paddleseg) |
+| [Object Detection](examples/vision/detection) | [3D Object Detection](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Portrait Segmentation](examples/vision/segmentation/paddleseg) |
 |:---:|:---:|:---:|:---:|
 | <img src='https://user-images.githubusercontent.com/54695910/188054680-2f8d1952-c120-4b67-88fc-7d2d7d2378b4.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188270227-1a4671b3-0123-46ab-8d0f-0e4132ae8ec0.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054711-6119f0e7-d741-43b1-b273-9493d103d49f.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054718-6395321c-8937-4fa0-881c-5b20deb92aaa.gif' height="126px" width="190px"> |
-| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) |[**Face Alignment**](examples/vision/ocr)
+| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) |[**Face Alignment**](examples/vision/facealign)
 | <img src='https://user-images.githubusercontent.com/54695910/188058231-a5fe1ce1-0a38-460f-9582-e0b881514908.gif' height="126px" width="190px"> |<img src='https://user-images.githubusercontent.com/54695910/188054691-e4cb1a70-09fe-4691-bc62-5552d50bd853.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054669-a85996ba-f7f3-4646-ae1f-3b7e3e353e7d.gif' height="126px" width="190px" > |<img src='https://user-images.githubusercontent.com/54695910/188059460-9845e717-c30a-4252-bd80-b7f6d4cf30cb.png' height="126px" width="190px"> |
 | [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) |[**Speech**](examples/audio/pp-tts)
-| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input** :Life was like a box of chocolates, you never know what you're gonna get.<br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="170" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/tacotron2_ljspeech_waveflow_samples_0.2/sentence_1.wav)</p>|
+| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input** :早上好今天是2020<br>/10/29,最低温度是-3°C。<br><br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="170" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)</p>|
 
 ## 📣 Recent Updates
@@ -2,7 +2,7 @@
 
 This directory helps generate the Python API documents for FastDeploy.
 
-1. First, to generate the latest API documents, you need to install the latest FastDeploy; refer to [build and install](en/build_and_install) to build the FastDeploy Python wheel package from the latest code.
+1. First, to generate the latest API documents, you need to install the latest FastDeploy; refer to [build and install](../../cn/build_and_install) to build the FastDeploy Python wheel package from the latest code.
 2. After installing FastDeploy in your Python environment, a few dependencies still need to be installed; execute `pip install -r requirements.txt` in this directory.
 3. Execute `make html` to generate the API documents.
@@ -102,4 +102,4 @@ make install
 For how to use the FastDeploy Android C++ SDK, refer to the usage case documents:
 - [Image classification Android usage document](../../../examples/vision/classification/paddleclas/android/README.md)
 - [Object detection Android usage document](../../../examples/vision/detection/paddledetection/android/README.md)
-- [Using the FastDeploy C++ SDK on Android via JNI](../../../../../docs/cn/faq/use_cpp_sdk_on_android.md)
+- [Using the FastDeploy C++ SDK on Android via JNI](../../cn/faq/use_cpp_sdk_on_android.md)
@@ -218,7 +218,7 @@ D:\qiuyanjun\fastdeploy_test\infer_ppyoloe\x64\Release\infer_ppyoloe.exe
 
 
 (2) The code of infer_ppyoloe.cpp can be copied directly from the code in examples:
-- [examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc](../../examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc)
+- [examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc](../../../examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc)
 
 (3) CMakeLists.txt mainly contains the configuration of the FastDeploy C++ SDK path; for the GPU version of the SDK, you also need to set CUDA_DIRECTORY to the CUDA installation path. The CMakeLists.txt configuration is as follows:
@@ -179,7 +179,7 @@ D:\qiuyanjun\fastdeploy_build\built\fastdeploy-win-x64-gpu-0.2.1\third_libs\inst
 
 
 After compiling successfully, you can see the exe saved in:
 
 ```bat
 D:\qiuyanjun\fastdeploy_test\infer_ppyoloe\x64\Release\infer_ppyoloe.exe
@@ -221,7 +221,7 @@ This section is for CMake users and describes how to create CMake projects in Vi
 
 
 (2) The code of infer_ppyoloe.cpp can be copied directly from the code in examples:
-- [examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc](../../examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc)
+- [examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc](../../../examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc)
 
 (3) CMakeLists.txt mainly contains the configuration of the FastDeploy C++ SDK path; for the GPU version of the SDK, you also need to set CUDA_DIRECTORY to the CUDA installation path. The CMakeLists.txt configuration is as follows:
@@ -361,7 +361,7 @@ A brief description of the usage is as follows.
 #### 4.1.2 fastdeploy_init.bat: view all dll, lib and include paths in the SDK
 <div id="CommandLineDeps12"></div>
 
 Go to the root directory of the SDK and run the show command to view all the dll, lib and include paths in the SDK. In the following command, %cd% means the current directory (the root directory of the SDK).
 
 ```bat
 D:\path-to-fastdeploy-sdk-dir>fastdeploy_init.bat show %cd%
@@ -504,7 +504,7 @@ copy /Y %FASTDEPLOY_HOME%\third_libs\install\yaml-cpp\lib\*.dll Release\
 copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\bin\*.dll Release\
 copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\bin\*.xml Release\
 copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\3rdparty\tbb\bin\*.dll Release\
 ```
 Note that if you compile the latest SDK or a version >0.2.1 yourself, the opencv and openvino directory structure has changed and the paths need to be adjusted accordingly. For example:
 ```bat
 copy /Y %FASTDEPLOY_HOME%\third_libs\install\opencv\build\x64\vc15\bin\*.dll Release\
@@ -27,7 +27,7 @@ FastDeploy, based on PaddleSlim, integrates a one-click model quantization tool…
 
 ### Quantizing models with FastDeploy's one-click model quantization tool
 Based on PaddleSlim, FastDeploy provides users with a one-click model quantization tool; refer to the following document to quantize models.
-- [FastDeploy one-click model quantization](../../tools/quantization/)
+- [FastDeploy one-click model quantization](../../tools/auto_compression/)
 Once the quantized model is produced, users can deploy it with FastDeploy.
@@ -1,16 +1,16 @@
-# FastDeploy Runtime inference examples
+# FastDeploy Runtime examples
 
-| Example code | Programming language | Description |
+| Example Code | Programming Language | Description |
 | :------- | :------- | :---- |
-| python/infer_paddle_paddle_inference.py | Python | Inference of a Paddle model with Paddle Inference on CPU/GPU |
-| python/infer_paddle_tensorrt.py | Python | Inference of a Paddle model with TensorRT on GPU |
-| python/infer_paddle_openvino.py | Python | Inference of a Paddle model with OpenVINO on CPU |
-| python/infer_paddle_onnxruntime.py | Python | Inference of a Paddle model with ONNX Runtime on CPU/GPU |
-| python/infer_onnx_openvino.py | Python | Inference of an ONNX model with OpenVINO on CPU |
-| python/infer_onnx_tensorrt.py | Python | Inference of an ONNX model with TensorRT on GPU |
-| cpp/infer_paddle_paddle_inference.cc | C++ | Inference of a Paddle model with Paddle Inference on CPU/GPU |
-| cpp/infer_paddle_tensorrt.cc | C++ | Inference of a Paddle model with TensorRT on GPU |
-| cpp/infer_paddle_openvino.cc | C++ | Inference of a Paddle model with OpenVINO on CPU |
-| cpp/infer_paddle_onnxruntime.cc | C++ | Inference of a Paddle model with ONNX Runtime on CPU/GPU |
-| cpp/infer_onnx_openvino.cc | C++ | Inference of an ONNX model with OpenVINO on CPU |
-| cpp/infer_onnx_tensorrt.cc | C++ | Inference of an ONNX model with TensorRT on GPU |
+| python/infer_paddle_paddle_inference.py | Python | Deploy Paddle model with Paddle Inference(CPU/GPU) |
+| python/infer_paddle_tensorrt.py | Python | Deploy Paddle model with TensorRT(GPU) |
+| python/infer_paddle_openvino.py | Python | Deploy Paddle model with OpenVINO(CPU) |
+| python/infer_paddle_onnxruntime.py | Python | Deploy Paddle model with ONNX Runtime(CPU/GPU) |
+| python/infer_onnx_openvino.py | Python | Deploy ONNX model with OpenVINO(CPU) |
+| python/infer_onnx_tensorrt.py | Python | Deploy ONNX model with TensorRT(GPU) |
+| cpp/infer_paddle_paddle_inference.cc | C++ | Deploy Paddle model with Paddle Inference(CPU/GPU) |
+| cpp/infer_paddle_tensorrt.cc | C++ | Deploy Paddle model with TensorRT(GPU) |
+| cpp/infer_paddle_openvino.cc | C++ | Deploy Paddle model with OpenVINO(CPU) |
+| cpp/infer_paddle_onnxruntime.cc | C++ | Deploy Paddle model with ONNX Runtime(CPU/GPU) |
+| cpp/infer_onnx_openvino.cc | C++ | Deploy ONNX model with OpenVINO(CPU) |
+| cpp/infer_onnx_tensorrt.cc | C++ | Deploy ONNX model with TensorRT(GPU) |
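As a rough orientation for this table (not part of the commit), the sketch below shows the common shape of these runtime examples in C++: load a Paddle model, pick a backend on `RuntimeOption`, and run `Infer`. The file names are placeholders, and the option setters follow the FastDeploy Runtime API as I understand it; verify them against your installed FastDeploy version.

```cpp
#include "fastdeploy/runtime.h"

#include <iostream>
#include <vector>

int main() {
  fastdeploy::RuntimeOption option;
  // Hypothetical file names; replace with a real exported Paddle model.
  option.SetModelPath("model.pdmodel", "model.pdiparams");
  option.UseCpu();
  option.UseOpenVINOBackend();  // swap in another backend per the table above

  fastdeploy::Runtime runtime;
  if (!runtime.Init(option)) {
    std::cerr << "Failed to initialize the runtime." << std::endl;
    return -1;
  }

  // One input tensor, to be filled elsewhere to match the model's input.
  std::vector<fastdeploy::FDTensor> inputs(1);
  std::vector<fastdeploy::FDTensor> outputs;
  if (!runtime.Infer(inputs, &outputs)) {
    std::cerr << "Inference failed." << std::endl;
    return -1;
  }
  std::cout << "Got " << outputs.size() << " output tensor(s)." << std::endl;
  return 0;
}
```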
@@ -168,4 +168,4 @@ entity: 华夏 label: LOC pos: [14, 15]
 
 ## Modifying the configuration
 
-By default, the current classification task (ernie_seqcls_model/config.pbtxt) runs the OpenVINO engine on CPU, and the sequence labeling task runs the Paddle engine on GPU. To run on CPU/GPU or another inference engine, the configuration must be modified; see the [configuration document](../../../../../serving/docs/zh_CN/model_configuration.md) for details.
+By default, the current classification task (ernie_seqcls_model/config.pbtxt) runs the OpenVINO engine on CPU, and the sequence labeling task runs the Paddle engine on GPU. To run on CPU/GPU or another inference engine, the configuration must be modified; see the [configuration document](../../../../serving/docs/zh_CN/model_configuration.md) for details.
@@ -30,4 +30,4 @@ For PaddlePaddle's vision suites and popular external models, FastDeploy provides end-to…
 - Load the model
 - Call the `predict` interface
 
-When deploying vision models, FastDeploy also supports switching the backend inference engine with one click; see [how to switch the model inference engine](../../docs/runtime/how_to_change_backend.md) for details.
+When deploying vision models, FastDeploy also supports switching the backend inference engine with one click; see [how to switch the model inference engine](../../docs/cn/faq/how_to_change_backend.md) for details.
@@ -8,7 +8,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized classification model still requires the inference_cls.yaml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized classification model still requires the inference_cls.yaml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
 
 ## Deploying the quantized ResNet50_Vd model as an example
 Run the following commands in this directory to complete compilation and quantized-model deployment.
@@ -8,7 +8,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized classification model still requires the inference_cls.yaml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized classification model still requires the inference_cls.yaml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
 
 ## Deploying the quantized ResNet50_Vd model as an example
@@ -6,7 +6,7 @@
 
 ## Deploying image classification models in the web front end
 
-For the image classification model web demo, see the [**reference document**](../../../../examples/application/js/web_demo)
+For the image classification model web demo, see the [**reference document**](../../../../application/js/web_demo/)
 
 ## MobileNet js interface
@@ -34,4 +34,3 @@ console.log(res);
 
 - [PaddleClas model Python deployment](../../paddleclas/python/)
 - [PaddleClas model C++ deployment](../cpp/)
@@ -4,8 +4,8 @@
 
 Before deployment, confirm the following two steps:
 
-- 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/environment.md)
-- 2. Download the prebuilt deployment libraries and sample code for your development environment; see [FastDeploy prebuilt libraries](../../../../../docs/quick_start)
+- 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. Download the prebuilt deployment libraries and sample code for your development environment; see [FastDeploy prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
 
 Taking ResNet50 inference on Linux as an example, run the following commands in this directory to complete the compilation test.
@@ -33,7 +33,7 @@ wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/Ima
 ```
 
 The above commands only work on Linux or macOS; for how to use the SDK on Windows, see:
-- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/compile/how_to_use_sdk_on_windows.md)
+- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)
 
 ## ResNet C++ interface
@@ -74,4 +74,4 @@ fastdeploy::vision::classification::ResNet(
 - [Model introduction](../../)
 - [Python deployment](../python)
 - [Vision model prediction results](../../../../../docs/api/vision_results/)
-- [How to switch the model inference backend](../../../../../docs/runtime/how_to_change_backend.md)
+- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)
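As an aside (not part of the commit), here is a minimal C++ sketch of the ResNet interface named in the hunk header, following the pattern of FastDeploy's example infer.cc files. The file names are placeholders, and the assumption that this class defaults to the ONNX model format is worth checking.

```cpp
#include "fastdeploy/vision.h"

#include <iostream>
#include <opencv2/opencv.hpp>

int main() {
  // Hypothetical ONNX export of ResNet50; params_file stays empty for ONNX.
  auto model = fastdeploy::vision::classification::ResNet("resnet50.onnx", "");
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize the model." << std::endl;
    return -1;
  }

  cv::Mat im = cv::imread("test.jpg");
  fastdeploy::vision::ClassifyResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Prediction failed." << std::endl;
    return -1;
  }
  std::cout << res.Str() << std::endl;  // top-k label ids and scores
  return 0;
}
```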
@@ -2,8 +2,8 @@
 
 Before deployment, confirm the following two steps:
 
-- 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/environment.md)
-- 2. Install the FastDeploy Python whl package; see [FastDeploy Python installation](../../../../../docs/quick_start)
+- 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. Install the FastDeploy Python whl package; see [FastDeploy Python installation](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
 
 This directory provides `infer.py` to quickly deploy ResNet50_vd on CPU/GPU, and on GPU with TensorRT acceleration. Run the following script to complete the demo.
@@ -69,4 +69,4 @@ fd.vision.classification.ResNet(model_file, params_file, runtime_option=None, mo
 - [ResNet model introduction](..)
 - [ResNet C++ deployment](../cpp)
 - [Description of model prediction results](../../../../../docs/api/vision_results/)
-- [How to switch the model inference backend](../../../../../docs/runtime/how_to_change_backend.md)
+- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)
@@ -9,7 +9,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized model still requires the infer_cfg.yml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized model still requires the infer_cfg.yml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
 
 ## Deploying the quantized PP-YOLOE-l model as an example
 Run the following commands in this directory to complete compilation and quantized-model deployment.
@@ -8,7 +8,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized model still requires the infer_cfg.yml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized model still requires the infer_cfg.yml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
 
 ## Deploying the quantized PP-YOLOE-l model as an example
@@ -9,7 +9,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model.
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model.
 
 ## Deploying the quantized YOLOv5s model as an example
 Run the following commands in this directory to complete compilation and quantized-model deployment.
@@ -8,7 +8,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model.
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model.
 
 ## Deploying the quantized YOLOv5s model as an example
@@ -9,7 +9,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model.
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model.
 
 ## Deploying the quantized YOLOv6s model as an example
 Run the following commands in this directory to complete compilation and quantized-model deployment.
@@ -8,7 +8,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model.
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model.
 
 ## Deploying the quantized YOLOv6s model as an example
 ```bash
@@ -4,8 +4,8 @@ English | [简体中文](README.md)
 
 Two steps before deployment:
 
-- 1. The hardware and software environment meets the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/docs_en/environment.md)
-- 2. Install FastDeploy Python whl package. Please refer to [FastDeploy Python Installation](../../../../../docs/docs_en/quick_start)
+- 1. The hardware and software environment meets the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. Install FastDeploy Python whl package. Please refer to [FastDeploy Python Installation](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
 
 This doc provides a quick `infer.py` demo of YOLOv7 deployment on CPU/GPU, and accelerated GPU deployment by TensorRT. Run the following command:
@@ -21,7 +21,7 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000
 
 # CPU Inference
 python infer.py --model yolov7.onnx --image 000000014439.jpg --device cpu
 # GPU
 python infer.py --model yolov7.onnx --image 000000014439.jpg --device gpu
 # TensorRT inference on GPU
 python infer.py --model yolov7.onnx --image 000000014439.jpg --device gpu --use_trt True
@@ -51,18 +51,18 @@ YOLOv7 model loading and initialisation, with model_file being the exported ONNX
 > ```python
 > YOLOv7.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
 > ```
 >
 > Model prediction interface with direct output of detection results from the image input.
 >
 > **Parameters**
 >
 > > * **image_data**(np.ndarray): Input image. Images need to be in HWC or BGR format
 > > * **conf_threshold**(float): Filter threshold for detection box confidence
 > > * **nms_iou_threshold**(float): IoU threshold during NMS processing
 
 > **Return**
 >
-> > Returns a `fastdeploy.vision.DetectionResult` struct. For more details, please refer to [Vision Model Results](../../../../../docs/docs_en/api/vision_results/)
+> > Returns a `fastdeploy.vision.DetectionResult` struct. For more details, please refer to [Vision Model Results](../../../../../docs/api/vision_results/)
 
 ### Class Member Variables
@@ -80,5 +80,5 @@ Users can modify the following pre-processing parameters for their needs. This w
 
 - [YOLOv7 Model Introduction](..)
 - [YOLOv7 C++ Deployment](../cpp)
-- [Vision Model Results](../../../../../docs/docs_en/api/vision_results/)
-- [how to change inference backend](../../../../../docs/docs_en/runtime/how_to_change_inference_backend.md)
+- [Vision Model Results](../../../../../docs/api/vision_results/)
+- [how to change inference backend](../../../../../docs/en/faq/how_to_change_backend.md)
@@ -9,7 +9,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model.
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model.
 
 ## Deploying the quantized YOLOv7 model as an example
 Run the following commands in this directory to complete compilation and quantized-model deployment.
@@ -8,7 +8,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model.
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model.
 
 ## Deploying the quantized YOLOv7 model as an example
 ```bash
@@ -71,4 +71,4 @@ PPTinyPosePipeline model loading and initialization, where det_model is created with `fd.vision.…
 - [Pipeline model introduction](..)
 - [Pipeline C++ deployment](../cpp)
 - [Description of model prediction results](../../../../../docs/api/vision_results/)
-- [How to switch the model inference backend](../../../../../docs/runtime/how_to_change_backend.md)
+- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)
@@ -76,4 +76,4 @@ PP-TinyPose model loading and initialization, where model_file, params_file and config_…
 - [PP-TinyPose model introduction](..)
 - [PP-TinyPose C++ deployment](../cpp)
 - [Description of model prediction results](../../../../../docs/api/vision_results/)
-- [How to switch the model inference backend](../../../../../docs/runtime/how_to_change_backend.md)
+- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)
@@ -7,7 +7,7 @@
 - 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
 - 2. Download the prebuilt deployment libraries and sample code for your development environment; see [FastDeploy prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
 
-Taking PP-Matting inference on Linux as an example, run the following commands in this directory to complete the compilation test (for CPU-only deployment, download the CPU inference library from the [FastDeploy C++ prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md/CPP_prebuilt_libraries.md))
+Taking PP-Matting inference on Linux as an example, run the following commands in this directory to complete the compilation test (for CPU-only deployment, download the CPU inference library from the [FastDeploy C++ prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md))
 
 ```bash
 # Download the SDK and compile the examples code (the SDK contains the examples code)
@@ -5,7 +5,7 @@
 - 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
 - 2. Download the prebuilt deployment libraries and sample code for your development environment; see [FastDeploy prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
 
-Taking RobustVideoMatting inference on Linux as an example, run the following commands in this directory to complete the compilation test (for CPU-only deployment, download the CPU inference library from the [FastDeploy C++ prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md/CPP_prebuilt_libraries.md))
+Taking RobustVideoMatting inference on Linux as an example, run the following commands in this directory to complete the compilation test (for CPU-only deployment, download the CPU inference library from the [FastDeploy C++ prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md))
 
 This directory provides `infer.cc` to quickly deploy RobustVideoMatting on CPU/GPU, and on GPU with TensorRT acceleration. Run the following script to complete the demo.
@@ -16,7 +16,7 @@ import * as ocr from "@paddle-js-models/ocr";
 await ocr.init(detConfig, recConfig);
 const res = await ocr.recognize(img, option, postConfig);
 ```
-OCR model loading and initialization, where the model is in the Paddle.js model format; for how to convert js models, see this [document](../../../../application/web_demo/README.md)
+OCR model loading and initialization, where the model is in the Paddle.js model format; for how to convert js models, see this [document](../../../../application/js/web_demo/README.md)
 
 **init function parameters**
@@ -37,5 +37,4 @@ OCR model loading and initialization, where the model is in the Paddle.js model…
 - [PP-OCRv3 C++ deployment](../cpp)
 - [Description of model prediction results](../../../../../docs/api/vision_results/)
 - [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)
-- [PP-OCRv3 model web demo document](../../../../application/web_demo/README.md)
+- [PP-OCRv3 model web demo document](../../../../application/js/web_demo/README.md)
@@ -16,7 +16,7 @@ import * as ocr from "@paddle-js-models/ocr";
 await ocr.init(detConfig, recConfig);
 const res = await ocr.recognize(img, option, postConfig);
 ```
-OCR model loading and initialization, where the model is in the Paddle.js model format; for how to convert js models, see this [document](../../../../application/web_demo/README.md)
+OCR model loading and initialization, where the model is in the Paddle.js model format; for how to convert js models, see this [document](../../../../application/js/web_demo/README.md)
 
 **init function parameters**
@@ -8,7 +8,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized model still requires the deploy.yaml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized model still requires the deploy.yaml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
 
 ## Deploying the quantized PP_LiteSeg_T_STDC1_cityscapes model as an example
 Run the following commands in this directory to complete compilation and quantized-model deployment.
@@ -8,7 +8,7 @@
 
 ### Preparing the quantized model
 - 1. Users can directly deploy the quantized models provided by FastDeploy.
-- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized model still requires the deploy.yaml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
+- 2. Users can quantize a model themselves with FastDeploy's [one-click model auto-compression tool](../../../../../../tools/auto_compression/) and deploy the produced quantized model. (Note: inference with the quantized model still requires the deploy.yaml file from the FP32 model folder; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
 
 ## Deploying the quantized PP_LiteSeg_T_STDC1_cityscapes model as an example
@@ -4,7 +4,7 @@
 
 - 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../../docs/cn/build_and_install/rknpu2.md)
 
-[Note] If you are deploying **PP-Matting**, **PP-HumanMatting**, or **ModNet**, see [Matting model deployment](../../../matting)
+[Note] If you are deploying **PP-Matting**, **PP-HumanMatting**, or **ModNet**, see [Matting model deployment](../../../../matting/)
 
 This directory provides `infer.py` to quickly deploy PPHumanseg on RKNPU. Run the following script to complete the demo.
@@ -7,7 +7,7 @@
 
 ## Deploying the PP-Humanseg v1 model in the web front end
 
-For deploying and using the PP-Humanseg v1 model web demo, see this [document](../../../../application/web_demo/README.md)
+For deploying and using the PP-Humanseg v1 model web demo, see this [document](../../../../application/js/web_demo/README.md)
 
 ## PP-Humanseg v1 js interface
@@ -41,7 +41,3 @@ humanSeg.blurBackground(res)
 
 **drawHumanSeg() function parameters**
 > * **seg_values**(number[]): input parameter, usually the result computed by the getGrayValue function
@@ -7,7 +7,7 @@
 - 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
 - 2. Download the prebuilt deployment libraries and sample code for your development environment; see [FastDeploy prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
 
-Taking PP-Tracking inference on Linux as an example, run the following commands in this directory to complete the compilation test (for CPU-only deployment, download the CPU inference library from the [FastDeploy C++ prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md/CPP_prebuilt_libraries.md))
+Taking PP-Tracking inference on Linux as an example, run the following commands in this directory to complete the compilation test (for CPU-only deployment, download the CPU inference library from the [FastDeploy C++ prebuilt libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md))
 
 ```bash
 # Download the SDK and compile the examples code (the SDK contains the examples code)
@@ -19,7 +19,7 @@ namespace vision {
 Normalize::Normalize(const std::vector<float>& mean,
                      const std::vector<float>& std, bool is_scale,
                      const std::vector<float>& min,
-                     const std::vector<float>& max) {
+                     const std::vector<float>& max, bool swap_rb) {
   FDASSERT(mean.size() == std.size(),
            "Normalize: requires the size of mean equal to the size of std.");
   std::vector<double> mean_(mean.begin(), mean.end());
@@ -50,6 +50,7 @@ Normalize::Normalize(const std::vector<float>& mean,
     alpha_.push_back(alpha);
     beta_.push_back(beta);
   }
+  swap_rb_ = swap_rb;
 }
 
 bool Normalize::ImplByOpenCV(Mat* mat) {
@@ -57,6 +58,7 @@ bool Normalize::ImplByOpenCV(Mat* mat) {
 
   std::vector<cv::Mat> split_im;
   cv::split(*im, split_im);
+  if (swap_rb_) std::swap(split_im[0], split_im[2]);
   for (int c = 0; c < im->channels(); c++) {
     split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]);
   }
@@ -79,9 +81,13 @@ bool Normalize::ImplByFlyCV(Mat* mat) {
     std[i] = 1.0 / alpha_[i];
     mean[i] = -1 * beta_[i] * std[i];
   }
+
+  std::vector<uint32_t> channel_reorder_index = {0, 1, 2};
+  if (swap_rb_) std::swap(channel_reorder_index[0], channel_reorder_index[2]);
+
   fcv::Mat new_im(im->width(), im->height(),
                   fcv::FCVImageType::PKG_BGR_F32);
-  fcv::normalize_to_submean_to_reorder(*im, mean, std, std::vector<uint32_t>(),
+  fcv::normalize_to_submean_to_reorder(*im, mean, std, channel_reorder_index,
                                        new_im, true);
   mat->SetMat(new_im);
   return true;
@@ -91,8 +97,8 @@ bool Normalize::ImplByFlyCV(Mat* mat) {
 bool Normalize::Run(Mat* mat, const std::vector<float>& mean,
                     const std::vector<float>& std, bool is_scale,
                     const std::vector<float>& min,
-                    const std::vector<float>& max, ProcLib lib) {
-  auto n = Normalize(mean, std, is_scale, min, max);
+                    const std::vector<float>& max, ProcLib lib, bool swap_rb) {
+  auto n = Normalize(mean, std, is_scale, min, max, swap_rb);
   return n(mat, lib);
 }
@@ -23,7 +23,8 @@ class FASTDEPLOY_DECL Normalize : public Processor {
   Normalize(const std::vector<float>& mean, const std::vector<float>& std,
             bool is_scale = true,
             const std::vector<float>& min = std::vector<float>(),
-            const std::vector<float>& max = std::vector<float>());
+            const std::vector<float>& max = std::vector<float>(),
+            bool swap_rb = false);
   bool ImplByOpenCV(Mat* mat);
 #ifdef ENABLE_FLYCV
   bool ImplByFlyCV(Mat* mat);
@@ -44,14 +45,23 @@ class FASTDEPLOY_DECL Normalize : public Processor {
                   const std::vector<float>& std, bool is_scale = true,
                   const std::vector<float>& min = std::vector<float>(),
                   const std::vector<float>& max = std::vector<float>(),
-                  ProcLib lib = ProcLib::DEFAULT);
+                  ProcLib lib = ProcLib::DEFAULT, bool swap_rb = false);
 
   std::vector<float> GetAlpha() const { return alpha_; }
   std::vector<float> GetBeta() const { return beta_; }
 
+  bool GetSwapRB() {
+    return swap_rb_;
+  }
+
+  void SetSwapRB(bool swap_rb) {
+    swap_rb_ = swap_rb;
+  }
+
  private:
   std::vector<float> alpha_;
   std::vector<float> beta_;
+  bool swap_rb_;
 };
 }  // namespace vision
 }  // namespace fastdeploy
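As an aside (not part of the commit), here is a minimal usage sketch of the extended interface: with swap_rb=true the processor swaps the R and B channels itself, so a separate BGR2RGB step becomes unnecessary. The mean/std values are just ImageNet-style placeholders.

```cpp
#include "fastdeploy/vision/common/processors/normalize.h"

// A minimal sketch, assuming ImageNet-style mean/std values.
// swap_rb=true folds the R/B channel swap into the normalization pass.
fastdeploy::vision::Normalize normalize(
    {0.485f, 0.456f, 0.406f},   // mean
    {0.229f, 0.224f, 0.225f},   // std
    /*is_scale=*/true,
    /*min=*/{}, /*max=*/{},
    /*swap_rb=*/true);
```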
@@ -21,7 +21,8 @@ NormalizeAndPermute::NormalizeAndPermute(const std::vector<float>& mean,
                                          const std::vector<float>& std,
                                          bool is_scale,
                                          const std::vector<float>& min,
-                                         const std::vector<float>& max) {
+                                         const std::vector<float>& max,
+                                         bool swap_rb) {
   FDASSERT(mean.size() == std.size(),
            "Normalize: requires the size of mean equal to the size of std.");
   std::vector<double> mean_(mean.begin(), mean.end());
@@ -52,6 +53,7 @@ NormalizeAndPermute::NormalizeAndPermute(const std::vector<float>& mean,
     alpha_.push_back(alpha);
     beta_.push_back(beta);
   }
+  swap_rb_ = swap_rb;
 }
 
 bool NormalizeAndPermute::ImplByOpenCV(Mat* mat) {
@@ -60,6 +62,7 @@ bool NormalizeAndPermute::ImplByOpenCV(Mat* mat) {
   int origin_h = im->rows;
   std::vector<cv::Mat> split_im;
   cv::split(*im, split_im);
+  if (swap_rb_) std::swap(split_im[0], split_im[2]);
   for (int c = 0; c < im->channels(); c++) {
     split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]);
   }
@@ -94,8 +97,12 @@ bool NormalizeAndPermute::ImplByFlyCV(Mat* mat) {
     std[i] = 1.0 / alpha_[i];
     mean[i] = -1 * beta_[i] * std[i];
   }
+
+  std::vector<uint32_t> channel_reorder_index = {0, 1, 2};
+  if (swap_rb_) std::swap(channel_reorder_index[0], channel_reorder_index[2]);
+
   fcv::Mat new_im;
-  fcv::normalize_to_submean_to_reorder(*im, mean, std, std::vector<uint32_t>(),
+  fcv::normalize_to_submean_to_reorder(*im, mean, std, channel_reorder_index,
                                        new_im, false);
   mat->SetMat(new_im);
   mat->layout = Layout::CHW;
@@ -106,8 +113,9 @@ bool NormalizeAndPermute::ImplByFlyCV(Mat* mat) {
 bool NormalizeAndPermute::Run(Mat* mat, const std::vector<float>& mean,
                               const std::vector<float>& std, bool is_scale,
                               const std::vector<float>& min,
-                              const std::vector<float>& max, ProcLib lib) {
-  auto n = NormalizeAndPermute(mean, std, is_scale, min, max);
+                              const std::vector<float>& max, ProcLib lib,
+                              bool swap_rb) {
+  auto n = NormalizeAndPermute(mean, std, is_scale, min, max, swap_rb);
   return n(mat, lib);
 }
@@ -23,7 +23,8 @@ class FASTDEPLOY_DECL NormalizeAndPermute : public Processor {
   NormalizeAndPermute(const std::vector<float>& mean,
                       const std::vector<float>& std, bool is_scale = true,
                       const std::vector<float>& min = std::vector<float>(),
-                      const std::vector<float>& max = std::vector<float>());
+                      const std::vector<float>& max = std::vector<float>(),
+                      bool swap_rb = false);
   bool ImplByOpenCV(Mat* mat);
 #ifdef ENABLE_FLYCV
   bool ImplByFlyCV(Mat* mat);
@@ -44,7 +45,7 @@ class FASTDEPLOY_DECL NormalizeAndPermute : public Processor {
                   const std::vector<float>& std, bool is_scale = true,
                   const std::vector<float>& min = std::vector<float>(),
                   const std::vector<float>& max = std::vector<float>(),
-                  ProcLib lib = ProcLib::DEFAULT);
+                  ProcLib lib = ProcLib::DEFAULT, bool swap_rb = false);
 
   void SetAlpha(const std::vector<float>& alpha) {
     alpha_.clear();
@@ -58,9 +59,18 @@ class FASTDEPLOY_DECL NormalizeAndPermute : public Processor {
     beta_.assign(beta.begin(), beta.end());
   }
 
+  bool GetSwapRB() {
+    return swap_rb_;
+  }
+
+  void SetSwapRB(bool swap_rb) {
+    swap_rb_ = swap_rb;
+  }
+
  private:
   std::vector<float> alpha_;
   std::vector<float> beta_;
+  bool swap_rb_;
 };
 }  // namespace vision
 }  // namespace fastdeploy
@@ -95,10 +95,77 @@ void FuseNormalizeHWC2CHW(
            << std::endl;
 }
 
+void FuseNormalizeColorConvert(
+    std::vector<std::shared_ptr<Processor>>* processors) {
+  // Fuse Normalize and BGR2RGB/RGB2BGR
+  int normalize_index = -1;
+  int color_convert_index = -1;
+  // If these middle processors are after BGR2RGB/RGB2BGR and before Normalize,
+  // we can still fuse Normalize and BGR2RGB/RGB2BGR
+  static std::unordered_set<std::string> middle_processors(
+      {"Resize", "ResizeByShort", "ResizeByLong", "Crop", "CenterCrop",
+       "LimitByStride", "LimitShort", "Pad", "PadToSize", "StridePad",
+       "WarpAffine"});
+
+  for (size_t i = 0; i < processors->size(); ++i) {
+    if ((*processors)[i]->Name() == "BGR2RGB" ||
+        (*processors)[i]->Name() == "RGB2BGR") {
+      color_convert_index = i;
+      for (size_t j = color_convert_index + 1; j < processors->size(); ++j) {
+        if ((*processors)[j]->Name() == "Normalize" ||
+            (*processors)[j]->Name() == "NormalizeAndPermute") {
+          normalize_index = j;
+          break;
+        }
+      }
+      if (normalize_index < 0) {
+        return;
+      }
+      for (size_t j = color_convert_index + 1; j < normalize_index; ++j) {
+        if (middle_processors.count((*processors)[j]->Name())) {
+          continue;
+        }
+        return;
+      }
+    }
+  }
+
+  if (color_convert_index < 0) {
+    return;
+  }
+
+  // Delete Color Space Convert
+  std::string color_processor_name = (*processors)[color_convert_index]->Name();
+  processors->erase(processors->begin() + color_convert_index);
+
+  // Toggle the swap_rb option of the Normalize processor
+  std::string normalize_processor_name =
+      (*processors)[normalize_index - 1]->Name();
+  bool swap_rb;
+  if (normalize_processor_name == "Normalize") {
+    auto processor = dynamic_cast<Normalize*>(
+        (*processors)[normalize_index - 1].get());
+    swap_rb = processor->GetSwapRB();
+    processor->SetSwapRB(!swap_rb);
+  } else if (normalize_processor_name == "NormalizeAndPermute") {
+    auto processor = dynamic_cast<NormalizeAndPermute*>(
+        (*processors)[normalize_index - 1].get());
+    swap_rb = processor->GetSwapRB();
+    processor->SetSwapRB(!swap_rb);
+  } else {
+    FDASSERT(false, "Something wrong in FuseNormalizeColorConvert().");
+  }
+
+  FDINFO << color_processor_name << " and " << normalize_processor_name
+         << " are fused to " << normalize_processor_name
+         << " with swap_rb=" << !swap_rb << std::endl;
+}
+
 void FuseTransforms(
     std::vector<std::shared_ptr<Processor>>* processors) {
   FuseNormalizeCast(processors);
   FuseNormalizeHWC2CHW(processors);
+  FuseNormalizeColorConvert(processors);
 }
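For intuition (not part of the commit), a hedged sketch of the fusion in action; BGR2RGB is assumed to be the FastDeploy processor whose Name() returns "BGR2RGB", matching the strings checked above. A color-convert step followed by Normalize collapses into a single Normalize whose swap_rb flag is toggled, saving one full image traversal.

```cpp
#include "fastdeploy/vision/common/processors/transform.h"

#include <memory>
#include <vector>

void SketchFusion() {
  std::vector<std::shared_ptr<fastdeploy::vision::Processor>> processors;
  // Assumed: BGR2RGB is FastDeploy's color-convert processor.
  processors.push_back(std::make_shared<fastdeploy::vision::BGR2RGB>());
  processors.push_back(std::make_shared<fastdeploy::vision::Normalize>(
      std::vector<float>{0.5f, 0.5f, 0.5f},    // mean
      std::vector<float>{0.5f, 0.5f, 0.5f}));  // std

  fastdeploy::vision::FuseTransforms(&processors);
  // Expected: one "Normalize" processor remains with GetSwapRB() == true,
  // so the R/B swap happens inside the normalization pass.
}
```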
@@ -31,6 +31,7 @@
 #include "fastdeploy/vision/common/processors/resize_by_short.h"
 #include "fastdeploy/vision/common/processors/stride_pad.h"
 #include "fastdeploy/vision/common/processors/warp_affine.h"
+#include <unordered_set>
 
 namespace fastdeploy {
 namespace vision {
@@ -41,6 +42,9 @@ void FuseTransforms(std::vector<std::shared_ptr<Processor>>* processors);
 void FuseNormalizeCast(std::vector<std::shared_ptr<Processor>>* processors);
 // Fuse Normalize + HWC2CHW to NormalizeAndPermute
 void FuseNormalizeHWC2CHW(std::vector<std::shared_ptr<Processor>>* processors);
+// Fuse Normalize + Color Convert
+void FuseNormalizeColorConvert(
+    std::vector<std::shared_ptr<Processor>>* processors);
 
 }  // namespace vision
 }  // namespace fastdeploy
@@ -5,10 +5,6 @@ import android.os.Handler;
 import android.util.AttributeSet;
 import android.widget.ListView;
 
-/**
- * Created by ruanshimin on 2018/5/14.
- */
-
 public class ResultListView extends ListView {
     public ResultListView(Context context) {
         super(context);
@@ -15,10 +15,6 @@ import com.baidu.paddle.fastdeploy.app.ui.view.model.BaseResultModel;
 import java.text.DecimalFormat;
 import java.util.List;
 
-/**
- * Created by ruanshimin on 2018/5/13.
- */
-
 public class DetectResultAdapter extends ArrayAdapter<BaseResultModel> {
     private int resourceId;
@@ -1,9 +1,5 @@
 package com.baidu.paddle.fastdeploy.app.ui.view.model;
 
-/**
- * Created by ruanshimin on 2018/5/16.
- */
-
 public class BaseResultModel {
     private int index;
     private String name;
@@ -2,8 +2,8 @@
 <!-- Default App name -->
 <string name="app_name">EasyEdge</string>
 <!-- Other App name -->
-<string name="detection_app_name">FastDeploy PicoDet</string>
-<string name="ocr_app_name">FastDeploy PP-OCRv2</string>
+<string name="detection_app_name">EasyEdge</string>
+<string name="ocr_app_name">EasyEdge</string>
 <!-- Keys for PreferenceScreen -->
 <string name="CHOOSE_PRE_INSTALLED_MODEL_KEY">CHOOSE_INSTALLED_MODEL_KEY</string>
 <string name="MODEL_DIR_KEY">MODEL_DIR_KEY</string>
@@ -48,12 +48,10 @@ def test_classification_mobilenetv2():
     im1 = cv2.imread("./resources/ILSVRC2012_val_00000010.jpeg")
     im2 = cv2.imread("./resources/ILSVRC2012_val_00030010.jpeg")
 
-    # for i in range(3000000):
-    while True:
+    for i in range(3):
         # test single predict
-        model.postprocessor.topk = 6
-        result1 = model.predict(im1)
-        result2 = model.predict(im2)
+        result1 = model.predict(im1, 6)
+        result2 = model.predict(im2, 6)
 
         diff_label_1 = np.fabs(
             np.array(result1.label_ids) - np.array(expected_label_ids_1))