diff --git a/.github/ISSUE_TEMPLATE/报告issue.md b/.github/ISSUE_TEMPLATE/报告issue.md
index 34085055f..be93cd251 100644
--- a/.github/ISSUE_TEMPLATE/报告issue.md
+++ b/.github/ISSUE_TEMPLATE/报告issue.md
@@ -7,9 +7,16 @@ assignees: ''
---
+*********************************************
+Open source is hard work and our engineers carry a heavy daily R&D load, so please ask your question directly in the format of this issue template.
+
+This greatly reduces the time engineers spend confirming basic details of your environment and build process with you.
+*********************************************
+
## Environment
-- FastDeploy version: state the exact version, e.g. fastdeploy-linux-gpu-0.8.0, or self-built develop code (include how you built it and a screenshot of the build options printed by cmake)
+- FastDeploy version: state the exact version, e.g. fastdeploy-linux-gpu-0.8.0
+- If you built FastDeploy yourself, describe how you built it (the parameters and commands used)
- System platform: Linux x64 (Ubuntu 18.04) / Windows x64 (Windows 10) / Mac OSX arm (12.0) / Mac OSX intel (12.0)
- Hardware: state the exact hardware model, e.g. Nvidia GPU 3080TI, CUDA 11.2, cuDNN 8.3
- Programming language: C++ / Python (3.7, 3.8, etc.)
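+  - Example build command (a sketch; replace with the exact options you used): `cmake .. -DENABLE_ORT_BACKEND=ON -DENABLE_VISION=ON && make -j8`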
@@ -23,3 +30,10 @@ assignees: ''
- - First run the deployment demos under `examples`, using the models that `examples` provides, and confirm they execute correctly
- - If the code under `examples` runs, but your own model or your own code does not
- - - Provide the way you use your code, or your model itself, so engineers can quickly locate the problem
+
+
+*********************************************
+Open source is hard work and our engineers carry a heavy daily R&D load, so please ask your question directly in the format of this issue template.
+
+This greatly reduces the time engineers spend confirming basic details of your environment and build process with you.
+*********************************************
diff --git a/CMakeLists.txt b/CMakeLists.txt
index dda90278f..ab23efc3d 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -40,7 +40,7 @@ if(NOT MSVC)
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1)
endif(NOT MSVC)
-if(UNIX AND (NOT APPLE) AND (NOT ANDROID) AND (NOT ENABLE_TIMVX))
+if(UNIX AND (NOT APPLE) AND (NOT ANDROID) AND (NOT WITH_TIMVX))
include(${PROJECT_SOURCE_DIR}/cmake/patchelf.cmake)
endif()
@@ -67,6 +67,8 @@ option(ENABLE_FLYCV "Whether to enable flycv to boost image preprocess." OFF)
option(ENABLE_TIMVX "Whether to compile for TIMVX deploy." OFF)
option(WITH_CANN "Whether to compile for Huawei Ascend deploy with CANN." OFF)
option(WITH_CANN_PY "Whether to compile for Huawei Ascend deploy with CANN using python." OFF)
+option(WITH_TIMVX "Whether to compile for TIMVX deploy." OFF)
+option(WITH_XPU "Whether to compile for KunlunXin XPU deploy." OFF)
option(WITH_TESTING "Whether to compile with unittest." OFF)
############################# Options for Android cross compiling #########################
option(WITH_OPENCV_STATIC "Use OpenCV static lib for Android." OFF)
@@ -140,14 +142,19 @@ set(HEAD_DIR "${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}")
include_directories(${HEAD_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
-if (ENABLE_TIMVX)
+if (WITH_TIMVX)
include(${PROJECT_SOURCE_DIR}/cmake/timvx.cmake)
endif()
if (WITH_CANN)
if(NOT ${ENABLE_LITE_BACKEND})
- message(WARNING "While compiling with -DWITH_CANN=ON, will force to set -DENABLE_LITE_BACKEND=ON")
- set(ENABLE_LITE_BACKEND ON)
+ set(ENABLE_LITE_BACKEND ON)
+ endif()
+ if(NOT CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
+    message(FATAL_ERROR "Huawei Ascend NPU is only supported on the Linux aarch64 platform for now.")
+ endif()
+ if(NOT PADDLELITE_URL)
+ set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_arm64_huawei_ascend_npu_1121.tgz")
endif()
endif()
@@ -156,17 +163,34 @@ if (WITH_CANN_PY)
if(NOT ${ENABLE_LITE_BACKEND})
set(ENABLE_LITE_BACKEND ON)
endif()
+ if(NOT CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
+    message(FATAL_ERROR "Huawei Ascend NPU is only supported on the Linux aarch64 platform for now.")
+ endif()
+ if(NOT PADDLELITE_URL)
+ set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_arm64_huawei_ascend_npu_python_1207.tgz")
+ endif()
execute_process(COMMAND sh -c "ls *.so*" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/paddlelite/lib
COMMAND sh -c "xargs ${PATCHELF_EXE} --set-rpath '$ORIGIN'" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/paddlelite/lib
RESULT_VARIABLE result
OUTPUT_VARIABLE curr_out
ERROR_VARIABLE curr_out)
  if(result EQUAL "1")
- message(FATAL_ERROR "Failed to patchelf tensorrt libraries.")
+ message(FATAL_ERROR "Failed to patchelf CANN libraries.")
endif()
message(STATUS "result:${result} out:${curr_out}")
endif()
+if (WITH_XPU)
+ if(NOT ENABLE_LITE_BACKEND)
+ set(ENABLE_LITE_BACKEND ON)
+ endif()
+ if(NOT CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64")
+    message(FATAL_ERROR "KunlunXin XPU is only supported on the Linux x86_64 platform.")
+ endif()
+ if(NOT PADDLELITE_URL)
+ set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/third_libs/lite-linux-x64-xpu-20221215.tgz")
+ endif()
+endif()
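+# Example configure command for a KunlunXin XPU build (a sketch; combine with
+# the backends you need):
+#   cmake .. -DWITH_XPU=ON -DENABLE_VISION=ON -DCMAKE_INSTALL_PREFIX=${PWD}/installed_fastdeploy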
if(ANDROID OR IOS)
if(ENABLE_ORT_BACKEND)
@@ -373,14 +397,20 @@ if(ENABLE_TRT_BACKEND)
endif()
if(NOT BUILD_ON_JETSON)
if(NOT TRT_DIRECTORY)
- message(FATAL_ERROR "While -DENABLE_TRT_BACKEND=ON, must define -DTRT_DIRECTORY, e.g -DTRT_DIRECTORY=/Downloads/TensorRT-8.4")
+ set(TRT_INC_DIR /usr/include/x86_64-linux-gnu/)
+ set(TRT_LIB_DIR /usr/lib/x86_64-linux-gnu/)
endif()
endif()
- set(TRT_INC_DIR /usr/include/aarch64-linux-gnu/)
- set(TRT_LIB_DIR /usr/lib/aarch64-linux-gnu/)
- if(NOT BUILD_ON_JETSON)
- set(TRT_INC_DIR ${TRT_DIRECTORY}/include)
- set(TRT_LIB_DIR ${TRT_DIRECTORY}/lib)
+ if(BUILD_ON_JETSON)
+ set(TRT_INC_DIR /usr/include/aarch64-linux-gnu/)
+ set(TRT_LIB_DIR /usr/lib/aarch64-linux-gnu/)
+ else()
+ set(TRT_INC_DIR /usr/include/x86_64-linux-gnu/)
+ set(TRT_LIB_DIR /usr/lib/x86_64-linux-gnu/)
+ if(TRT_DIRECTORY)
+ set(TRT_INC_DIR ${TRT_DIRECTORY}/include)
+ set(TRT_LIB_DIR ${TRT_DIRECTORY}/lib)
+ endif()
endif()
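+  # Example (a sketch): with no -DTRT_DIRECTORY the system TensorRT under
+  # /usr/lib/x86_64-linux-gnu is used; to build against a local tarball pass
+  # e.g. cmake .. -DENABLE_TRT_BACKEND=ON -DWITH_GPU=ON -DTRT_DIRECTORY=/Downloads/TensorRT-8.4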
add_definitions(-DENABLE_TRT_BACKEND)
@@ -393,7 +423,7 @@ if(ENABLE_TRT_BACKEND)
list(APPEND DEPEND_LIBS ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_PLUGIN_LIB})
list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_OP_CUDA_KERNEL_SRCS})
- if(NOT BUILD_ON_JETSON)
+ if(NOT BUILD_ON_JETSON AND TRT_DIRECTORY)
if(NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
endif()
@@ -614,7 +644,6 @@ install(
DESTINATION ${CMAKE_INSTALL_PREFIX}
)
-
install(
DIRECTORY ${PROJECT_SOURCE_DIR}/examples
DESTINATION ${CMAKE_INSTALL_PREFIX}
diff --git a/FastDeploy.cmake.in b/FastDeploy.cmake.in
index fd0ea847a..17f83eb69 100755
--- a/FastDeploy.cmake.in
+++ b/FastDeploy.cmake.in
@@ -27,6 +27,7 @@ set(OPENCV_DIRECTORY "@OPENCV_DIRECTORY@")
set(ORT_DIRECTORY "@ORT_DIRECTORY@")
set(OPENVINO_DIRECTORY "@OPENVINO_DIRECTORY@")
set(RKNN2_TARGET_SOC "@RKNN2_TARGET_SOC@")
+set(WITH_XPU @WITH_XPU@)
set(FASTDEPLOY_LIBS "")
set(FASTDEPLOY_INCS "")
@@ -140,13 +141,19 @@ if(WITH_GPU)
if (ENABLE_TRT_BACKEND)
if(BUILD_ON_JETSON)
- find_library(TRT_INFER_LIB nvinfer /usr/include/aarch64-linux-gnu/)
- find_library(TRT_ONNX_LIB nvonnxparser /usr/include/aarch64-linux-gnu/)
- find_library(TRT_PLUGIN_LIB nvinfer_plugin /usr/include/aarch64-linux-gnu/)
+ find_library(TRT_INFER_LIB nvinfer /usr/lib/aarch64-linux-gnu/)
+ find_library(TRT_ONNX_LIB nvonnxparser /usr/lib/aarch64-linux-gnu/)
+ find_library(TRT_PLUGIN_LIB nvinfer_plugin /usr/lib/aarch64-linux-gnu/)
else()
- find_library(TRT_INFER_LIB nvinfer ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
- find_library(TRT_ONNX_LIB nvonnxparser ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
- find_library(TRT_PLUGIN_LIB nvinfer_plugin ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
+ if(TRT_DIRECTORY)
+ find_library(TRT_INFER_LIB nvinfer ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
+ find_library(TRT_ONNX_LIB nvonnxparser ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
+ find_library(TRT_PLUGIN_LIB nvinfer_plugin ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
+ else()
+ find_library(TRT_INFER_LIB nvinfer /usr/lib/x86_64-linux-gnu/)
+ find_library(TRT_ONNX_LIB nvonnxparser /usr/lib/x86_64-linux-gnu/)
+ find_library(TRT_PLUGIN_LIB nvinfer_plugin /usr/lib/x86_64-linux-gnu/)
+ endif()
endif()
list(APPEND FASTDEPLOY_LIBS ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_PLUGIN_LIB})
endif()
@@ -237,6 +244,10 @@ if(ENABLE_PADDLE_FRONTEND)
list(APPEND FASTDEPLOY_LIBS ${PADDLE2ONNX_LIB})
endif()
+if(WITH_XPU)
+ list(APPEND FASTDEPLOY_LIBS -lpthread -lrt -ldl)
+endif()
+
remove_duplicate_libraries(FASTDEPLOY_LIBS)
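+# Minimal consumer sketch (assumes FASTDEPLOY_INSTALL_DIR points at an
+# installed FastDeploy SDK):
+#   include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+#   include_directories(${FASTDEPLOY_INCS})
+#   target_link_libraries(your_app ${FASTDEPLOY_LIBS})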
# Print compiler information
@@ -270,6 +281,7 @@ message(STATUS " ENABLE_VISION : ${ENABLE_VISION}")
message(STATUS " ENABLE_TEXT : ${ENABLE_TEXT}")
if(WITH_GPU)
message(STATUS " CUDA_DIRECTORY : ${CUDA_DIRECTORY}")
+ message(STATUS " TRT_DIRECTORY : ${TRT_DIRECTORY}")
endif()
if(OPENCV_DIRECTORY)
message(STATUS " OPENCV_DIRECTORY : ${OPENCV_DIRECTORY}")
diff --git a/README.md b/README.md
index b1dd06cd5..e8f014f6f 120000
--- a/README.md
+++ b/README.md
@@ -1 +1 @@
-README_EN.md
+README_CN.md
diff --git a/README_CN.md b/README_CN.md
index 8814215fe..9cb3ecf6c 100755
--- a/README_CN.md
+++ b/README_CN.md
@@ -1,4 +1,4 @@
-[English](README_EN.md) | 简体中文
+[English](README_EN.md) | 简体中文 | [हिन्दी](./docs/docs_i18n/README_हिन्दी.md) | [日本語](./docs/docs_i18n/README_日本語.md) | [한국어](./docs/docs_i18n/README_한국어.md) | [Ру́сский язы́к](./docs/docs_i18n/README_Ру́сский_язы́к.md)

@@ -36,16 +36,15 @@
[demo image table: per-task visual demos; Speech sample input: 早上好今天是2020/10/29,最低温度是-3°C。 output: synthesized audio (https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)]
+
-| Task | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux |
-|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:---------------:|:-------------:|:-------------:|:-------:|
-| --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | Phytium D2000 aarch64 | NVIDIA Jetson | Graphcore IPU | Serving |
-| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | [✅](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [TorchVison/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Classification | [ltralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/SqueeezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Detection | [WongKinYiu/YOLOv7end2end_ort_](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceAlign | [Single430FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Matting | [PeterL1n/RobustVideoMatting]() | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | |
-| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ |
-| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ✅ |
+| Task | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux | Linux |
+|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:---------------:|:-------------:|:-------------:|:-------:|:-------:|
+| --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | Phytium D2000 aarch64 | NVIDIA Jetson | Graphcore IPU | KunlunXin XPU | Serving |
+| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | [✅](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [WongKinYiu/YOLOv7end2end_ort_](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceAlign | [Single430/FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Matting | [PeterL1n/RobustVideoMatting]() | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | |
+| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ | ✅ |
+| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ❔ | ✅ |
@@ -322,6 +321,16 @@ int main(int argc, char* argv[]) {
+## **Community**
+
+* **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas
+
+* **WeChat**: Scan the QR code and fill out the questionnaire to join our technical community, where you can discuss deployment pain points and solutions with community developers
+
+
+
[demo image table: per-task visual demos; Speech sample input: "Life was like a box of chocolates, you never know what you're gonna get." output: synthesized audio (https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/tacotron2_ljspeech_waveflow_samples_0.2/sentence_1.wav)]
-
+
-| Task | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux |
-|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:------------------------:|:------------------------:|:------------------------:|:-----------------------:|:---------------------:|:--------------------------:| :----------------: |:---------------------------:|:---------------------------:|:-------:|
-| --- | --- | X86 CPU | NVIDIA GPU | Intel CPU | NVIDIA GPU | Intel CPU | Arm CPU | AArch64 CPU | Phytium D2000CPU | NVIDIA Jetson | Graphcore IPU | Serving |
-| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | [✅](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [TorchVison/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Classification | [ltralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/SqueeezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Detection | [WongKinYiu/YOLOv7end2end_ort_](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
-| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceAlign | [Single430FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Matting | [PeterL1n/RobustVideoMatting]() | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
-| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
-| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | |
-| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ |
-| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ✅ |
+| Task | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux | Linux |
+|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:------------------------:|:------------------------:|:------------------------:|:-----------------------:|:---------------------:|:--------------------------:|:----------------:|:---------------------------:|:---------------------------:|:-------:|:-------:|
+| --- | --- | X86 CPU | NVIDIA GPU | Intel CPU | NVIDIA GPU | Intel CPU | Arm CPU | AArch64 CPU | Phytium D2000 CPU | NVIDIA Jetson | Graphcore IPU | KunlunXin XPU | Serving |
+| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | [✅](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [WongKinYiu/YOLOv7end2end_ort_](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
+| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
+| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceAlign | [Single430/FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Matting | [PeterL1n/RobustVideoMatting]() | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
+| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
+| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
+| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | |
+| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ | ✅ |
+| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ❔ | ✅ |
@@ -318,6 +319,16 @@ Notes: ✅: already supported; ❔: to be supported in the future; N/A: Not Ava
| OCR | [PaddleOCR/PP-OCRv3](./examples/application/js/web_demo/src/pages/cv/ocr) | ✅ |
+
+## 👬 Community
+
+ - **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas.
+
+ - **WeChat**: Scan the QR code below using WeChat, follow the PaddlePaddle official account, and fill out the questionnaire to join the WeChat group.
+
+
+[docs/docs_i18n/README_Ру́сский_язы́к.md, Russian README, rendered here in English:]
+
+Installation | Documentation | API Documentation | Changelog
+
+**⚡️FastDeploy** is an **all-scenario**, **easy-to-use and flexible**, **extremely efficient** AI inference deployment tool. It provides an 📦 **out-of-the-box** deployment experience, supports 🔥150+ **text**, **vision**, **speech**, and **cross-modal** models, and applies 🔚 **end-to-end** inference performance optimization. It covers image classification, object detection, image segmentation, face detection, face recognition, keypoint detection, matting, OCR, NLP, TTS, and other tasks, meeting developers' needs for **multi-scenario, multi-hardware, multi-platform** industrial deployment.
+
+| [Image Classification](examples/vision/classification) | [Object Detection](examples/vision/detection) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Portrait Segmentation](examples/vision/segmentation/paddleseg) |
+|:--:|:--:|:--:|:--:|
+| [demo image] | [demo image] | [demo image] | [demo image] |
+| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) | [**Face Alignment**](examples/vision/facealign) |
+| [demo image] | [demo image] | [demo image] | [demo image] |
+| [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) | [**Speech**](examples/audio/pp-tts) |
+| [demo image] | [demo image] | [demo image] | **input**: 早上好今天是2020/10/29,最低温度是-3°C。 **output**: [audio sample](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav) |
+
+
+[docs/docs_i18n/README_हिन्दी.md, Hindi README: the same banner links, introduction, and demo table as the Russian version above.]
+
+
+[docs/docs_i18n/README_日本語.md, Japanese README: the same banner links, introduction, and demo table as the Russian version above.]
+
+
+[docs/docs_i18n/README_한국어.md, Korean README: the same banner links, introduction, and demo table as the Russian version above.]
+
+
+
+
+Note in particular that models deployed on the A311D must be quantized. For model quantization, see: [Model Quantization](../../../../../../docs/cn/quantize.md)
diff --git a/examples/vision/classification/paddleclas/a311d/cpp/infer.cc b/examples/vision/classification/paddleclas/a311d/cpp/infer.cc
new file mode 100755
index 000000000..140311eec
--- /dev/null
+++ b/examples/vision/classification/paddleclas/a311d/cpp/infer.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include <iostream>
+#include <string>
+
+#include "fastdeploy/vision.h"
+
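+// NOTE: the remainder of this file is a sketch reconstructed from the
+// parallel PP-YOLOE example in this PR; the exact file names inside
+// model_dir are assumptions.
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
+  auto model_file = model_dir + sep + "inference.pdmodel";
+  auto params_file = model_dir + sep + "inference.pdiparams";
+  auto config_file = model_dir + sep + "inference_cls.yaml";
+  auto subgraph_file = model_dir + sep + "subgraph.txt";
+
+  // Run on the A311D NPU through Paddle Lite's TIM-VX backend, reusing the
+  // precomputed subgraph partition file shipped with the quantized model.
+  fastdeploy::RuntimeOption option;
+  option.UseTimVX();
+  option.SetLiteSubgraphPartitionPath(subgraph_file);
+
+  auto model = fastdeploy::vision::classification::PaddleClasModel(
+      model_file, params_file, config_file, option);
+  assert(model.Initialized());
+
+  auto im = cv::imread(image_file);
+
+  fastdeploy::vision::ClassifyResult res;
+  if (!model.Predict(im, &res)) {
+    std::cerr << "Failed to predict." << std::endl;
+    return;
+  }
+  std::cout << res.Str() << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+  if (argc < 3) {
+    std::cout << "Usage: infer_demo path/to/quant_model path/to/image"
+              << std::endl;
+    return -1;
+  }
+  InitAndInfer(argv[1], argv[2]);
+  return 0;
+}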
+Note in particular that models deployed on the A311D must be quantized. For model quantization, see: [Model Quantization](../../../../../../docs/cn/quantize.md)
diff --git a/examples/vision/detection/paddledetection/a311d/cpp/infer_ppyoloe.cc b/examples/vision/detection/paddledetection/a311d/cpp/infer_ppyoloe.cc
new file mode 100755
index 000000000..609a41d4b
--- /dev/null
+++ b/examples/vision/detection/paddledetection/a311d/cpp/infer_ppyoloe.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+ auto subgraph_file = model_dir + sep + "subgraph.txt";
+
+ fastdeploy::RuntimeOption option;
+ option.UseTimVX();
+ option.SetLiteSubgraphPartitionPath(subgraph_file);
+
+ auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file,
+ config_file, option);
+ assert(model.Initialized());
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 3) {
+ std::cout << "Usage: infer_demo path/to/quant_model "
+ "path/to/image "
+ "e.g ./infer_demo ./PPYOLOE_L_quant ./test.jpeg"
+ << std::endl;
+ return -1;
+ }
+
+ std::string model_dir = argv[1];
+ std::string test_image = argv[2];
+ InitAndInfer(model_dir, test_image);
+ return 0;
+}
diff --git a/examples/vision/detection/paddledetection/a311d/cpp/run_with_adb.sh b/examples/vision/detection/paddledetection/a311d/cpp/run_with_adb.sh
new file mode 100755
index 000000000..dd7d7b47d
--- /dev/null
+++ b/examples/vision/detection/paddledetection/a311d/cpp/run_with_adb.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+HOST_SPACE=${PWD}
+echo ${HOST_SPACE}
+WORK_SPACE=/data/local/tmp/test
+
+# The first parameter represents the demo name
+DEMO_NAME=image_classification_demo
+if [ -n "$1" ]; then
+ DEMO_NAME=$1
+fi
+
+# The second parameter represents the model name
+MODEL_NAME=mobilenet_v1_fp32_224
+if [ -n "$2" ]; then
+ MODEL_NAME=$2
+fi
+
+# The third parameter indicates the name of the image to be tested
+IMAGE_NAME=0001.jpg
+if [ -n "$3" ]; then
+ IMAGE_NAME=$3
+fi
+
+# The fourth parameter represents the ID of the device
+ADB_DEVICE_NAME=
+if [ -n "$4" ]; then
+ ADB_DEVICE_NAME="-s $4"
+fi
+
+# Set the environment variables required during the running process
+EXPORT_ENVIRONMENT_VARIABLES="export GLOG_v=5; export SUBGRAPH_ONLINE_MODE=true; export RKNPU_LOGLEVEL=5; export RKNN_LOG_LEVEL=5; ulimit -c unlimited; export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1; export VIV_VX_SET_PER_CHANNEL_ENTROPY=100; export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=300000; export VSI_NN_LOG_LEVEL=5;"
+
+EXPORT_ENVIRONMENT_VARIABLES="${EXPORT_ENVIRONMENT_VARIABLES}export LD_LIBRARY_PATH=${WORK_SPACE}/lib:\$LD_LIBRARY_PATH;"
+
+# Please install adb, and DON'T run this in the docker.
+set -e
+adb $ADB_DEVICE_NAME shell "rm -rf $WORK_SPACE"
+adb $ADB_DEVICE_NAME shell "mkdir -p $WORK_SPACE"
+
+# Upload the demo, libraries, model and test images to the device
+adb $ADB_DEVICE_NAME push ${HOST_SPACE}/lib $WORK_SPACE
+adb $ADB_DEVICE_NAME push ${HOST_SPACE}/${DEMO_NAME} $WORK_SPACE
+adb $ADB_DEVICE_NAME push models $WORK_SPACE
+adb $ADB_DEVICE_NAME push images $WORK_SPACE
+
+# Execute the deployment demo
+adb $ADB_DEVICE_NAME shell "cd $WORK_SPACE; ${EXPORT_ENVIRONMENT_VARIABLES} chmod +x ./${DEMO_NAME}; ./${DEMO_NAME} ./models/${MODEL_NAME} ./images/$IMAGE_NAME"
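For completeness, a rough Python equivalent of the push-and-run flow above (handy in CI scripts); it assumes `adb` is on the PATH, omits the environment-variable exports for brevity, and uses the script's default demo/model/image names, which are illustrative only:

```python
import subprocess

WORK_SPACE = "/data/local/tmp/test"
demo, model, image = "image_classification_demo", "mobilenet_v1_fp32_224", "0001.jpg"

def adb(*args):
    # Thin wrapper over the adb CLI; raises CalledProcessError on failure.
    subprocess.run(["adb", *args], check=True)

adb("shell", f"rm -rf {WORK_SPACE}")
adb("shell", f"mkdir -p {WORK_SPACE}")
for src in ("lib", demo, "models", "images"):
    adb("push", src, WORK_SPACE)
adb("shell", f"cd {WORK_SPACE}; chmod +x ./{demo}; "
    f"./{demo} ./models/{model} ./images/{image}")
```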
diff --git a/examples/vision/detection/paddledetection/cpp/CMakeLists.txt b/examples/vision/detection/paddledetection/cpp/CMakeLists.txt
index 9bbbdf32e..9382931a1 100644
--- a/examples/vision/detection/paddledetection/cpp/CMakeLists.txt
+++ b/examples/vision/detection/paddledetection/cpp/CMakeLists.txt
@@ -29,3 +29,18 @@ target_link_libraries(infer_ppyolo_demo ${FASTDEPLOY_LIBS})
add_executable(infer_mask_rcnn_demo ${PROJECT_SOURCE_DIR}/infer_mask_rcnn.cc)
target_link_libraries(infer_mask_rcnn_demo ${FASTDEPLOY_LIBS})
+
+add_executable(infer_ssd_demo ${PROJECT_SOURCE_DIR}/infer_ssd.cc)
+target_link_libraries(infer_ssd_demo ${FASTDEPLOY_LIBS})
+
+add_executable(infer_yolov5_demo ${PROJECT_SOURCE_DIR}/infer_yolov5.cc)
+target_link_libraries(infer_yolov5_demo ${FASTDEPLOY_LIBS})
+
+add_executable(infer_yolov6_demo ${PROJECT_SOURCE_DIR}/infer_yolov6.cc)
+target_link_libraries(infer_yolov6_demo ${FASTDEPLOY_LIBS})
+
+add_executable(infer_yolov7_demo ${PROJECT_SOURCE_DIR}/infer_yolov7.cc)
+target_link_libraries(infer_yolov7_demo ${FASTDEPLOY_LIBS})
+
+add_executable(infer_rtmdet_demo ${PROJECT_SOURCE_DIR}/infer_rtmdet.cc)
+target_link_libraries(infer_rtmdet_demo ${FASTDEPLOY_LIBS})
diff --git a/examples/vision/detection/paddledetection/cpp/README.md b/examples/vision/detection/paddledetection/cpp/README.md
index 63df0365a..d9217ab1e 100644
--- a/examples/vision/detection/paddledetection/cpp/README.md
+++ b/examples/vision/detection/paddledetection/cpp/README.md
@@ -1,6 +1,6 @@
# PaddleDetection C++ Deployment Examples
-This directory provides `infer_xxx.cc` examples that quickly deploy PaddleDetection models, including PPYOLOE/PicoDet/YOLOX/YOLOv3/PPYOLO/FasterRCNN, on CPU/GPU, as well as on GPU with TensorRT acceleration.
+This directory provides `infer_xxx.cc` examples that quickly deploy PaddleDetection models, including PPYOLOE/PicoDet/YOLOX/YOLOv3/PPYOLO/FasterRCNN/YOLOv5/YOLOv6/YOLOv7/RTMDet, on CPU/GPU, as well as on GPU with TensorRT acceleration.
Before deployment, confirm the following two steps
@@ -41,7 +41,7 @@ tar xvf ppyoloe_crn_l_300e_coco.tgz
### Model Classes
-PaddleDetection currently supports 6 model series; the class names are `PPYOLOE`, `PicoDet`, `PaddleYOLOX`, `PPYOLO`, `FasterRCNN`. The constructors and prediction functions of all classes take identical parameters; this document uses PPYOLOE as an example to explain the API
+PaddleDetection currently supports 10 model series; the class names are `PPYOLOE`, `PicoDet`, `PaddleYOLOX`, `PPYOLO`, `FasterRCNN`, `SSD`, `PaddleYOLOv5`, `PaddleYOLOv6`, `PaddleYOLOv7`, and `RTMDet`. The constructors and prediction functions of all classes take identical parameters; this document uses PPYOLOE as an example to explain the API
```c++
fastdeploy::vision::detection::PPYOLOE(
const string& model_file,
diff --git a/examples/vision/detection/paddledetection/cpp/infer_rtmdet.cc b/examples/vision/detection/paddledetection/cpp/infer_rtmdet.cc
new file mode 100644
index 000000000..6262353ff
--- /dev/null
+++ b/examples/vision/detection/paddledetection/cpp/infer_rtmdet.cc
@@ -0,0 +1,129 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void CpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+ auto option = fastdeploy::RuntimeOption();
+ option.UseCpu();
+ auto model = fastdeploy::vision::detection::RTMDet(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void GpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ auto model = fastdeploy::vision::detection::RTMDet(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void TrtInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ option.UseTrtBackend();
+ auto model = fastdeploy::vision::detection::RTMDet(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 4) {
+ std::cout
+ << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
+ "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu."
+ << std::endl;
+ return -1;
+ }
+
+ if (std::atoi(argv[3]) == 0) {
+ CpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 1) {
+ GpuInfer(argv[1], argv[2]);
+ } else if(std::atoi(argv[3]) == 2){
+ TrtInfer(argv[1], argv[2]);
+ }
+ return 0;
+}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolov5.cc b/examples/vision/detection/paddledetection/cpp/infer_yolov5.cc
new file mode 100644
index 000000000..c33441b7d
--- /dev/null
+++ b/examples/vision/detection/paddledetection/cpp/infer_yolov5.cc
@@ -0,0 +1,129 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void CpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+ auto option = fastdeploy::RuntimeOption();
+ option.UseCpu();
+ auto model = fastdeploy::vision::detection::PaddleYOLOv5(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void GpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ auto model = fastdeploy::vision::detection::PaddleYOLOv5(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void TrtInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ option.UseTrtBackend();
+ auto model = fastdeploy::vision::detection::PaddleYOLOv5(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 4) {
+ std::cout
+ << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
+ "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu."
+ << std::endl;
+ return -1;
+ }
+
+ if (std::atoi(argv[3]) == 0) {
+ CpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 1) {
+ GpuInfer(argv[1], argv[2]);
+ } else if(std::atoi(argv[3]) == 2){
+ TrtInfer(argv[1], argv[2]);
+ }
+ return 0;
+}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolov6.cc b/examples/vision/detection/paddledetection/cpp/infer_yolov6.cc
new file mode 100644
index 000000000..8b7405ff6
--- /dev/null
+++ b/examples/vision/detection/paddledetection/cpp/infer_yolov6.cc
@@ -0,0 +1,129 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void CpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+ auto option = fastdeploy::RuntimeOption();
+ option.UseCpu();
+ auto model = fastdeploy::vision::detection::PaddleYOLOv6(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void GpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ auto model = fastdeploy::vision::detection::PaddleYOLOv6(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void TrtInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ option.UseTrtBackend();
+ auto model = fastdeploy::vision::detection::PaddleYOLOv6(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 4) {
+ std::cout
+ << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
+ "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu."
+ << std::endl;
+ return -1;
+ }
+
+ if (std::atoi(argv[3]) == 0) {
+ CpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 1) {
+ GpuInfer(argv[1], argv[2]);
+ } else if(std::atoi(argv[3]) == 2){
+ TrtInfer(argv[1], argv[2]);
+ }
+ return 0;
+}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolov7.cc b/examples/vision/detection/paddledetection/cpp/infer_yolov7.cc
new file mode 100644
index 000000000..e13f5f545
--- /dev/null
+++ b/examples/vision/detection/paddledetection/cpp/infer_yolov7.cc
@@ -0,0 +1,128 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void CpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+ auto option = fastdeploy::RuntimeOption();
+ option.UseCpu();
+ auto model = fastdeploy::vision::detection::PaddleYOLOv7(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void GpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ auto model = fastdeploy::vision::detection::PaddleYOLOv7(model_file, params_file,config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void TrtInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "infer_cfg.yml";
+
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ option.UseTrtBackend();
+ auto model = fastdeploy::vision::detection::PaddleYOLOv7(model_file, params_file,
+ config_file, option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(&im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+ auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 4) {
+ std::cout
+ << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
+ "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu."
+ << std::endl;
+ return -1;
+ }
+
+ if (std::atoi(argv[3]) == 0) {
+ CpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 1) {
+ GpuInfer(argv[1], argv[2]);
+ } else if(std::atoi(argv[3]) == 2){
+ TrtInfer(argv[1], argv[2]);
+ }
+ return 0;
+}
diff --git a/examples/vision/detection/paddledetection/python/README.md b/examples/vision/detection/paddledetection/python/README.md
index 7747ca071..993c1695d 100644
--- a/examples/vision/detection/paddledetection/python/README.md
+++ b/examples/vision/detection/paddledetection/python/README.md
@@ -41,6 +41,10 @@ fastdeploy.vision.detection.PPYOLO(model_file, params_file, config_file, runtime
fastdeploy.vision.detection.FasterRCNN(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
fastdeploy.vision.detection.MaskRCNN(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
fastdeploy.vision.detection.SSD(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
+fastdeploy.vision.detection.PaddleYOLOv5(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
+fastdeploy.vision.detection.PaddleYOLOv6(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
+fastdeploy.vision.detection.PaddleYOLOv7(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
+fastdeploy.vision.detection.RTMDet(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
```
Loads and initializes a PaddleDetection model, where model_file and params_file are the exported Paddle deployment model files, and config_file is the deployment configuration YAML file exported alongside them by PaddleDetection
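As a concrete illustration of this shared interface, a minimal sketch using the newly added `RTMDet` class (the model directory name is hypothetical; any of the classes listed above can be substituted):

```python
import cv2
import fastdeploy as fd

model_dir = "rtmdet_export"  # hypothetical export directory
model = fd.vision.detection.RTMDet(
    model_dir + "/model.pdmodel",
    model_dir + "/model.pdiparams",
    model_dir + "/infer_cfg.yml")  # default runtime_option runs on CPU

im = cv2.imread("000000014439.jpg")
result = model.predict(im)
print(result)  # DetectionResult: boxes, scores, label_ids
```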
diff --git a/examples/vision/detection/paddledetection/python/infer_rtmdet.py b/examples/vision/detection/paddledetection/python/infer_rtmdet.py
new file mode 100644
index 000000000..ec4c6cda8
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/infer_rtmdet.py
@@ -0,0 +1,59 @@
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model_dir",
+ required=True,
+ help="Path of PaddleDetection model directory")
+ parser.add_argument(
+ "--image", required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu' or 'gpu'.")
+ parser.add_argument(
+ "--use_trt",
+ type=ast.literal_eval,
+ default=False,
+ help="Wether to use tensorrt.")
+ return parser.parse_args()
+
+
+def build_option(args):
+ option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ option.use_gpu()
+
+ if args.use_trt:
+ option.use_trt_backend()
+ return option
+
+
+args = parse_arguments()
+
+model_file = os.path.join(args.model_dir, "model.pdmodel")
+params_file = os.path.join(args.model_dir, "model.pdiparams")
+config_file = os.path.join(args.model_dir, "infer_cfg.yml")
+
+# Configure runtime and load the model
+runtime_option = build_option(args)
+model = fd.vision.detection.RTMDet(
+    model_file, params_file, config_file, runtime_option=runtime_option)
+
+# Run detection on the image
+im = cv2.imread(args.image)
+result = model.predict(im.copy())
+print(result)
+
+# Visualize the prediction results
+vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result saved in ./visualized_result.jpg")
diff --git a/examples/vision/detection/paddledetection/python/infer_yolov5.py b/examples/vision/detection/paddledetection/python/infer_yolov5.py
new file mode 100644
index 000000000..ea6c764e2
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/infer_yolov5.py
@@ -0,0 +1,59 @@
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model_dir",
+ required=True,
+ help="Path of PaddleDetection model directory")
+ parser.add_argument(
+ "--image", required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu' or 'gpu'.")
+ parser.add_argument(
+ "--use_trt",
+ type=ast.literal_eval,
+ default=False,
+ help="Wether to use tensorrt.")
+ return parser.parse_args()
+
+
+def build_option(args):
+ option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ option.use_gpu()
+
+ if args.use_trt:
+ option.use_trt_backend()
+ return option
+
+
+args = parse_arguments()
+
+model_file = os.path.join(args.model_dir, "model.pdmodel")
+params_file = os.path.join(args.model_dir, "model.pdiparams")
+config_file = os.path.join(args.model_dir, "infer_cfg.yml")
+
+# Configure runtime and load the model
+runtime_option = build_option(args)
+model = fd.vision.detection.PaddleYOLOv5(
+    model_file, params_file, config_file, runtime_option=runtime_option)
+
+# Run detection on the image
+im = cv2.imread(args.image)
+result = model.predict(im.copy())
+print(result)
+
+# Visualize the prediction results
+vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result saved in ./visualized_result.jpg")
diff --git a/examples/vision/detection/paddledetection/python/infer_yolov6.py b/examples/vision/detection/paddledetection/python/infer_yolov6.py
new file mode 100644
index 000000000..81dfab331
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/infer_yolov6.py
@@ -0,0 +1,59 @@
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model_dir",
+ required=True,
+ help="Path of PaddleDetection model directory")
+ parser.add_argument(
+ "--image", required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu' or 'gpu'.")
+ parser.add_argument(
+ "--use_trt",
+ type=ast.literal_eval,
+ default=False,
+ help="Wether to use tensorrt.")
+ return parser.parse_args()
+
+
+def build_option(args):
+ option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ option.use_gpu()
+
+ if args.use_trt:
+ option.use_trt_backend()
+ return option
+
+
+args = parse_arguments()
+
+model_file = os.path.join(args.model_dir, "model.pdmodel")
+params_file = os.path.join(args.model_dir, "model.pdiparams")
+config_file = os.path.join(args.model_dir, "infer_cfg.yml")
+
+# Configure runtime and load the model
+runtime_option = build_option(args)
+model = fd.vision.detection.PaddleYOLOv6(
+    model_file, params_file, config_file, runtime_option=runtime_option)
+
+# Run detection on the image
+im = cv2.imread(args.image)
+result = model.predict(im.copy())
+print(result)
+
+# Visualize the prediction results
+vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result saved in ./visualized_result.jpg")
diff --git a/examples/vision/detection/paddledetection/python/infer_yolov7.py b/examples/vision/detection/paddledetection/python/infer_yolov7.py
new file mode 100644
index 000000000..d06332e02
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/infer_yolov7.py
@@ -0,0 +1,59 @@
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model_dir",
+ required=True,
+ help="Path of PaddleDetection model directory")
+ parser.add_argument(
+ "--image", required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu' or 'gpu'.")
+ parser.add_argument(
+ "--use_trt",
+ type=ast.literal_eval,
+ default=False,
+ help="Wether to use tensorrt.")
+ return parser.parse_args()
+
+
+def build_option(args):
+ option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ option.use_gpu()
+
+ if args.use_trt:
+ option.use_trt_backend()
+ return option
+
+
+args = parse_arguments()
+
+model_file = os.path.join(args.model_dir, "model.pdmodel")
+params_file = os.path.join(args.model_dir, "model.pdiparams")
+config_file = os.path.join(args.model_dir, "infer_cfg.yml")
+
+# Configure runtime and load the model
+runtime_option = build_option(args)
+model = fd.vision.detection.PaddleYOLOv7(
+    model_file, params_file, config_file, runtime_option=runtime_option)
+
+# Run detection on the image
+im = cv2.imread(args.image)
+result = model.predict(im.copy())
+print(result)
+
+# Visualize the prediction results
+vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result saved in ./visualized_result.jpg")
diff --git a/examples/vision/detection/paddledetection/rknpu2/README.md b/examples/vision/detection/paddledetection/rknpu2/README.md
index d5f339db5..d242cf339 100644
--- a/examples/vision/detection/paddledetection/rknpu2/README.md
+++ b/examples/vision/detection/paddledetection/rknpu2/README.md
@@ -45,8 +45,8 @@ model_path: ./picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet.onnx
output_folder: ./picodet_s_416_coco_lcnet
target_platform: RK3568
normalize:
- mean: [[0.485,0.456,0.406],[0,0,0]]
- std: [[0.229,0.224,0.225],[0.003921,0.003921]]
+ mean: [[0.485,0.456,0.406]]
+ std: [[0.229,0.224,0.225]]
outputs: ['tmp_17','p2o.Concat.9']
```
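For clarity, the single `normalize` group corresponds to one per-channel standardization of the model input; a minimal numpy sketch of the arithmetic it describes:

```python
import numpy as np

mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)

def normalize(img_uint8):
    """HxWx3 RGB uint8 -> float32, scaled to [0,1] then standardized per channel."""
    x = img_uint8.astype(np.float32) / 255.0
    return (x - mean) / std
```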
@@ -113,5 +113,7 @@ Preprocess:
type: Resize
```
+## Other Links
+- [C++ Deployment](./cpp)
- [Python Deployment](./python)
- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc b/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc
index 12f405b52..8535aa338 100644
--- a/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc
+++ b/examples/vision/detection/paddledetection/rknpu2/cpp/infer_picodet.cc
@@ -15,26 +15,39 @@
#include "fastdeploy/vision.h"
+
+Note that models deployed on the A311D must be quantized models. For model quantization, please refer to: [Model Quantization](../../../../../../docs/cn/quantize.md)
diff --git a/examples/vision/detection/yolov5/a311d/cpp/infer.cc b/examples/vision/detection/yolov5/a311d/cpp/infer.cc
new file mode 100755
index 000000000..f1cf9e8dc
--- /dev/null
+++ b/examples/vision/detection/yolov5/a311d/cpp/infer.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto subgraph_file = model_dir + sep + "subgraph.txt";
+
+ fastdeploy::RuntimeOption option;
+ option.UseTimVX();
+ option.SetLiteSubgraphPartitionPath(subgraph_file);
+
+ auto model = fastdeploy::vision::detection::YOLOv5(
+ model_file, params_file, option, fastdeploy::ModelFormat::PADDLE);
+ assert(model.Initialized());
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 3) {
+ std::cout << "Usage: infer_demo path/to/quant_model "
+ "path/to/image "
+ "run_option, "
+ "e.g ./infer_demo ./yolov5s_quant ./000000014439.jpg"
+ << std::endl;
+ return -1;
+ }
+
+ std::string model_dir = argv[1];
+ std::string test_image = argv[2];
+ InitAndInfer(model_dir, test_image);
+ return 0;
+}
diff --git a/examples/vision/detection/yolov5/a311d/cpp/run_with_adb.sh b/examples/vision/detection/yolov5/a311d/cpp/run_with_adb.sh
new file mode 100755
index 000000000..aacaed4c5
--- /dev/null
+++ b/examples/vision/detection/yolov5/a311d/cpp/run_with_adb.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+HOST_SPACE=${PWD}
+echo ${HOST_SPACE}
+WORK_SPACE=/data/local/tmp/test
+
+# The first parameter represents the demo name
+DEMO_NAME=image_classification_demo
+if [ -n "$1" ]; then
+ DEMO_NAME=$1
+fi
+
+# The second parameter represents the model name
+MODEL_NAME=mobilenet_v1_fp32_224
+if [ -n "$2" ]; then
+ MODEL_NAME=$2
+fi
+
+# The third parameter indicates the name of the image to be tested
+IMAGE_NAME=0001.jpg
+if [ -n "$3" ]; then
+ IMAGE_NAME=$3
+fi
+
+# The fourth parameter represents the ID of the device
+ADB_DEVICE_NAME=
+if [ -n "$4" ]; then
+ ADB_DEVICE_NAME="-s $4"
+fi
+
+# Set the environment variables required during the running process
+EXPORT_ENVIRONMENT_VARIABLES="export GLOG_v=5; export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1; export VIV_VX_SET_PER_CHANNEL_ENTROPY=100; export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=300000; export VSI_NN_LOG_LEVEL=5;"
+
+EXPORT_ENVIRONMENT_VARIABLES="${EXPORT_ENVIRONMENT_VARIABLES}export LD_LIBRARY_PATH=${WORK_SPACE}/lib:\$LD_LIBRARY_PATH;"
+
+# Please install adb, and DON'T run this in the docker.
+set -e
+adb $ADB_DEVICE_NAME shell "rm -rf $WORK_SPACE"
+adb $ADB_DEVICE_NAME shell "mkdir -p $WORK_SPACE"
+
+# Upload the demo, libraries, model and test images to the device
+adb $ADB_DEVICE_NAME push ${HOST_SPACE}/lib $WORK_SPACE
+adb $ADB_DEVICE_NAME push ${HOST_SPACE}/${DEMO_NAME} $WORK_SPACE
+adb $ADB_DEVICE_NAME push models $WORK_SPACE
+adb $ADB_DEVICE_NAME push images $WORK_SPACE
+
+# Execute the deployment demo
+adb $ADB_DEVICE_NAME shell "cd $WORK_SPACE; ${EXPORT_ENVIRONMENT_VARIABLES} chmod +x ./${DEMO_NAME}; ./${DEMO_NAME} ./models/${MODEL_NAME} ./images/$IMAGE_NAME"
diff --git a/examples/vision/detection/yolov5/cpp/CMakeLists.txt b/examples/vision/detection/yolov5/cpp/CMakeLists.txt
old mode 100644
new mode 100755
index 93540a7e8..2b3f8c54f
--- a/examples/vision/detection/yolov5/cpp/CMakeLists.txt
+++ b/examples/vision/detection/yolov5/cpp/CMakeLists.txt
@@ -12,3 +12,7 @@ include_directories(${FASTDEPLOY_INCS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Add the FastDeploy library dependency
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
+
+add_executable(infer_paddle_demo ${PROJECT_SOURCE_DIR}/infer_paddle_model.cc)
+# Add the FastDeploy library dependency
+target_link_libraries(infer_paddle_demo ${FASTDEPLOY_LIBS})
diff --git a/examples/vision/detection/yolov5/cpp/README.md b/examples/vision/detection/yolov5/cpp/README.md
old mode 100644
new mode 100755
index ece3826a5..581f1c49b
--- a/examples/vision/detection/yolov5/cpp/README.md
+++ b/examples/vision/detection/yolov5/cpp/README.md
@@ -12,16 +12,33 @@
```bash
mkdir build
cd build
-# Download the FastDeploy precompiled library. Users can choose a suitable version from the `FastDeploy precompiled libraries` mentioned above
+# Download the FastDeploy precompiled library; users can choose a suitable version from the `FastDeploy precompiled libraries` mentioned above
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
-# Download the officially converted yolov5 model file and test image
-wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
+# Download the officially converted yolov5 Paddle model files and test image
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_infer.tar
+tar -xvf yolov5s_infer.tar
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+# CPU inference
+./infer_paddle_demo yolov5s_infer 000000014439.jpg 0
+# GPU inference
+./infer_paddle_demo yolov5s_infer 000000014439.jpg 1
+# TensorRT inference on GPU
+./infer_paddle_demo yolov5s_infer 000000014439.jpg 2
+# XPU inference
+./infer_paddle_demo yolov5s_infer 000000014439.jpg 3
+```
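The trailing integer in the commands above selects the backend, as in the demo's `main`. For reference, the equivalent dispatch sketched in Python (the snake_case names are assumed to mirror the C++ `RuntimeOption` calls; verify `set_trt_input_shape` against your installed version):

```python
import fastdeploy as fd

def build_option(run_option: int) -> fd.RuntimeOption:
    option = fd.RuntimeOption()
    if run_option == 1:    # GPU
        option.use_gpu()
    elif run_option == 2:  # GPU + TensorRT backend
        option.use_gpu()
        option.use_trt_backend()
        option.set_trt_input_shape("images", [1, 3, 640, 640])
    elif run_option == 3:  # KunlunXin XPU
        option.use_xpu()
    return option          # run_option == 0: default CPU
```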
+
+The demo commands in the first block run inference with the Paddle model. To run inference with the ONNX model instead, follow these steps:
+```bash
+# 1. Download the officially converted yolov5 ONNX model file and test image
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
# CPU inference
./infer_demo yolov5s.onnx 000000014439.jpg 0
# GPU inference
@@ -29,7 +46,6 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000
# TensorRT inference on GPU
./infer_demo yolov5s.onnx 000000014439.jpg 2
```
-
After running, the visualized result is shown in the image below
diff --git a/examples/vision/detection/yolov5/cpp/infer.cc b/examples/vision/detection/yolov5/cpp/infer.cc
old mode 100644
new mode 100755
index a7ac1fe81..fb20686de
--- a/examples/vision/detection/yolov5/cpp/infer.cc
+++ b/examples/vision/detection/yolov5/cpp/infer.cc
@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -105,4 +102,4 @@ int main(int argc, char* argv[]) {
TrtInfer(argv[1], argv[2]);
}
return 0;
-}
+}
\ No newline at end of file
diff --git a/examples/vision/detection/yolov5/cpp/infer_paddle_model.cc b/examples/vision/detection/yolov5/cpp/infer_paddle_model.cc
new file mode 100755
index 000000000..d5692ce7c
--- /dev/null
+++ b/examples/vision/detection/yolov5/cpp/infer_paddle_model.cc
@@ -0,0 +1,154 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void CpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ fastdeploy::RuntimeOption option;
+ option.UseCpu();
+ auto model = fastdeploy::vision::detection::YOLOv5(
+ model_file, params_file, option, fastdeploy::ModelFormat::PADDLE);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
+
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void GpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ auto model = fastdeploy::vision::detection::YOLOv5(
+ model_file, params_file, option, fastdeploy::ModelFormat::PADDLE);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
+
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void TrtInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ option.UseTrtBackend();
+ option.SetTrtInputShape("images", {1, 3, 640, 640});
+ auto model = fastdeploy::vision::detection::YOLOv5(
+ model_file, params_file, option, fastdeploy::ModelFormat::PADDLE);
+
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::Visualize::VisDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ fastdeploy::RuntimeOption option;
+ option.UseXpu();
+ auto model = fastdeploy::vision::detection::YOLOv5(
+ model_file, params_file, option, fastdeploy::ModelFormat::PADDLE);
+
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
+
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 4) {
+ std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
+ "e.g ./infer_model ./yolov5s_infer ./test.jpeg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with KunlunXin XPU."
+ << std::endl;
+ return -1;
+ }
+
+ if (std::atoi(argv[3]) == 0) {
+ CpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 1) {
+ GpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 2) {
+ TrtInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 3) {
+ XpuInfer(argv[1], argv[2]);
+ }
+ return 0;
+}
diff --git a/examples/vision/detection/yolov5/python/README.md b/examples/vision/detection/yolov5/python/README.md
old mode 100644
new mode 100755
index 4f47b3084..83f6ed781
--- a/examples/vision/detection/yolov5/python/README.md
+++ b/examples/vision/detection/yolov5/python/README.md
@@ -13,15 +13,18 @@ git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/yolov5/python/
# Download the yolov5 model files and test image
-wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_infer.tar
+tar -xf yolov5s_infer.tar
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# CPU inference
-python infer.py --model yolov5s.onnx --image 000000014439.jpg --device cpu
+python infer.py --model yolov5s_infer --image 000000014439.jpg --device cpu
# GPU inference
-python infer.py --model yolov5s.onnx --image 000000014439.jpg --device gpu
+python infer.py --model yolov5s_infer --image 000000014439.jpg --device gpu
# TensorRT inference on GPU
-python infer.py --model yolov5s.onnx --image 000000014439.jpg --device gpu --use_trt True
+python infer.py --model yolov5s_infer --image 000000014439.jpg --device gpu --use_trt True
+# XPU inference
+python infer.py --model yolov5s_infer --image 000000014439.jpg --device xpu
```
After running, the visualized result is shown in the image below
diff --git a/examples/vision/detection/yolov5/python/infer.py b/examples/vision/detection/yolov5/python/infer.py
old mode 100644
new mode 100755
index fdded06c9..b155af0ed
--- a/examples/vision/detection/yolov5/python/infer.py
+++ b/examples/vision/detection/yolov5/python/infer.py
@@ -1,20 +1,20 @@
import fastdeploy as fd
import cv2
+import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
- parser.add_argument(
- "--model", default=None, help="Path of yolov5 onnx model.")
+ parser.add_argument("--model", default=None, help="Path of yolov5 model.")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
parser.add_argument(
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'cpu' or 'gpu' or 'xpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -25,6 +25,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
+ if args.device.lower() == "xpu":
+ option.use_xpu()
if args.device.lower() == "gpu":
option.use_gpu()
@@ -37,14 +39,15 @@ def build_option(args):
args = parse_arguments()
-if args.model is None:
- model = fd.download_model(name='YOLOv5s')
-else:
- model = args.model
-
# Configure runtime and load the model
runtime_option = build_option(args)
-model = fd.vision.detection.YOLOv5(model, runtime_option=runtime_option)
+model_file = os.path.join(args.model, "model.pdmodel")
+params_file = os.path.join(args.model, "model.pdiparams")
+model = fd.vision.detection.YOLOv5(
+ model_file,
+ params_file,
+ runtime_option=runtime_option,
+ model_format=fd.ModelFormat.PADDLE)
# Run detection on the image
if args.image is None:
@@ -52,7 +55,7 @@ if args.image is None:
else:
image = args.image
im = cv2.imread(image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
diff --git a/examples/vision/detection/yolov5/quantize/cpp/infer.cc b/examples/vision/detection/yolov5/quantize/cpp/infer.cc
index e429b2aad..1addf1507 100644
--- a/examples/vision/detection/yolov5/quantize/cpp/infer.cc
+++ b/examples/vision/detection/yolov5/quantize/cpp/infer.cc
@@ -29,7 +29,6 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file,
assert(model.Initialized());
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -39,7 +38,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file,
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/detection/yolov5/quantize/python/infer.py b/examples/vision/detection/yolov5/quantize/python/infer.py
index da502fe93..996bc5419 100644
--- a/examples/vision/detection/yolov5/quantize/python/infer.py
+++ b/examples/vision/detection/yolov5/quantize/python/infer.py
@@ -71,7 +71,7 @@ model = fd.vision.detection.YOLOv5(
# Run detection on the image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
diff --git a/examples/vision/detection/yolov5/rv1126/cpp/README.md b/examples/vision/detection/yolov5/rv1126/cpp/README.md
index 9711577f2..b974a8ebe 100755
--- a/examples/vision/detection/yolov5/rv1126/cpp/README.md
+++ b/examples/vision/detection/yolov5/rv1126/cpp/README.md
@@ -33,7 +33,7 @@ cp -r 000000014439.jpg images
4. Build the deployment example with the following commands:
```bash
mkdir build && cd build
-cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/timvx.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx ..
+cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx -DTARGET_ABI=armhf ..
make -j8
make install
# After a successful build, an install folder is generated, containing the runnable demo and the libraries required for deployment
diff --git a/examples/vision/detection/yolov5/serving/models/postprocess/1/model.py b/examples/vision/detection/yolov5/serving/models/postprocess/1/model.py
index 30a744b68..7c608db43 100644
--- a/examples/vision/detection/yolov5/serving/models/postprocess/1/model.py
+++ b/examples/vision/detection/yolov5/serving/models/postprocess/1/model.py
@@ -61,31 +61,7 @@ class TritonPythonModel:
dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
self.output_dtype.append(dtype)
print("postprocess output names:", self.output_names)
-
- def yolov5_postprocess(self, infer_outputs, im_infos):
- """
- Parameters
- ----------
- infer_outputs : numpy.array
- Contains the batch of inference results
- im_infos : numpy.array(b'{}')
- Returns
- -------
- numpy.array
- yolov5 postprocess result
- """
- results = []
- for i_batch in range(len(im_infos)):
- new_infer_output = infer_outputs[i_batch:i_batch + 1]
- new_im_info = im_infos[i_batch].decode('utf-8').replace("'", '"')
- new_im_info = json.loads(new_im_info)
-
- result = fd.vision.detection.YOLOv5.postprocess(
- [new_infer_output, ], new_im_info)
-
- r_str = fd.vision.utils.fd_result_to_json(result)
- results.append(r_str)
- return np.array(results, dtype=np.object)
+ self.postprocessor_ = fd.vision.detection.YOLOv5Postprocessor()
def execute(self, requests):
"""`execute` must be implemented in every Python model. `execute`
@@ -107,7 +83,6 @@ class TritonPythonModel:
be the same as `requests`
"""
responses = []
- # print("num:", len(requests), flush=True)
for request in requests:
infer_outputs = pb_utils.get_input_tensor_by_name(
request, self.input_names[0])
@@ -115,10 +90,15 @@ class TritonPythonModel:
self.input_names[1])
infer_outputs = infer_outputs.as_numpy()
im_infos = im_infos.as_numpy()
+ for i in range(im_infos.shape[0]):
+ im_infos[i] = json.loads(im_infos[i].decode('utf-8').replace(
+ "'", '"'))
- results = self.yolov5_postprocess(infer_outputs, im_infos)
+ results = self.postprocessor_.run([infer_outputs], im_infos)
+ r_str = fd.vision.utils.fd_result_to_json(results)
+ r_np = np.array(r_str, dtype=np.object)
- out_tensor = pb_utils.Tensor(self.output_names[0], results)
+ out_tensor = pb_utils.Tensor(self.output_names[0], r_np)
inference_response = pb_utils.InferenceResponse(
output_tensors=[out_tensor, ])
responses.append(inference_response)
diff --git a/examples/vision/detection/yolov5/serving/models/preprocess/1/model.py b/examples/vision/detection/yolov5/serving/models/preprocess/1/model.py
index cd22aa37b..cf4f7e8e8 100644
--- a/examples/vision/detection/yolov5/serving/models/preprocess/1/model.py
+++ b/examples/vision/detection/yolov5/serving/models/preprocess/1/model.py
@@ -61,21 +61,7 @@ class TritonPythonModel:
dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
self.output_dtype.append(dtype)
print("preprocess output names:", self.output_names)
-
- def yolov5_preprocess(self, input_data):
- """
- According to Triton input, the preprocessing results of YoloV5 model are obtained.
- """
- im_infos = []
- pre_outputs = []
- for i_batch in input_data:
- pre_output, im_info = fd.vision.detection.YOLOv5.preprocess(
- i_batch)
- pre_outputs.append(pre_output)
- im_infos.append(im_info)
- im_infos = np.array(im_infos, dtype=np.object)
- pre_outputs = np.concatenate(pre_outputs, axis=0)
- return pre_outputs, im_infos
+ self.preprocessor_ = fd.vision.detection.YOLOv5Preprocessor()
def execute(self, requests):
"""`execute` must be implemented in every Python model. `execute`
@@ -97,18 +83,21 @@ class TritonPythonModel:
be the same as `requests`
"""
responses = []
- # print("num:", len(requests), flush=True)
for request in requests:
data = pb_utils.get_input_tensor_by_name(request,
self.input_names[0])
data = data.as_numpy()
- outputs = self.yolov5_preprocess(data)
- output_tensors = []
- for idx, output in enumerate(outputs):
- output_tensors.append(
- pb_utils.Tensor(self.output_names[idx], output))
+ outputs, im_infos = self.preprocessor_.run(data)
+
+            # YOLOv5 preprocess has two outputs
+ dlpack_tensor = outputs[0].to_dlpack()
+ output_tensor_0 = pb_utils.Tensor.from_dlpack(self.output_names[0],
+ dlpack_tensor)
+ output_tensor_1 = pb_utils.Tensor(
+ self.output_names[1], np.array(
+ im_infos, dtype=np.object))
inference_response = pb_utils.InferenceResponse(
- output_tensors=output_tensors)
+ output_tensors=[output_tensor_0, output_tensor_1])
responses.append(inference_response)
return responses
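
On the preprocess side, the image tensor now stays an FDTensor and is handed to Triton through DLPack, which shares the underlying buffer instead of copying it; only the small `im_infos` metadata goes through a regular object array. The pattern in isolation, with placeholder output names (`np.object` in the diff above is a deprecated alias of Python's built-in `object`):

```python
import numpy as np
import triton_python_backend_utils as pb_utils

# outputs, im_infos as returned by fd.vision.detection.YOLOv5Preprocessor().run(...)
capsule = outputs[0].to_dlpack()  # FDTensor -> DLPack capsule, zero-copy
tensor_images = pb_utils.Tensor.from_dlpack("preprocess_output_0", capsule)
tensor_infos = pb_utils.Tensor("preprocess_output_1",
                               np.array(im_infos, dtype=object))  # metadata is copied
```
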
diff --git a/examples/vision/detection/yolov5lite/cpp/infer.cc b/examples/vision/detection/yolov5lite/cpp/infer.cc
index ac32bca93..0a3f7b81b 100644
--- a/examples/vision/detection/yolov5lite/cpp/infer.cc
+++ b/examples/vision/detection/yolov5lite/cpp/infer.cc
@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -47,7 +46,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -56,7 +54,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -74,7 +72,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -83,7 +80,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/detection/yolov5lite/python/infer.py b/examples/vision/detection/yolov5lite/python/infer.py
index b1ec69046..2242a33a5 100644
--- a/examples/vision/detection/yolov5lite/python/infer.py
+++ b/examples/vision/detection/yolov5lite/python/infer.py
@@ -52,7 +52,7 @@ if args.image is None:
else:
image = args.image
im = cv2.imread(image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
diff --git a/examples/vision/detection/yolov6/cpp/infer.cc b/examples/vision/detection/yolov6/cpp/infer.cc
index 72b2e7bed..dbca64f40 100644
--- a/examples/vision/detection/yolov6/cpp/infer.cc
+++ b/examples/vision/detection/yolov6/cpp/infer.cc
@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/detection/yolov6/python/infer.py b/examples/vision/detection/yolov6/python/infer.py
index 0a0a163fb..47bf3e689 100644
--- a/examples/vision/detection/yolov6/python/infer.py
+++ b/examples/vision/detection/yolov6/python/infer.py
@@ -52,7 +52,7 @@ if args.image is None:
else:
image = args.image
im = cv2.imread(image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
diff --git a/examples/vision/detection/yolov6/quantize/cpp/README.md b/examples/vision/detection/yolov6/quantize/cpp/README.md
index 7ad762100..a929f8ced 100755
--- a/examples/vision/detection/yolov6/quantize/cpp/README.md
+++ b/examples/vision/detection/yolov6/quantize/cpp/README.md
@@ -23,7 +23,7 @@ cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
# Download the quantized yolov6s model files and test image provided by FastDeploy
-wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_qat_model.tar
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_qat_model_new.tar
-tar -xvf yolov6s_qat_model.tar
+tar -xvf yolov6s_qat_model_new.tar
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
diff --git a/examples/vision/detection/yolov6/quantize/cpp/infer.cc b/examples/vision/detection/yolov6/quantize/cpp/infer.cc
index b40200962..adda977f3 100644
--- a/examples/vision/detection/yolov6/quantize/cpp/infer.cc
+++ b/examples/vision/detection/yolov6/quantize/cpp/infer.cc
@@ -29,7 +29,6 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file,
assert(model.Initialized());
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -39,7 +38,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file,
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/detection/yolov6/quantize/python/README.md b/examples/vision/detection/yolov6/quantize/python/README.md
index 057e13f9a..e9b80bb6f 100755
--- a/examples/vision/detection/yolov6/quantize/python/README.md
+++ b/examples/vision/detection/yolov6/quantize/python/README.md
@@ -17,7 +17,7 @@ git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/slim/yolov6/python
# Download the quantized yolov6s model files and test image provided by FastDeploy
-wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_qat_model.tar
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_qat_model_new.tar
-tar -xvf yolov6s_qat_model.tar
+tar -xvf yolov6s_qat_model_new.tar
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
diff --git a/examples/vision/detection/yolov6/quantize/python/infer.py b/examples/vision/detection/yolov6/quantize/python/infer.py
index da9fa3d97..77f46d4c2 100644
--- a/examples/vision/detection/yolov6/quantize/python/infer.py
+++ b/examples/vision/detection/yolov6/quantize/python/infer.py
@@ -71,7 +71,7 @@ model = fd.vision.detection.YOLOv6(
# Predict the detection results for the image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
diff --git a/examples/vision/detection/yolov7/cpp/infer.cc b/examples/vision/detection/yolov7/cpp/infer.cc
index cf79a16ad..5fd848b8e 100644
--- a/examples/vision/detection/yolov7/cpp/infer.cc
+++ b/examples/vision/detection/yolov7/cpp/infer.cc
@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/detection/yolov7/python/infer.py b/examples/vision/detection/yolov7/python/infer.py
index b0ece7e47..468b47dc5 100644
--- a/examples/vision/detection/yolov7/python/infer.py
+++ b/examples/vision/detection/yolov7/python/infer.py
@@ -52,7 +52,7 @@ if args.image is None:
else:
image = args.image
im = cv2.imread(image)
-result = model.predict(im.copy())
+result = model.predict(im)
# Visualize the prediction results
vis_im = fd.vision.vis_detection(im, result)
diff --git a/examples/vision/detection/yolov7/quantize/cpp/infer.cc b/examples/vision/detection/yolov7/quantize/cpp/infer.cc
index 0eda80b6c..9ae42b582 100644
--- a/examples/vision/detection/yolov7/quantize/cpp/infer.cc
+++ b/examples/vision/detection/yolov7/quantize/cpp/infer.cc
@@ -29,7 +29,6 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file,
assert(model.Initialized());
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -39,7 +38,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file,
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/detection/yolov7/quantize/python/infer.py b/examples/vision/detection/yolov7/quantize/python/infer.py
index de84e4061..d07834c7c 100644
--- a/examples/vision/detection/yolov7/quantize/python/infer.py
+++ b/examples/vision/detection/yolov7/quantize/python/infer.py
@@ -71,7 +71,7 @@ model = fd.vision.detection.YOLOv7(
# Predict the detection results for the image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
diff --git a/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc b/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc
index a0e70544a..5c9df2816 100644
--- a/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc
+++ b/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc
@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -47,7 +46,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -56,7 +54,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -74,7 +72,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -83,7 +80,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/detection/yolov7end2end_ort/python/infer.py b/examples/vision/detection/yolov7end2end_ort/python/infer.py
index 2b812b71a..914facb59 100644
--- a/examples/vision/detection/yolov7end2end_ort/python/infer.py
+++ b/examples/vision/detection/yolov7end2end_ort/python/infer.py
@@ -44,7 +44,7 @@ model = fd.vision.detection.YOLOv7End2EndORT(
# Predict the detection results for the image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
diff --git a/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc b/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc
index 1c7a17d37..4f7a2f791 100644
--- a/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc
+++ b/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc
@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -47,7 +46,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -56,7 +54,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -74,7 +72,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -83,7 +80,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -107,4 +104,4 @@ int main(int argc, char* argv[]) {
TrtInfer(argv[1], argv[2]);
}
return 0;
-}
\ No newline at end of file
+}
diff --git a/examples/vision/detection/yolov7end2end_trt/python/infer.py b/examples/vision/detection/yolov7end2end_trt/python/infer.py
index d179de06f..b1ad8ea0c 100644
--- a/examples/vision/detection/yolov7end2end_trt/python/infer.py
+++ b/examples/vision/detection/yolov7end2end_trt/python/infer.py
@@ -44,7 +44,7 @@ model = fd.vision.detection.YOLOv7End2EndTRT(
# Predict the detection results for the image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
diff --git a/examples/vision/detection/yolox/cpp/infer.cc b/examples/vision/detection/yolox/cpp/infer.cc
index 2eeaccbf8..836ab7e63 100644
--- a/examples/vision/detection/yolox/cpp/infer.cc
+++ b/examples/vision/detection/yolox/cpp/infer.cc
@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/detection/yolox/python/infer.py b/examples/vision/detection/yolox/python/infer.py
index 69203010b..c5e71754a 100644
--- a/examples/vision/detection/yolox/python/infer.py
+++ b/examples/vision/detection/yolox/python/infer.py
@@ -52,7 +52,7 @@ if args.image is None:
else:
image = args.image
im = cv2.imread(image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
vis_im = fd.vision.vis_detection(im, result)
diff --git a/examples/vision/facealign/face_landmark_1000/cpp/CMakeLists.txt b/examples/vision/facealign/face_landmark_1000/cpp/CMakeLists.txt
index c417fcb38..74e6eb7e7 100644
--- a/examples/vision/facealign/face_landmark_1000/cpp/CMakeLists.txt
+++ b/examples/vision/facealign/face_landmark_1000/cpp/CMakeLists.txt
@@ -10,9 +10,5 @@ include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
include_directories(${FASTDEPLOY_INCS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
-# Add the FastDeploy library dependencies
-if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
- target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags pthread)
-else()
- target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags)
-endif()
+
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} ${GFLAGS_LIBRARIES})
diff --git a/examples/vision/facealign/face_landmark_1000/cpp/README.md b/examples/vision/facealign/face_landmark_1000/cpp/README.md
index 755a20664..00b5391b5 100644
--- a/examples/vision/facealign/face_landmark_1000/cpp/README.md
+++ b/examples/vision/facealign/face_landmark_1000/cpp/README.md
@@ -7,7 +7,7 @@
- 1. The hardware and software environment meets the requirements; refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the prebuilt deployment library and sample code for your development environment; refer to [FastDeploy Prebuilt Libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
-Taking CPU inference on Linux as an example, run the following commands in this directory to complete the compilation test. Supporting this model requires FastDeploy version 0.7.0 or above (x.x.x>=0.7.0)
+Taking CPU inference on Linux as an example, run the following commands in this directory to complete the compilation test. Supporting this model requires FastDeploy version 1.0.2 or above (x.x.x>=1.0.2), or use a nightly build
```bash
mkdir build
diff --git a/examples/vision/facealign/face_landmark_1000/python/infer.py b/examples/vision/facealign/face_landmark_1000/python/infer.py
index bbb3a19c8..9eaedfb63 100644
--- a/examples/vision/facealign/face_landmark_1000/python/infer.py
+++ b/examples/vision/facealign/face_landmark_1000/python/infer.py
@@ -82,7 +82,7 @@ model = fd.vision.facealign.FaceLandmark1000(
# for image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the results
vis_im = fd.vision.vis_face_alignment(im, result)
diff --git a/examples/vision/facealign/pfld/cpp/CMakeLists.txt b/examples/vision/facealign/pfld/cpp/CMakeLists.txt
index c417fcb38..7d1bd2ee1 100755
--- a/examples/vision/facealign/pfld/cpp/CMakeLists.txt
+++ b/examples/vision/facealign/pfld/cpp/CMakeLists.txt
@@ -10,9 +10,4 @@ include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
include_directories(${FASTDEPLOY_INCS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
-# Add the FastDeploy library dependencies
-if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
- target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags pthread)
-else()
- target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags)
-endif()
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} ${GFLAGS_LIBRARIES})
diff --git a/examples/vision/facealign/pfld/cpp/README.md b/examples/vision/facealign/pfld/cpp/README.md
index d061b010a..06ce4a2f4 100644
--- a/examples/vision/facealign/pfld/cpp/README.md
+++ b/examples/vision/facealign/pfld/cpp/README.md
@@ -7,7 +7,7 @@
- 1. The hardware and software environment meets the requirements; refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the prebuilt deployment library and sample code for your development environment; refer to [FastDeploy Prebuilt Libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
-Taking CPU inference on Linux as an example, run the following commands in this directory to complete the compilation test. Supporting this model requires FastDeploy version 0.7.0 or above (x.x.x>=0.7.0)
+Taking CPU inference on Linux as an example, run the following commands in this directory to complete the compilation test. Supporting this model requires FastDeploy version 1.0.2 or above (x.x.x>=1.0.2), or use a nightly build
```bash
mkdir build
diff --git a/examples/vision/facealign/pfld/python/infer.py b/examples/vision/facealign/pfld/python/infer.py
index 622fbf822..0a695e8c6 100755
--- a/examples/vision/facealign/pfld/python/infer.py
+++ b/examples/vision/facealign/pfld/python/infer.py
@@ -80,7 +80,7 @@ model = fd.vision.facealign.PFLD(args.model, runtime_option=runtime_option)
# for image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the results
vis_im = fd.vision.vis_face_alignment(im, result)
diff --git a/examples/vision/facealign/pipnet/cpp/CMakeLists.txt b/examples/vision/facealign/pipnet/cpp/CMakeLists.txt
index c417fcb38..7d1bd2ee1 100644
--- a/examples/vision/facealign/pipnet/cpp/CMakeLists.txt
+++ b/examples/vision/facealign/pipnet/cpp/CMakeLists.txt
@@ -10,9 +10,4 @@ include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
include_directories(${FASTDEPLOY_INCS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
-# Add the FastDeploy library dependencies
-if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
- target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags pthread)
-else()
- target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags)
-endif()
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} ${GFLAGS_LIBRARIES})
diff --git a/examples/vision/facealign/pipnet/python/infer.py b/examples/vision/facealign/pipnet/python/infer.py
index 628b86f79..46f9c2d90 100644
--- a/examples/vision/facealign/pipnet/python/infer.py
+++ b/examples/vision/facealign/pipnet/python/infer.py
@@ -85,7 +85,7 @@ model = fd.vision.facealign.PIPNet(args.model, runtime_option=runtime_option)
model.num_landmarks = args.num_landmarks
# for image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the results
vis_im = fd.vision.vis_face_alignment(im, result)
diff --git a/examples/vision/facedet/retinaface/cpp/infer.cc b/examples/vision/facedet/retinaface/cpp/infer.cc
index a1fd27b6e..f125e4ba7 100644
--- a/examples/vision/facedet/retinaface/cpp/infer.cc
+++ b/examples/vision/facedet/retinaface/cpp/infer.cc
@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::FaceDetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::FaceDetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::FaceDetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/facedet/retinaface/python/infer.py b/examples/vision/facedet/retinaface/python/infer.py
index 3b0152b1c..6b7f63e5e 100644
--- a/examples/vision/facedet/retinaface/python/infer.py
+++ b/examples/vision/facedet/retinaface/python/infer.py
@@ -43,7 +43,7 @@ model = fd.vision.facedet.RetinaFace(args.model, runtime_option=runtime_option)
# Predict the detection results for the image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
vis_im = fd.vision.vis_face_detection(im, result)
diff --git a/examples/vision/facedet/scrfd/cpp/infer.cc b/examples/vision/facedet/scrfd/cpp/infer.cc
index c804218ee..c09403741 100644
--- a/examples/vision/facedet/scrfd/cpp/infer.cc
+++ b/examples/vision/facedet/scrfd/cpp/infer.cc
@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::FaceDetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::FaceDetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
fastdeploy::vision::FaceDetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
- auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
diff --git a/examples/vision/facedet/scrfd/python/infer.py b/examples/vision/facedet/scrfd/python/infer.py
index a99e66385..af141e011 100644
--- a/examples/vision/facedet/scrfd/python/infer.py
+++ b/examples/vision/facedet/scrfd/python/infer.py
@@ -43,7 +43,7 @@ model = fd.vision.facedet.SCRFD(args.model, runtime_option=runtime_option)
# Predict the detection results for the image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the prediction results
vis_im = fd.vision.vis_face_detection(im, result)
diff --git a/examples/vision/facedet/yolov7face/cpp/README.md b/examples/vision/facedet/yolov7face/cpp/README.md
new file mode 100644
--- /dev/null
+++ b/examples/vision/facedet/yolov7face/cpp/README.md
+
+The above commands apply only to Linux and macOS. For how to use the SDK on Windows, refer to:
+- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)
+
+## YOLOv7Face C++ API
+
+### YOLOv7Face class
+
+```c++
+fastdeploy::vision::facedet::YOLOv7Face(
+        const string& model_file,
+        const string& params_file = "",
+        const RuntimeOption& runtime_option = RuntimeOption(),
+        const ModelFormat& model_format = ModelFormat::ONNX)
+```
+
+Loads and initializes the YOLOv7Face model, where model_file is the exported model in ONNX format.
+
+**Parameters**
+
+> * **model_file**(str): path to the model file
+> * **params_file**(str): path to the parameters file; pass an empty string when the model is in ONNX format
+> * **runtime_option**(RuntimeOption): backend inference configuration; defaults to the default configuration
+> * **model_format**(ModelFormat): model format; defaults to ONNX
+
+#### Predict function
+
+> ```c++
+> YOLOv7Face::Predict(cv::Mat* im, FaceDetectionResult* result,
+>                     float conf_threshold = 0.3,
+>                     float nms_iou_threshold = 0.5)
+> ```
+>
+> Model prediction interface: takes an image as input and outputs the detection result directly.
+>
+> **Parameters**
+>
+> > * **im**: input image; note that it must be in HWC, BGR format
+> > * **result**: detection result, including the detection boxes and the confidence of each box; see [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the FaceDetectionResult description
+> > * **conf_threshold**: confidence threshold for filtering detection boxes
+> > * **nms_iou_threshold**: IoU threshold used during NMS
+
+- [Model description](../../)
+- [Python deployment](../python)
+- [Vision model prediction results](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/facedet/yolov7face/cpp/infer.cc b/examples/vision/facedet/yolov7face/cpp/infer.cc
new file mode 100644
index 000000000..973b65be3
--- /dev/null
+++ b/examples/vision/facedet/yolov7face/cpp/infer.cc
@@ -0,0 +1,105 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+void CpuInfer(const std::string& model_file, const std::string& image_file) {
+ auto model = fastdeploy::vision::facedet::YOLOv7Face(model_file);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::FaceDetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void GpuInfer(const std::string& model_file, const std::string& image_file) {
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ auto model = fastdeploy::vision::facedet::YOLOv7Face(model_file, "", option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::FaceDetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void TrtInfer(const std::string& model_file, const std::string& image_file) {
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ option.UseTrtBackend();
+ option.SetTrtInputShape("images", {1, 3, 640, 640});
+ auto model = fastdeploy::vision::facedet::YOLOv7Face(model_file, "", option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::FaceDetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisFaceDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 4) {
+ std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
+                 "e.g. ./infer_demo yolov7-lite-e.onnx ./test.jpeg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu; 2: run with gpu and use tensorrt backend."
+ << std::endl;
+ return -1;
+ }
+
+ if (std::atoi(argv[3]) == 0) {
+ CpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 1) {
+ GpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 2) {
+ TrtInfer(argv[1], argv[2]);
+ }
+ return 0;
+}
diff --git a/examples/vision/facedet/yolov7face/python/README.md b/examples/vision/facedet/yolov7face/python/README.md
new file mode 100644
index 000000000..0c4dd0f57
--- /dev/null
+++ b/examples/vision/facedet/yolov7face/python/README.md
@@ -0,0 +1,87 @@
+# YOLOv7Face Python Deployment Example
+
+Before deployment, confirm the following two steps
+
+- 1. The hardware and software environment meets the requirements; refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. The FastDeploy Python whl package is installed; refer to [FastDeploy Python Installation](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+
+This directory provides `infer.py`, which quickly completes the deployment of YOLOv7Face on CPU/GPU, and on GPU with TensorRT acceleration. Run the following script to complete it
+
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd examples/vision/facedet/yolov7face/python/
+
+# Download the YOLOv7Face model file and test image
+wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7-lite-e.onnx
+
+# Use the yolov7-tiny-face.onnx model
+# CPU inference
+python infer.py --model yolov7-tiny-face.onnx --image test_lite_face_detector_3.jpg --device cpu
+# GPU inference
+python infer.py --model yolov7-tiny-face.onnx --image test_lite_face_detector_3.jpg --device gpu
+# TensorRT inference on GPU
+python infer.py --model yolov7-tiny-face.onnx --image test_lite_face_detector_3.jpg --device gpu --use_trt True
+
+# Use the yolov7-lite-e.onnx model
+# CPU inference
+python infer.py --model yolov7-lite-e.onnx --image test_lite_face_detector_3.jpg --device cpu
+# GPU inference
+python infer.py --model yolov7-lite-e.onnx --image test_lite_face_detector_3.jpg --device gpu
+# TensorRT inference on GPU
+python infer.py --model yolov7-lite-e.onnx --image test_lite_face_detector_3.jpg --device gpu --use_trt True
+```
+
+After running, the visualized result is shown in the image below
+
+
+
+## YOLOv7Face Python API
+
+```python
+fastdeploy.vision.facedet.YOLOv7Face(model_file, params_file=None, runtime_option=None, model_format=ModelFormat.ONNX)
+```
+
+Loads and initializes the YOLOv7Face model, where model_file is the exported model in ONNX format
+
+**Parameters**
+
+> * **model_file**(str): path to the model file
+> * **params_file**(str): path to the parameters file; when the model is in ONNX format, this parameter does not need to be set
+> * **runtime_option**(RuntimeOption): backend inference configuration; defaults to None, i.e. the default configuration is used
+> * **model_format**(ModelFormat): model format; defaults to ONNX
+
+### predict function
+
+> ```python
+> YOLOv7Face.predict(image_data, conf_threshold=0.3, nms_iou_threshold=0.5)
+> ```
+>
+> Model prediction interface: takes an image as input and outputs the detection results directly.
+>
+> **Parameters**
+>
+> > * **image_data**(np.ndarray): input data; note that it must be in HWC, BGR format
+> > * **conf_threshold**(float): confidence threshold for filtering detection boxes
+> > * **nms_iou_threshold**(float): IoU threshold used during NMS
+
+> **Returns**
+>
+> > Returns a `fastdeploy.vision.FaceDetectionResult` structure; see [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for its description
+
+### Class member properties
+#### Preprocess parameters
+Users can modify the following preprocess parameters as needed to affect the final inference and deployment results (a usage sketch follows this list)
+
+> > * **size**(list[int]): the target size of the resize during preprocessing, two integers [width, height]; default [640, 640]
+> > * **padding_value**(list[float]): the value used to pad the image during the resize, three floats, one per channel; default [114, 114, 114]
+> > * **is_no_pad**(bool): whether the image is resized without padding; `is_no_pad=True` means no padding is used; default `is_no_pad=False`
+> > * **is_mini_pad**(bool): resizes the image so that its width and height are as close as possible to `size` while the padded pixels are divisible by the `stride` member; default `is_mini_pad=False`
+> > * **stride**(int): used together with the `is_mini_pad` member; default `stride=32`
+
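+For example, a minimal sketch of overriding these preprocess parameters before prediction (the values below are illustrative, not tuned recommendations):
+
+```python
+import cv2
+import fastdeploy as fd
+
+model = fd.vision.facedet.YOLOv7Face("yolov7-lite-e.onnx")
+model.size = [640, 640]                      # resize target, [width, height]
+model.padding_value = [114.0, 114.0, 114.0]  # per-channel letterbox fill value
+model.is_mini_pad = True                     # pad only up to the nearest multiple of stride
+result = model.predict(cv2.imread("test_lite_face_detector_3.jpg"))
+```
+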
+## Other documents
+
+- [YOLOv7Face model description](..)
+- [YOLOv7Face C++ deployment](../cpp)
+- [Model prediction result description](../../../../../docs/api/vision_results/)
diff --git a/examples/vision/facedet/yolov7face/python/infer.py b/examples/vision/facedet/yolov7face/python/infer.py
new file mode 100644
index 000000000..d06b507ee
--- /dev/null
+++ b/examples/vision/facedet/yolov7face/python/infer.py
@@ -0,0 +1,51 @@
+import fastdeploy as fd
+import cv2
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model", required=True, help="Path of yolov7face onnx model.")
+ parser.add_argument(
+ "--image", required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu' or 'gpu'.")
+ parser.add_argument(
+ "--use_trt",
+ type=ast.literal_eval,
+ default=False,
+        help="Whether to use TensorRT.")
+ return parser.parse_args()
+
+
+def build_option(args):
+ option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ option.use_gpu()
+
+ if args.use_trt:
+ option.use_trt_backend()
+ option.set_trt_input_shape("images", [1, 3, 640, 640])
+ return option
+
+
+args = parse_arguments()
+
+# Configure runtime and load the model
+runtime_option = build_option(args)
+model = fd.vision.facedet.YOLOv7Face(args.model, runtime_option=runtime_option)
+
+# Predict image detection results
+im = cv2.imread(args.image)
+result = model.predict(im)
+print(result)
+# Visualize the prediction results
+vis_im = fd.vision.vis_face_detection(im, result)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result save in ./visualized_result.jpg")
diff --git a/examples/vision/headpose/fsanet/cpp/CMakeLists.txt b/examples/vision/headpose/fsanet/cpp/CMakeLists.txt
index c417fcb38..7d1bd2ee1 100755
--- a/examples/vision/headpose/fsanet/cpp/CMakeLists.txt
+++ b/examples/vision/headpose/fsanet/cpp/CMakeLists.txt
@@ -10,9 +10,4 @@ include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
include_directories(${FASTDEPLOY_INCS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
-# Add the FastDeploy library dependencies
-if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
- target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags pthread)
-else()
- target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags)
-endif()
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} ${GFLAGS_LIBRARIES})
diff --git a/examples/vision/headpose/fsanet/cpp/README.md b/examples/vision/headpose/fsanet/cpp/README.md
index 51e0a179d..1d1b1e943 100755
--- a/examples/vision/headpose/fsanet/cpp/README.md
+++ b/examples/vision/headpose/fsanet/cpp/README.md
@@ -7,7 +7,7 @@
- 1. The hardware and software environment meets the requirements; refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the prebuilt deployment library and sample code for your development environment; refer to [FastDeploy Prebuilt Libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
-Taking CPU inference on Linux as an example, run the following commands in this directory to complete the compilation test. Supporting this model requires FastDeploy version 0.7.0 or above (x.x.x>=0.7.0)
+Taking CPU inference on Linux as an example, run the following commands in this directory to complete the compilation test. Supporting this model requires FastDeploy version 1.0.2 or above (x.x.x>=1.0.2), or use a nightly build
```bash
mkdir build
diff --git a/examples/vision/headpose/fsanet/python/infer.py b/examples/vision/headpose/fsanet/python/infer.py
index 866ce6d5c..488e35153 100644
--- a/examples/vision/headpose/fsanet/python/infer.py
+++ b/examples/vision/headpose/fsanet/python/infer.py
@@ -80,7 +80,7 @@ model = fd.vision.headpose.FSANet(args.model, runtime_option=runtime_option)
# for image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the results
vis_im = fd.vision.vis_headpose(im, result)
diff --git a/examples/vision/matting/modnet/cpp/infer.cc b/examples/vision/matting/modnet/cpp/infer.cc
index fe1ebc910..aa280eddd 100644
--- a/examples/vision/matting/modnet/cpp/infer.cc
+++ b/examples/vision/matting/modnet/cpp/infer.cc
@@ -23,7 +23,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file,
}
model.size = {256, 256};
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
cv::Mat bg = cv::imread(background_file);
fastdeploy::vision::MattingResult res;
@@ -32,9 +31,9 @@ void CpuInfer(const std::string& model_file, const std::string& image_file,
return;
}
- auto vis_im = fastdeploy::vision::VisMatting(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisMatting(im, res);
auto vis_im_with_bg =
- fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res);
+ fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
@@ -54,7 +53,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file,
model.size = {256, 256};
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
cv::Mat bg = cv::imread(background_file);
fastdeploy::vision::MattingResult res;
@@ -63,9 +61,9 @@ void GpuInfer(const std::string& model_file, const std::string& image_file,
return;
}
- auto vis_im = fastdeploy::vision::VisMatting(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisMatting(im, res);
auto vis_im_with_bg =
- fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res);
+ fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
@@ -86,7 +84,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file,
}
model.size = {256, 256};
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
cv::Mat bg = cv::imread(background_file);
fastdeploy::vision::MattingResult res;
@@ -95,9 +92,9 @@ void TrtInfer(const std::string& model_file, const std::string& image_file,
return;
}
- auto vis_im = fastdeploy::vision::VisMatting(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisMatting(im, res);
auto vis_im_with_bg =
- fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res);
+ fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
diff --git a/examples/vision/matting/modnet/python/infer.py b/examples/vision/matting/modnet/python/infer.py
index 408ba2340..37c749010 100644
--- a/examples/vision/matting/modnet/python/infer.py
+++ b/examples/vision/matting/modnet/python/infer.py
@@ -52,11 +52,11 @@ model.size = (256, 256)
# Predict the matting result for the image
im = cv2.imread(args.image)
bg = cv2.imread(args.bg)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the results
vis_im = fd.vision.vis_matting_alpha(im, result)
-vis_im_with_bg = fd.vision.swap_background_matting(im, bg, result)
+vis_im_with_bg = fd.vision.swap_background(im, bg, result)
cv2.imwrite("visualized_result_fg.jpg", vis_im)
cv2.imwrite("visualized_result_replaced_bg.jpg", vis_im_with_bg)
print(
diff --git a/examples/vision/matting/ppmatting/cpp/infer.cc b/examples/vision/matting/ppmatting/cpp/infer.cc
index 304e4239a..2acb2a8ca 100644
--- a/examples/vision/matting/ppmatting/cpp/infer.cc
+++ b/examples/vision/matting/ppmatting/cpp/infer.cc
@@ -35,16 +35,15 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file,
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
cv::Mat bg = cv::imread(background_file);
fastdeploy::vision::MattingResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
- auto vis_im = fastdeploy::vision::VisMatting(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisMatting(im, res);
auto vis_im_with_bg =
- fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res);
+ fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
@@ -69,16 +68,15 @@ void GpuInfer(const std::string& model_dir, const std::string& image_file,
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
cv::Mat bg = cv::imread(background_file);
fastdeploy::vision::MattingResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
- auto vis_im = fastdeploy::vision::VisMatting(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisMatting(im, res);
auto vis_im_with_bg =
- fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res);
+ fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
@@ -104,16 +102,15 @@ void TrtInfer(const std::string& model_dir, const std::string& image_file,
}
auto im = cv::imread(image_file);
- auto im_bak = im.clone();
cv::Mat bg = cv::imread(background_file);
fastdeploy::vision::MattingResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
- auto vis_im = fastdeploy::vision::VisMatting(im_bak, res);
+ auto vis_im = fastdeploy::vision::VisMatting(im, res);
auto vis_im_with_bg =
- fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res);
+ fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
diff --git a/examples/vision/matting/ppmatting/python/infer.py b/examples/vision/matting/ppmatting/python/infer.py
index 61031e1b6..89913cd11 100644
--- a/examples/vision/matting/ppmatting/python/infer.py
+++ b/examples/vision/matting/ppmatting/python/infer.py
@@ -56,11 +56,11 @@ model = fd.vision.matting.PPMatting(
# Predict the matting result for the image
im = cv2.imread(args.image)
bg = cv2.imread(args.bg)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the results
vis_im = fd.vision.vis_matting(im, result)
-vis_im_with_bg = fd.vision.swap_background_matting(im, bg, result)
+vis_im_with_bg = fd.vision.swap_background(im, bg, result)
cv2.imwrite("visualized_result_fg.jpg", vis_im)
cv2.imwrite("visualized_result_replaced_bg.jpg", vis_im_with_bg)
print(
diff --git a/examples/vision/matting/rvm/python/infer.py b/examples/vision/matting/rvm/python/infer.py
index 11951b00f..0e9eb6b21 100755
--- a/examples/vision/matting/rvm/python/infer.py
+++ b/examples/vision/matting/rvm/python/infer.py
@@ -73,7 +73,7 @@ if args.video is not None:
# for video
cap = cv2.VideoCapture(args.video)
# Define the codec and create VideoWriter object
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+ fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
composition = cv2.VideoWriter(output_composition, fourcc, 20.0,
(1080, 1920))
alpha = cv2.VideoWriter(output_alpha, fourcc, 20.0, (1080, 1920))
@@ -86,7 +86,7 @@ if args.video is not None:
break
result = model.predict(frame)
vis_im = fd.vision.vis_matting(frame, result)
- vis_im_with_bg = fd.vision.swap_background_matting(frame, bg, result)
+ vis_im_with_bg = fd.vision.swap_background(frame, bg, result)
alpha.write(vis_im)
composition.write(vis_im_with_bg)
cv2.waitKey(30)
@@ -100,11 +100,11 @@ if args.video is not None:
if args.image is not None:
# for image
im = cv2.imread(args.image)
- result = model.predict(im.copy())
+ result = model.predict(im)
print(result)
# Visualize the results
vis_im = fd.vision.vis_matting(im, result)
- vis_im_with_bg = fd.vision.swap_background_matting(im, bg, result)
+ vis_im_with_bg = fd.vision.swap_background(im, bg, result)
cv2.imwrite("visualized_result_fg.jpg", vis_im)
cv2.imwrite("visualized_result_replaced_bg.jpg", vis_im_with_bg)
print(
diff --git a/examples/vision/ocr/PP-OCRv2/cpp/README.md b/examples/vision/ocr/PP-OCRv2/cpp/README.md
index 965ece716..1bb794f5e 100644
--- a/examples/vision/ocr/PP-OCRv2/cpp/README.md
+++ b/examples/vision/ocr/PP-OCRv2/cpp/README.md
@@ -82,6 +82,7 @@ Initialization of PPOCRv2, composed of the detection and recognition models chained in series (no classifier)
> ```
> bool Predict(cv::Mat* img, fastdeploy::vision::OCRResult* result);
+> bool Predict(const cv::Mat& img, fastdeploy::vision::OCRResult* result);
> ```
>
> Model prediction interface: takes one image as input and returns the OCR prediction result
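+>
+> For example, a minimal sketch of calling the new const-reference overload (assuming `ppocr_v2` is an initialized pipeline and `img` a loaded cv::Mat):
+>
+> ```
+> fastdeploy::vision::OCRResult result;
+> if (ppocr_v2.Predict(img, &result)) {  // const cv::Mat& overload
+>   std::cout << result.Str() << std::endl;
+> }
+> ```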
diff --git a/examples/vision/ocr/PP-OCRv2/cpp/infer.cc b/examples/vision/ocr/PP-OCRv2/cpp/infer.cc
index 7bac320d5..6cde6390f 100644
--- a/examples/vision/ocr/PP-OCRv2/cpp/infer.cc
+++ b/examples/vision/ocr/PP-OCRv2/cpp/infer.cc
@@ -33,13 +33,18 @@ void InitAndInfer(const std::string& det_model_dir, const std::string& cls_model
auto cls_option = option;
auto rec_option = option;
+  // The cls and rec models can now run inference on a batch of images.
+  // Users can define the inference batch sizes here and set them after creating the PPOCR model.
+ int cls_batch_size = 1;
+ int rec_batch_size = 6;
+
// If the TRT backend is used, the dynamic shape will be set as follows.
// We recommend that users set the length and height of the detection model to a multiple of 32.
det_option.SetTrtInputShape("x", {1, 3, 64,64}, {1, 3, 640, 640},
{1, 3, 960, 960});
- cls_option.SetTrtInputShape("x", {1, 3, 48, 10}, {10, 3, 48, 320}, {32, 3, 48, 1024});
- rec_option.SetTrtInputShape("x", {1, 3, 32, 10}, {10, 3, 32, 320},
- {32, 3, 32, 2304});
+ cls_option.SetTrtInputShape("x", {1, 3, 48, 10}, {cls_batch_size, 3, 48, 320}, {cls_batch_size, 3, 48, 1024});
+ rec_option.SetTrtInputShape("x", {1, 3, 32, 10}, {rec_batch_size, 3, 32, 320},
+ {rec_batch_size, 3, 32, 2304});
// Users can save the TRT cache file to disk as follows.
// det_option.SetTrtCacheFile(det_model_dir + sep + "det_trt_cache.trt");
@@ -58,6 +63,12 @@ void InitAndInfer(const std::string& det_model_dir, const std::string& cls_model
// auto ppocr_v2 = fastdeploy::pipeline::PPOCRv2(&det_model, &rec_model);
auto ppocr_v2 = fastdeploy::pipeline::PPOCRv2(&det_model, &cls_model, &rec_model);
+  // Set the inference batch size for the cls and rec models; the value can be
+  // -1 or any positive integer. When it is set to -1, the cls and rec models
+  // use a batch size equal to the number of boxes detected by the det model.
+ ppocr_v2.SetClsBatchSize(cls_batch_size);
+ ppocr_v2.SetRecBatchSize(rec_batch_size);
+
if(!ppocr_v2.Initialized()){
std::cerr << "Failed to initialize PP-OCR." << std::endl;
return;
diff --git a/examples/vision/ocr/PP-OCRv2/python/infer.py b/examples/vision/ocr/PP-OCRv2/python/infer.py
index af915143a..1487d795f 100644
--- a/examples/vision/ocr/PP-OCRv2/python/infer.py
+++ b/examples/vision/ocr/PP-OCRv2/python/infer.py
@@ -106,6 +106,11 @@ rec_label_file = args.rec_label_file
# Users can also configure each option separately as needed
runtime_option = build_option(args)
+# The cls and rec models of PPOCR now support inference on a batch of data
+# The two variables below are used to set the TRT input shapes and, after the PPOCR model is initialized, to complete the batch-inference setup
+cls_batch_size = 1
+rec_batch_size = 6
+
# When using TRT, set a dynamic shape for each of the three models' runtimes, then create the models.
# Note: set the classifier's dynamic input and create the classifier only after the detection model has been created; the same applies to the recognition model.
# If users want to change the detection model's input shape, we recommend setting the detection model's length and height to multiples of 32.
@@ -118,16 +123,18 @@ det_model = fd.vision.ocr.DBDetector(
det_model_file, det_params_file, runtime_option=det_option)
cls_option = runtime_option
-cls_option.set_trt_input_shape("x", [1, 3, 48, 10], [10, 3, 48, 320],
- [32, 3, 48, 1024])
+cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [cls_batch_size, 3, 48, 320],
+ [cls_batch_size, 3, 48, 1024])
# Users can save the TRT engine file locally
# cls_option.set_trt_cache_file(args.cls_model + "/cls_trt_cache.trt")
cls_model = fd.vision.ocr.Classifier(
cls_model_file, cls_params_file, runtime_option=cls_option)
rec_option = runtime_option
-rec_option.set_trt_input_shape("x", [1, 3, 32, 10], [10, 3, 32, 320],
- [32, 3, 32, 2304])
+rec_option.set_trt_input_shape("x", [1, 3, 32, 10],
+ [rec_batch_size, 3, 32, 320],
+ [rec_batch_size, 3, 32, 2304])
# Users can save the TRT engine file locally
# rec_option.set_trt_cache_file(args.rec_model + "/rec_trt_cache.trt")
rec_model = fd.vision.ocr.Recognizer(
@@ -137,6 +144,12 @@ rec_model = fd.vision.ocr.Recognizer(
ppocr_v2 = fd.vision.ocr.PPOCRv2(
det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+# Set the inference batch size for the cls and rec models
+# The value can be -1, or any value from 1 to positive infinity
+# When set to -1, the batch size of the cls and rec models defaults to the number of boxes detected by the det model
+ppocr_v2.cls_batch_size = cls_batch_size
+ppocr_v2.rec_batch_size = rec_batch_size
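+# For example, setting ppocr_v2.cls_batch_size = -1 here would instead match the
+# cls batch size to the number of boxes returned by the det model.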
+
# Prepare the image for prediction
im = cv2.imread(args.image)
diff --git a/examples/vision/ocr/PP-OCRv3/cpp/infer.cc b/examples/vision/ocr/PP-OCRv3/cpp/infer.cc
index 911b311e3..90b77679f 100644
--- a/examples/vision/ocr/PP-OCRv3/cpp/infer.cc
+++ b/examples/vision/ocr/PP-OCRv3/cpp/infer.cc
@@ -33,13 +33,19 @@ void InitAndInfer(const std::string& det_model_dir, const std::string& cls_model
auto cls_option = option;
auto rec_option = option;
+  // The cls and rec models can now run inference on a batch of images.
+  // Users can define the inference batch sizes here and set them after creating the PPOCR model.
+ int cls_batch_size = 1;
+ int rec_batch_size = 6;
+
// If the TRT backend is used, the dynamic shape will be set as follows.
// We recommend that users set the length and height of the detection model to a multiple of 32.
+  // We also recommend that users set the TRT input shapes as follows.
det_option.SetTrtInputShape("x", {1, 3, 64,64}, {1, 3, 640, 640},
{1, 3, 960, 960});
- cls_option.SetTrtInputShape("x", {1, 3, 48, 10}, {10, 3, 48, 320}, {64, 3, 48, 1024});
- rec_option.SetTrtInputShape("x", {1, 3, 48, 10}, {10, 3, 48, 320},
- {64, 3, 48, 2304});
+ cls_option.SetTrtInputShape("x", {1, 3, 48, 10}, {cls_batch_size, 3, 48, 320}, {cls_batch_size, 3, 48, 1024});
+ rec_option.SetTrtInputShape("x", {1, 3, 48, 10}, {rec_batch_size, 3, 48, 320},
+ {rec_batch_size, 3, 48, 2304});
// Users can save the TRT cache file to disk as follows.
// det_option.SetTrtCacheFile(det_model_dir + sep + "det_trt_cache.trt");
@@ -57,6 +63,12 @@ void InitAndInfer(const std::string& det_model_dir, const std::string& cls_model
// The classification model is optional, so the PP-OCR pipeline can also be set up without it, as follows
// auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &rec_model);
auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &cls_model, &rec_model);
+
+ // Set the inference batch size for the cls and rec models; the value can be -1, or any integer from 1 upwards.
+ // When inference batch size is set to -1, it means that the inference batch size
+ // of the cls and rec models will be the same as the number of boxes detected by the det model.
+ ppocr_v3.SetClsBatchSize(cls_batch_size);
+ ppocr_v3.SetRecBatchSize(rec_batch_size);
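+ // e.g. ppocr_v3.SetClsBatchSize(-1); // batch size then follows the number of det boxes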
if(!ppocr_v3.Initialized()){
std::cerr << "Failed to initialize PP-OCR." << std::endl;
diff --git a/examples/vision/ocr/PP-OCRv3/python/infer.py b/examples/vision/ocr/PP-OCRv3/python/infer.py
index b6b27b240..1ec962cb5 100644
--- a/examples/vision/ocr/PP-OCRv3/python/infer.py
+++ b/examples/vision/ocr/PP-OCRv3/python/infer.py
@@ -106,6 +106,11 @@ rec_label_file = args.rec_label_file
# Users can also configure the option for each model separately as needed
runtime_option = build_option(args)
+# The cls and rec models of PP-OCR now support batch inference.
+# The two variables below are used to set the TRT input shapes and, after the PP-OCR pipeline is created, to configure batch inference.
+cls_batch_size = 1
+rec_batch_size = 6
+
# When using TRT, set the dynamic shape for each of the three models' runtimes, then create the models.
# Note: create the detection model first, then set the classification model's dynamic input and create it; likewise for the recognition model.
# If you want to change the detection model's input shape, we recommend setting its height and width to multiples of 32.
@@ -118,16 +123,18 @@ det_model = fd.vision.ocr.DBDetector(
det_model_file, det_params_file, runtime_option=det_option)
cls_option = runtime_option
-cls_option.set_trt_input_shape("x", [1, 3, 48, 10], [10, 3, 48, 320],
- [64, 3, 48, 1024])
+cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [cls_batch_size, 3, 48, 320],
+ [cls_batch_size, 3, 48, 1024])
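+# (set_trt_input_shape takes: input name, min shape, opt shape, max shape)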
# Users can save the TRT engine file to disk
# cls_option.set_trt_cache_file(args.cls_model + "/cls_trt_cache.trt")
cls_model = fd.vision.ocr.Classifier(
cls_model_file, cls_params_file, runtime_option=cls_option)
rec_option = runtime_option
-rec_option.set_trt_input_shape("x", [1, 3, 48, 10], [10, 3, 48, 320],
- [64, 3, 48, 2304])
+rec_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [rec_batch_size, 3, 48, 320],
+ [rec_batch_size, 3, 48, 2304])
# Users can save the TRT engine file to disk
# rec_option.set_trt_cache_file(args.rec_model + "/rec_trt_cache.trt")
rec_model = fd.vision.ocr.Recognizer(
@@ -137,6 +144,12 @@ rec_model = fd.vision.ocr.Recognizer(
ppocr_v3 = fd.vision.ocr.PPOCRv3(
det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+# Set the inference batch size for the cls and rec models.
+# The value can be -1, or any integer from 1 upwards.
+# When set to -1, the batch size of the cls and rec models defaults to the number of boxes detected by the det model.
+ppocr_v3.cls_batch_size = cls_batch_size
+ppocr_v3.rec_batch_size = rec_batch_size
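+# e.g. ppocr_v3.cls_batch_size = -1  # batch size then follows the number of det boxes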
+
# Prepare the image for prediction
im = cv2.imread(args.image)
diff --git a/examples/vision/segmentation/paddleseg/a311d/README.md b/examples/vision/segmentation/paddleseg/a311d/README.md
new file mode 100755
index 000000000..f65172cdd
--- /dev/null
+++ b/examples/vision/segmentation/paddleseg/a311d/README.md
@@ -0,0 +1,11 @@
+# Deploying Quantized PP-LiteSeg Models on the A311D
+FastDeploy now supports deploying quantized PP-LiteSeg models to the A311D via Paddle Lite.
+
+For model quantization and downloading quantized models, refer to: [Model Quantization](../quantize/README.md)
+
+
+## Detailed Deployment Documents
+
+Only C++ deployment is supported on the A311D.
+
+- [C++ Deployment](cpp)
diff --git a/examples/vision/segmentation/paddleseg/a311d/cpp/CMakeLists.txt b/examples/vision/segmentation/paddleseg/a311d/cpp/CMakeLists.txt
new file mode 100755
index 000000000..baaf8331f
--- /dev/null
+++ b/examples/vision/segmentation/paddleseg/a311d/cpp/CMakeLists.txt
@@ -0,0 +1,38 @@
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+
+# Specify the path to the downloaded and extracted FastDeploy SDK
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add FastDeploy dependency headers
+include_directories(${FASTDEPLOY_INCS})
+include_directories(${FastDeploy_INCLUDE_DIRS})
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+# Link the FastDeploy library
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
+
+set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
+
+install(TARGETS infer_demo DESTINATION ./)
+
+install(DIRECTORY models DESTINATION ./)
+install(DIRECTORY images DESTINATION ./)
+# install(DIRECTORY run_with_adb.sh DESTINATION ./)
+
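+# Bundle all runtime dependencies (FastDeploy, OpenCV, Paddle Lite, TIM-VX) into
+# install/lib so that run_with_adb.sh can push them to the device in one go.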
+file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
+install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
+
+file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
+install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
+
+file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
+install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
+
+file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
+install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
+
+file(GLOB ADB_TOOLS run_with_adb.sh)
+install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)
diff --git a/examples/vision/segmentation/paddleseg/a311d/cpp/README.md b/examples/vision/segmentation/paddleseg/a311d/cpp/README.md
new file mode 100755
index 000000000..872784188
--- /dev/null
+++ b/examples/vision/segmentation/paddleseg/a311d/cpp/README.md
@@ -0,0 +1,54 @@
+# C++ Deployment Example for Quantized PP-LiteSeg Models
+
+The `infer.cc` in this directory helps users quickly deploy quantized PP-LiteSeg models on the A311D with accelerated inference.
+
+## Deployment Preparation
+### FastDeploy Cross-Compilation Environment
+- 1. Make sure the software and hardware requirements are met and the cross-compilation environment is prepared; refer to: [FastDeploy Cross-Compilation Environment Setup](../../../../../../docs/cn/build_and_install/a311d.md#交叉编译环境搭建)
+
+### Model Preparation
+- 1. Users can directly deploy the quantized models provided by FastDeploy.
+- 2. Users can also quantize a model themselves with FastDeploy's one-click auto-compression tool and deploy the resulting quantized model. (Note: the quantized segmentation model still needs the deploy.yaml file from the FP32 model folder; a self-quantized model folder does not contain this file, so copy it over from the FP32 folder, as in the sketch after this list.)
+- For more information about quantization, see [Model Quantization](../../quantize/README.md)
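+
+A minimal sketch of the deploy.yaml copy mentioned in item 2, assuming a hypothetical FP32 model folder `PP_LiteSeg_fp32` next to the quantized folder `ppliteseg`:
+```bash
+# Folder names are illustrative; deploy.yaml is not produced by quantization,
+# so reuse the one shipped with the FP32 model.
+cp PP_LiteSeg_fp32/deploy.yaml ppliteseg/deploy.yaml
+```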
+
+## Deploying the Quantized PP-LiteSeg Segmentation Model on the A311D
+Follow the steps below to deploy the quantized PP-LiteSeg model on the A311D:
+1. Cross-compile the FastDeploy library; for details, refer to: [Cross-Compiling FastDeploy](../../../../../../docs/cn/build_and_install/a311d.md#基于-paddlelite-的-fastdeploy-交叉编译库编译)
+
+2. Copy the compiled library into the current directory, for example:
+```bash
+cp -r FastDeploy/build/fastdeploy-tmivx/ FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp
+```
+
+3. Download the model and sample image needed for deployment into the current directory:
+```bash
+mkdir models && mkdir images
+wget https://bj.bcebos.com/fastdeploy/models/rk1/ppliteseg.tar.gz
+tar -xvf ppliteseg.tar.gz
+cp -r ppliteseg models
+wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
+cp -r cityscapes_demo.png images
+```
+
+4. Build the deployment example with the following commands:
+```bash
+mkdir build && cd build
+cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx -DTARGET_ABI=arm64 ..
+make -j8
+make install
+# After a successful build, an install folder is generated, containing the runnable demo and the libraries needed for deployment
+```
+
+5. Deploy the PP-LiteSeg segmentation model to the Amlogic A311D with the adb tool, using the following commands:
+```bash
+# Enter the install directory
+cd FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp/build/install/
+# The command below reads: bash run_with_adb.sh <demo to run> <model path> <image path> <device DEVICE_ID>
+bash run_with_adb.sh infer_demo ppliteseg cityscapes_demo.png $DEVICE_ID
+```
+
+After successful deployment, the demo output looks like:
+
+
+
+Note in particular that models deployed on the A311D must be quantized; for model quantization, refer to: [Model Quantization](../../../../../../docs/cn/quantize.md)
diff --git a/examples/vision/segmentation/paddleseg/a311d/cpp/infer.cc b/examples/vision/segmentation/paddleseg/a311d/cpp/infer.cc
new file mode 100755
index 000000000..b6138e8fb
--- /dev/null
+++ b/examples/vision/segmentation/paddleseg/a311d/cpp/infer.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
+ auto model_file = model_dir + sep + "model.pdmodel";
+ auto params_file = model_dir + sep + "model.pdiparams";
+ auto config_file = model_dir + sep + "deploy.yaml";
+ auto subgraph_file = model_dir + sep + "subgraph.txt";
+
+ fastdeploy::RuntimeOption option;
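+  // UseTimVX() routes inference to the Paddle Lite TIM-VX backend (the A311D NPU);
+  // the subgraph partition file pins which operators run on the NPU.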
+ option.UseTimVX();
+ option.SetLiteSubgraphPartitionPath(subgraph_file);
+
+ auto model = fastdeploy::vision::segmentation::PaddleSegModel(
+ model_file, params_file, config_file, option);
+
+ assert(model.Initialized());
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::SegmentationResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisSegmentation(im, res, 0.5);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 3) {
+ std::cout << "Usage: infer_demo path/to/quant_model "
+ "path/to/image "
+ "e.g ./infer_demo ./ResNet50_vd_quant ./test.jpeg"
+ << std::endl;
+ return -1;
+ }
+
+ std::string model_dir = argv[1];
+ std::string test_image = argv[2];
+ InitAndInfer(model_dir, test_image);
+ return 0;
+}
diff --git a/examples/vision/segmentation/paddleseg/a311d/cpp/run_with_adb.sh b/examples/vision/segmentation/paddleseg/a311d/cpp/run_with_adb.sh
new file mode 100755
index 000000000..aacaed4c5
--- /dev/null
+++ b/examples/vision/segmentation/paddleseg/a311d/cpp/run_with_adb.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+HOST_SPACE=${PWD}
+echo ${HOST_SPACE}
+WORK_SPACE=/data/local/tmp/test
+
+# The first parameter represents the demo name
+DEMO_NAME=infer_demo
+if [ -n "$1" ]; then
+ DEMO_NAME=$1
+fi
+
+# The second parameter represents the model name
+MODEL_NAME=ppliteseg
+if [ -n "$2" ]; then
+ MODEL_NAME=$2
+fi
+
+# The third parameter indicates the name of the image to be tested
+IMAGE_NAME=cityscapes_demo.png
+if [ -n "$3" ]; then
+ IMAGE_NAME=$3
+fi
+
+# The fourth parameter represents the ID of the device
+ADB_DEVICE_NAME=
+if [ -n "$4" ]; then
+ ADB_DEVICE_NAME="-s $4"
+fi
+
+# Set the environment variables required during the running process
+EXPORT_ENVIRONMENT_VARIABLES="export GLOG_v=5; export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1; export VIV_VX_SET_PER_CHANNEL_ENTROPY=100; export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=300000; export VSI_NN_LOG_LEVEL=5;"
+
+EXPORT_ENVIRONMENT_VARIABLES="${EXPORT_ENVIRONMENT_VARIABLES}export LD_LIBRARY_PATH=${WORK_SPACE}/lib:\$LD_LIBRARY_PATH;"
+
+# Please install adb, and DON'T run this inside a Docker container.
+set -e
+adb $ADB_DEVICE_NAME shell "rm -rf $WORK_SPACE"
+adb $ADB_DEVICE_NAME shell "mkdir -p $WORK_SPACE"
+
+# Upload the demo, libraries, model and test images to the device
+adb $ADB_DEVICE_NAME push ${HOST_SPACE}/lib $WORK_SPACE
+adb $ADB_DEVICE_NAME push ${HOST_SPACE}/${DEMO_NAME} $WORK_SPACE
+adb $ADB_DEVICE_NAME push models $WORK_SPACE
+adb $ADB_DEVICE_NAME push images $WORK_SPACE
+
+# Execute the deployment demo
+adb $ADB_DEVICE_NAME shell "cd $WORK_SPACE; ${EXPORT_ENVIRONMENT_VARIABLES} chmod +x ./${DEMO_NAME}; ./${DEMO_NAME} ./models/${MODEL_NAME} ./images/$IMAGE_NAME"
diff --git a/examples/vision/segmentation/paddleseg/python/infer.py b/examples/vision/segmentation/paddleseg/python/infer.py
index 866e32bfb..9df7665a2 100644
--- a/examples/vision/segmentation/paddleseg/python/infer.py
+++ b/examples/vision/segmentation/paddleseg/python/infer.py
@@ -49,7 +49,7 @@ model = fd.vision.segmentation.PaddleSegModel(
# Predict the segmentation result of the image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
# Visualize the result
diff --git a/examples/vision/segmentation/paddleseg/quantize/python/infer.py b/examples/vision/segmentation/paddleseg/quantize/python/infer.py
index f95f04d17..85a875c1e 100644
--- a/examples/vision/segmentation/paddleseg/quantize/python/infer.py
+++ b/examples/vision/segmentation/paddleseg/quantize/python/infer.py
@@ -72,5 +72,5 @@ model = fd.vision.segmentation.PaddleSegModel(
# Predict the segmentation result of the image
im = cv2.imread(args.image)
-result = model.predict(im.copy())
+result = model.predict(im)
print(result)
diff --git a/examples/vision/segmentation/paddleseg/rknpu2/cpp/infer.cc b/examples/vision/segmentation/paddleseg/rknpu2/cpp/infer.cc
index bfc108d05..4e02ae32e 100644
--- a/examples/vision/segmentation/paddleseg/rknpu2/cpp/infer.cc
+++ b/examples/vision/segmentation/paddleseg/rknpu2/cpp/infer.cc
@@ -15,83 +15,84 @@
#include