Fix conflicts for Ascend

yunyaoXYY
2022-12-19 09:14:04 +00:00
330 changed files with 12347 additions and 981 deletions

View File

@@ -7,9 +7,16 @@ assignees: ''
---
*********************************************
Open source is not easy and our engineers carry a heavy daily development load, so please ask your question by following this issue template.
This greatly reduces the time engineers spend confirming your environment and basic build information with you.
*********************************************
## Environment
- FastDeploy version: state the exact version, e.g. fastdeploy-linux-gpu-0.8.0, or, for self-compiled develop code, describe how it was compiled and attach a screenshot of the build options printed by cmake
- FastDeploy version: state the exact version, e.g. fastdeploy-linux-gpu-0.8.0
- If you compiled FastDeploy yourself, describe how you built it (parameters/commands)
- System platform: Linux x64 (Ubuntu 18.04) / Windows x64 (Windows 10) / Mac OSX arm (12.0) / Mac OSX intel (12.0)
- Hardware: state the exact model, e.g. NVIDIA GPU 3080 Ti, CUDA 11.2, cuDNN 8.3
- Programming language: C++ / Python (3.7, 3.8, etc.)
@@ -23,3 +30,10 @@ assignees: ''
- - First run the deployment demos under `examples` (including the models they provide) to confirm they execute correctly
- - If the code under `examples` runs but your own model or your own code does not
- - - Provide your code and how you run it, or your model, so engineers can locate the problem quickly
*********************************************
Open source is not easy and our engineers carry a heavy daily development load, so please ask your question by following this issue template.
This greatly reduces the time engineers spend confirming your environment and basic build information with you.
*********************************************

View File

@@ -40,7 +40,7 @@ if(NOT MSVC)
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1)
endif(NOT MSVC)
if(UNIX AND (NOT APPLE) AND (NOT ANDROID) AND (NOT ENABLE_TIMVX))
if(UNIX AND (NOT APPLE) AND (NOT ANDROID) AND (NOT WITH_TIMVX))
include(${PROJECT_SOURCE_DIR}/cmake/patchelf.cmake)
endif()
@@ -67,6 +67,8 @@ option(ENABLE_FLYCV "Whether to enable flycv to boost image preprocess." OFF)
option(ENABLE_TIMVX "Whether to compile for TIMVX deploy." OFF)
option(WITH_CANN "Whether to compile for Huawei Ascend deploy with CANN." OFF)
option(WITH_CANN_PY "Whether to compile for Huawei Ascend deploy with CANN using python." OFF)
option(WITH_TIMVX "Whether to compile for TIMVX deploy." OFF)
option(WITH_XPU "Whether to compile for KunlunXin XPU deploy." OFF)
option(WITH_TESTING "Whether to compile with unittest." OFF)
############################# Options for Android cross compiling #########################
option(WITH_OPENCV_STATIC "Use OpenCV static lib for Android." OFF)
@@ -140,15 +142,20 @@ set(HEAD_DIR "${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}")
include_directories(${HEAD_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
if (ENABLE_TIMVX)
if (WITH_TIMVX)
include(${PROJECT_SOURCE_DIR}/cmake/timvx.cmake)
endif()
if (WITH_CANN)
if(NOT ${ENABLE_LITE_BACKEND})
message(WARNING "Compiling with -DWITH_CANN=ON forces -DENABLE_LITE_BACKEND=ON.")
set(ENABLE_LITE_BACKEND ON)
endif()
if(NOT CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
message(FATAL_ERROR "Huawei Ascend NPU is supported on Linux aarch64 platform for now.")
endif()
if(NOT PADDLELITE_URL)
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_arm64_huawei_ascend_npu_1121.tgz")
endif()
endif()
if (WITH_CANN_PY)
@@ -156,17 +163,34 @@ if (WITH_CANN_PY)
if(NOT ${ENABLE_LITE_BACKEND})
set(ENABLE_LITE_BACKEND ON)
endif()
if(NOT CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
message(FATAL_ERROR "Huawei Ascend NPU is supported on Linux aarch64 platform for now.")
endif()
if(NOT PADDLELITE_URL)
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_arm64_huawei_ascend_npu_python_1207.tgz")
endif()
execute_process(COMMAND sh -c "ls *.so*" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/paddlelite/lib
COMMAND sh -c "xargs ${PATCHELF_EXE} --set-rpath '$ORIGIN'" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/paddlelite/lib
RESULT_VARIABLE result
OUTPUT_VARIABLE curr_out
ERROR_VARIABLE curr_out)
if(result EQUAL "1")
message(FATAL_ERROR "Failed to patchelf tensorrt libraries.")
message(FATAL_ERROR "Failed to patchelf CANN libraries.")
endif()
message(STATUS "result:${result} out:${curr_out}")
endif()
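The `execute_process` pair above pipes every shared library in the Paddle Lite lib directory through `patchelf --set-rpath '$ORIGIN'`, so each library resolves its siblings relative to its own location. Sketched in Python for clarity (a hypothetical helper, not part of the build; the real build shells out to `sh` and `xargs` as shown):

```python
# Set every shared library's rpath to $ORIGIN so the Paddle Lite libs
# load their siblings from their own directory. lib_dir and patchelf_exe
# are illustrative parameters.
import glob, os, subprocess

def set_origin_rpath(lib_dir, patchelf_exe="patchelf"):
    for lib in glob.glob(os.path.join(lib_dir, "*.so*")):
        subprocess.run([patchelf_exe, "--set-rpath", "$ORIGIN", lib], check=True)
```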
if (WITH_XPU)
if(NOT ENABLE_LITE_BACKEND)
set(ENABLE_LITE_BACKEND ON)
endif()
if(NOT CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64")
message(FATAL_ERROR "XPU is only supported on Linux x64 platform")
endif()
if(NOT PADDLELITE_URL)
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/third_libs/lite-linux-x64-xpu-20221215.tgz")
endif()
endif()
if(ANDROID OR IOS)
if(ENABLE_ORT_BACKEND)
@@ -373,15 +397,21 @@ if(ENABLE_TRT_BACKEND)
endif()
if(NOT BUILD_ON_JETSON)
if(NOT TRT_DIRECTORY)
message(FATAL_ERROR "While -DENABLE_TRT_BACKEND=ON, must define -DTRT_DIRECTORY, e.g -DTRT_DIRECTORY=/Downloads/TensorRT-8.4")
set(TRT_INC_DIR /usr/include/x86_64-linux-gnu/)
set(TRT_LIB_DIR /usr/lib/x86_64-linux-gnu/)
endif()
endif()
if(BUILD_ON_JETSON)
set(TRT_INC_DIR /usr/include/aarch64-linux-gnu/)
set(TRT_LIB_DIR /usr/lib/aarch64-linux-gnu/)
if(NOT BUILD_ON_JETSON)
else()
set(TRT_INC_DIR /usr/include/x86_64-linux-gnu/)
set(TRT_LIB_DIR /usr/lib/x86_64-linux-gnu/)
if(TRT_DIRECTORY)
set(TRT_INC_DIR ${TRT_DIRECTORY}/include)
set(TRT_LIB_DIR ${TRT_DIRECTORY}/lib)
endif()
endif()
add_definitions(-DENABLE_TRT_BACKEND)
include_directories(${TRT_INC_DIR})
@@ -393,7 +423,7 @@ if(ENABLE_TRT_BACKEND)
list(APPEND DEPEND_LIBS ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_PLUGIN_LIB})
list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_OP_CUDA_KERNEL_SRCS})
if(NOT BUILD_ON_JETSON)
if(NOT BUILD_ON_JETSON AND TRT_DIRECTORY)
if(NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
endif()
@@ -614,7 +644,6 @@ install(
DESTINATION ${CMAKE_INSTALL_PREFIX}
)
install(
DIRECTORY ${PROJECT_SOURCE_DIR}/examples
DESTINATION ${CMAKE_INSTALL_PREFIX}

View File

@@ -27,6 +27,7 @@ set(OPENCV_DIRECTORY "@OPENCV_DIRECTORY@")
set(ORT_DIRECTORY "@ORT_DIRECTORY@")
set(OPENVINO_DIRECTORY "@OPENVINO_DIRECTORY@")
set(RKNN2_TARGET_SOC "@RKNN2_TARGET_SOC@")
set(WITH_XPU @WITH_XPU@)
set(FASTDEPLOY_LIBS "")
set(FASTDEPLOY_INCS "")
@@ -140,13 +141,19 @@ if(WITH_GPU)
if (ENABLE_TRT_BACKEND)
if(BUILD_ON_JETSON)
find_library(TRT_INFER_LIB nvinfer /usr/include/aarch64-linux-gnu/)
find_library(TRT_ONNX_LIB nvonnxparser /usr/include/aarch64-linux-gnu/)
find_library(TRT_PLUGIN_LIB nvinfer_plugin /usr/include/aarch64-linux-gnu/)
find_library(TRT_INFER_LIB nvinfer /usr/lib/aarch64-linux-gnu/)
find_library(TRT_ONNX_LIB nvonnxparser /usr/lib/aarch64-linux-gnu/)
find_library(TRT_PLUGIN_LIB nvinfer_plugin /usr/lib/aarch64-linux-gnu/)
else()
if(TRT_DIRECTORY)
find_library(TRT_INFER_LIB nvinfer ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
find_library(TRT_ONNX_LIB nvonnxparser ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
find_library(TRT_PLUGIN_LIB nvinfer_plugin ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
else()
find_library(TRT_INFER_LIB nvinfer /usr/lib/x86_64-linux-gnu/)
find_library(TRT_ONNX_LIB nvonnxparser /usr/lib/x86_64-linux-gnu/)
find_library(TRT_PLUGIN_LIB nvinfer_plugin /usr/lib/x86_64-linux-gnu/)
endif()
endif()
list(APPEND FASTDEPLOY_LIBS ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_PLUGIN_LIB})
endif()
@@ -237,6 +244,10 @@ if(ENABLE_PADDLE_FRONTEND)
list(APPEND FASTDEPLOY_LIBS ${PADDLE2ONNX_LIB})
endif()
if(WITH_XPU)
list(APPEND FASTDEPLOY_LIBS -lpthread -lrt -ldl)
endif()
remove_duplicate_libraries(FASTDEPLOY_LIBS)
# Print compiler information
@@ -270,6 +281,7 @@ message(STATUS " ENABLE_VISION : ${ENABLE_VISION}")
message(STATUS " ENABLE_TEXT : ${ENABLE_TEXT}")
if(WITH_GPU)
message(STATUS " CUDA_DIRECTORY : ${CUDA_DIRECTORY}")
message(STATUS " TRT_DIRECTORY : ${TRT_DIRECTORY}")
endif()
if(OPENCV_DIRECTORY)
message(STATUS " OPENCV_DIRECTORY : ${OPENCV_DIRECTORY}")

View File

@@ -1 +1 @@
README_EN.md
README_CN.md

View File

@@ -1,4 +1,4 @@
[English](README_EN.md) | 简体中文
[English](README_EN.md) | 简体中文 | [हिन्दी](./docs/docs_i18n/README_हिन्दी.md) | [日本語](./docs/docs_i18n/README_日本語.md) | [한국인](./docs/docs_i18n/README_한국어.md) | [Pу́сский язы́к](./docs/docs_i18n/README_Ру́сский_язы́к.md)
![FastDeploy](https://user-images.githubusercontent.com/31974251/185771818-5d4423cd-c94c-4a49-9894-bc7a8d1c29d0.png)
@@ -36,16 +36,15 @@
| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input**: Good morning, today is 2020<br>/10/29, and the minimum temperature is -3°C.<br><br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="170" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)</p> |
## **Community**
## 📣 Recent updates
* **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas
* **WeChat**: Scan the QR code and fill out the questionnaire to join the technical community and discuss deployment pain points and solutions with community developers
- **💥 Live broadcast preview: from 2022.12.12 to 2022.12.30, every evening at 20:30, we join ten hardware vendor partners to launch the deployment month program "The Complete Guide to Deploying Industrial-Grade AI Models"**. Scan the QR code below with WeChat, follow the official account, and fill out the questionnaire to join the official group and discuss AI deployment topics with industry experts.
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/200145290-d5565d18-6707-4a0b-a9af-85fd36d35d13.jpg" width = "220" height = "220" />
<img src="https://user-images.githubusercontent.com/54695910/207262688-4225bc39-4337-4966-a5cc-26bd6557d226.jpg" width = "150" height = "150" />
</div>
## Contents
* **🖥️ Server-side deployment**
@@ -118,7 +117,7 @@ model = vision.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
"ppyoloe_crn_l_300e_coco/model.pdiparams",
"ppyoloe_crn_l_300e_coco/infer_cfg.yml")
im = cv2.imread("000000014439.jpg")
result = model.predict(im.copy())
result = model.predict(im)
print(result)
vis_im = vision.vis_detection(im, result, score_threshold=0.5)
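For context, a self-contained version of this quick-start might look as follows (a sketch: it assumes the FastDeploy Python package and opencv-python are installed, and that the ppyoloe archive and test image referenced in this README have been downloaded and extracted into the working directory):

```python
import cv2
import fastdeploy.vision as vision

# Paths follow the quick-start above.
model = vision.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
                                 "ppyoloe_crn_l_300e_coco/model.pdiparams",
                                 "ppyoloe_crn_l_300e_coco/infer_cfg.yml")
im = cv2.imread("000000014439.jpg")
result = model.predict(im)  # after this commit, no im.copy() is needed
print(result)

vis_im = vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
```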
@@ -163,9 +162,9 @@ int main(int argc, char* argv[]) {
auto im = cv::imread("000000014439.jpg");
vision::DetectionResult res;
model.Predict(&im, &res);
model.Predict(im, &res);
auto vis_im = vision::Visualize::VisDetection(im, res, 0.5);
auto vis_im = vision::VisDetection(im, res, 0.5);
cv::imwrite("vis_image.jpg", vis_im);
return 0;
}
@@ -187,75 +186,75 @@ int main(int argc, char* argv[]) {
<img src="https://user-images.githubusercontent.com/54695910/198619323-c9b1cbce-1c1c-4f92-9737-4805c7c0ff2f.png" />
</div>
| Task | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux |
|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:---------------:|:-------------:|:-------------:|:-------:|
| --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | Phytium D2000 aarch64 | NVIDIA Jetson | Graphcore IPU | Serving |
| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_ort](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Single430/FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | [PeterL1n/RobustVideoMatting]() | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ |
| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ✅ |
| Task | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux | Linux |
|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:---------------:|:-------------:|:-------------:|:-------:|:-------:|
| --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | Phytium D2000 aarch64 | NVIDIA Jetson | Graphcore IPU | KunlunXin XPU | Serving |
| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_ort](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceAlign | [Single430/FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Matting | [PeterL1n/RobustVideoMatting]() | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ | ✅ |
| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ❔ | ✅ |
</div></details>
@@ -322,6 +321,16 @@ int main(int argc, char* argv[]) {
</div></details>
## **Community**
* **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas
* **WeChat**: Scan the QR code and fill out the questionnaire to join the technical community and discuss deployment pain points and solutions with community developers
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/200145290-d5565d18-6707-4a0b-a9af-85fd36d35d13.jpg" width = "220" height = "220" />
</div>
<div id="fastdeploy-acknowledge"></div>

View File

@@ -1,4 +1,5 @@
English | [简体中文](README_CN.md)
English | [简体中文](README_CN.md) | [हिन्दी](./docs/docs_i18n/README_हिन्दी.md) | [日本語](./docs/docs_i18n/README_日本語.md) | [한국인](./docs/docs_i18n/README_한국어.md) | [Pу́сский язы́к](./docs/docs_i18n/README_Ру́сский_язы́к.md)
![FastDeploy](https://user-images.githubusercontent.com/31974251/185771818-5d4423cd-c94c-4a49-9894-bc7a8d1c29d0.png)
@@ -36,14 +37,14 @@ Including image classification, object detection, image segmentation, face detec
| [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) | [**Speech**](examples/audio/pp-tts) |
| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input**:Life was like a box<br> of chocolates, you never<br> know what you're <br>gonna get.<br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="150" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/tacotron2_ljspeech_waveflow_samples_0.2/sentence_1.wav)</p> |
## 👬 Community
- **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas.
- **WeChat**: Scan the QR code below using WeChat, follow the PaddlePaddle official account and fill out the questionnaire to join the WeChat group.
## 📣 Recent updates
- 💥 **Live broadcast preview: Dec 12 to Dec 30, every evening at 20:30 China Standard Time,** FastDeploy has joined hands with ten hardware vendor partners to launch the deployment month program `The Complete Guide to Deploying Industrial-Grade AI Models`.
- Scan the QR code below using WeChat, follow the PaddlePaddle official account and fill out the questionnaire to join the WeChat group.
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/200145290-d5565d18-6707-4a0b-a9af-85fd36d35d13.jpg" width = "200" height = "200" />
<img src="https://user-images.githubusercontent.com/54695910/207262688-4225bc39-4337-4966-a5cc-26bd6557d226.jpg" width = "150" height = "150" />
</div>
## Contents
@@ -185,75 +186,75 @@ Notes: ✅: already supported; ❔: to be supported in the future; N/A: Not Available
<img src="https://user-images.githubusercontent.com/54695910/198620704-741523c1-dec7-44e5-9f2b-29ddd9997344.png" />
</div>
| Task | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux |
|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:------------------------:|:------------------------:|:------------------------:|:-----------------------:|:---------------------:|:--------------------------:| :----------------: |:---------------------------:|:---------------------------:|:-------:|
| --- | --- | <font size=2> X86 CPU | <font size=2> NVIDIA GPU | <font size=2> Intel CPU | <font size=2> NVIDIA GPU | <font size=2> Intel CPU | <font size=2> Arm CPU | <font size=2> AArch64 CPU | Phytium D2000 CPU | <font size=2> NVIDIA Jetson | <font size=2> Graphcore IPU | Serving |
| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_ort](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Single430/FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | [PeterL1n/RobustVideoMatting]() | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ |
| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ✅ |
| Task | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux | Linux |
|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:------------------------:|:------------------------:|:------------------------:|:-----------------------:|:---------------------:|:--------------------------:| :----------------: |:---------------------------:|:---------------------------:|:-------:|:-------:|
| --- | --- | <font size=2> X86 CPU | <font size=2> NVIDIA GPU | <font size=2> Intel CPU | <font size=2> NVIDIA GPU | <font size=2> Intel CPU | <font size=2> Arm CPU | <font size=2> AArch64 CPU | Phytium D2000 CPU | <font size=2> NVIDIA Jetson | <font size=2> Graphcore IPU | KunlunXin XPU | Serving |
| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_ort](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ✅ |
| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ✅ | ❔ |
| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceAlign | [Single430/FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Matting | [PeterL1n/RobustVideoMatting]() | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ |
| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | ❔ |
| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ | ✅ |
| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ❔ | ✅ |
</div></details>
@@ -319,6 +320,16 @@ Notes: ✅: already supported; ❔: to be supported in the future; N/A: Not Available
</div></details>
## 👬 Community
- **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas.
- **WeChat**: Scan the QR code below using WeChat, follow the PaddlePaddle official account and fill out the questionnaire to join the WeChat group.
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/200145290-d5565d18-6707-4a0b-a9af-85fd36d35d13.jpg" width = "200" height = "200" />
</div>
## Acknowledgements

View File

@@ -92,6 +92,7 @@ def build_option(args):
elif backend in ["trt", "paddle_trt"]:
option.use_trt_backend()
if backend == "paddle_trt":
option.enable_paddle_trt_collect_shape()
option.enable_paddle_to_trt()
if enable_trt_fp16:
option.enable_trt_fp16()
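For readability, the backend dispatch this hunk modifies boils down to the sketch below (`fd` is fastdeploy; the option calls are the ones used above, while the function signature is simplified from the script's `args`). Shape collection is enabled before switching Paddle Inference to its TensorRT subgraph engine, mirroring the order in the diff:

```python
import fastdeploy as fd

def make_option(backend, enable_trt_fp16=False):
    # Simplified from build_option(args) above.
    option = fd.RuntimeOption()
    if backend in ["trt", "paddle_trt"]:
        option.use_trt_backend()
        if backend == "paddle_trt":
            # Collect dynamic shapes, then route Paddle Inference
            # through its TensorRT subgraph engine.
            option.enable_paddle_trt_collect_shape()
            option.enable_paddle_to_trt()
        if enable_trt_fp16:
            option.enable_trt_fp16()
    return option
```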
@@ -267,7 +268,6 @@ if __name__ == '__main__':
f.writelines("===={}====: \n".format(os.path.split(file_path)[-1][:-4]))
try:
rec_option = option
if "OCRv2" in args.model_dir:
det_option = option
if args.backend in ["trt", "paddle_trt"]:
@@ -293,16 +293,19 @@ if __name__ == '__main__':
model = fd.vision.ocr.PPOCRv2(
det_model=det_model, cls_model=cls_model, rec_model=rec_model)
elif "OCRv3" in args.model_dir:
det_option = option
if args.backend in ["trt", "paddle_trt"]:
det_option.set_trt_input_shape(
"x", [1, 3, 64, 64], [1, 3, 640, 640], [1, 3, 960, 960])
det_model = fd.vision.ocr.DBDetector(
det_model_file, det_params_file, runtime_option=det_option)
cls_option = option
if args.backend in ["trt", "paddle_trt"]:
cls_option.set_trt_input_shape(
"x", [1, 3, 48, 10], [10, 3, 48, 320], [64, 3, 48, 1024])
cls_model = fd.vision.ocr.Classifier(
cls_model_file, cls_params_file, runtime_option=cls_option)
rec_option = option
if args.backend in ["trt", "paddle_trt"]:
rec_option.set_trt_input_shape(
"x", [1, 3, 48, 10], [10, 3, 48, 320], [64, 3, 48, 2304])

View File

@@ -70,10 +70,8 @@ for i in range(line_nums):
cpu_rss_mb_list = cpu_rss_mb_ori.split(".")
cpu_rss_mb = cpu_rss_mb_list[0] + "." + cpu_rss_mb_list[1][:2]
if "gpu_rss_mb" in lines[i + 4]:
gpu_rss_mb_ori = lines[i + 4].split(": ")[1]
# two decimal places
gpu_rss_mb_list = gpu_rss_mb_ori.split(".")
gpu_rss_mb = gpu_rss_mb_list[0] + "." + gpu_rss_mb_list[1][:2]
gpu_rss_mb_ori = lines[i + 4].split(": ")[1].strip()
gpu_rss_mb = str(gpu_rss_mb_ori) + ".0"
if "ort_cpu_1" in lines[i]:
ort_cpu_thread1[
model_name] = runtime + "\t" + end2end + "\t" + cpu_rss_mb
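The two extraction styles in this hunk can be isolated as below (a sketch, assuming log lines of the form `cpu_rss_mb: 123.45678` and `gpu_rss_mb: 1234`; after this change the GPU value is taken verbatim and given a `.0` suffix, while the CPU value is still truncated to two decimal places):

```python
def parse_cpu_mem(line):
    # "cpu_rss_mb: 123.45678" -> "123.45" (truncated, not rounded)
    value = line.split(": ")[1]
    whole, frac = value.split(".")
    return whole + "." + frac[:2]

def parse_gpu_mem(line):
    # "gpu_rss_mb: 1234" -> "1234.0"
    return line.split(": ")[1].strip() + ".0"

print(parse_cpu_mem("cpu_rss_mb: 123.45678"))  # 123.45
print(parse_gpu_mem("gpu_rss_mb: 1234"))       # 1234.0
```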
@@ -111,7 +109,7 @@ for i in range(line_nums):
f2 = open("struct_cpu_" + domain + ".txt", "w")
f2.writelines(
"model_name\tthread_nums\tort_run\tort_end2end\tcpu_rss_mb\tov_run\tov_end2end\tcpu_rss_mb\tpaddle_run\tpaddle_end2end\tcpu_rss_mb\n"
"model_name\tthread_nums\tort_run\tort_end2end\tcpu_mem\tov_run\tov_end2end\tcpu_mem\tpaddle_run\tpaddle_end2end\tcpu_mem\n"
)
for model_name in model_name_set:
lines1 = model_name + '\t1\t'
@@ -148,7 +146,7 @@ f2.close()
f3 = open("struct_gpu_" + domain + ".txt", "w")
f3.writelines(
"model_name\tort_run\tort_end2end\tgpu_rss_mb\tpaddle_run\tpaddle_end2end\tgpu_rss_mb\tpaddle_trt_run\tpaddle_trt_end2end\tgpu_rss_mb\tpaddle_trt_fp16_run\tpaddle_trt_fp16_end2end\tgpu_rss_mb\ttrt_run\ttrt_end2end\tgpu_rss_mb\ttrt_fp16_run\ttrt_fp16_end2end\tgpu_rss_mb\n"
"model_name\tort_run\tort_end2end\tgpu_mem\tpaddle_run\tpaddle_end2end\tgpu_mem\tpaddle_trt_run\tpaddle_trt_end2end\tgpu_mem\tpaddle_trt_fp16_run\tpaddle_trt_fp16_end2end\tgpu_mem\ttrt_run\ttrt_end2end\tgpu_mem\ttrt_fp16_run\ttrt_fp16_end2end\tgpu_mem\n"
)
for model_name in model_name_set:
lines1 = model_name + '\t'

View File

@@ -14,9 +14,6 @@
INCLUDE(ExternalProject)
if(NOT GIT_URL)
SET(GIT_URL "https://github.com")
endif()
if(THIRD_PARTY_PATH)
SET(GFLAGS_PREFIX_DIR ${THIRD_PARTY_PATH}/gflags)
SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
@@ -26,8 +23,16 @@ else()
SET(GFLAGS_INSTALL_DIR ${FASTDEPLOY_INSTALL_DIR}/installed_fastdeploy/cmake/gflags)
endif()
SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
set(GFLAGS_REPOSITORY ${GIT_URL}/gflags/gflags.git)
set(GFLAGS_TAG "v2.2.2")
set(GFLAGS_SOURCE_FILE ${GFLAGS_PREFIX_DIR}/src/gflags.tgz CACHE PATH "gflags source file." FORCE)
set(GFLAGS_URL_PREFIX "https://bj.bcebos.com/fastdeploy/third_libs")
set(GFLAGS_URL ${GFLAGS_URL_PREFIX}/gflags.tgz)
set(GFLAGS_CACHE_FILE ${CMAKE_CURRENT_LIST_DIR}/gflags.tgz)
if(EXISTS ${GFLAGS_CACHE_FILE})
set(GFLAGS_URL ${GFLAGS_CACHE_FILE} CACHE PATH "gflags cache file." FORCE)
set(GFLAGS_SOURCE_FILE ${GFLAGS_CACHE_FILE} CACHE PATH "gflags source file." FORCE)
endif()
IF(WIN32)
set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags_static.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ELSE(WIN32)
@@ -42,9 +47,7 @@ if(ANDROID)
ExternalProject_Add(
extern_gflags
${EXTERNAL_PROJECT_LOG_ARGS}
${SHALLOW_CLONE}
GIT_REPOSITORY ${GFLAGS_REPOSITORY}
GIT_TAG ${GFLAGS_TAG}
URL ${GFLAGS_URL}
PREFIX ${GFLAGS_PREFIX_DIR}
UPDATE_COMMAND ""
BUILD_COMMAND ${BUILD_COMMAND}
@@ -76,9 +79,7 @@ else()
ExternalProject_Add(
extern_gflags
${EXTERNAL_PROJECT_LOG_ARGS}
${SHALLOW_CLONE}
GIT_REPOSITORY ${GFLAGS_REPOSITORY}
GIT_TAG ${GFLAGS_TAG}
URL ${GFLAGS_URL}
PREFIX ${GFLAGS_PREFIX_DIR}
UPDATE_COMMAND ""
BUILD_COMMAND ${BUILD_COMMAND}
@@ -107,11 +108,16 @@ ADD_LIBRARY(gflags STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
ADD_DEPENDENCIES(gflags extern_gflags)
if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
list(APPEND GFLAGS_LIBRARIES pthread)
endif()
# On Windows (including MinGW), the Shlwapi library is used by gflags if available.
if (WIN32)
include(CheckIncludeFileCXX)
check_include_file_cxx("shlwapi.h" HAVE_SHLWAPI)
if (HAVE_SHLWAPI)
set_property(GLOBAL PROPERTY OS_DEPENDENCY_MODULES shlwapi.lib)
list(APPEND GFLAGS_LIBRARIES shlwapi.lib)
endif(HAVE_SHLWAPI)
endif (WIN32)

View File

@@ -41,12 +41,6 @@ elseif(IOS)
else()
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
set(OPENCV_FILENAME "opencv-linux-aarch64-3.4.14")
else()
if(ENABLE_TIMVX)
set(OPENCV_FILENAME "opencv-armv7hf")
else()
set(OPENCV_FILENAME "opencv-linux-x64-3.4.16")
endif()
endif()
if(ENABLE_OPENCV_CUDA)
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
@@ -56,15 +50,20 @@ else()
endif()
endif()
if(NOT OPENCV_FILENAME)
set(OPENCV_FILENAME "opencv-linux-x64-3.4.16")
endif()
set(OPENCV_INSTALL_DIR ${THIRD_PARTY_PATH}/install/)
if(ANDROID)
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/fastdeploy/third_libs")
elseif(ENABLE_TIMVX)
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/fastdeploy/test")
else() # TODO: use fastdeploy/third_libs instead.
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/paddle2onnx/libs")
endif()
if(NOT OPENCV_URL)
set(OPENCV_URL ${OPENCV_URL_PREFIX}/${OPENCV_FILENAME}${COMPRESSED_SUFFIX})
endif()
if(BUILD_ON_JETSON)
if(EXISTS /usr/lib/aarch64-linux-gnu/cmake/opencv4/)
@@ -186,9 +185,8 @@ else()
endif()
file(RENAME ${THIRD_PARTY_PATH}/install/${OPENCV_FILENAME}/ ${THIRD_PARTY_PATH}/install/opencv)
set(OPENCV_FILENAME opencv)
if(NOT OpenCV_DIR)
set(OpenCV_DIR ${THIRD_PARTY_PATH}/install/${OPENCV_FILENAME})
if(ENABLE_TIMVX)
set(OpenCV_DIR ${OpenCV_DIR}/lib/cmake/opencv4)
endif()
if (WIN32)
set(OpenCV_DIR ${OpenCV_DIR}/build)
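Because the hunk above only sets `OPENCV_URL` when it is undefined, a mirrored archive can be supplied at configure time. A sketch; the mirror URL is a placeholder, not a real endpoint:

```bash
# Override the OpenCV download location; the `if(NOT OPENCV_URL)` guard
# above keeps this user-supplied value.
cmake .. -DENABLE_VISION=ON \
  -DOPENCV_URL=https://mirror.example.com/opencv-linux-x64-3.4.16.tgz
```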


@@ -13,6 +13,8 @@
# limitations under the License.
include(ExternalProject)
option(PADDLEINFERENCE_DIRECTORY "Directory of Paddle Inference library" OFF)
set(PADDLEINFERENCE_PROJECT "extern_paddle_inference")
set(PADDLEINFERENCE_PREFIX_DIR ${THIRD_PARTY_PATH}/paddle_inference)
set(PADDLEINFERENCE_SOURCE_DIR
@@ -27,6 +29,10 @@ set(PADDLEINFERENCE_LIB_DIR
set(CMAKE_BUILD_RPATH "${CMAKE_BUILD_RPATH}"
"${PADDLEINFERENCE_LIB_DIR}")
if(PADDLEINFERENCE_DIRECTORY)
set(PADDLEINFERENCE_INC_DIR ${PADDLEINFERENCE_DIRECTORY}/paddle/include)
endif()
include_directories(${PADDLEINFERENCE_INC_DIR})
if(WIN32)
set(PADDLEINFERENCE_COMPILE_LIB
@@ -47,6 +53,14 @@ else()
endif(WIN32)
if(PADDLEINFERENCE_DIRECTORY)
if(EXISTS "${THIRD_PARTY_PATH}/install/paddle_inference")
file(REMOVE_RECURSE "${THIRD_PARTY_PATH}/install/paddle_inference")
endif()
find_package(Python COMPONENTS Interpreter Development REQUIRED)
message(STATUS "Copying ${PADDLEINFERENCE_DIRECTORY} to ${THIRD_PARTY_PATH}/install/paddle_inference ...")
execute_process(COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/scripts/copy_directory.py ${PADDLEINFERENCE_DIRECTORY} ${THIRD_PARTY_PATH}/install/paddle_inference)
else()
set(PADDLEINFERENCE_URL_BASE "https://bj.bcebos.com/fastdeploy/third_libs/")
set(PADDLEINFERENCE_VERSION "2.4-dev3")
if(WIN32)
@@ -91,6 +105,7 @@ ExternalProject_Add(
INSTALL_COMMAND
${CMAKE_COMMAND} -E copy_directory ${PADDLEINFERENCE_SOURCE_DIR} ${PADDLEINFERENCE_INSTALL_DIR}
BUILD_BYPRODUCTS ${PADDLEINFERENCE_COMPILE_LIB})
endif(PADDLEINFERENCE_DIRECTORY)
if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
add_custom_target(patchelf_paddle_inference ALL COMMAND bash -c "PATCHELF_EXE=${PATCHELF_EXE} python ${PROJECT_SOURCE_DIR}/scripts/patch_paddle_inference.py ${PADDLEINFERENCE_INSTALL_DIR}/paddle/lib/libpaddle_inference.so" DEPENDS ${LIBRARY_NAME})
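With the new `PADDLEINFERENCE_DIRECTORY` branch above, a locally extracted Paddle Inference package is copied into `third_libs` instead of being downloaded. A sketch, assuming the package was extracted to `/opt/paddle_inference` (it must contain `paddle/include` and `paddle/lib`, as the cmake above expects):

```bash
tar xzf paddle_inference.tgz -C /opt   # archive name is illustrative
cmake .. -DENABLE_PADDLE_BACKEND=ON \
  -DPADDLEINFERENCE_DIRECTORY=/opt/paddle_inference
```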


@@ -49,27 +49,19 @@ if(ANDROID)
endif()
endif()
if(NOT PADDLELITE_URL)
if(WIN32 OR APPLE OR IOS)
message(FATAL_ERROR "Doesn't support windows/mac/ios platform with backend Paddle Lite now.")
elseif(ANDROID)
set(PADDLELITE_URL "${PADDLELITE_URL_PREFIX}/lite-android-${ANDROID_ABI}-latest.tgz")
set(PADDLELITE_URL "${PADDLELITE_URL_PREFIX}/lite-android-${ANDROID_ABI}-latest-dev.tgz")
if(ANDROID_ABI MATCHES "arm64-v8a")
set(PADDLELITE_URL "${PADDLELITE_URL_PREFIX}/lite-android-${ANDROID_ABI}-fp16-latest.tgz")
set(PADDLELITE_URL "${PADDLELITE_URL_PREFIX}/lite-android-${ANDROID_ABI}-fp16-latest-dev.tgz")
endif()
else() # Linux
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
if(WITH_CANN)
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_arm64_huawei_ascend_npu_1121.tgz")
elseif(WITH_CANN_PY)
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_arm64_huawei_ascend_npu_python_1207.tgz")
set(PADDLELITE_URL "${PADDLELITE_URL_PREFIX}/lite-linux-arm64-20221209.tgz")
else()
set(PADDLELITE_URL "${PADDLELITE_URL_PREFIX}/lite-linux-arm64-20220920.tgz")
endif()
else()
if(ENABLE_TIMVX)
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_armhf_1130.tgz")
else()
message(FATAL_ERROR "Only support Linux aarch64 or ENABLE_TIMVX now, x64 is not supported with backend Paddle Lite.")
message(FATAL_ERROR "Only support Linux aarch64 now, x64 is not supported with backend Paddle Lite.")
endif()
endif()
endif()
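All the branches above sit behind `if(NOT PADDLELITE_URL)`, so a custom Paddle Lite package still wins when passed explicitly. A sketch reusing one of the URLs from this hunk:

```bash
# Inject a specific Paddle Lite build at configure time.
cmake .. -DENABLE_LITE_BACKEND=ON \
  -DPADDLELITE_URL=https://bj.bcebos.com/fastdeploy/third_libs/lite-linux-arm64-20221209.tgz
```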


@@ -39,6 +39,8 @@ function(fastdeploy_summary)
message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")
message(STATUS " WITH_CANN : ${WITH_CANN}")
message(STATUS " WITH_CANN_PY : ${WITH_CANN_PY}")
message(STATUS " WITH_TIMVX : ${WITH_TIMVX}")
message(STATUS " WITH_XPU : ${WITH_XPU}")
if(ENABLE_ORT_BACKEND)
message(STATUS " ONNXRuntime version : ${ONNXRUNTIME_VERSION}")
endif()


@@ -1,12 +1,4 @@
if (NOT DEFINED CMAKE_SYSTEM_PROCESSOR)
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR arm)
set(CMAKE_C_COMPILER "arm-linux-gnueabihf-gcc")
set(CMAKE_CXX_COMPILER "arm-linux-gnueabihf-g++")
set(CMAKE_CXX_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_CXX_FLAGS}")
set(CMAKE_C_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_C_FLAGS}" )
set(CMAKE_BUILD_TYPE MinSizeRel)
else()
if(NOT ${ENABLE_LITE_BACKEND})
message(WARNING "While compiling with -DWITH_TIMVX=ON, will force to set -DENABLE_LITE_BACKEND=ON")
set(ENABLE_LITE_BACKEND ON)
@@ -46,9 +38,8 @@ else()
set(ENABLE_TEXT OFF CACHE BOOL "Force ENABLE_TEXT OFF" FORCE)
message(STATUS "While compiling with -DWITH_TIMVX=ON, will force to set -DENABLE_TEXT=OFF")
endif()
if (DEFINED CMAKE_INSTALL_PREFIX)
install(FILES ${PROJECT_SOURCE_DIR}/cmake/timvx.cmake DESTINATION ${CMAKE_INSTALL_PREFIX})
endif()
endif()
install(FILES ${PROJECT_SOURCE_DIR}/cmake/toolchain.cmake DESTINATION ${CMAKE_INSTALL_PREFIX})

cmake/toolchain.cmake Executable file

@@ -0,0 +1,38 @@
if (DEFINED TARGET_ABI)
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_BUILD_TYPE MinSizeRel)
if(${TARGET_ABI} MATCHES "armhf")
set(CMAKE_SYSTEM_PROCESSOR arm)
set(CMAKE_C_COMPILER "arm-linux-gnueabihf-gcc")
set(CMAKE_CXX_COMPILER "arm-linux-gnueabihf-g++")
set(CMAKE_CXX_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_CXX_FLAGS}")
set(CMAKE_C_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_C_FLAGS}" )
set(OPENCV_URL "https://bj.bcebos.com/fastdeploy/third_libs/opencv-linux-armv7hf-4.6.0.tgz")
set(OPENCV_FILENAME "opencv-linux-armv7hf-4.6.0")
if(WITH_TIMVX)
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/third_libs/lite-linux-armhf-timvx-1130.tgz")
else()
message(STATUS "PADDLELITE_URL will be configured if WITH_TIMVX=ON.")
endif()
set(THIRD_PARTY_PATH ${CMAKE_CURRENT_BINARY_DIR}/third_libs)
set(OpenCV_DIR ${THIRD_PARTY_PATH}/install/opencv/lib/cmake/opencv4)
elseif(${TARGET_ABI} MATCHES "arm64")
set(CMAKE_SYSTEM_PROCESSOR aarch64)
set(CMAKE_C_COMPILER "aarch64-linux-gnu-gcc")
set(CMAKE_CXX_COMPILER "aarch64-linux-gnu-g++")
set(CMAKE_CXX_FLAGS "-march=armv8-a ${CMAKE_CXX_FLAGS}")
set(CMAKE_C_FLAGS "-march=armv8-a ${CMAKE_C_FLAGS}")
set(OPENCV_URL "https://bj.bcebos.com/fastdeploy/third_libs/opencv-linux-aarch64-4.6.0.tgz")
set(OPENCV_FILENAME "opencv-linux-aarch64-4.6.0")
if(WITH_TIMVX)
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/third_libs/lite-linux-aarch64-timvx-20221209.tgz")
else()
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/third_libs/lite-linux-arm64-20221209.tgz")
endif()
set(THIRD_PARTY_PATH ${CMAKE_CURRENT_BINARY_DIR}/third_libs)
set(OpenCV_DIR ${THIRD_PARTY_PATH}/install/opencv/lib/cmake/opencv4)
else()
message(FATAL_ERROR "When cross-compiling, please set the -DTARGET_ABI to arm64 or armhf.")
endif()
endif()
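The new toolchain file is driven entirely by `TARGET_ABI`; a cross-compile invocation might look like the sketch below, mirroring the A311D docs later in this commit:

```bash
mkdir build && cd build
# TARGET_ABI selects the armhf or arm64 branch of toolchain.cmake.
cmake -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchain.cmake \
      -DTARGET_ABI=arm64 \
      -DWITH_TIMVX=ON \
      -DENABLE_VISION=ON ..
make -j8
```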

docs/README_CN.md Normal file → Executable file

@@ -8,6 +8,7 @@
- [Build and install the GPU deployment environment](cn/build_and_install/gpu.md)
- [Build and install the CPU deployment environment](cn/build_and_install/cpu.md)
- [Build and install the IPU deployment environment](cn/build_and_install/ipu.md)
- [Build and install the KunlunXin XPU deployment environment](cn/build_and_install/xpu.md)
- [Build and install the Jetson deployment environment](cn/build_and_install/jetson.md)
- [Build and install the Android deployment environment](cn/build_and_install/android.md)
- [Build and install the serving deployment image](../serving/docs/zh_CN/compile.md)
@@ -19,7 +20,7 @@
- [Runtime Python usage example](cn/quick_start/runtime/python.md)
- [Runtime C++ usage example](cn/quick_start/runtime/cpp.md)
## API Documentation (in progress)
## API Documentation
- [Python API documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/)
- [C++ API documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/)

docs/README_EN.md Normal file → Executable file

@@ -8,6 +8,7 @@
- [Build and Install FastDeploy Library on GPU Platform](en/build_and_install/gpu.md)
- [Build and Install FastDeploy Library on CPU Platform](en/build_and_install/cpu.md)
- [Build and Install FastDeploy Library on IPU Platform](en/build_and_install/ipu.md)
- [Build and Install FastDeploy Library on KunlunXin XPU Platform](en/build_and_install/xpu.md)
- [Build and Install FastDeploy Library on Nvidia Jetson Platform](en/build_and_install/jetson.md)
- [Build and Install FastDeploy Library on Android Platform](en/build_and_install/android.md)
- [Build and Install FastDeploy Serving Deployment Image](../serving/docs/EN/compile-en.md)


@@ -12,6 +12,8 @@
- [Jetson deployment environment](jetson.md)
- [Android deployment environment](android.md)
- [Rockchip RV1126 deployment environment](rv1126.md)
- [Amlogic A311D deployment environment](a311d.md)
- [KunlunXin XPU deployment environment](xpu.md)
## FastDeploy Build Options
@@ -23,7 +25,10 @@
| ENABLE_LITE_BACKEND | OFF by default; whether to build with the Paddle Lite backend (set to ON when building the Android library) |
| ENABLE_RKNPU2_BACKEND | OFF by default; whether to build with the RKNPU2 backend (recommended ON on RK3588/RK3568/RK3566) |
| WITH_CANN | OFF by default; must be set to ON when deploying on Huawei Ascend NPU |
| WITH_CANN_PY | OFF by default; must be set to ON when deploying on Huawei Ascend NPU with Python |
| ENABLE_TIMVX | OFF by default; must be set to ON when deploying on RV1126/RV1109 |
| WITH_XPU | OFF by default; must be set to ON when deploying on KunlunXin XPU |
| WITH_TIMVX | OFF by default; must be set to ON when deploying on RV1126/RV1109/A311D |
| ENABLE_TRT_BACKEND | OFF by default; whether to build with the TensorRT backend (recommended ON on GPU) |
| ENABLE_OPENVINO_BACKEND | OFF by default; whether to build with the OpenVINO backend (recommended ON on CPU) |
| ENABLE_VISION | OFF by default; whether to build the vision model deployment module |


@@ -0,0 +1,108 @@
# Building and Installing the Deployment Environment for Amlogic A311D
FastDeploy supports inference deployment on the Amlogic NPU via the Paddle Lite backend.
For more details see: [Paddle Lite deployment guide](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html).
This document describes how to cross-compile the Paddle Lite based C++ FastDeploy library.
The relevant build options are:
|Option|Default|Description|Remark|
|:---|:---|:---|:---|
|ENABLE_LITE_BACKEND|OFF|Must be set to ON when building the A311D library| - |
|WITH_TIMVX|OFF|Must be set to ON when building the A311D library| - |
|TARGET_ABI|NONE|Must be set to arm64 when building the A311D library| - |
For more build options see the [FastDeploy build options](./README.md).
## Setting Up the Cross-Compilation Environment
### Host Requirements
- OS: Ubuntu == 16.04
- cmake version >= 3.10.0
### Environment Setup
Enter the FastDeploy/tools/timvx directory and install everything with one command:
```bash
cd FastDeploy/tools/timvx
bash install.sh
```
Or install the pieces manually with the commands below:
```bash
# 1. Install basic software
apt update
apt-get install -y --no-install-recommends \
gcc g++ git make wget python unzip
# 2. Install arm gcc toolchains
apt-get install -y --no-install-recommends \
g++-arm-linux-gnueabi gcc-arm-linux-gnueabi \
g++-arm-linux-gnueabihf gcc-arm-linux-gnueabihf \
gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
# 3. Install cmake 3.10 or above
wget -c https://mms-res.cdn.bcebos.com/cmake-3.10.3-Linux-x86_64.tar.gz && \
tar xzf cmake-3.10.3-Linux-x86_64.tar.gz && \
mv cmake-3.10.3-Linux-x86_64 /opt/cmake-3.10 && \
ln -s /opt/cmake-3.10/bin/cmake /usr/bin/cmake && \
ln -s /opt/cmake-3.10/bin/ccmake /usr/bin/ccmake
```
## Building the Paddle Lite Based FastDeploy Cross-Compilation Library
With the cross-compilation environment in place, build as follows:
```bash
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
# CMake configuration with A311D toolchain
cmake -DCMAKE_TOOLCHAIN_FILE=./../cmake/toolchain.cmake \
-DWITH_TIMVX=ON \
-DTARGET_ABI=arm64 \
-DCMAKE_INSTALL_PREFIX=fastdeploy-timvx \
-DENABLE_VISION=ON \ # optional: build the vision model deployment module
-Wno-dev ..
# Build FastDeploy A311D C++ SDK
make -j8
make install
```
When the build finishes, a fastdeploy-timvx directory is generated, containing the Paddle Lite (TIM-VX) based FastDeploy library.
## Preparing the Device Runtime
Before deploying, make sure the version of the Amlogic Linux kernel NPU driver galcore.so matches the target chip and the dependent libraries. Log in to the board and query the NPU driver version with the command below (Amlogic recommends driver version 6.4.4.3):
```bash
dmesg | grep Galcore
```
If the current version does not match, read the following carefully to get the underlying NPU driver environment right.
There are two ways to change the NPU driver version:
1. Replace the NPU driver manually. (Recommended)
2. Flash firmware that ships the required NPU driver version.
### Replacing the NPU Driver Manually
1. Download and extract the Paddle Lite demo, which ships ready-made driver files:
```bash
wget https://paddlelite-demo.bj.bcebos.com/devices/generic/PaddleLite-generic-demo.tar.gz
tar -xf PaddleLite-generic-demo.tar.gz
```
2. Check the Linux kernel version with `uname -a` and confirm it is 4.9.113, the version the driver below is built for.
3. Upload `galcore.ko` from the `PaddleLite-generic-demo/libs/PaddleLite/linux/arm64/lib/verisilicon_timvx/viv_sdk_6_4_4_3/lib/a311d/4.9.113` directory to the board.
4. Log in to the board, run `sudo rmmod galcore` to unload the original driver, then `sudo insmod galcore.ko` to load the uploaded one; see the sketch after this list. (Whether sudo is needed depends on the board; for devices attached via adb, run adb root first. If this step fails, fall back to method 2.)
5. On the board, run `dmesg | grep Galcore` and confirm the NPU driver version is 6.4.4.3.
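A minimal sketch of steps 3 to 5 over adb, assuming the demo archive was extracted on the host and the board permits adb root; adjust paths for your setup:

```bash
# Hypothetical adb session consolidating the manual driver swap.
adb root
adb push PaddleLite-generic-demo/libs/PaddleLite/linux/arm64/lib/verisilicon_timvx/viv_sdk_6_4_4_3/lib/a311d/4.9.113/galcore.ko /tmp/
adb shell "rmmod galcore && insmod /tmp/galcore.ko"
adb shell "dmesg | grep Galcore"   # expect driver version 6.4.4.3
```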
### Flashing
Depending on your exact board model, ask the board vendor or official support for firmware carrying the 6.4.4.3 NPU driver and for the flashing procedure.
For more details see: [Paddle Lite device environment preparation](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html#zhunbeishebeihuanjing)
## FastDeploy Deployment Examples on the A311D
1. Deploying PaddleClas classification models on the A311D: [PaddleClas classification C++ deployment example on A311D](../../../examples/vision/classification/paddleclas/a311d/README.md)
2. Deploying PPYOLOE detection models on the A311D: [PPYOLOE detection C++ deployment example on A311D](../../../examples/vision/detection/paddledetection/a311d/README.md)
3. Deploying YOLOv5 detection models on the A311D: [YOLOv5 detection C++ deployment example on A311D](../../../examples/vision/detection/yolov5/a311d/README.md)
4. Deploying PP-LiteSeg segmentation models on the A311D: [PP-LiteSeg segmentation C++ deployment example on A311D](../../../examples/vision/segmentation/paddleseg/a311d/README.md)


@@ -27,6 +27,11 @@ Building on Linux requires
- gcc/g++ >= 5.4 (8.2 recommended)
- cmake >= 3.18.0
In addition, developers are advised to install OpenCV themselves and point the build at it via `-DOPENCV_DIRECTORY`. If `-DOPENCV_DIRECTORY` is not set, a prebuilt OpenCV provided by FastDeploy is downloaded automatically, but on **Linux** it supports neither video reading nor GUI features such as imshow.
```
sudo apt-get install libopencv-dev
```
```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
@@ -36,6 +41,7 @@ cmake .. -DENABLE_ORT_BACKEND=ON \
-DENABLE_OPENVINO_BACKEND=ON \
-DCMAKE_INSTALL_PREFIX=${PWD}/compiled_fastdeploy_sdk \
-DENABLE_VISION=ON \
-DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \
-DENABLE_TEXT=ON
make -j12
make install
@@ -90,6 +96,8 @@ export ENABLE_PADDLE_BACKEND=ON
export ENABLE_OPENVINO_BACKEND=ON
export ENABLE_VISION=ON
export ENABLE_TEXT=ON
# OPENCV_DIRECTORY is optional; if unset, the prebuilt OpenCV provided by FastDeploy is downloaded automatically
export OPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
python setup.py build
python setup.py bdist_wheel
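After `bdist_wheel` finishes, the wheel lands under `dist`; a sketch of installing it, with the exact filename left as a glob since it depends on version and platform:

```bash
cd FastDeploy/python
pip install dist/fastdeploy*.whl   # wheel filename pattern assumed
```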


@@ -20,7 +20,7 @@ FastDeploy provides prebuilt libraries for each platform that developers can download and install directly.
### Python Installation
Install the release build (currently latest: 1.0.0)
Install the release build (currently latest: 1.0.1)
```bash
pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
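To pin the release rather than track the latest, the version can be given explicitly; this assumes the 1.0.1 wheels are published at the index above:

```bash
pip install fastdeploy-gpu-python==1.0.1 \
  -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```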
@@ -41,8 +41,8 @@ Release builds
| Platform | File | Notes |
| :--- | :--- | :---- |
| Linux x64 | [fastdeploy-linux-x64-gpu-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-1.0.0.tgz) | Built with g++ 8.2, CUDA 11.2, cuDNN 8.2 |
| Windows x64 | [fastdeploy-win-x64-gpu-1.0.0.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-1.0.0.zip) | Built with Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
| Linux x64 | [fastdeploy-linux-x64-gpu-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-1.0.1.tgz) | Built with g++ 8.2, CUDA 11.2, cuDNN 8.2 |
| Windows x64 | [fastdeploy-win-x64-gpu-1.0.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-1.0.1.zip) | Built with Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
Develop build (nightly build)
@@ -63,7 +63,7 @@ Develop build (nightly build)
### Python Installation
Install the release build (currently latest: 1.0.0)
Install the release build (currently latest: 1.0.1)
```bash
pip install fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
@@ -79,11 +79,11 @@ Release builds
| Platform | File | Notes |
| :--- | :--- | :---- |
| Linux x64 | [fastdeploy-linux-x64-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-1.0.0.tgz) | Built with g++ 8.2 |
| Windows x64 | [fastdeploy-win-x64-1.0.0.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-1.0.0.zip) | Built with Visual Studio 16 2019 |
| Mac OSX x64 | [fastdeploy-osx-x86_64-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-1.0.0.tgz) | Built with clang++ 10.0.0 |
| Mac OSX arm64 | [fastdeploy-osx-arm64-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-1.0.0.tgz) | Built with clang++ 13.0.0 |
| Linux aarch64 | [fastdeploy-osx-arm64-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-1.0.0.tgz) | Built with gcc 6.3 |
| Linux x64 | [fastdeploy-linux-x64-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-1.0.1.tgz) | Built with g++ 8.2 |
| Windows x64 | [fastdeploy-win-x64-1.0.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-1.0.1.zip) | Built with Visual Studio 16 2019 |
| Mac OSX x64 | [fastdeploy-osx-x86_64-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-1.0.1.tgz) | Built with clang++ 10.0.0 |
| Mac OSX arm64 | [fastdeploy-osx-arm64-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-1.0.1.tgz) | Built with clang++ 13.0.0 |
| Linux aarch64 | [fastdeploy-linux-aarch64-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-1.0.1.tgz) | Built with gcc 6.3 |
| Android armv7&v8 | [fastdeploy-android-1.0.0-shared.tgz](https://bj.bcebos.com/fastdeploy/release/android/fastdeploy-android-1.0.0-shared.tgz) | Built with NDK 25 and clang++; supports arm64-v8a and armeabi-v7a |
## Java SDK Installation


@@ -33,6 +33,11 @@ Building on Linux requires
- cuda >= 11.2
- cudnn >= 8.2
In addition, developers are advised to install OpenCV themselves and point the build at it via `-DOPENCV_DIRECTORY`. If `-DOPENCV_DIRECTORY` is not set, a prebuilt OpenCV provided by FastDeploy is downloaded automatically, but on **Linux** it supports neither video reading nor GUI features such as imshow.
```
sudo apt-get install libopencv-dev
```
```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
@@ -46,6 +51,7 @@ cmake .. -DENABLE_ORT_BACKEND=ON \
-DCUDA_DIRECTORY=/usr/local/cuda \
-DCMAKE_INSTALL_PREFIX=${PWD}/compiled_fastdeploy_sdk \
-DENABLE_VISION=ON \
-DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \
-DENABLE_TEXT=ON
make -j12
make install
@@ -115,6 +121,8 @@ export ENABLE_TRT_BACKEND=ON
export WITH_GPU=ON
export TRT_DIRECTORY=/Paddle/TensorRT-8.4.1.5
export CUDA_DIRECTORY=/usr/local/cuda
# OPENCV_DIRECTORY is optional; if unset, the prebuilt OpenCV provided by FastDeploy is downloaded during the build
export OPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
python setup.py build
python setup.py bdist_wheel


@@ -1,7 +1,7 @@
# Building the Deployment Library for Jetson
FastDeploy currently supports only two inference backends on Jetson: ONNX Runtime (CPU) and TensorRT (GPU)
FastDeploy currently supports three inference backends on Jetson: ONNX Runtime (CPU), TensorRT (GPU) and Paddle Inference
## Building and Installing the C++ SDK
@@ -10,12 +10,17 @@ FastDeploy currently supports only the ONNX Runtime (CPU) and TensorRT (GPU) backends on Jetson
- cmake >= 3.10.0
- jetpack >= 4.6.1
To integrate the Paddle Inference backend, download the Jetpack C++ package matching your environment from the [Paddle Inference prebuilt libraries](https://www.paddlepaddle.org.cn/inference/v2.4/guides/install/download_lib.html#c) page and extract it.
```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
cmake .. -DBUILD_ON_JETSON=ON \
-DENABLE_VISION=ON \
-DENABLE_PADDLE_BACKEND=ON \ # optional; turn off if the Paddle Inference backend is not needed
-DPADDLEINFERENCE_DIRECTORY=/Download/paddle_inference_jetson \
-DCMAKE_INSTALL_PREFIX=${PWD}/installed_fastdeploy
make -j8
make install
@@ -34,6 +39,8 @@ make install
Python packaging depends on `wheel`; run `pip install wheel` before building.
To integrate the Paddle Inference backend, download the Jetpack C++ package matching your environment from the [Paddle Inference prebuilt libraries](https://www.paddlepaddle.org.cn/inference/v2.4/guides/install/download_lib.html#c) page and extract it; a quick layout check follows below.
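A sanity check, reusing the example path from the C++ section above; the build expects `paddle/include` and `paddle/lib` inside the extracted package:

```bash
# Verify the extracted Jetpack package layout before building.
ls /Download/paddle_inference_jetson/paddle/include
ls /Download/paddle_inference_jetson/paddle/lib
```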
All build options are passed in through environment variables:
```bash
@@ -42,6 +49,10 @@ cd FastDeploy/python
export BUILD_ON_JETSON=ON
export ENABLE_VISION=ON
# ENABLE_PADDLE_BACKEND & PADDLEINFERENCE_DIRECTORY are optional
export ENABLE_PADDLE_BACKEND=ON
export PADDLEINFERENCE_DIRECTORY=/Download/paddle_inference_jetson
python setup.py build
python setup.py bdist_wheel
```


@@ -9,7 +9,8 @@ FastDeploy supports deployment on Rockchip SoCs via the Paddle Lite backend
|Option|Default|Description|Remark|
|:---|:---|:---|:---|
|ENABLE_LITE_BACKEND|OFF|Must be set to ON when building the RK library| - |
|ENABLE_TIMVX|OFF|Must be set to ON when building the RK library| - |
|WITH_TIMVX|OFF|Must be set to ON when building the RK library| - |
|TARGET_ABI|NONE|Must be set to armhf when building the RK library| - |
For more build options see the [FastDeploy build options](./README.md).
@@ -20,6 +21,12 @@ FastDeploy supports deployment on Rockchip SoCs via the Paddle Lite backend
- cmake version >= 3.10.0
### Environment Setup
Enter the FastDeploy/tools/timvx directory and install everything with one command:
```bash
cd FastDeploy/tools/timvx
bash install.sh
```
Or install the pieces manually with the commands below:
```bash
# 1. Install basic software
apt update
@@ -49,8 +56,9 @@ cd FastDeploy
mkdir build && cd build
# CMake configuration with RK toolchain
cmake -DCMAKE_TOOLCHAIN_FILE=./../cmake/timvx.cmake \
-DENABLE_TIMVX=ON \
cmake -DCMAKE_TOOLCHAIN_FILE=./../cmake/toolchain.cmake \
-DWITH_TIMVX=ON \
-DTARGET_ABI=armhf \
-DCMAKE_INSTALL_PREFIX=fastdeploy-tmivx \
-DENABLE_VISION=ON \ # optional: build the vision model deployment module
-Wno-dev ..
@@ -79,7 +87,7 @@ dmesg | grep Galcore
wget https://paddlelite-demo.bj.bcebos.com/devices/generic/PaddleLite-generic-demo.tar.gz
tar -xf PaddleLite-generic-demo.tar.gz
```
2. Check the Linux kernel version with `uname -a` and confirm it is 4.19.111,
2. Check the Linux kernel version with `uname -a` and confirm it is 4.19.111.
3. Upload `galcore.ko` from the `PaddleLite-generic-demo/libs/PaddleLite/linux/armhf/lib/verisilicon_timvx/viv_sdk_6_4_6_5/lib/1126/4.19.111/` directory to the board.
4. Log in to the board, run `sudo rmmod galcore` to unload the original driver, then `sudo insmod galcore.ko` to load the uploaded one. (Whether sudo is needed depends on the board; for devices attached via adb, run adb root first. If this step fails, fall back to method 2.)


@@ -0,0 +1,75 @@
# Building and Installing the Deployment Environment for KunlunXin XPU
FastDeploy supports inference deployment on KunlunXin XPU via the Paddle Lite backend.
For more details see: [Paddle Lite deployment guide](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/kunlunxin_xpu.html#xpu).
This document describes how to build the Paddle Lite based C++ FastDeploy library.
The relevant build options are:
|Option|Default|Description|Remark|
|:---|:---|:---|:---|
| WITH_XPU | OFF | Must be set to ON when deploying on XPU | - |
| ENABLE_ORT_BACKEND | OFF | Whether to build with the ONNX Runtime backend | - |
| ENABLE_PADDLE_BACKEND | OFF | Whether to build with the Paddle Inference backend | - |
| ENABLE_OPENVINO_BACKEND | OFF | Whether to build with the OpenVINO backend | - |
| ENABLE_VISION | OFF | Whether to build the vision model deployment module | - |
| ENABLE_TEXT | OFF | Whether to build the text (NLP) model deployment module | - |
Third-party dependency overrides (if the options below are not set, prebuilt libraries are downloaded automatically):
| Option | Description |
| :---------------------- | :--------------------------------------------------------------------------------------------- |
| ORT_DIRECTORY | With the ONNX Runtime backend enabled, points at a local ONNX Runtime library; if unset, it is downloaded during the build |
| OPENCV_DIRECTORY | With ENABLE_VISION=ON, points at a local OpenCV library; if unset, it is downloaded during the build |
| OPENVINO_DIRECTORY | With the OpenVINO backend enabled, points at a local OpenVINO library; if unset, it is downloaded during the build |
For more build options see the [FastDeploy build options](./README.md).
## Building the Paddle Lite Based C++ FastDeploy Library
- OS: Linux
- gcc/g++: version >= 8.2
- cmake: version >= 3.15
In addition, developers are advised to install OpenCV themselves and point the build at it via `-DOPENCV_DIRECTORY`. If `-DOPENCV_DIRECTORY` is not set, a prebuilt OpenCV provided by FastDeploy is downloaded automatically, but on **Linux** it supports neither video reading nor GUI features such as imshow.
```
sudo apt-get install libopencv-dev
```
Build commands:
```bash
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
# CMake configuration with KunlunXin xpu toolchain
cmake -DWITH_XPU=ON \
-DWITH_GPU=OFF \ # do not build GPU support
-DENABLE_ORT_BACKEND=ON \ # optional: enable the ONNX Runtime backend
-DENABLE_PADDLE_BACKEND=ON \ # optional: enable the Paddle Inference backend
-DCMAKE_INSTALL_PREFIX=fastdeploy-xpu \
-DENABLE_VISION=ON \ # optional: build the vision model deployment module
-DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \
..
# Build FastDeploy KunlunXin XPU C++ SDK
make -j8
make install
```
When the build finishes, a fastdeploy-xpu directory is generated, containing the Paddle Lite based FastDeploy library.
## Python Build
Build commands:
```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
export WITH_XPU=ON
export WITH_GPU=OFF
export ENABLE_ORT_BACKEND=ON
export ENABLE_PADDLE_BACKEND=ON
export ENABLE_VISION=ON
# OPENCV_DIRECTORY is optional; if unset, the prebuilt OpenCV provided by FastDeploy is downloaded automatically
export OPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
python setup.py build
python setup.py bdist_wheel
```
When the build completes, the `wheel` package is generated under `FastDeploy/python/dist` and can be installed directly with pip.
If you change build options, delete the `build` and `.setuptools-cmake-build` subdirectories under `FastDeploy/python` before rebuilding to avoid stale caches, as sketched below.
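A minimal sketch of that clean rebuild, assuming the default dist layout:

```bash
cd FastDeploy/python
rm -rf build .setuptools-cmake-build   # drop stale build caches
python setup.py build && python setup.py bdist_wheel
pip install dist/fastdeploy*.whl       # wheel filename pattern assumed
```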


@@ -14,19 +14,19 @@ ONNX models cannot directly drive the NPU in RK chips; the ONNX model needs to be
| Task | Model | Model version (tested versions) | ARM CPU/RKNN latency (ms) |
|------------------|-------------------|-------------------------------|--------------------|
| Detection | Picodet | Picodet-s | 599/136 |
| Detection | Picodet | Picodet-s | 162/112 |
| Detection | RKYOLOV5 | YOLOV5-S-Relu(int8) | -/57 |
| Segmentation | Unet | Unet-cityscapes | -/- |
| Segmentation | PP-LiteSeg | PP_LiteSeg_T_STDC1_cityscapes | 6634/5598 |
| Segmentation | PP-HumanSegV2Lite | portrait | 456/266 |
| Segmentation | PP-HumanSegV2Lite | human | 496/256 |
| Face Detection | SCRFD | SCRFD-2.5G-kps-640 | 963/142 |
| Segmentation | PP-LiteSeg | PP_LiteSeg_T_STDC1_cityscapes | -/- |
| Segmentation | PP-HumanSegV2Lite | portrait | 53/50 |
| Segmentation | PP-HumanSegV2Lite | human | 53/50 |
| Face Detection | SCRFD | SCRFD-2.5G-kps-640 | 112/108 |
## TODO
Below is the TODO list: models whose support is being prepared but that still have issues or room for improvement.
| Task | Model | Model version (tested versions) | ARM CPU/RKNN latency (ms) |
|------------------|---------|---------------------|--------------------|
| Detection | Picodet | Picodet-s(int8) | -/- |
| Detection | PPYOLOE | PPYOLOE(int8) | -/- |
| Detection | YOLOv5 | YOLOv5-s_v6.2(int8) | -/- |
| Face Recognition | ArcFace | ArcFace_r18 | 600/3 |


@@ -1,3 +1,6 @@
[English](../../en/faq/use_sdk_on_linux.md) | 中文
# C++ Deployment on Linux
1. The build succeeds, but at runtime a .so file cannot be found


@@ -1,3 +1,5 @@
[English](../../en/faq/use_sdk_on_windows.md) | 中文
# Using the FastDeploy C++ SDK on Windows
## Contents


@@ -0,0 +1,334 @@
[English](../../README_EN.md) | [简体中文](../../README_CN.md) | [हिन्दी](./README_हिन्दी.md) | [日本語](./README_日本語.md) | [한국인](./README_한국어.md) | Pу́сский язы́к
![FastDeploy](https://user-images.githubusercontent.com/31974251/185771818-5d4423cd-c94c-4a49-9894-bc7a8d1c29d0.png)
</p>
<p align="center">
<a href="./LICENSE"><img src="https://img.shields.io/badge/license-Apache%202-dfd.svg"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/releases"><img src="https://img.shields.io/github/v/release/PaddlePaddle/FastDeploy?color=ffa"></a>
<a href=""><img src="https://img.shields.io/badge/python-3.7+-aff.svg"></a>
<a href=""><img src="https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-pink.svg"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/graphs/contributors"><img src="https://img.shields.io/github/contributors/PaddlePaddle/FastDeploy?color=9ea"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/commits"><img src="https://img.shields.io/github/commit-activity/m/PaddlePaddle/FastDeploy?color=3af"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/issues"><img src="https://img.shields.io/github/issues/PaddlePaddle/FastDeploy?color=9cc"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/stargazers"><img src="https://img.shields.io/github/stars/PaddlePaddle/FastDeploy?color=ccf"></a>
</p>
<p align="center">
<a href="/docs/cn/build_and_install"><b> Установка </b></a>
|
<a href="docs/README_CN.md"><b> Использование документации </b></a>
|
<a href="https://baidu-paddle.github.io/fastdeploy-api/"><b> API документация </b></a>
|
<a href="https://github.com/PaddlePaddle/FastDeploy/releases"><b> Журнал обновления </b></a>
</p>
**FastDeploy** - это **всесценарный**, **простой в использовании и гибкий**, **чрезвычайно эффективный** инструмент развертывания выводов ИИ. Он обеспечивает 📦 **из коробки** опыт развертывания с поддержкой более 🔥150+ **текстовых**, **зрительных**, **речевых** и **кросс-модальных** моделей и 🔚 **сквозной** оптимизацией производительности вывода. Сюда входят классификация изображений, обнаружение объектов, сегментация изображений, обнаружение лиц, распознавание лиц, обнаружение ключевых точек, распознавание ключей, OCR, NLP, TTS и другие задачи для удовлетворения потребностей разработчиков с **многосценическими, многоаппаратными, многоплатформенными** промышленными развертываниями.
| [Image Classification](examples/vision/classification) | [Object Detection](examples/vision/detection) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Potrait Segmentation](examples/vision/segmentation/paddleseg) |
|:----------------------------------------------------------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
| <img src='https://user-images.githubusercontent.com/54695910/200465949-da478e1b-21ce-43b8-9f3f-287460e786bd.png' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054680-2f8d1952-c120-4b67-88fc-7d2d7d2378b4.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054711-6119f0e7-d741-43b1-b273-9493d103d49f.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054718-6395321c-8937-4fa0-881c-5b20deb92aaa.gif' height="126px" width="190px"> |
| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) | [**Face Alignment**](examples/vision/facealign) |
| <img src='https://user-images.githubusercontent.com/54695910/188058231-a5fe1ce1-0a38-460f-9582-e0b881514908.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054691-e4cb1a70-09fe-4691-bc62-5552d50bd853.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054669-a85996ba-f7f3-4646-ae1f-3b7e3e353e7d.gif' height="126px" width="190px" > | <img src='https://user-images.githubusercontent.com/54695910/188059460-9845e717-c30a-4252-bd80-b7f6d4cf30cb.png' height="126px" width="190px"> |
| [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) | [**Speech**](examples/audio/pp-tts) |
| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input** 早上好今天是2020<br>/10/29最低温度是-3°C。<br><br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="170" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)</p> |
## **Обмен сообществами**
* **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas
* **WeChat**: Отсканируйте QR-код и заполните анкету, чтобы присоединиться к техническому сообществу и обсудить болевые точки развертывания и решения с разработчиками сообщества
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/200145290-d5565d18-6707-4a0b-a9af-85fd36d35d13.jpg" width = "220" height = "220" />
</div>
## Каталог
* **🖥️Развертывание на стороне сервера**
* [Python SDK Quick Start](#fastdeploy-quick-start-python)
* [C++ SDK Quick Start](#fastdeploy-quick-start-cpp)
* [Список поддержки моделей на стороне сервера](#fastdeploy-server-models)
* **📲 Мобильные и конечные развертывания**
* [Список поддержки конечных моделей](#fastdeploy-edge-models)
* **🌐 Развертывание веб и апплетов**
* [Список поддержки веб-моделей](#fastdeploy-web-models)
* [Acknowledge](#fastdeploy-acknowledge)
* [License](#fastdeploy-license)
## 🖥️ Развертывание на стороне сервера
<div id="fastdeploy-quick-start-python"></div>
<details close>
<summary><b> Быстрый старт Python SDK (нажмите для получения подробной информации)</b></summary><div>
#### Быстрая установка
##### Предварительные зависимости
- CUDA >= 11.2, cuDNN >= 8.0, Python >= 3.6
- OS: Linux x86_64/macOS/Windows 10
##### Установка версии GPU
```bash
pip install numpy opencv-python fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
##### [установка Conda (рекомендуется)](docs/cn/build_and_install/download_prebuilt_libraries.md)
```bash
conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2
```
##### Установка CPU процессора
```bash
pip install numpy opencv-python fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
#### Пример умозаключения в Python
* Подготовка модели и изображений
```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
* Проверка результатов вывода
```python
# GPU/TensorRT Справочник по развертыванию examples/vision/detection/paddledetection/python
import cv2
import fastdeploy.vision as vision
model = vision.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
"ppyoloe_crn_l_300e_coco/model.pdiparams",
"ppyoloe_crn_l_300e_coco/infer_cfg.yml")
im = cv2.imread("000000014439.jpg")
result = model.predict(im.copy())
print(result)
vis_im = vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("vis_image.jpg", vis_im)
```
</div></details>
<div id="fastdeploy-quick-start-cpp"></div>
<details close>
<summary><b> C++ SDK Quick Start (нажмите для получения подробной информации)</b></summary><div>
#### Установка
Обратитесь к документации [C++ prebuilt libraries download](docs/cn/build_and_install/download_prebuilt_libraries.md)
#### Пример вывода в C++
* Подготовка моделей и фотографий
```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
* Результаты рассуждений при тестировании
```C++
// GPU/TensorRT Ссылка на развертывание examples/vision/detection/paddledetection/cpp
#include "fastdeploy/vision.h"
int main(int argc, char* argv[]) {
namespace vision = fastdeploy::vision;
auto model = vision::detection::PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
"ppyoloe_crn_l_300e_coco/model.pdiparams",
"ppyoloe_crn_l_300e_coco/infer_cfg.yml");
auto im = cv::imread("000000014439.jpg");
vision::DetectionResult res;
model.Predict(&im, &res);
auto vis_im = vision::Visualize::VisDetection(im, res, 0.5);
cv::imwrite("vis_image.jpg", vis_im);
return 0;
}
```
</div></details>
Дополнительные примеры развертывания см. в разделе [Примеры развертывания модели](examples).
<div id="fastdeploy-server-models"></div>
### Список поддержки моделей на стороне сервера🔥🔥🔥🔥🔥
Описание символов: (1) ✅: Уже поддерживается; (2) ❔:Текущий; (3) N/A:В настоящее время не поддерживается;<br>
<details open><summary><b> Список поддержки моделей на стороне сервера (нажмите, чтобы уменьшить)</b></summary><div>
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/198620704-741523c1-dec7-44e5-9f2b-29ddd9997344.png"/>
</div>
| Сценарии миссий | Модели | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux |
|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:---------------:|:-------------:|:-------------:|:-------:|
| --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | Phytium D2000CPU | NVIDIA Jetson | Graphcore IPU | Serving |
| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [TorchVison/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [ltralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/SqueeezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_ort_](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Single430FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | [PeterL1n/RobustVideoMatting]() | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | |
| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ |
| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ✅ |
</div></details>
<div id="fastdeploy-edge-doc"></div>
## 📲 Мобильное и конечное развертывание 🔥🔥🔥🔥
<div id="fastdeploy-edge-models"></div>
### Список поддержки конечных моделей
<details open><summary><b> Список поддержки конечных моделей (нажмите, чтобы уменьшить)</b></summary><div>
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/198620704-741523c1-dec7-44e5-9f2b-29ddd9997344.png" />
</div>
| Сценарии миссий | Модели | Размер(MB) | Linux | Android | Linux | Linux | Linux | Linux | Linux | TBD... |
|:------------------:|:-----------------------------------------------------------------------------------------:|:--------:|:-------:|:-------:|:-------:|:-----------------------:|:------------------------------:|:---------------------------:|:--------------------------------:|:-------:|
| --- | --- | --- | ARM CPU | ARM CPU | Rockchip-NPU<br>RK3568/RK3588 | Rockchip-NPU<br>RV1109/RV1126/RK1808 | Amlogic-NPU <br>A311D/S905D/C308X | NXP-NPU<br>i.MX&nbsp;8M&nbsp;Plus | TBD... |
| Classification | [PaddleClas/ResNet50](examples/vision/classification/paddleclas) | 98 | ✅ | ✅ | ❔ | ✅ | | | |
| Classification | [PaddleClas/PP-LCNet](examples/vision/classification/paddleclas) | 11.9 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-LCNetv2](examples/vision/classification/paddleclas) | 26.6 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/EfficientNet](examples/vision/classification/paddleclas) | 31.4 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/GhostNet](examples/vision/classification/paddleclas) | 20.8 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV1](examples/vision/classification/paddleclas) | 17 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV2](examples/vision/classification/paddleclas) | 14.2 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV3](examples/vision/classification/paddleclas) | 22 | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | -- |
| Classification | [PaddleClas/ShuffleNetV2](examples/vision/classification/paddleclas) | 9.2 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/SqueezeNetV1.1](examples/vision/classification/paddleclas) | 5 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/Inceptionv3](examples/vision/classification/paddleclas) | 95.5 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-HGNet](examples/vision/classification/paddleclas) | 59 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Detection | [PaddleDetection/PP-PicoDet_s_320_coco_lcnet](examples/vision/detection/paddledetection) | 4.9 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -- |
| Face Detection | [deepinsight/SCRFD](./examples/vision/facedet/scrfd) | 2.5 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Keypoint Detection | [PaddleDetection/PP-TinyPose](examples/vision/keypointdetection/tiny_pose) | 5.5 | ✅ | ✅ | ❔ | ❔ | ❔ | ❔ | -- |
| Segmentation | [PaddleSeg/PP-LiteSeg(STDC1)](examples/vision/segmentation/paddleseg) | 32.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/PP-HumanSeg-Lite](examples/vision/segmentation/paddleseg) | 0.556 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/HRNet-w18](examples/vision/segmentation/paddleseg) | 38.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/PP-HumanSeg](examples/vision/segmentation/paddleseg) | 107.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/Unet](examples/vision/segmentation/paddleseg) | 53.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/Deeplabv3](examples/vision/segmentation/paddleseg) | 150 | ❔ | ✅ | ✅ | | | | |
| OCR | [PaddleOCR/PP-OCRv2](examples/vision/ocr/PP-OCRv2) | 2.3+4.4 | ✅ | ✅ | ❔ | -- | -- | -- | -- |
| OCR | [PaddleOCR/PP-OCRv3](examples/vision/ocr/PP-OCRv3) | 2.4+10.6 | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | -- |
</div></details>
## 🌐 Развертывание веб и апплетов
<div id="fastdeploy-web-models"></div>
<details open><summary><b> Список поддержки развертывания веб-приложений и апплетов (нажмите, чтобы уменьшить)</b></summary><div>
| Сценарии миссий | Модели | [web_demo](examples/application/js/web_demo) |
|:------------------:|:-------------------------------------------------------------------------------------------:|:--------------------------------------------:|
| --- | --- | [Paddle.js](examples/application/js) |
| Detection | [FaceDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ |
| Detection | [ScrewDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ |
| Segmentation | [PaddleSeg/HumanSeg](./examples/application/js/web_demo/src/pages/cv/segmentation/HumanSeg) | ✅ |
| Object Recognition | [GestureRecognition](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ |
| Object Recognition | [ItemIdentification](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/application/js/web_demo/src/pages/cv/ocr) | ✅ |
</div></details>
<div id="fastdeploy-acknowledge"></div>
## Acknowledge
Для создания и загрузки SDK в этом проекте используются бесплатные и открытые возможности в [EasyEdge](https://ai.baidu.com/easyedge/app/openSource), за что мы хотели бы поблагодарить вас.
## License
<div id="fastdeploy-license"></div>
FastDeploy следует [протоколу Apache-2.0 с открытым исходным кодом](./LICENSE).


@@ -0,0 +1,334 @@
[English](../../README_EN.md) | [简体中文](../../README_CN.md) | हिन्दी | [日本語](./README_日本語.md) | [한국인](./README_한국어.md) | [Pу́сский язы́к](./README_Ру́сский_язы́к.md)
![FastDeploy](https://user-images.githubusercontent.com/31974251/185771818-5d4423cd-c94c-4a49-9894-bc7a8d1c29d0.png)
</p>
<p align="center">
<a href="./LICENSE"><img src="https://img.shields.io/badge/license-Apache%202-dfd.svg"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/releases"><img src="https://img.shields.io/github/v/release/PaddlePaddle/FastDeploy?color=ffa"></a>
<a href=""><img src="https://img.shields.io/badge/python-3.7+-aff.svg"></a>
<a href=""><img src="https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-pink.svg"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/graphs/contributors"><img src="https://img.shields.io/github/contributors/PaddlePaddle/FastDeploy?color=9ea"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/commits"><img src="https://img.shields.io/github/commit-activity/m/PaddlePaddle/FastDeploy?color=3af"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/issues"><img src="https://img.shields.io/github/issues/PaddlePaddle/FastDeploy?color=9cc"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/stargazers"><img src="https://img.shields.io/github/stars/PaddlePaddle/FastDeploy?color=ccf"></a>
</p>
<p align="center">
<a href="/docs/cn/build_and_install"><b> संस्थापन </b></a>
|
<a href="docs/README_CN.md"><b> दस्तावेज़ीकरण का उपयोग करें </b></a>
|
<a href="https://baidu-paddle.github.io/fastdeploy-api/"><b> APIप्रलेखन </b></a>
|
<a href="https://github.com/PaddlePaddle/FastDeploy/releases"><b> चेंजलॉग </b></a>
</p>
**⚡️फास्टडिप्लोय** एक एआई अनुमान तैनाती उपकरण है जो **सभी परिदृश्य**, **उपयोग करने में आसान और लचीला** और **बेहद कुशल** है। एक 📦 **आउट-ऑफ-द-बॉक्स** **क्लाउड-एज** परिनियोजन अनुभव प्रदान करता है, 🔥150+ से अधिक **टेक्स्ट**, **विजन**, **स्पीच** और **क्रॉस-मोडल** मॉडल का समर्थन करता है, और 🔚 **एंड-टू-एंड** अनुमान प्रदर्शन अनुकूलन को लागू करता है। डेवलपर्स की जरूरतों को पूरा करने के लिए छवि वर्गीकरण, ऑब्जेक्ट डिटेक्शन, छवि विभाजन, चेहरे का पता लगाने, चेहरे की पहचान, मुख्य बिंदु का पता लगाने, कटआउट, ओसीआर, एनएलपी, टीटीएस और अन्य कार्यों सहित **बहु-परिदृश्य, बहु-हार्डवेयर, बहु-मंच** उद्योग की तैनाती की जरूरत है।
| [Image Classification](examples/vision/classification) | [Object Detection](examples/vision/detection) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Potrait Segmentation](examples/vision/segmentation/paddleseg) |
|:----------------------------------------------------------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
| <img src='https://user-images.githubusercontent.com/54695910/200465949-da478e1b-21ce-43b8-9f3f-287460e786bd.png' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054680-2f8d1952-c120-4b67-88fc-7d2d7d2378b4.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054711-6119f0e7-d741-43b1-b273-9493d103d49f.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054718-6395321c-8937-4fa0-881c-5b20deb92aaa.gif' height="126px" width="190px"> |
| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) | [**Face Alignment**](examples/vision/facealign) |
| <img src='https://user-images.githubusercontent.com/54695910/188058231-a5fe1ce1-0a38-460f-9582-e0b881514908.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054691-e4cb1a70-09fe-4691-bc62-5552d50bd853.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054669-a85996ba-f7f3-4646-ae1f-3b7e3e353e7d.gif' height="126px" width="190px" > | <img src='https://user-images.githubusercontent.com/54695910/188059460-9845e717-c30a-4252-bd80-b7f6d4cf30cb.png' height="126px" width="190px"> |
| [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) | [**Speech**](examples/audio/pp-tts) |
| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input** 早上好今天是2020<br>/10/29最低温度是-3°C。<br><br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="170" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)</p> |
## **सामुदायिक संचार**
* **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas
* **वीचैट**: क्यूआर कोड स्कैन करें, तकनीकी समुदाय में शामिल होने के लिए प्रश्नावली भरें, और सामुदायिक डेवलपर्स के साथ तैनाती के दर्द बिंदुओं और समाधानों पर चर्चा करें
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/200145290-d5565d18-6707-4a0b-a9af-85fd36d35d13.jpg" width = "220" height = "220" />
</div>
## डायरेक्टरी
* **🖥️ सर्वर-साइड परिनियोजन**
* [Python SDK एक त्वरित शुरुआत है](#fastdeploy-quick-start-python)
* [C++ SDK एक त्वरित शुरुआत है](#fastdeploy-quick-start-cpp)
* [सर्वर-साइड मॉडल समर्थन सूची](#fastdeploy-server-models)
* **📲 मोबाइल और एंड-साइड परिनियोजन**
* [एंड-साइड मॉडल समर्थन सूची](#fastdeploy-edge-models)
* **🌐 Web और एप्लेट तैनाती**
* [Web-साइड मॉडल समर्थन सूची](#fastdeploy-web-models)
* [Acknowledge](#fastdeploy-acknowledge)
* [License](#fastdeploy-license)
## 🖥️ सर्वर-साइड परिनियोजन
<div id="fastdeploy-quick-start-python"></div>
<details close>
<summary><b>पायथन एसडीके क्विक स्टार्ट (विवरण के लिए यहां क्लिक करें)</b></summary><div>
#### त्वरित स्थापना
##### पूर्व-निर्भरता
- CUDA >= 11.2, cuDNN >= 8.0, Python >= 3.6
- OS: Linux x86_64/macOS/Windows 10
##### GPU संस्करण स्थापित करें
```bash
pip install numpy opencv-python fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
##### [Conda स्थापना (अनुशंसित) ](docs/cn/build_and_install/download_prebuilt_libraries.md)
```bash
conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2
```
##### CPU संस्करण स्थापित करें
```bash
pip install numpy opencv-python fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
#### Python अनुमान उदाहरण
* मॉडल और चित्र तैयार करें
```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
* परीक्षण अनुमान परिणाम
```python
# GPU/TensorRT तैनाती संदर्भ examples/vision/detection/paddledetection/python
import cv2
import fastdeploy.vision as vision
model = vision.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
"ppyoloe_crn_l_300e_coco/model.pdiparams",
"ppyoloe_crn_l_300e_coco/infer_cfg.yml")
im = cv2.imread("000000014439.jpg")
result = model.predict(im.copy())
print(result)
vis_im = vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("vis_image.jpg", vis_im)
```
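The comment at the top of the example points to the GPU/TensorRT deployment examples; the usual pattern there is to pass a `RuntimeOption` when constructing the model. The sketch below is a minimal illustration of that pattern rather than a verbatim copy of the example code, so treat the exact option names as assumptions to verify against examples/vision/detection/paddledetection/python:
```python
# Minimal sketch (assumed API, verify against
# examples/vision/detection/paddledetection/python): run the same
# PP-YOLOE model on GPU, optionally through the TensorRT backend.
import fastdeploy as fd
import fastdeploy.vision as vision

option = fd.RuntimeOption()
option.use_gpu(0)          # select GPU device 0
option.use_trt_backend()   # optional: switch the inference backend to TensorRT

model = vision.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
                                 "ppyoloe_crn_l_300e_coco/model.pdiparams",
                                 "ppyoloe_crn_l_300e_coco/infer_cfg.yml",
                                 runtime_option=option)
```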
</div></details>
<div id="fastdeploy-quick-start-cpp"></div>
<details close>
<summary><b>C++ SDK Quick Start (click for details)</b></summary><div>
#### Installation
- See the [C++ precompiled library download](docs/cn/build_and_install/download_prebuilt_libraries.md) documentation
#### C++ Inference Example
* Prepare the model and image
```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
* Test the inference result
```C++
// For GPU/TensorRT deployment, see examples/vision/detection/paddledetection/cpp
#include "fastdeploy/vision.h"
int main(int argc, char* argv[]) {
namespace vision = fastdeploy::vision;
auto model = vision::detection::PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
"ppyoloe_crn_l_300e_coco/model.pdiparams",
"ppyoloe_crn_l_300e_coco/infer_cfg.yml");
auto im = cv::imread("000000014439.jpg");
vision::DetectionResult res;
model.Predict(&im, &res);
auto vis_im = vision::Visualize::VisDetection(im, res, 0.5);
cv::imwrite("vis_image.jpg", vis_im);
return 0;
}
```
</div></details>
For more deployment examples, please refer to the [model deployment examples](examples).
<div id="fastdeploy-server-models"></div>
### Server-Side Model Support List 🔥🔥🔥🔥🔥
Symbol legend: (1) ✅: supported; (2) ❔: in progress; (3) N/A: not supported; <br>
<details open><summary><b>Server-Side Model Support List (click to collapse)</b></summary><div>
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/198620704-741523c1-dec7-44e5-9f2b-29ddd9997344.png"/>
</div>
| Task Scenario | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux |
|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:---------------:|:-------------:|:-------------:|:-------:|
| --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | Phytium D2000 CPU | NVIDIA Jetson | Graphcore IPU | Serving |
| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_ort_](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Single430FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | PeterL1n/RobustVideoMatting | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | |
| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ |
| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ✅ |
</div></details>
<div id="fastdeploy-edge-doc"></div>
## 📲 Mobile and Edge Deployment 🔥🔥🔥🔥
<div id="fastdeploy-edge-models"></div>
### Edge Model Support List
<details open><summary><b>Edge Model Support List (click to collapse)</b></summary><div>
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/198620704-741523c1-dec7-44e5-9f2b-29ddd9997344.png" />
</div>
| Task Scenario | Model | Size (MB) | Linux | Android | Linux | Linux | Linux | Linux | TBD... |
|:------------------:|:-----------------------------------------------------------------------------------------:|:--------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|
| --- | --- | --- | ARM CPU | ARM CPU | Rockchip-NPU<br>RK3568/RK3588 | Rockchip-NPU<br>RV1109/RV1126/RK1808 | Amlogic-NPU <br>A311D/S905D/C308X | NXP-NPU<br>i.MX&nbsp;8M&nbsp;Plus | TBD... |
| Classification | [PaddleClas/ResNet50](examples/vision/classification/paddleclas) | 98 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-LCNet](examples/vision/classification/paddleclas) | 11.9 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-LCNetv2](examples/vision/classification/paddleclas) | 26.6 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/EfficientNet](examples/vision/classification/paddleclas) | 31.4 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/GhostNet](examples/vision/classification/paddleclas) | 20.8 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV1](examples/vision/classification/paddleclas) | 17 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV2](examples/vision/classification/paddleclas) | 14.2 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV3](examples/vision/classification/paddleclas) | 22 | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | -- |
| Classification | [PaddleClas/ShuffleNetV2](examples/vision/classification/paddleclas) | 9.2 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/SqueezeNetV1.1](examples/vision/classification/paddleclas) | 5 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/Inceptionv3](examples/vision/classification/paddleclas) | 95.5 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-HGNet](examples/vision/classification/paddleclas) | 59 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Detection | [PaddleDetection/PP-PicoDet_s_320_coco_lcnet](examples/vision/detection/paddledetection) | 4.9 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -- |
| Face Detection | [deepinsight/SCRFD](./examples/vision/facedet/scrfd) | 2.5 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Keypoint Detection | [PaddleDetection/PP-TinyPose](examples/vision/keypointdetection/tiny_pose) | 5.5 | ✅ | ✅ | ❔ | ❔ | ❔ | ❔ | -- |
| Segmentation | [PaddleSeg/PP-LiteSeg(STDC1)](examples/vision/segmentation/paddleseg) | 32.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/PP-HumanSeg-Lite](examples/vision/segmentation/paddleseg) | 0.556 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/HRNet-w18](examples/vision/segmentation/paddleseg) | 38.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/PP-HumanSeg](examples/vision/segmentation/paddleseg) | 107.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/Unet](examples/vision/segmentation/paddleseg) | 53.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/Deeplabv3](examples/vision/segmentation/paddleseg) | 150 | ❔ | ✅ | ✅ | -- | -- | -- | -- |
| OCR | [PaddleOCR/PP-OCRv2](examples/vision/ocr/PP-OCRv2) | 2.3+4.4 | ✅ | ✅ | ❔ | -- | -- | -- | -- |
| OCR | [PaddleOCR/PP-OCRv3](examples/vision/ocr/PP-OCRv3) | 2.4+10.6 | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | -- |
</div></details>
## 🌐 Web and Applet Deployment
<div id="fastdeploy-web-models"></div>
<details open><summary><b>Web and Mini Program Deployment Support List (click to collapse)</b></summary><div>
| Task Scenario | Model | [web_demo](examples/application/js/web_demo) |
|:------------------:|:-------------------------------------------------------------------------------------------:|:--------------------------------------------:|
| --- | --- | [Paddle.js](examples/application/js) |
| Detection | [FaceDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ |
| Detection | [ScrewDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ |
| Segmentation | [PaddleSeg/HumanSeg](./examples/application/js/web_demo/src/pages/cv/segmentation/HumanSeg) | ✅ |
| Object Recognition | [GestureRecognition](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ |
| Object Recognition | [ItemIdentification](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/application/js/web_demo/src/pages/cv/ocr) | ✅ |
</div></details>
<div id="fastdeploy-acknowledge"></div>
## Acknowledge
This project's SDK generation and download use the free and open capabilities of [EasyEdge](https://ai.baidu.com/easyedge/app/openSource), for which we are grateful.
## License
<div id="fastdeploy-license"></div>
FastDeploy is licensed under the [Apache-2.0 open source license](./LICENSE).
View File

@@ -0,0 +1,334 @@
[English](../../README_EN.md) | [简体中文](../../README_CN.md) | [हिन्दी](./README_हिन्दी.md) | 日本語 | [한국인](./README_한국어.md) | [Pу́сский язы́к](.//README_Ру́сский_язы́к.md)
![FastDeploy](https://user-images.githubusercontent.com/31974251/185771818-5d4423cd-c94c-4a49-9894-bc7a8d1c29d0.png)
</p>
<p align="center">
<a href="./LICENSE"><img src="https://img.shields.io/badge/license-Apache%202-dfd.svg"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/releases"><img src="https://img.shields.io/github/v/release/PaddlePaddle/FastDeploy?color=ffa"></a>
<a href=""><img src="https://img.shields.io/badge/python-3.7+-aff.svg"></a>
<a href=""><img src="https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-pink.svg"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/graphs/contributors"><img src="https://img.shields.io/github/contributors/PaddlePaddle/FastDeploy?color=9ea"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/commits"><img src="https://img.shields.io/github/commit-activity/m/PaddlePaddle/FastDeploy?color=3af"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/issues"><img src="https://img.shields.io/github/issues/PaddlePaddle/FastDeploy?color=9cc"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/stargazers"><img src="https://img.shields.io/github/stars/PaddlePaddle/FastDeploy?color=ccf"></a>
</p>
<p align="center">
<a href="/docs/cn/build_and_install"><b> インストール </b></a>
|
<a href="docs/README_CN.md"><b> ドキュメント </b></a>
|
<a href="https://baidu-paddle.github.io/fastdeploy-api/"><b> APIドキュメンテーション </b></a>
|
<a href="https://github.com/PaddlePaddle/FastDeploy/releases"><b> Changelog </b></a>
</p>
**FastDeploy** is an **easy-to-use**, **flexible and highly efficient** AI inference deployment tool for **all scenarios**. It supports 🔥 more than 150 **text**, **vision**, **speech**, and 🔚 cross-modal models, provides an out-of-the-box cloud-edge deployment experience, and enables end-to-end optimization of inference performance. Covered tasks include image classification, object detection, image segmentation, face detection, face recognition, keypoint detection, matting, OCR, NLP, and TTS, meeting developers' needs for **multi-scenario**, **multi-hardware**, and **multi-platform** industrial deployment.
| [Image Classification](examples/vision/classification) | [Object Detection](examples/vision/detection) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Portrait Segmentation](examples/vision/segmentation/paddleseg) |
|:----------------------------------------------------------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
| <img src='https://user-images.githubusercontent.com/54695910/200465949-da478e1b-21ce-43b8-9f3f-287460e786bd.png' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054680-2f8d1952-c120-4b67-88fc-7d2d7d2378b4.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054711-6119f0e7-d741-43b1-b273-9493d103d49f.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054718-6395321c-8937-4fa0-881c-5b20deb92aaa.gif' height="126px" width="190px"> |
| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) | [**Face Alignment**](examples/vision/facealign) |
| <img src='https://user-images.githubusercontent.com/54695910/188058231-a5fe1ce1-0a38-460f-9582-e0b881514908.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054691-e4cb1a70-09fe-4691-bc62-5552d50bd853.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054669-a85996ba-f7f3-4646-ae1f-3b7e3e353e7d.gif' height="126px" width="190px" > | <img src='https://user-images.githubusercontent.com/54695910/188059460-9845e717-c30a-4252-bd80-b7f6d4cf30cb.png' height="126px" width="190px"> |
| [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) | [**Speech**](examples/audio/pp-tts) |
| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input** 早上好今天是2020<br>/10/29最低温度是-3°C。<br><br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="170" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)</p> |
## **Community Communication**
* **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas
* **WeChat**: Scan the QR code and fill in the questionnaire to join the technical community, where you can discuss deployment pain points and solutions with community developers
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/200145290-d5565d18-6707-4a0b-a9af-85fd36d35d13.jpg" width = "220" height = "220" />
</div>
## Contents
* **🖥️ Server-Side Deployment**
* [Python SDK Quick Start](#fastdeploy-quick-start-python)
* [C++ SDK Quick Start](#fastdeploy-quick-start-cpp)
* [Server-Side Model Support List](#fastdeploy-server-models)
* **📲 Mobile and Edge Deployment**
* [Edge Model Support List](#fastdeploy-edge-models)
* **🌐 Web and Applet Deployment**
* [Web-Side Model Support List](#fastdeploy-web-models)
* [Acknowledge](#fastdeploy-acknowledge)
* [License](#fastdeploy-license)
## 🖥️ Server-Side Deployment
<div id="fastdeploy-quick-start-python"></div>
<details close>
<summary><b>Python SDK Quick Start (click for details)</b></summary><div>
#### Quick Installation
##### Prerequisites
- CUDA >= 11.2, cuDNN >= 8.0, Python >= 3.6
- OS: Linux x86_64/macOS/Windows 10
##### Install the GPU Version
```bash
pip install numpy opencv-python fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
##### [Conda Installation (Recommended)](docs/cn/build_and_install/download_prebuilt_libraries.md)
```bash
conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2
```
##### Install the CPU Version
```bash
pip install numpy opencv-python fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
#### Python Inference Example
* Prepare the model and image
```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
* Test the inference result
```python
# For GPU/TensorRT deployment, see examples/vision/detection/paddledetection/python
import cv2
import fastdeploy.vision as vision
model = vision.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
"ppyoloe_crn_l_300e_coco/model.pdiparams",
"ppyoloe_crn_l_300e_coco/infer_cfg.yml")
im = cv2.imread("000000014439.jpg")
result = model.predict(im.copy())
print(result)
vis_im = vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("vis_image.jpg", vis_im)
```
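Beyond `print(result)`, the detections can be consumed programmatically. The following is a minimal sketch assuming `DetectionResult` exposes the commonly documented `boxes`, `scores`, and `label_ids` fields (verify against the FastDeploy vision API reference); it continues from the example above:
```python
# Minimal sketch (assumed result fields): filter detections by score and
# print each surviving box. `result` comes from model.predict(...) above.
for box, score, label_id in zip(result.boxes, result.scores, result.label_ids):
    if score < 0.5:  # same threshold as the visualization call above
        continue
    xmin, ymin, xmax, ymax = box
    print(f"label={label_id} score={score:.3f} "
          f"box=({xmin:.1f}, {ymin:.1f}, {xmax:.1f}, {ymax:.1f})")
```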
</div></details>
<div id="fastdeploy-quick-start-cpp"></div>
<details close>
<summary><b>C++ SDK Quick Start (click for details)</b></summary><div>
#### Installation
- See the [C++ precompiled library download](docs/cn/build_and_install/download_prebuilt_libraries.md) documentation
#### C++ Inference Example
* Prepare the model and image
```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
* Test the inference result
```C++
// For GPU/TensorRT deployment, see examples/vision/detection/paddledetection/cpp
#include "fastdeploy/vision.h"
int main(int argc, char* argv[]) {
namespace vision = fastdeploy::vision;
auto model = vision::detection::PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
"ppyoloe_crn_l_300e_coco/model.pdiparams",
"ppyoloe_crn_l_300e_coco/infer_cfg.yml");
auto im = cv::imread("000000014439.jpg");
vision::DetectionResult res;
model.Predict(&im, &res);
auto vis_im = vision::Visualize::VisDetection(im, res, 0.5);
cv::imwrite("vis_image.jpg", vis_im);
return 0;
}
```
</div></details>
For more deployment examples, please refer to the [model deployment examples](examples).
<div id="fastdeploy-server-models"></div>
### Server-Side Model Support List 🔥🔥🔥🔥🔥
Symbol legend: (1) ✅: supported; (2) ❔: in progress; (3) N/A: not supported; <br>
<details open><summary><b>Server-Side Model Support List (click to collapse)</b></summary><div>
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/198620704-741523c1-dec7-44e5-9f2b-29ddd9997344.png"/>
</div>
| Task Scenario | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux |
|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:---------------:|:-------------:|:-------------:|:-------:|
| --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | Phytium D2000 CPU | NVIDIA Jetson | Graphcore IPU | Serving |
| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_ort_](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Single430FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | PeterL1n/RobustVideoMatting | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | |
| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ |
| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ✅ |
</div></details>
<div id="fastdeploy-edge-doc"></div>
## 📲 Mobile and Edge Deployment 🔥🔥🔥🔥
<div id="fastdeploy-edge-models"></div>
### Edge Model Support List
<details open><summary><b>Edge Model Support List (click to collapse)</b></summary><div>
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/198620704-741523c1-dec7-44e5-9f2b-29ddd9997344.png" />
</div>
| Task Scenario | Model | Size (MB) | Linux | Android | Linux | Linux | Linux | Linux | TBD... |
|:------------------:|:-----------------------------------------------------------------------------------------:|:--------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|
| --- | --- | --- | ARM CPU | ARM CPU | Rockchip-NPU<br>RK3568/RK3588 | Rockchip-NPU<br>RV1109/RV1126/RK1808 | Amlogic-NPU <br>A311D/S905D/C308X | NXP-NPU<br>i.MX&nbsp;8M&nbsp;Plus | TBD... |
| Classification | [PaddleClas/ResNet50](examples/vision/classification/paddleclas) | 98 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-LCNet](examples/vision/classification/paddleclas) | 11.9 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-LCNetv2](examples/vision/classification/paddleclas) | 26.6 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/EfficientNet](examples/vision/classification/paddleclas) | 31.4 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/GhostNet](examples/vision/classification/paddleclas) | 20.8 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV1](examples/vision/classification/paddleclas) | 17 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV2](examples/vision/classification/paddleclas) | 14.2 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV3](examples/vision/classification/paddleclas) | 22 | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | -- |
| Classification | [PaddleClas/ShuffleNetV2](examples/vision/classification/paddleclas) | 9.2 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/SqueezeNetV1.1](examples/vision/classification/paddleclas) | 5 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/Inceptionv3](examples/vision/classification/paddleclas) | 95.5 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-HGNet](examples/vision/classification/paddleclas) | 59 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Detection | [PaddleDetection/PP-PicoDet_s_320_coco_lcnet](examples/vision/detection/paddledetection) | 4.9 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -- |
| Face Detection | [deepinsight/SCRFD](./examples/vision/facedet/scrfd) | 2.5 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Keypoint Detection | [PaddleDetection/PP-TinyPose](examples/vision/keypointdetection/tiny_pose) | 5.5 | ✅ | ✅ | ❔ | ❔ | ❔ | ❔ | -- |
| Segmentation | [PaddleSeg/PP-LiteSeg(STDC1)](examples/vision/segmentation/paddleseg) | 32.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/PP-HumanSeg-Lite](examples/vision/segmentation/paddleseg) | 0.556 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/HRNet-w18](examples/vision/segmentation/paddleseg) | 38.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/PP-HumanSeg](examples/vision/segmentation/paddleseg) | 107.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/Unet](examples/vision/segmentation/paddleseg) | 53.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/Deeplabv3](examples/vision/segmentation/paddleseg) | 150 | ❔ | ✅ | ✅ | -- | -- | -- | -- |
| OCR | [PaddleOCR/PP-OCRv2](examples/vision/ocr/PP-OCRv2) | 2.3+4.4 | ✅ | ✅ | ❔ | -- | -- | -- | -- |
| OCR | [PaddleOCR/PP-OCRv3](examples/vision/ocr/PP-OCRv3) | 2.4+10.6 | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | -- |
</div></details>
## 🌐 Web and Applet Deployment
<div id="fastdeploy-web-models"></div>
<details open><summary><b>Web and Mini Program Deployment Support List (click to collapse)</b></summary><div>
| Task Scenario | Model | [web_demo](examples/application/js/web_demo) |
|:------------------:|:-------------------------------------------------------------------------------------------:|:--------------------------------------------:|
| --- | --- | [Paddle.js](examples/application/js) |
| Detection | [FaceDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ |
| Detection | [ScrewDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ |
| Segmentation | [PaddleSeg/HumanSeg](./examples/application/js/web_demo/src/pages/cv/segmentation/HumanSeg) | ✅ |
| Object Recognition | [GestureRecognition](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ |
| Object Recognition | [ItemIdentification](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/application/js/web_demo/src/pages/cv/ocr) | ✅ |
</div></details>
<div id="fastdeploy-acknowledge"></div>
## Acknowledge
This project's SDK generation and download use the free and open capabilities of [EasyEdge](https://ai.baidu.com/easyedge/app/openSource), for which we are grateful.
## License
<div id="fastdeploy-license"></div>
FastDeploy is licensed under the [Apache-2.0 open source license](./LICENSE).
View File

@@ -0,0 +1,335 @@
[English](../../README_EN.md) | [简体中文](../../README_CN.md) | [हिन्दी](./README_हिन्दी.md) | [日本語](./README_日本語.md) | 한국인 | [Pу́сский язы́к](.//README_Ру́сский_язы́к.md)
![FastDeploy](https://user-images.githubusercontent.com/31974251/185771818-5d4423cd-c94c-4a49-9894-bc7a8d1c29d0.png)
</p>
<p align="center">
<a href="./LICENSE"><img src="https://img.shields.io/badge/license-Apache%202-dfd.svg"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/releases"><img src="https://img.shields.io/github/v/release/PaddlePaddle/FastDeploy?color=ffa"></a>
<a href=""><img src="https://img.shields.io/badge/python-3.7+-aff.svg"></a>
<a href=""><img src="https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-pink.svg"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/graphs/contributors"><img src="https://img.shields.io/github/contributors/PaddlePaddle/FastDeploy?color=9ea"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/commits"><img src="https://img.shields.io/github/commit-activity/m/PaddlePaddle/FastDeploy?color=3af"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/issues"><img src="https://img.shields.io/github/issues/PaddlePaddle/FastDeploy?color=9cc"></a>
<a href="https://github.com/PaddlePaddle/FastDeploy/stargazers"><img src="https://img.shields.io/github/stars/PaddlePaddle/FastDeploy?color=ccf"></a>
</p>
<p align="center">
<a href="/docs/cn/build_and_install"><b> 설치 </b></a>
|
<a href="docs/README_CN.md"><b> 문서 사용하기 </b></a>
|
<a href="https://baidu-paddle.github.io/fastdeploy-api/"><b> API문서 </b></a>
|
<a href="https://github.com/PaddlePaddle/FastDeploy/releases"><b> 로그 업데이트 </b></a>
</p>
**⚡FastDeploy** is an **easy-to-use**, **flexible and highly efficient** AI inference deployment tool for **all scenarios**. 📦 It supports 🔥 more than 150 **text**, **vision**, **speech**, and 🔚 cross-modal models, provides an out-of-the-box cloud-edge deployment experience, and enables end-to-end optimization of inference performance. Covered tasks include image classification, object detection, image segmentation, face detection, face recognition, keypoint detection, matting, OCR, NLP, and TTS, meeting developers' needs for **multi-scenario, multi-hardware, multi-platform** industrial deployment.
| [Image Classification](examples/vision/classification) | [Object Detection](examples/vision/detection) | [Semantic Segmentation](examples/vision/segmentation/paddleseg) | [Portrait Segmentation](examples/vision/segmentation/paddleseg) |
|:----------------------------------------------------------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
| <img src='https://user-images.githubusercontent.com/54695910/200465949-da478e1b-21ce-43b8-9f3f-287460e786bd.png' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054680-2f8d1952-c120-4b67-88fc-7d2d7d2378b4.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054711-6119f0e7-d741-43b1-b273-9493d103d49f.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054718-6395321c-8937-4fa0-881c-5b20deb92aaa.gif' height="126px" width="190px"> |
| [**Image Matting**](examples/vision/matting) | [**Real-Time Matting**](examples/vision/matting) | [**OCR**](examples/vision/ocr) | [**Face Alignment**](examples/vision/facealign) |
| <img src='https://user-images.githubusercontent.com/54695910/188058231-a5fe1ce1-0a38-460f-9582-e0b881514908.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054691-e4cb1a70-09fe-4691-bc62-5552d50bd853.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/188054669-a85996ba-f7f3-4646-ae1f-3b7e3e353e7d.gif' height="126px" width="190px" > | <img src='https://user-images.githubusercontent.com/54695910/188059460-9845e717-c30a-4252-bd80-b7f6d4cf30cb.png' height="126px" width="190px"> |
| [**Pose Estimation**](examples/vision/keypointdetection) | [**Behavior Recognition**](https://github.com/PaddlePaddle/FastDeploy/issues/6) | [**NLP**](examples/text) | [**Speech**](examples/audio/pp-tts) |
| <img src='https://user-images.githubusercontent.com/54695910/188054671-394db8dd-537c-42b1-9d90-468d7ad1530e.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/48054808/173034825-623e4f78-22a5-4f14-9b83-dc47aa868478.gif' height="126px" width="190px"> | <img src='https://user-images.githubusercontent.com/54695910/200162475-f5d85d70-18fb-4930-8e7e-9ca065c1d618.gif' height="126px" width="190px"> | <p align="left">**input** 早上好今天是2020<br>/10/29最低温度是-3°C。<br><br> <p align="left">**output**: [<img src="https://user-images.githubusercontent.com/54695910/200161645-871e08da-5a31-4736-879c-a88bb171a676.png" width="170" style="max-width: 100%;">](https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/parakeet/001.wav)</p> |
## **Community Communication**
* **Slack**: Join our [Slack community](https://join.slack.com/t/fastdeployworkspace/shared_invite/zt-1jznah134-3rxY~ytRb8rcPqkn9g~PDg) and chat with other community members about ideas
* **WeChat**: Scan the QR code and fill in the questionnaire to join the technical community, where you can discuss deployment pain points and solutions with community developers
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/200145290-d5565d18-6707-4a0b-a9af-85fd36d35d13.jpg" width = "220" height = "220" />
</div>
## Contents
* **🖥️ Server-Side Deployment**
* [Python SDK Quick Start](#fastdeploy-quick-start-python)
* [C++ SDK Quick Start](#fastdeploy-quick-start-cpp)
* [Server-Side Model Support List](#fastdeploy-server-models)
* **📲 Mobile and Edge Deployment**
* [Edge Model Support List](#fastdeploy-edge-models)
* **🌐 Web and Applet Deployment**
* [Web-Side Model Support List](#fastdeploy-web-models)
* [Acknowledge](#fastdeploy-acknowledge)
* [License](#fastdeploy-license)
## 🖥️ Server-Side Deployment
<div id="fastdeploy-quick-start-python"></div>
<details close>
<summary><b>Python SDK Quick Start (click for details)</b></summary><div>
#### Quick Installation
##### Prerequisites
- CUDA >= 11.2, cuDNN >= 8.0, Python >= 3.6
- OS: Linux x86_64/macOS/Windows 10
##### Install the GPU Version
```bash
pip install numpy opencv-python fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
##### [Conda Installation (Recommended)](docs/cn/build_and_install/download_prebuilt_libraries.md)
```bash
conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2
```
##### Install the CPU Version
```bash
pip install numpy opencv-python fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```
#### Python Inference Example
* Prepare the model and image
```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
* Test the inference result
```python
# For GPU/TensorRT deployment, see examples/vision/detection/paddledetection/python
import cv2
import fastdeploy.vision as vision
model = vision.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
"ppyoloe_crn_l_300e_coco/model.pdiparams",
"ppyoloe_crn_l_300e_coco/infer_cfg.yml")
im = cv2.imread("000000014439.jpg")
result = model.predict(im.copy())
print(result)
vis_im = vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("vis_image.jpg", vis_im)
```
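The model is loaded once and can then be reused for any number of `predict` calls, so batching over a folder of images is a simple loop. Below is a minimal sketch continuing from the example above; the `images/` input directory and `vis_*` output naming are hypothetical:
```python
# Minimal sketch: reuse the PPYOLOE model loaded above for a folder of
# images. The "images/" directory and "vis_*" output names are
# hypothetical placeholders.
import glob
import os

import cv2
import fastdeploy.vision as vision

for path in glob.glob("images/*.jpg"):
    im = cv2.imread(path)
    result = model.predict(im.copy())
    vis = vision.vis_detection(im, result, score_threshold=0.5)
    cv2.imwrite("vis_" + os.path.basename(path), vis)
```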
</div></details>
<div id="fastdeploy-quick-start-cpp"></div>
<details close>
<summary><b>C++ SDK Quick Start (click for details)</b></summary><div>
#### Installation
- See the [C++ precompiled library download](docs/cn/build_and_install/download_prebuilt_libraries.md) documentation
#### C++ Inference Example
* Prepare the model and image
```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
tar xvf ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
* Test the inference result
```C++
// For GPU/TensorRT deployment, see examples/vision/detection/paddledetection/cpp
#include "fastdeploy/vision.h"
int main(int argc, char* argv[]) {
namespace vision = fastdeploy::vision;
auto model = vision::detection::PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
"ppyoloe_crn_l_300e_coco/model.pdiparams",
"ppyoloe_crn_l_300e_coco/infer_cfg.yml");
auto im = cv::imread("000000014439.jpg");
vision::DetectionResult res;
model.Predict(&im, &res);
auto vis_im = vision::Visualize::VisDetection(im, res, 0.5);
cv::imwrite("vis_image.jpg", vis_im);
return 0;
}
```
</div></details>
For more deployment examples, please refer to the [model deployment examples](examples).
<div id="fastdeploy-server-models"></div>
### Server-Side Model Support List 🔥🔥🔥🔥🔥
Symbol legend: (1) ✅: supported; (2) ❔: in progress; (3) N/A: not supported; <br>
<details open><summary><b>Server-Side Model Support List (click to collapse)</b></summary><div>
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/198620704-741523c1-dec7-44e5-9f2b-29ddd9997344.png"/>
</div>
| Task Scenario | Model | Linux | Linux | Win | Win | Mac | Mac | Linux | Linux | Linux | Linux | Linux |
|:----------------------:|:--------------------------------------------------------------------------------------------:|:------------------------------------------------:|:----------:|:-------:|:----------:|:-------:|:-------:|:-----------:|:---------------:|:-------------:|:-------------:|:-------:|
| --- | --- | X86 CPU | NVIDIA GPU | X86 CPU | NVIDIA GPU | X86 CPU | Arm CPU | AArch64 CPU | Phytium D2000 CPU | NVIDIA Jetson | Graphcore IPU | Serving |
| Classification | [PaddleClas/ResNet50](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [TorchVision/ResNet](examples/vision/classification/resnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [ultralytics/YOLOv5Cls](examples/vision/classification/yolov5cls) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Classification | [PaddleClas/PP-LCNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/PP-LCNetv2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/EfficientNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/GhostNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/MobileNetV3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/ShuffleNetV2](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/SqueezeNetV1.1](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Classification | [PaddleClas/Inceptionv3](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Classification | [PaddleClas/PP-HGNet](./examples/vision/classification/paddleclas) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Detection | [PaddleDetection/PP-YOLOE](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PicoDet](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOX](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/YOLOv3](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLO](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/PP-YOLOv2](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Faster-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [PaddleDetection/Mask-RCNN](./examples/vision/detection/paddledetection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [Megvii-BaseDetection/YOLOX](./examples/vision/detection/yolox) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7](./examples/vision/detection/yolov7) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_trt](./examples/vision/detection/yolov7end2end_trt) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/YOLOv7end2end_ort_](./examples/vision/detection/yolov7end2end_ort) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [meituan/YOLOv6](./examples/vision/detection/yolov6) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ultralytics/YOLOv5](./examples/vision/detection/yolov5) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Detection | [WongKinYiu/YOLOR](./examples/vision/detection/yolor) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Detection | [WongKinYiu/ScaledYOLOv4](./examples/vision/detection/scaledyolov4) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [ppogg/YOLOv5Lite](./examples/vision/detection/yolov5lite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Detection | [RangiLyu/NanoDetPlus](./examples/vision/detection/nanodet_plus) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/TinyPose](./examples/vision/keypointdetection/tiny_pose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| KeyPoint | [PaddleDetection/PicoDet + TinyPose](./examples/vision/keypointdetection/det_keypoint_unite) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| HeadPose | [omasaht/headpose](examples/vision/headpose) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Tracking | [PaddleDetection/PP-Tracking](examples/vision/tracking/pptracking) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv2](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/vision/ocr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ |
| Segmentation | [PaddleSeg/PP-LiteSeg](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegLite](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/HRNet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/PP-HumanSegServer](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Unet](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Segmentation | [PaddleSeg/Deeplabv3](./examples/vision/segmentation/paddleseg) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceDetection | [biubug6/RetinaFace](./examples/vision/facedet/retinaface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [Linzaer/UltraFace](./examples/vision/facedet/ultraface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [deepcam-cn/YOLOv5Face](./examples/vision/facedet/yolov5face) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceDetection | [insightface/SCRFD](./examples/vision/facedet/scrfd) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Hsintao/PFLD](examples/vision/facealign/pfld) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceAlign | [Single430FaceLandmark1000](./examples/vision/facealign/face_landmark_1000) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceAlign | [jhb86253817/PIPNet](./examples/vision/facealign) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/ArcFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/CosFace](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/PartialFC](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| FaceRecognition | [insightface/VPL](./examples/vision/faceid/insightface) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [ZHKKKe/MODNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | PeterL1n/RobustVideoMatting | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-Matting](./examples/vision/matting/ppmatting) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/PP-HumanMatting](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Matting | [PaddleSeg/ModNet](./examples/vision/matting/modnet) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/BasicVSR](./) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/EDVR](./examples/vision/sr/edvr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Video Super-Resolution | [PaddleGAN/PP-MSVSR](./examples/vision/sr/ppmsvsr) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ |
| Information Extraction | [PaddleNLP/UIE](./examples/text/uie) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ✅ | ❔ | |
| NLP | [PaddleNLP/ERNIE-3.0](./examples/text/ernie-3.0) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | ❔ | ✅ |
| Speech | [PaddleSpeech/PP-TTS](./examples/audio/pp-tts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❔ | ❔ | -- | ✅ |
</div></details>
<div id="fastdeploy-edge-doc"></div>
## 📲 Mobile and Edge Deployment 🔥🔥🔥🔥
<div id="fastdeploy-edge-models"></div>
### Edge Model Support List
<details open><summary><b>Edge Model Support List (click to collapse)</b></summary><div>
<div align="center">
<img src="https://user-images.githubusercontent.com/54695910/198620704-741523c1-dec7-44e5-9f2b-29ddd9997344.png" />
</div>
| Task Scenario | Model | Size (MB) | Linux | Android | Linux | Linux | Linux | Linux | TBD... |
|:------------------:|:-----------------------------------------------------------------------------------------:|:--------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|:-------:|
| --- | --- | --- | ARM CPU | ARM CPU | Rockchip-NPU<br>RK3568/RK3588 | Rockchip-NPU<br>RV1109/RV1126/RK1808 | Amlogic-NPU <br>A311D/S905D/C308X | NXP-NPU<br>i.MX&nbsp;8M&nbsp;Plus | TBD... |
| Classification | [PaddleClas/ResNet50](examples/vision/classification/paddleclas) | 98 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-LCNet](examples/vision/classification/paddleclas) | 11.9 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-LCNetv2](examples/vision/classification/paddleclas) | 26.6 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/EfficientNet](examples/vision/classification/paddleclas) | 31.4 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/GhostNet](examples/vision/classification/paddleclas) | 20.8 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV1](examples/vision/classification/paddleclas) | 17 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV2](examples/vision/classification/paddleclas) | 14.2 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/MobileNetV3](examples/vision/classification/paddleclas) | 22 | ✅ | ✅ | ❔ | ✅ | ❔ | ❔ | -- |
| Classification | [PaddleClas/ShuffleNetV2](examples/vision/classification/paddleclas) | 9.2 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/SqueezeNetV1.1](examples/vision/classification/paddleclas) | 5 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/Inceptionv3](examples/vision/classification/paddleclas) | 95.5 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Classification | [PaddleClas/PP-HGNet](examples/vision/classification/paddleclas) | 59 | ✅ | ✅ | ❔ | ✅ | -- | -- | -- |
| Detection | [PaddleDetection/PP-PicoDet_s_320_coco_lcnet](examples/vision/detection/paddledetection) | 4.9 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -- |
| Face Detection | [deepinsight/SCRFD](./examples/vision/facedet/scrfd) | 2.5 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Keypoint Detection | [PaddleDetection/PP-TinyPose](examples/vision/keypointdetection/tiny_pose) | 5.5 | ✅ | ✅ | ❔ | ❔ | ❔ | ❔ | -- |
| Segmentation | [PaddleSeg/PP-LiteSeg(STDC1)](examples/vision/segmentation/paddleseg) | 32.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/PP-HumanSeg-Lite](examples/vision/segmentation/paddleseg) | 0.556 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/HRNet-w18](examples/vision/segmentation/paddleseg) | 38.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/PP-HumanSeg](examples/vision/segmentation/paddleseg) | 107.2 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/Unet](examples/vision/segmentation/paddleseg) | 53.7 | ✅ | ✅ | ✅ | -- | -- | -- | -- |
| Segmentation | [PaddleSeg/Deeplabv3](examples/vision/segmentation/paddleseg) | 150 | ❔ | ✅ | ✅ | -- | -- | -- | -- |
| OCR | [PaddleOCR/PP-OCRv2](examples/vision/ocr/PP-OCRv2) | 2.3+4.4 | ✅ | ✅ | ❔ | -- | -- | -- | -- |
| OCR | [PaddleOCR/PP-OCRv3](examples/vision/ocr/PP-OCRv3) | 2.4+10.6 | ✅ | ❔ | ❔ | ❔ | ❔ | ❔ | -- |
</div></details>
## 🌐 Web and Mini Program Deployment
<div id="fastdeploy-web-models"></div>
<details open><summary><b>Web and mini program deployment support list (click to collapse)</b></summary><div>
| Task Scenario | Model | [web_demo](examples/application/js/web_demo) |
|:------------------:|:-------------------------------------------------------------------------------------------:|:--------------------------------------------:|
| --- | --- | [Paddle.js](examples/application/js) |
| Detection | [FaceDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ |
| Detection | [ScrewDetection](examples/application/js/web_demo/src/pages/cv/detection) | ✅ |
| Segmentation | [PaddleSeg/HumanSeg](./examples/application/js/web_demo/src/pages/cv/segmentation/HumanSeg) | ✅ |
| Object Recognition | [GestureRecognition](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ |
| Object Recognition | [ItemIdentification](examples/application/js/web_demo/src/pages/cv/recognition) | ✅ |
| OCR | [PaddleOCR/PP-OCRv3](./examples/application/js/web_demo/src/pages/cv/ocr) | ✅ |
</div></details>
<div id="fastdeploy-acknowledge"></div>
## Acknowledge
The SDK generation and download in this project use the free open capabilities of EasyEdge (https://ai.baidu.com/easyedge/app/openSource), for which we express our thanks.
## License
<div id="fastdeploy-license"></div>
FastDeploy complies with the [Apache-2.0 open source license](./LICENSE).

docs/en/build_and_install/README.md Normal file → Executable file
View File

@@ -12,6 +12,9 @@ English | [中文](../../cn/build_and_install/README.md)
- [Build and Install on IPU Platform](ipu.md)
- [Build and Install on Nvidia Jetson Platform](jetson.md)
- [Build and Install on Android Platform](android.md)
- [Build and Install on RV1126 Platform](rv1126.md)
- [Build and Install on A311D Platform](a311d.md)
- [Build and Install on KunlunXin XPU Platform](xpu.md)
## Build options
@@ -25,6 +28,8 @@ English | [中文](../../cn/build_and_install/README.md)
| ENABLE_VISION | Default OFF, whether to enable the vision models deployment module |
| ENABLE_TEXT | Default OFF, whether to enable the text models deployment module |
| WITH_GPU | Default OFF, if building on GPU, this needs to be ON |
| WITH_XPU | Default OFF, if deploying on KunlunXin XPU, this needs to be ON |
| WITH_TIMVX | Default OFF, if deploying on RV1126/RV1109/A311D, this needs to be ON |
| CUDA_DIRECTORY | Default /usr/local/cuda, if building on GPU, this defines the path of CUDA (>=11.2) |
| TRT_DIRECTORY | If building with ENABLE_TRT_BACKEND=ON, this defines the path of TensorRT (>=8.4) |
| ORT_DIRECTORY | [Optional] If building with ENABLE_ORT_BACKEND=ON, this flag defines the path of ONNX Runtime; if it is not set, the ONNX Runtime library will be downloaded automatically |
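As a quick illustration of how these options combine, the sketch below configures a KunlunXin XPU build with vision models enabled (the install prefix and `-j8` parallelism are assumptions; adapt them to your machine):
```bash
# Illustrative configuration only; pick the options your target needs
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy && mkdir build && cd build
cmake .. -DWITH_XPU=ON \
         -DENABLE_VISION=ON \
         -DCMAKE_INSTALL_PREFIX=${PWD}/installed_fastdeploy
make -j8 && make install
```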

View File

@@ -0,0 +1,105 @@
# How to Build A311D Deployment Environment
FastDeploy supports AI deployment on the Amlogic A311D SoC based on the Paddle-Lite backend. For more detailed information, please refer to: [PaddleLite Deployment Example](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html).
This document describes how to compile the PaddleLite-based C++ FastDeploy cross-compilation library.
The relevant compilation options are described as follows:
|Compile Options|Default Values|Description|Remarks|
|:---|:---|:---|:---|
|ENABLE_LITE_BACKEND|OFF|It needs to be set to ON when compiling the A311D library| - |
|WITH_TIMVX|OFF|It needs to be set to ON when compiling the A311D library| - |
|TARGET_ABI|NONE|It needs to be set to arm64 when compiling the A311D library| - |
For more compilation options, please refer to [Description of FastDeploy compilation options](./README.md)
## Cross-compilation environment construction
### Host Environment Requirements
- OS: Ubuntu == 16.04
- cmake version >= 3.10.0
### Building the compilation environment
You can enter the FastDeploy/tools/timvx directory and use the following command to install:
```bash
cd FastDeploy/tools/timvx
bash install.sh
```
You can also install it with the following commands:
```bash
# 1. Install basic software
apt update
apt-get install -y --no-install-recommends \
gcc g++ git make wget python unzip
# 2. Install arm gcc toolchains
apt-get install -y --no-install-recommends \
g++-arm-linux-gnueabi gcc-arm-linux-gnueabi \
g++-arm-linux-gnueabihf gcc-arm-linux-gnueabihf \
gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
# 3. Install cmake 3.10 or above
wget -c https://mms-res.cdn.bcebos.com/cmake-3.10.3-Linux-x86_64.tar.gz && \
tar xzf cmake-3.10.3-Linux-x86_64.tar.gz && \
mv cmake-3.10.3-Linux-x86_64 /opt/cmake-3.10 && \
ln -s /opt/cmake-3.10/bin/cmake /usr/bin/cmake && \
ln -s /opt/cmake-3.10/bin/ccmake /usr/bin/ccmake
```
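A quick way to confirm the environment is ready (a convenience check, not part of the original guide):
```bash
# All three should print a version banner; cmake must report >= 3.10
cmake --version
arm-linux-gnueabihf-g++ --version
aarch64-linux-gnu-g++ --version
```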
## FastDeploy cross-compilation library compilation based on PaddleLite
After setting up the cross-compilation environment, the compilation command is as follows:
```bash
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
# CMake configuration with A311D toolchain
cmake -DCMAKE_TOOLCHAIN_FILE=./../cmake/toolchain.cmake \
-DWITH_TIMVX=ON \
-DTARGET_ABI=arm64 \
-DCMAKE_INSTALL_PREFIX=fastdeploy-tmivx \
      -DENABLE_VISION=ON \
      -Wno-dev ..
# Note: -DENABLE_VISION=ON is optional; it controls whether the vision model deployment module is compiled
# Build FastDeploy A311D C++ SDK
make -j8
make install
```
After the compilation is complete, the fastdeploy-tmivx directory will be generated, indicating that the FastDeploy library based on PaddleLite TIM-VX has been compiled.
## Prepare the SoC environment
Before deployment, ensure that the version of the Verisilicon Linux kernel NPU driver (galcore) meets the requirements. Log in to the development board and run the following command to query the NPU driver version. The recommended driver version for A311D is 6.4.4.3:
```bash
dmesg | grep Galcore
```
If the current version does not comply with the above, please read the following content carefully to ensure that the underlying NPU driver environment is correct.
There are two ways to modify the current NPU driver version:
1. Manually replace the NPU driver version. (recommended)
2. Reflash the device with firmware that provides the required NPU driver version.
### Manually replace the NPU driver version
1. Use the following command to download and decompress the PaddleLite demo, which provides ready-made driver files
```bash
wget https://paddlelite-demo.bj.bcebos.com/devices/generic/PaddleLite-generic-demo.tar.gz
tar -xf PaddleLite-generic-demo.tar.gz
```
2. Use `uname -a` to check the `Linux Kernel` version; confirm that it is 4.9.113.
3. Upload `galcore.ko` from the `PaddleLite-generic-demo/libs/PaddleLite/linux/arm64/lib/verisilicon_timvx/viv_sdk_6_4_4_3/lib/a311d/4.9.113` path to the development board.
4. Log in to the development board, enter `sudo rmmod galcore` on the command line to unload the original driver, then enter `sudo insmod galcore.ko` to load the uploaded driver. (Whether sudo is needed depends on the board; for some adb-connected devices, run `adb root` first.) If this step fails, go to method 2.
5. Enter `dmesg | grep Galcore` on the development board to query the NPU driver version and confirm that it is 6.4.4.3; steps 3-5 are condensed in the sketch below.
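A shell sketch of the driver replacement (assuming the board is reachable over adb and `/tmp` is writable; both are assumptions, adjust to your board):
```bash
# Push the matching driver from the demo archive to the board
adb push PaddleLite-generic-demo/libs/PaddleLite/linux/arm64/lib/verisilicon_timvx/viv_sdk_6_4_4_3/lib/a311d/4.9.113/galcore.ko /tmp/
# Swap the driver: unload the original, then load the uploaded one
adb shell "sudo rmmod galcore && sudo insmod /tmp/galcore.ko"
# Confirm the driver version now reports 6.4.4.3
adb shell "dmesg | grep Galcore"
```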
### Flashing
Depending on the specific development board model, ask the board vendor or official customer service for the firmware and flashing method corresponding to NPU driver version 6.4.4.3.
For more details, please refer to: [PaddleLite prepares the device environment](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html#zhunbeishebeihuanjing)
## Deployment example based on FastDeploy on A311D
1. For deploying the PaddleClas classification model on A311D, please refer to: [C++ deployment example of PaddleClas classification model on A311D](../../../examples/vision/classification/paddleclas/a311d/README.md)
2. For deploying PPYOLOE detection model on A311D, please refer to: [C++ deployment example of PPYOLOE detection model on A311D](../../../examples/vision/detection/paddledetection/a311d/README.md)
3. For deploying YOLOv5 detection model on A311D, please refer to: [C++ Deployment Example of YOLOv5 Detection Model on A311D](../../../examples/vision/detection/yolov5/a311d/README.md)
4. For deploying PP-LiteSeg segmentation model on A311D, please refer to: [C++ Deployment Example of PP-LiteSeg Segmentation Model on A311D](../../../examples/vision/segmentation/paddleseg/a311d/README.md)

View File

@@ -30,6 +30,11 @@ Prerequisite for Compiling on Linux & Mac:
- gcc/g++ >= 5.4 (8.2 is recommended)
- cmake >= 3.18.0
It is recommended to install the OpenCV library manually and define `-DOPENCV_DIRECTORY` to set the path of the OpenCV library (if the flag is not defined, a prebuilt OpenCV library will be downloaded automatically while building FastDeploy, but the prebuilt OpenCV does not support reading video files or functions such as `imshow`):
```
sudo apt-get install libopencv-dev
```
```
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
@@ -38,7 +43,8 @@ cmake .. -DENABLE_ORT_BACKEND=ON \
-DENABLE_PADDLE_BACKEND=ON \
-DENABLE_OPENVINO_BACKEND=ON \
-DCMAKE_INSTALL_PREFIX=${PWD}/compiled_fastdeploy_sdk \
-DENABLE_VISION=ON
-DENABLE_VISION=ON \
-DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
make -j12
make install
```
@@ -84,6 +90,11 @@ All compilation options are introduced via environment variables
### Linux & Mac
It is recommended to install the OpenCV library manually and define `-DOPENCV_DIRECTORY` to set the path of the OpenCV library (if the flag is not defined, a prebuilt OpenCV library will be downloaded automatically while building FastDeploy, but the prebuilt OpenCV does not support reading video files or functions such as `imshow`):
```
sudo apt-get install libopencv-dev
```
```
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
@@ -91,6 +102,8 @@ export ENABLE_ORT_BACKEND=ON
export ENABLE_PADDLE_BACKEND=ON
export ENABLE_OPENVINO_BACKEND=ON
export ENABLE_VISION=ON
# The OPENCV_DIRECTORY is optional, if not exported, a prebuilt OpenCV library will be downloaded
export OPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
python setup.py build
python setup.py bdist_wheel

View File

@@ -22,7 +22,7 @@ FastDeploy supports Computer Vision, Text and NLP model deployment on CPU and Nv
### Python SDK
Install the released version (the newest 1.0.0 for now)
Install the released version (the newest 1.0.1 for now)
```
pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
@@ -42,12 +42,12 @@ conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=
### C++ SDK
Install the released version (Latest 1.0.0)
Install the released version (Latest 1.0.1)
| Platform | File | Description |
|:----------- |:--------------------------------------------------------------------------------------------------------------------- |:--------------------------------------------------------- |
| Linux x64 | [fastdeploy-linux-x64-gpu-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-1.0.0.tgz) | g++ 8.2, CUDA 11.2, cuDNN 8.2 |
| Windows x64 | [fastdeploy-win-x64-gpu-1.0.0.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-1.0.0.zip) | Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
| Linux x64 | [fastdeploy-linux-x64-gpu-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-1.0.1.tgz) | g++ 8.2, CUDA 11.2, cuDNN 8.2 |
| Windows x64 | [fastdeploy-win-x64-gpu-1.0.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-1.0.1.zip) | Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
Install the Develop versionNightly build
@@ -69,7 +69,7 @@ FastDeploy supports computer vision, text and NLP model deployment on CPU with P
### Python SDK
Install the released version (Latest 1.0.0 for now)
Install the released version (Latest 1.0.1 for now)
```
pip install fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
@@ -83,15 +83,15 @@ pip install fastdeploy-python==0.0.0 -f https://www.paddlepaddle.org.cn/whl/fast
### C++ SDK
Install the released version (Latest 1.0.0 for now, Android is 1.0.0)
Install the released version (Latest 1.0.1 for now, Android is 1.0.1)
| Platform | File | Description |
|:------------- |:--------------------------------------------------------------------------------------------------------------------- |:------------------------------ |
| Linux x64 | [fastdeploy-linux-x64-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-1.0.0.tgz) | g++ 8.2 |
| Windows x64 | [fastdeploy-win-x64-1.0.0.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-1.0.0.zip) | Visual Studio 16 2019 |
| Mac OSX x64 | [fastdeploy-osx-x86_64-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-1.0.0.tgz) | clang++ 10.0.0|
| Mac OSX arm64 | [fastdeploy-osx-arm64-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-1.0.0.tgz) | clang++ 13.0.0 |
| Linux aarch64 | [fastdeploy-osx-arm64-1.0.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-1.0.0.tgz) | gcc 6.3 |
| Linux x64 | [fastdeploy-linux-x64-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-1.0.1.tgz) | g++ 8.2 |
| Windows x64 | [fastdeploy-win-x64-1.0.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-1.0.1.zip) | Visual Studio 16 2019 |
| Mac OSX x64 | [fastdeploy-osx-x86_64-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-1.0.1.tgz) | clang++ 10.0.0|
| Mac OSX arm64 | [fastdeploy-osx-arm64-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-1.0.1.tgz) | clang++ 13.0.0 |
| Linux aarch64 | [fastdeploy-osx-arm64-1.0.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-1.0.1.tgz) | gcc 6.3 |
| Android armv7&v8 | [fastdeploy-android-1.0.0-shared.tgz](https://bj.bcebos.com/fastdeploy/release/android/fastdeploy-android-1.0.0-shared.tgz)| NDK 25, clang++, supports arm64-v8a and armeabi-v7a |
## Java SDK

View File

@@ -34,6 +34,11 @@ Prerequisite for Compiling on Linux:
- cuda >= 11.2
- cudnn >= 8.2
It is recommended to install the OpenCV library manually and define `-DOPENCV_DIRECTORY` to set the path of the OpenCV library (if the flag is not defined, a prebuilt OpenCV library will be downloaded automatically while building FastDeploy, but the prebuilt OpenCV does not support reading video files or functions such as `imshow`):
```
sudo apt-get install libopencv-dev
```
```
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
@@ -46,7 +51,8 @@ cmake .. -DENABLE_ORT_BACKEND=ON \
-DTRT_DIRECTORY=/Paddle/TensorRT-8.4.1.5 \
-DCUDA_DIRECTORY=/usr/local/cuda \
-DCMAKE_INSTALL_PREFIX=${PWD}/compiled_fastdeploy_sdk \
-DENABLE_VISION=ON
-DENABLE_VISION=ON \
-DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
make -j12
make install
```
@@ -106,6 +112,11 @@ Prerequisite for Compiling on Linux:
All compilation options are imported via environment variables
It is recommended to install the OpenCV library manually and define `-DOPENCV_DIRECTORY` to set the path of the OpenCV library (if the flag is not defined, a prebuilt OpenCV library will be downloaded automatically while building FastDeploy, but the prebuilt OpenCV does not support reading video files or functions such as `imshow`):
```
sudo apt-get install libopencv-dev
```
```
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
@@ -117,6 +128,8 @@ export ENABLE_TRT_BACKEND=ON
export WITH_GPU=ON
export TRT_DIRECTORY=/Paddle/TensorRT-8.4.1.5
export CUDA_DIRECTORY=/usr/local/cuda
# The OPENCV_DIRECTORY is optional, if not exported, a prebuilt OpenCV library will be downloaded
export OPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
python setup.py build
python setup.py bdist_wheel

View File

@@ -1,7 +1,7 @@
# How to Build FastDeploy Library on Nvidia Jetson Platform
FastDeploy supports CPU inference with ONNX Runtime and GPU inference with Nvidia TensorRT on Nvidia Jetson platform
FastDeploy supports CPU inference with ONNX Runtime and GPU inference with Nvidia TensorRT/Paddle Inference on Nvidia Jetson platform
## How to Build and Install FastDeploy C++ Library
@@ -11,12 +11,16 @@ Prerequisite for Compiling on NVIDIA Jetson:
- cmake >= 3.10.0
- jetpack >= 4.6.1
If you need to integrate the Paddle Inference backend (supports CPU/GPU), please download and decompress the prebuilt library from [Paddle Inference prebuilt libraries](https://www.paddlepaddle.org.cn/inference/v2.4/guides/install/download_lib.html#c) according to your development environment.
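Concretely, the preparation can look like the sketch below; the archive URL is a placeholder for whichever Jetson build you select from the linked page:
```bash
# Placeholder URL: substitute the Jetson build matching your JetPack version
wget https://paddle-inference-lib.bj.bcebos.com/<your-jetson-build>/paddle_inference.tgz
tar -xzf paddle_inference.tgz
# The extracted directory is what -DPADDLEINFERENCE_DIRECTORY points to below
mv paddle_inference /Download/paddle_inference_jetson
```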
```
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
cmake .. -DBUILD_ON_JETSON=ON \
-DENABLE_VISION=ON \
         -DENABLE_PADDLE_BACKEND=ON \
         -DPADDLEINFERENCE_DIRECTORY=/Download/paddle_inference_jetson \
         -DCMAKE_INSTALL_PREFIX=${PWD}/installed_fastdeploy
# Note: ENABLE_PADDLE_BACKEND and PADDLEINFERENCE_DIRECTORY are optional; omit both if you don't need Paddle Inference
make -j8
make install
@@ -35,6 +39,8 @@ Prerequisite for Compiling on NVIDIA Jetson:
Note that the `wheel` package is required to pack a wheel; execute `pip install wheel` first.
If you need to integrate the Paddle Inference backend (supports CPU/GPU), please download and decompress the prebuilt library from [Paddle Inference prebuilt libraries](https://www.paddlepaddle.org.cn/inference/v2.4/guides/install/download_lib.html#c) according to your development environment.
All compilation options are imported via environment variables
```
@@ -43,6 +49,10 @@ cd FastDeploy/python
export BUILD_ON_JETSON=ON
export ENABLE_VISION=ON
# ENABLE_PADDLE_BACKEND & PADDLEINFERENCE_DIRECTORY are optional
export ENABLE_PADDLE_BACKEND=ON
export PADDLEINFERENCE_DIRECTORY=/Download/paddle_inference_jetson
python setup.py build
python setup.py bdist_wheel
```

View File

@@ -0,0 +1,105 @@
# How to Build RV1126 Deployment Environment
FastDeploy supports AI deployment on Rockchip SoCs based on the Paddle-Lite backend. For more detailed information, please refer to: [PaddleLite Deployment Example](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html).
This document describes how to compile the PaddleLite-based C++ FastDeploy cross-compilation library.
The relevant compilation options are described as follows:
|Compile Options|Default Values|Description|Remarks|
|:---|:---|:---|:---|
|ENABLE_LITE_BACKEND|OFF|It needs to be set to ON when compiling the RK library| - |
|WITH_TIMVX|OFF|It needs to be set to ON when compiling the RK library| - |
|TARGET_ABI|NONE|It needs to be set to armhf when compiling the RK library| - |
For more compilation options, please refer to [Description of FastDeploy compilation options](./README.md)
## Cross-compilation environment construction
### Host Environment Requirements
- OS: Ubuntu == 16.04
- cmake version >= 3.10.0
### Building the compilation environment
You can enter the FastDeploy/tools/timvx directory and use the following command to install:
```bash
cd FastDeploy/tools/timvx
bash install.sh
```
You can also install it with the following commands:
```bash
# 1. Install basic software
apt update
apt-get install -y --no-install-recommends \
gcc g++ git make wget python unzip
# 2. Install arm gcc toolchains
apt-get install -y --no-install-recommends \
g++-arm-linux-gnueabi gcc-arm-linux-gnueabi \
g++-arm-linux-gnueabihf gcc-arm-linux-gnueabihf \
gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
# 3. Install cmake 3.10 or above
wget -c https://mms-res.cdn.bcebos.com/cmake-3.10.3-Linux-x86_64.tar.gz && \
tar xzf cmake-3.10.3-Linux-x86_64.tar.gz && \
mv cmake-3.10.3-Linux-x86_64 /opt/cmake-3.10 && \
ln -s /opt/cmake-3.10/bin/cmake /usr/bin/cmake && \
ln -s /opt/cmake-3.10/bin/ccmake /usr/bin/ccmake
```
## FastDeploy cross-compilation library compilation based on PaddleLite
After setting up the cross-compilation environment, the compilation command is as follows:
```bash
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
# CMake configuration with RK toolchain
cmake -DCMAKE_TOOLCHAIN_FILE=./../cmake/toolchain.cmake \
-DWITH_TIMVX=ON \
-DTARGET_ABI=armhf \
-DCMAKE_INSTALL_PREFIX=fastdeploy-tmivx \
      -DENABLE_VISION=ON \
      -Wno-dev ..
# Note: -DENABLE_VISION=ON is optional; it controls whether the vision model deployment module is compiled
# Build FastDeploy RV1126 C++ SDK
make -j8
make install
```
After the compilation is complete, the fastdeploy-tmivx directory will be generated, indicating that the FastDeploy library based on PaddleLite TIM-VX has been compiled.
## Prepare the SoC environment
Before deployment, ensure that the version of the Verisilicon Linux kernel NPU driver (galcore) meets the requirements. Log in to the development board and run the following command to query the NPU driver version. The recommended Rockchip driver version is 6.4.6.5:
```bash
dmesg | grep Galcore
```
If the current version does not comply with the above, please read the following content carefully to ensure that the underlying NPU driver environment is correct.
There are two ways to modify the current NPU driver version:
1. Manually replace the NPU driver version. (recommended)
2. Reflash the device with firmware that provides the required NPU driver version.
### Manually replace the NPU driver version
1. Use the following command to download and decompress the PaddleLite demo, which provides ready-made driver files
```bash
wget https://paddlelite-demo.bj.bcebos.com/devices/generic/PaddleLite-generic-demo.tar.gz
tar -xf PaddleLite-generic-demo.tar.gz
```
2. Use `uname -a` to check the `Linux Kernel` version; confirm that it is 4.19.111.
3. Upload `galcore.ko` from the `PaddleLite-generic-demo/libs/PaddleLite/linux/armhf/lib/verisilicon_timvx/viv_sdk_6_4_6_5/lib/1126/4.19.111/` path to the development board.
4. Log in to the development board, enter `sudo rmmod galcore` on the command line to unload the original driver, then enter `sudo insmod galcore.ko` to load the uploaded driver. (Whether sudo is needed depends on the board; for some adb-connected devices, run `adb root` first.) If this step fails, go to method 2.
5. Enter `dmesg | grep Galcore` on the development board to query the NPU driver version and confirm that it is 6.4.6.5; steps 3-5 are condensed in the sketch below.
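A shell sketch of the driver replacement (assuming the board is reachable over adb and `/tmp` is writable; both are assumptions, adjust to your board):
```bash
# Push the matching driver from the demo archive to the board
adb push PaddleLite-generic-demo/libs/PaddleLite/linux/armhf/lib/verisilicon_timvx/viv_sdk_6_4_6_5/lib/1126/4.19.111/galcore.ko /tmp/
# Swap the driver: unload the original, then load the uploaded one
adb shell "sudo rmmod galcore && sudo insmod /tmp/galcore.ko"
# Confirm the driver version now reports 6.4.6.5
adb shell "dmesg | grep Galcore"
```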
### Flashing
Depending on the specific development board model, ask the board vendor or official customer service for the firmware and flashing method corresponding to NPU driver version 6.4.6.5.
For more details, please refer to: [PaddleLite prepares the device environment](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html#zhunbeishebeihuanjing)
## Deployment example based on FastDeploy on RV1126
1. For deploying the PaddleClas classification model on RV1126, please refer to: [C++ deployment example of PaddleClas classification model on RV1126](../../../examples/vision/classification/paddleclas/rv1126/README.md)
2. For deploying PPYOLOE detection model on RV1126, please refer to: [C++ deployment example of PPYOLOE detection model on RV1126](../../../examples/vision/detection/paddledetection/rv1126/README.md)
3. For deploying YOLOv5 detection model on RV1126, please refer to: [C++ Deployment Example of YOLOv5 Detection Model on RV1126](../../../examples/vision/detection/yolov5/rv1126/README.md)
4. For deploying PP-LiteSeg segmentation model on RV1126, please refer to: [C++ Deployment Example of PP-LiteSeg Segmentation Model on RV1126](../../../examples/vision/segmentation/paddleseg/rv1126/README.md)

View File

@@ -0,0 +1,78 @@
# How to Build KunlunXin XPU Deployment Environment
FastDeploy supports AI deployment on KunlunXin XPU based on the Paddle-Lite backend. For more detailed information, please refer to: [PaddleLite Deployment Example](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/kunlunxin_xpu.html#xpu).
This document describes how to compile the C++ FastDeploy library based on PaddleLite.
The relevant compilation options are described as follows:
|Compile Options|Default Values|Description|Remarks|
|:---|:---|:---|:---|
| ENABLE_LITE_BACKEND | OFF | It needs to be set to ON when compiling the KunlunXin XPU library| - |
| WITH_XPU | OFF | It needs to be set to ON when compiling the KunlunXin XPU library| - |
| ENABLE_ORT_BACKEND | OFF | whether to integrate the ONNX Runtime backend | - |
| ENABLE_PADDLE_BACKEND | OFF | whether to integrate the Paddle Inference backend | - |
| ENABLE_OPENVINO_BACKEND | OFF | whether to integrate the OpenVINO backend | - |
| ENABLE_VISION | OFF | whether to integrate vision models | - |
| ENABLE_TEXT | OFF | whether to integrate text models | - |
The configuration of third-party libraries (optional; if the following options are not defined, prebuilt third-party libraries will be downloaded automatically while building FastDeploy):
| Option | Description |
| :---------------------- | :--------------------------------------------------------------------------------------------- |
| ORT_DIRECTORY | When ENABLE_ORT_BACKEND=ON, use ORT_DIRECTORY to specify your own ONNX Runtime library path. |
| OPENCV_DIRECTORY | When ENABLE_VISION=ON, use OPENCV_DIRECTORY to specify your own OpenCV library path. |
| OPENVINO_DIRECTORY | When ENABLE_OPENVINO_BACKEND=ON, use OPENVINO_DIRECTORY to specify your own OpenVINO library path. |
For more compilation options, please refer to [Description of FastDeploy compilation options](./README.md)
## C++ FastDeploy library compilation based on PaddleLite
- OS: Linux
- gcc/g++: version >= 8.2
- cmake: version >= 3.15
It is recommended to install the OpenCV library manually and define `-DOPENCV_DIRECTORY` to set the path of the OpenCV library (if the flag is not defined, a prebuilt OpenCV library will be downloaded automatically while building FastDeploy, but the prebuilt OpenCV does not support reading video files or functions such as `imshow`):
```
sudo apt-get install libopencv-dev
```
The compilation command is as follows:
```bash
# Download the latest source code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
# CMake configuration with KunlunXin xpu toolchain
cmake -DWITH_XPU=ON \
-DWITH_GPU=OFF \
-DENABLE_ORT_BACKEND=ON \
-DENABLE_PADDLE_BACKEND=ON \
-DCMAKE_INSTALL_PREFIX=fastdeploy-xpu \
-DENABLE_VISION=ON \
-DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \
..
# Build FastDeploy KunlunXin XPU C++ SDK
make -j8
make install
```
After the compilation is complete, the fastdeploy-xpu directory will be generated, indicating that the PaddleLite-based FastDeploy library has been compiled.
## Python compilation
The compilation command is as follows:
```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
export WITH_XPU=ON
export WITH_GPU=OFF
export ENABLE_ORT_BACKEND=ON
export ENABLE_PADDLE_BACKEND=ON
export ENABLE_VISION=ON
# The OPENCV_DIRECTORY is optional, if not exported, a prebuilt OpenCV library will be downloaded
export OPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
python setup.py build
python setup.py bdist_wheel
```
After the compilation is completed, the compiled `wheel` package will be generated in the `FastDeploy/python/dist` directory; install it directly with pip.
If you modify the compilation parameters, delete the `build` and `.setuptools-cmake-build` subdirectories under `FastDeploy/python` before recompiling to avoid stale cache effects; see the sketch below.
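A minimal sketch of both steps (the wheel filename varies with your Python version and platform):
```bash
cd FastDeploy/python
# Install the freshly built wheel from the dist directory
pip install dist/fastdeploy*.whl

# When changing compilation parameters, clear the caches before rebuilding
rm -rf build .setuptools-cmake-build
python setup.py build && python setup.py bdist_wheel
```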

View File

@@ -1,3 +1,5 @@
English | [中文](../../cn/faq/use_sdk_on_linux.md)
# C++ Deployment on Linux
1. After compilation, executing the binary file throws the error `error while loading shared libraries`
@@ -7,7 +9,7 @@ When we execute the binary file, it requires the dependent libraries can be foun
./infer_ppyoloe_demo: error while loading shared libraries: libonnxruntime.so.1.12.0: cannot open shared object file: No such file or directory
```
FastDeploy provides a shell script to help export the library paths to `LD_LIBRARY_PATH`; execute the follwing command
FastDeploy provides a shell script to help export the library paths to `LD_LIBRARY_PATH`; execute the following command
```
source /Downloads/fastdeploy-linux-x64-1.0.0/fastdeploy_init.sh

View File

@@ -1,3 +1,5 @@
English | [中文](../../cn/faq/use_sdk_on_windows.md)
# Using the FastDeploy C++ SDK on Windows Platform
## Contents

View File

@@ -48,13 +48,7 @@ function(add_fastdeploy_executable FIELD CC_FILE)
if(EXISTS ${TEMP_TARGET_FILE} AND TARGET fastdeploy)
add_executable(${TEMP_TARGET_NAME} ${TEMP_TARGET_FILE})
target_link_libraries(${TEMP_TARGET_NAME} PUBLIC fastdeploy)
if(TARGET gflags)
if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
target_link_libraries(${TEMP_TARGET_NAME} PRIVATE gflags pthread)
else()
target_link_libraries(${TEMP_TARGET_NAME} PRIVATE gflags)
endif()
endif()
target_link_libraries(${TEMP_TARGET_NAME} PRIVATE ${GFLAGS_LIBRARIES})
config_fastdeploy_executable_link_flags(${TEMP_TARGET_NAME})
math(EXPR _EXAMPLES_NUM "${EXAMPLES_NUM} + 1")
set(EXAMPLES_NUM ${_EXAMPLES_NUM} PARENT_SCOPE)

View File

@@ -0,0 +1,11 @@
# Deploying PaddleClas Quantized Models on A311D
FastDeploy now supports deploying PaddleClas quantized models to A311D based on PaddleLite.
For model quantization and downloading quantized models, please refer to: [Model Quantization](../quantize/README.md)
## Detailed Deployment Documents
Only C++ deployment is supported on A311D.
- [C++ Deployment](cpp)

View File

@@ -0,0 +1,38 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Specify the path of the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# Add FastDeploy dependent header files
include_directories(${FASTDEPLOY_INCS})
include_directories(${FastDeploy_INCLUDE_DIRS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Add the FastDeploy library dependency
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
install(TARGETS infer_demo DESTINATION ./)
install(DIRECTORY models DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
# install(DIRECTORY run_with_adb.sh DESTINATION ./)
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
file(GLOB ADB_TOOLS run_with_adb.sh)
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)

View File

@@ -0,0 +1,53 @@
# PaddleClas C++ Deployment Example on the A311D Development Board
The `infer.cc` provided in this directory helps users quickly deploy PaddleClas quantized models on A311D with inference acceleration.
## Deployment Preparation
### Preparing the FastDeploy cross-compilation environment
- 1. For the software and hardware requirements and how to set up the cross-compilation environment, please refer to: [FastDeploy cross-compilation environment preparation](../../../../../../docs/cn/build_and_install/a311d.md#交叉编译环境搭建)
### Preparing the quantized model
- 1. Users can directly deploy the quantized models provided by FastDeploy.
- 2. Users can quantize models themselves with FastDeploy's [one-click auto-compression tool](../../../../../../tools/common_tools/auto_compression/) and deploy the resulting quantized model. (Note: inference with a quantized classification model still requires the inference_cls.yaml file from the FP32 model directory; a self-quantized model directory does not contain this yaml file, so copy it from the FP32 model directory into the quantized model directory.)
- For more information on quantization, see [Model Quantization](../../quantize/README.md)
## Deploying the quantized ResNet50_Vd classification model on A311D
Follow these steps to deploy the ResNet50_Vd quantized model on A311D:
1. Cross-compile the FastDeploy library; for details, refer to: [Cross-compiling FastDeploy](../../../../../../docs/cn/build_and_install/a311d.md#基于-paddlelite-的-fastdeploy-交叉编译库编译)
2. Copy the compiled library to the current directory; you can use the following command:
```bash
cp -r FastDeploy/build/fastdeploy-tmivx/ FastDeploy/examples/vision/classification/paddleclas/a311d/cpp/
```
3. Download the model and example image needed for deployment into the current directory:
```bash
mkdir models && mkdir images
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
tar -xvf ResNet50_vd_infer.tgz
cp -r ResNet50_vd_infer models
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
cp -r ILSVRC2012_val_00000010.jpeg images
```
4. Build the deployment example with the following commands:
```bash
mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx -DTARGET_ABI=arm64 ..
make -j8
make install
# After a successful build, an install folder is generated, containing the runnable demo and the libraries needed for deployment
```
5. Deploy the ResNet50_vd classification model to the Amlogic A311D via adb; you can use the following commands:
```bash
# Enter the install directory
cd FastDeploy/examples/vision/classification/paddleclas/a311d/cpp/build/install/
# The command below means: bash run_with_adb.sh <demo-to-run> <model-path> <image-path> <device DEVICE_ID>
bash run_with_adb.sh infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg $DEVICE_ID
```
After successful deployment, the running result is as follows:
<img width="640" src="https://user-images.githubusercontent.com/30516196/200767389-26519e50-9e4f-4fe1-8d52-260718f73476.png">
Note in particular that models deployed on A311D must be quantized models; for model quantization, refer to: [Model Quantization](../../../../../../docs/cn/quantize.md)

View File

@@ -0,0 +1,60 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "inference.pdmodel";
auto params_file = model_dir + sep + "inference.pdiparams";
auto config_file = model_dir + sep + "inference_cls.yaml";
fastdeploy::RuntimeOption option;
option.UseTimVX();
auto model = fastdeploy::vision::classification::PaddleClasModel(
model_file, params_file, config_file, option);
assert(model.Initialized());
auto im = cv::imread(image_file);
fastdeploy::vision::ClassifyResult res;
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout << "Usage: infer_demo path/to/quant_model "
"path/to/image "
"e.g ./infer_demo ./ResNet50_vd_quant ./test.jpeg"
<< std::endl;
return -1;
}
std::string model_dir = argv[1];
std::string test_image = argv[2];
InitAndInfer(model_dir, test_image);
return 0;
}

View File

@@ -0,0 +1,47 @@
#!/bin/bash
HOST_SPACE=${PWD}
echo ${HOST_SPACE}
WORK_SPACE=/data/local/tmp/test
# The first parameter represents the demo name
DEMO_NAME=image_classification_demo
if [ -n "$1" ]; then
DEMO_NAME=$1
fi
# The second parameter represents the model name
MODEL_NAME=mobilenet_v1_fp32_224
if [ -n "$2" ]; then
MODEL_NAME=$2
fi
# The third parameter indicates the name of the image to be tested
IMAGE_NAME=0001.jpg
if [ -n "$3" ]; then
IMAGE_NAME=$3
fi
# The fourth parameter represents the ID of the device
ADB_DEVICE_NAME=
if [ -n "$4" ]; then
ADB_DEVICE_NAME="-s $4"
fi
# Set the environment variables required during the running process
EXPORT_ENVIRONMENT_VARIABLES="export GLOG_v=5; export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1; export VIV_VX_SET_PER_CHANNEL_ENTROPY=100; export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=300000; export VSI_NN_LOG_LEVEL=5;"
EXPORT_ENVIRONMENT_VARIABLES="${EXPORT_ENVIRONMENT_VARIABLES}export LD_LIBRARY_PATH=${WORK_SPACE}/lib:\$LD_LIBRARY_PATH;"
# Please install adb, and DON'T run this in the docker.
set -e
adb $ADB_DEVICE_NAME shell "rm -rf $WORK_SPACE"
adb $ADB_DEVICE_NAME shell "mkdir -p $WORK_SPACE"
# Upload the demo, librarys, model and test images to the device
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/lib $WORK_SPACE
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/${DEMO_NAME} $WORK_SPACE
adb $ADB_DEVICE_NAME push models $WORK_SPACE
adb $ADB_DEVICE_NAME push images $WORK_SPACE
# Execute the deployment demo
adb $ADB_DEVICE_NAME shell "cd $WORK_SPACE; ${EXPORT_ENVIRONMENT_VARIABLES} chmod +x ./${DEMO_NAME}; ./${DEMO_NAME} ./models/${MODEL_NAME} ./images/$IMAGE_NAME"
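For reference (not part of the script), a typical invocation matches the classification README earlier in this change; the fourth argument may be omitted when only one device is attached:
```bash
bash run_with_adb.sh infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg $DEVICE_ID
```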

View File

@@ -30,6 +30,10 @@ wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/Ima
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 1
# TensorRT inference on GPU
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 2
# Inference on IPU
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 3
# Inference on KunlunXin XPU
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 4
```
The commands above apply only to Linux or macOS. For how to use the SDK on Windows, please refer to:

examples/vision/classification/paddleclas/cpp/infer.cc Normal file → Executable file
View File

@@ -96,6 +96,32 @@ void IpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << res.Str() << std::endl;
}
void XpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "inference.pdmodel";
auto params_file = model_dir + sep + "inference.pdiparams";
auto config_file = model_dir + sep + "inference_cls.yaml";
auto option = fastdeploy::RuntimeOption();
option.UseXpu();
auto model = fastdeploy::vision::classification::PaddleClasModel(
model_file, params_file, config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
fastdeploy::vision::ClassifyResult res;
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
// print res
std::cout << res.Str() << std::endl;
}
void TrtInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "inference.pdmodel";
auto params_file = model_dir + sep + "inference.pdiparams";
@@ -128,7 +154,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_demo ./ResNet50_vd ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend."
"with gpu; 2: run with gpu and use tensorrt backend; 3: run with ipu; 4: run with xpu."
<< std::endl;
return -1;
}
@@ -141,6 +167,8 @@ int main(int argc, char* argv[]) {
TrtInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
IpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 4) {
XpuInfer(argv[1], argv[2]);
}
return 0;
}

View File

@@ -25,6 +25,8 @@ python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg -
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --use_trt True --topk 1
# Inference on IPU (note: the first IPU run serializes the model, which takes some time; please be patient)
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device ipu --topk 1
# Inference on KunlunXin XPU
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device xpu --topk 1
```
After running, the returned result is as follows

View File

@@ -35,6 +35,9 @@ def build_option(args):
if args.device.lower() == "ipu":
option.use_ipu()
if args.device.lower() == "xpu":
option.use_xpu()
if args.use_trt:
option.use_trt_backend()
return option

View File

@@ -32,7 +32,7 @@ cp -r ILSVRC2012_val_00000010.jpeg images
4. Build the deployment example with the following commands:
```bash
mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/timvx.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx ..
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx -DTARGET_ABI=armhf ..
make -j8
make install
# After a successful build, an install folder is generated, containing the runnable demo and the libraries needed for deployment

View File

@@ -48,7 +48,6 @@ int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout << "Usage: infer_demo path/to/quant_model "
"path/to/image "
"run_option, "
"e.g ./infer_demo ./ResNet50_vd_quant ./test.jpeg"
<< std::endl;
return -1;

View File

@@ -84,7 +84,6 @@ class TritonPythonModel:
be the same as `requests`
"""
responses = []
# print("num:", len(requests), flush=True)
for request in requests:
infer_outputs = pb_utils.get_input_tensor_by_name(
request, self.input_names[0])

View File

@@ -46,5 +46,5 @@ model = fd.vision.classification.ResNet(
args.model, runtime_option=runtime_option)
# Predict the image classification result
im = cv2.imread(args.image)
result = model.predict(im.copy(), args.topk)
result = model.predict(im, args.topk)
print(result)

View File

@@ -47,5 +47,5 @@ model = fd.vision.classification.YOLOv5Cls(
# Predict the image classification result
im = cv2.imread(args.image)
result = model.predict(im.copy(), args.topk)
result = model.predict(im, args.topk)
print(result)

View File

@@ -12,7 +12,7 @@ FastDeploy currently supports deployment of the following object detection models
| [PaddleDetection/FasterRCNN](./paddledetection) | FasterRCNN series models | Paddle | [Release/2.4](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4) |
| [WongKinYiu/YOLOv7](./yolov7) | YOLOv7, YOLOv7-X and other series models | ONNX | [Release/v0.1](https://github.com/WongKinYiu/yolov7/tree/v0.1) |
| [RangiLyu/NanoDetPlus](./nanodet_plus) | NanoDetPlus series models | ONNX | [Release/v1.0.0-alpha-1](https://github.com/RangiLyu/nanodet/tree/v1.0.0-alpha-1) |
| [ultralytics/YOLOv5](./yolov5) | YOLOv5 series models | ONNX | [Release/v6.0](https://github.com/ultralytics/yolov5/tree/v6.0) |
| [ultralytics/YOLOv5](./yolov5) | YOLOv5 series models | ONNX | [Release/v7.0](https://github.com/ultralytics/yolov5/tree/v7.0) |
| [ppogg/YOLOv5-Lite](./yolov5lite) | YOLOv5-Lite series models | ONNX | [Release/v1.4](https://github.com/ppogg/YOLOv5-Lite/releases/tag/v1.4) |
| [meituan/YOLOv6](./yolov6) | YOLOv6 series models | ONNX | [Release/0.1.0](https://github.com/meituan/YOLOv6/releases/tag/0.1.0) |
| [WongKinYiu/YOLOR](./yolor) | YOLOR series models | ONNX | [Release/weights](https://github.com/WongKinYiu/yolor/releases/tag/weights) |

View File

@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -30,7 +29,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -73,7 +71,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -82,7 +79,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

View File

@@ -52,7 +52,7 @@ if args.image is None:
else:
image = args.image
im = cv2.imread(image)
result = model.predict(im.copy())
result = model.predict(im)
print(result)
# Visualize the prediction results

View File

@@ -16,6 +16,10 @@
- [FasterRCNN series models](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/faster_rcnn)
- [MaskRCNN series models](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/mask_rcnn)
- [SSD series models](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.5/configs/ssd)
- [YOLOv5 series models](https://github.com/PaddlePaddle/PaddleYOLO/tree/release/2.5/configs/yolov5)
- [YOLOv6 series models](https://github.com/PaddlePaddle/PaddleYOLO/tree/release/2.5/configs/yolov6)
- [YOLOv7 series models](https://github.com/PaddlePaddle/PaddleYOLO/tree/release/2.5/configs/yolov7)
- [RTMDet series models](https://github.com/PaddlePaddle/PaddleYOLO/tree/release/2.5/configs/rtmdet)
## Export the Deployment Model
@@ -23,6 +27,7 @@
**Note**
- Do not strip NMS when exporting the model; just export it normally
- If the model will run on the native TensorRT backend (not the Paddle Inference backend), do not add the --trt parameter
- When exporting the model, do not add the `fuse_normalize=True` parameter; see the sketch after this list
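For illustration, an export that respects all three notes might look like the sketch below; the config and weights paths are examples, and the flag spellings should be checked against the PaddleDetection export documentation:
```bash
# Export PP-YOLOE as-is: keep NMS, no --trt, no fuse_normalize=True
python tools/export_model.py \
    -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml \
    -o weights=output/ppyoloe_crn_l_300e_coco/model_final.pdparams
```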
## Download Pretrained Models
@@ -43,9 +48,18 @@
| [yolox_s_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/yolox_s_300e_coco.tgz) | 35MB | Box AP 40.4% | |
| [faster_rcnn_r50_vd_fpn_2x_coco](https://bj.bcebos.com/paddlehub/fastdeploy/faster_rcnn_r50_vd_fpn_2x_coco.tgz) | 160MB | Box AP 40.8%| TensorRT not supported yet |
| [mask_rcnn_r50_1x_coco](https://bj.bcebos.com/paddlehub/fastdeploy/mask_rcnn_r50_1x_coco.tgz) | 128M | Box AP 37.4%, Mask AP 32.8%| TensorRT and ORT not supported yet |
| [ssd_mobilenet_v1_300_120e_voc](https://bj.bcebos.com/paddlehub/fastdeploy/ssd_mobilenet_v1_300_120e_voc.tgz) | 21.7M | Box AP 73.8%| TensorRT and ORT not supported yet |
| [ssd_vgg16_300_240e_voc](https://bj.bcebos.com/paddlehub/fastdeploy/ssd_vgg16_300_240e_voc.tgz) | 97.7M | Box AP 77.8%| TensorRT and ORT not supported yet |
| [ssdlite_mobilenet_v1_300_coco](https://bj.bcebos.com/paddlehub/fastdeploy/ssdlite_mobilenet_v1_300_coco.tgz) | 24.4M | | TensorRT and ORT not supported yet |
| [ssd_mobilenet_v1_300_120e_voc](https://bj.bcebos.com/paddlehub/fastdeploy/ssd_mobilenet_v1_300_120e_voc.tgz) | 24.9M | Box AP 73.8%| TensorRT and ORT not supported yet |
| [ssd_vgg16_300_240e_voc](https://bj.bcebos.com/paddlehub/fastdeploy/ssd_vgg16_300_240e_voc.tgz) | 106.5M | Box AP 77.8%| TensorRT and ORT not supported yet |
| [ssdlite_mobilenet_v1_300_coco](https://bj.bcebos.com/paddlehub/fastdeploy/ssdlite_mobilenet_v1_300_coco.tgz) | 29.1M | | TensorRT and ORT not supported yet |
| [rtmdet_l_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/rtmdet_l_300e_coco.tgz) | 224M | Box AP 51.2%| |
| [rtmdet_s_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/rtmdet_s_300e_coco.tgz) | 42M | Box AP 44.5%| |
| [yolov5_l_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5_l_300e_coco.tgz) | 183M | Box AP 48.9%| |
| [yolov5_s_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5_s_300e_coco.tgz) | 31M | Box AP 37.6%| |
| [yolov6_l_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6_l_300e_coco.tgz) | 229M | Box AP 51.0%| |
| [yolov6_s_400e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6_s_400e_coco.tgz) | 68M | Box AP 43.4%| |
| [yolov7_l_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7_l_300e_coco.tgz) | 145M | Box AP 51.0%| |
| [yolov7_x_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7_x_300e_coco.tgz) | 277M | Box AP 53.0%| |
## Detailed Deployment Documents
- [Python Deployment](python)

View File

@@ -0,0 +1,11 @@
# Deploying PP-YOLOE Quantized Models on A311D
FastDeploy now supports deploying PP-YOLOE quantized models to A311D based on PaddleLite.
For model quantization and downloading quantized models, please refer to: [Model Quantization](../quantize/README.md)
## Detailed Deployment Documents
Only C++ deployment is supported on A311D.
- [C++ Deployment](cpp)

View File

@@ -0,0 +1,38 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Specify the path of the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# Add FastDeploy dependent header files
include_directories(${FASTDEPLOY_INCS})
include_directories(${FastDeploy_INCLUDE_DIRS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe.cc)
# Add the FastDeploy library dependency
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
install(TARGETS infer_demo DESTINATION ./)
install(DIRECTORY models DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
# install(DIRECTORY run_with_adb.sh DESTINATION ./)
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
file(GLOB ADB_TOOLS run_with_adb.sh)
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)

View File

@@ -0,0 +1,55 @@
# PP-YOLOE Quantized Model C++ Deployment Example
The `infer.cc` provided in this directory helps users quickly deploy PP-YOLOE quantized models on A311D with inference acceleration.
## Deployment Preparation
### Preparing the FastDeploy cross-compilation environment
- 1. For the software and hardware requirements and how to set up the cross-compilation environment, please refer to: [FastDeploy cross-compilation environment preparation](../../../../../../docs/cn/build_and_install/a311d.md#交叉编译环境搭建)
### Preparing the model
- 1. Users can directly deploy the quantized models provided by FastDeploy.
- 2. Users can export a Float32 model with PaddleDetection themselves (note: set the parameter use_shared_conv=False when exporting the model); for more details, see [PP-YOLOE](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/ppyoloe)
- 3. Users can quantize models themselves with FastDeploy's [one-click auto-compression tool](../../../../../../tools/common_tools/auto_compression/) and deploy the resulting quantized model. (Note: inference with a quantized detection model still requires the infer_cfg.yml file from the FP32 model directory; a self-quantized model directory does not contain this yaml file, so copy it from the FP32 model directory into the quantized model directory.)
- For more information on quantization, see [Model Quantization](../../quantize/README.md)
## Deploying the quantized PP-YOLOE detection model on A311D
Follow these steps to deploy the PP-YOLOE quantized model on A311D:
1. Cross-compile the FastDeploy library; for details, refer to: [Cross-compiling FastDeploy](../../../../../../docs/cn/build_and_install/a311d.md#基于-paddlelite-的-fastdeploy-交叉编译库编译)
2. Copy the compiled library to the current directory; you can use the following command:
```bash
cp -r FastDeploy/build/fastdeploy-tmivx/ FastDeploy/examples/vision/detection/yolov5/a311d/cpp
```
3. Download the model and example image needed for deployment into the current directory:
```bash
mkdir models && mkdir images
wget https://bj.bcebos.com/fastdeploy/models/ppyoloe_noshare_qat.tar.gz
tar -xvf ppyoloe_noshare_qat.tar.gz
cp -r ppyoloe_noshare_qat models
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
cp -r 000000014439.jpg images
```
4. Build the deployment example with the following commands:
```bash
mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx -DTARGET_ABI=arm64 ..
make -j8
make install
# After a successful build, an install folder is generated, containing the runnable demo and the libraries needed for deployment
```
5. Deploy the PP-YOLOE detection model to the Amlogic A311D via adb:
```bash
# Enter the install directory
cd FastDeploy/examples/vision/detection/paddledetection/a311d/cpp/build/install/
# The command below means: bash run_with_adb.sh <demo-to-run> <model-path> <image-path> <device DEVICE_ID>
bash run_with_adb.sh infer_demo ppyoloe_noshare_qat 000000014439.jpg $DEVICE_ID
```
After successful deployment, the running result is as follows:
<img width="640" src="https://user-images.githubusercontent.com/30516196/203708564-43c49485-9b48-4eb2-8fe7-0fa517979fff.png">
Note in particular that models deployed on A311D must be quantized models; for model quantization, refer to: [Model Quantization](../../../../../../docs/cn/quantize.md)

View File

@@ -0,0 +1,65 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto subgraph_file = model_dir + sep + "subgraph.txt";
fastdeploy::RuntimeOption option;
option.UseTimVX();
option.SetLiteSubgraphPartitionPath(subgraph_file);
auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file,
config_file, option);
assert(model.Initialized());
auto im = cv::imread(image_file);
fastdeploy::vision::DetectionResult res;
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout << "Usage: infer_demo path/to/quant_model "
"path/to/image "
"e.g ./infer_demo ./PPYOLOE_L_quant ./test.jpeg"
<< std::endl;
return -1;
}
std::string model_dir = argv[1];
std::string test_image = argv[2];
InitAndInfer(model_dir, test_image);
return 0;
}

View File

@@ -0,0 +1,47 @@
#!/bin/bash
HOST_SPACE=${PWD}
echo ${HOST_SPACE}
WORK_SPACE=/data/local/tmp/test
# The first parameter represents the demo name
DEMO_NAME=image_classification_demo
if [ -n "$1" ]; then
DEMO_NAME=$1
fi
# The second parameter represents the model name
MODEL_NAME=mobilenet_v1_fp32_224
if [ -n "$2" ]; then
MODEL_NAME=$2
fi
# The third parameter indicates the name of the image to be tested
IMAGE_NAME=0001.jpg
if [ -n "$3" ]; then
IMAGE_NAME=$3
fi
# The fourth parameter represents the ID of the device
ADB_DEVICE_NAME=
if [ -n "$4" ]; then
ADB_DEVICE_NAME="-s $4"
fi
# Set the environment variables required during the running process
EXPORT_ENVIRONMENT_VARIABLES="export GLOG_v=5; export SUBGRAPH_ONLINE_MODE=true; export RKNPU_LOGLEVEL=5; export RKNN_LOG_LEVEL=5; ulimit -c unlimited; export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1; export VIV_VX_SET_PER_CHANNEL_ENTROPY=100; export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=300000; export VSI_NN_LOG_LEVEL=5;"
EXPORT_ENVIRONMENT_VARIABLES="${EXPORT_ENVIRONMENT_VARIABLES}export LD_LIBRARY_PATH=${WORK_SPACE}/lib:\$LD_LIBRARY_PATH;"
# Please install adb, and DON'T run this in the docker.
set -e
adb $ADB_DEVICE_NAME shell "rm -rf $WORK_SPACE"
adb $ADB_DEVICE_NAME shell "mkdir -p $WORK_SPACE"
# Upload the demo, libraries, model and test images to the device
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/lib $WORK_SPACE
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/${DEMO_NAME} $WORK_SPACE
adb $ADB_DEVICE_NAME push models $WORK_SPACE
adb $ADB_DEVICE_NAME push images $WORK_SPACE
# Execute the deployment demo
adb $ADB_DEVICE_NAME shell "cd $WORK_SPACE; ${EXPORT_ENVIRONMENT_VARIABLES} chmod +x ./${DEMO_NAME}; ./${DEMO_NAME} ./models/${MODEL_NAME} ./images/$IMAGE_NAME"

View File

@@ -29,3 +29,18 @@ target_link_libraries(infer_ppyolo_demo ${FASTDEPLOY_LIBS})
add_executable(infer_mask_rcnn_demo ${PROJECT_SOURCE_DIR}/infer_mask_rcnn.cc)
target_link_libraries(infer_mask_rcnn_demo ${FASTDEPLOY_LIBS})
add_executable(infer_ssd_demo ${PROJECT_SOURCE_DIR}/infer_ssd.cc)
target_link_libraries(infer_ssd_demo ${FASTDEPLOY_LIBS})
add_executable(infer_yolov5_demo ${PROJECT_SOURCE_DIR}/infer_yolov5.cc)
target_link_libraries(infer_yolov5_demo ${FASTDEPLOY_LIBS})
add_executable(infer_yolov6_demo ${PROJECT_SOURCE_DIR}/infer_yolov6.cc)
target_link_libraries(infer_yolov6_demo ${FASTDEPLOY_LIBS})
add_executable(infer_yolov7_demo ${PROJECT_SOURCE_DIR}/infer_yolov7.cc)
target_link_libraries(infer_yolov7_demo ${FASTDEPLOY_LIBS})
add_executable(infer_rtmdet_demo ${PROJECT_SOURCE_DIR}/infer_rtmdet.cc)
target_link_libraries(infer_rtmdet_demo ${FASTDEPLOY_LIBS})

View File

@@ -1,6 +1,6 @@
# PaddleDetection C++ Deployment Example
This directory provides `infer_xxx.cc` examples that quickly deploy PaddleDetection models (including PPYOLOE/PicoDet/YOLOX/YOLOv3/PPYOLO/FasterRCNN) on CPU/GPU, as well as on GPU with TensorRT acceleration.
This directory provides `infer_xxx.cc` examples that quickly deploy PaddleDetection models (including PPYOLOE/PicoDet/YOLOX/YOLOv3/PPYOLO/FasterRCNN/YOLOv5/YOLOv6/YOLOv7/RTMDet) on CPU/GPU, as well as on GPU with TensorRT acceleration.
Before deployment, confirm the following two steps
@@ -41,7 +41,7 @@ tar xvf ppyoloe_crn_l_300e_coco.tgz
### Model Classes
PaddleDetection currently supports 6 model series; the class names are `PPYOLOE`, `PicoDet`, `PaddleYOLOX`, `PPYOLO`, `FasterRCNN`. The constructors and prediction functions of all these classes take exactly the same parameters. This document uses PPYOLOE as an example to explain the API
PaddleDetection currently supports 10 model series; the class names are `PPYOLOE`, `PicoDet`, `PaddleYOLOX`, `PPYOLO`, `FasterRCNN`, `SSD`, `PaddleYOLOv5`, `PaddleYOLOv6`, `PaddleYOLOv7`, `RTMDet`. The constructors and prediction functions of all these classes take exactly the same parameters. This document uses PPYOLOE as an example to explain the API
```c++
fastdeploy::vision::detection::PPYOLOE(
const string& model_file,

View File

@@ -0,0 +1,129 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void CpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseCpu();
auto model = fastdeploy::vision::detection::RTMDet(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void GpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
auto model = fastdeploy::vision::detection::RTMDet(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void TrtInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
option.UseTrtBackend();
auto model = fastdeploy::vision::detection::RTMDet(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
// Keep a clean copy for visualization since Predict may modify im in place
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu."
<< std::endl;
return -1;
}
if (std::atoi(argv[3]) == 0) {
CpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
}
return 0;
}

View File

@@ -0,0 +1,129 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void CpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseCpu();
auto model = fastdeploy::vision::detection::PaddleYOLOv5(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void GpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
auto model = fastdeploy::vision::detection::PaddleYOLOv5(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void TrtInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
option.UseTrtBackend();
auto model = fastdeploy::vision::detection::PaddleYOLOv5(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
// Keep a clean copy for visualization since Predict may modify im in place
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu."
<< std::endl;
return -1;
}
if (std::atoi(argv[3]) == 0) {
CpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
}
return 0;
}

View File

@@ -0,0 +1,129 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void CpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseCpu();
auto model = fastdeploy::vision::detection::PaddleYOLOv6(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void GpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
auto model = fastdeploy::vision::detection::PaddleYOLOv6(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void TrtInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
option.UseTrtBackend();
auto model = fastdeploy::vision::detection::PaddleYOLOv6(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
// Keep a clean copy for visualization since Predict may modify im in place
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu."
<< std::endl;
return -1;
}
if (std::atoi(argv[3]) == 0) {
CpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
}
return 0;
}

View File

@@ -0,0 +1,128 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void CpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseCpu();
auto model = fastdeploy::vision::detection::PaddleYOLOv7(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void GpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
auto model = fastdeploy::vision::detection::PaddleYOLOv7(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void TrtInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
option.UseTrtBackend();
auto model = fastdeploy::vision::detection::PaddleYOLOv7(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
// Keep a clean copy for visualization since Predict may modify im in place
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu."
<< std::endl;
return -1;
}
if (std::atoi(argv[3]) == 0) {
CpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
}
return 0;
}

View File

@@ -41,6 +41,10 @@ fastdeploy.vision.detection.PPYOLO(model_file, params_file, config_file, runtime
fastdeploy.vision.detection.FasterRCNN(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
fastdeploy.vision.detection.MaskRCNN(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
fastdeploy.vision.detection.SSD(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
fastdeploy.vision.detection.PaddleYOLOv5(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
fastdeploy.vision.detection.PaddleYOLOv6(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
fastdeploy.vision.detection.PaddleYOLOv7(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
fastdeploy.vision.detection.RTMDet(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
```
Loads and initializes a PaddleDetection model, where `model_file` and `params_file` are the exported Paddle deployment model files, and `config_file` is the deployment configuration YAML file exported alongside them by PaddleDetection.
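For reference, the newly added classes are constructed exactly like the existing ones. A minimal Python sketch (the `yolov5s_infer` directory and `test.jpg` below are placeholder names, not files shipped with this document):
```python
import cv2
import fastdeploy as fd

# Files follow the PaddleDetection export layout described above.
model_file = "yolov5s_infer/model.pdmodel"
params_file = "yolov5s_infer/model.pdiparams"
config_file = "yolov5s_infer/infer_cfg.yml"

option = fd.RuntimeOption()
option.use_cpu()  # or option.use_gpu()

# All detection classes listed above share this constructor signature.
model = fd.vision.detection.PaddleYOLOv5(
    model_file, params_file, config_file, runtime_option=option)

im = cv2.imread("test.jpg")
result = model.predict(im)
print(result)
```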

View File

@@ -0,0 +1,59 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
required=True,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", required=True, help="Path of test image file.")
parser.add_argument(
"--device",
type=str,
default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.use_trt:
option.use_trt_backend()
return option
args = parse_arguments()
model_file = os.path.join(args.model_dir, "model.pdmodel")
params_file = os.path.join(args.model_dir, "model.pdiparams")
config_file = os.path.join(args.model_dir, "infer_cfg.yml")
# Configure runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.RTMDet(
model_file, params_file, config_file, runtime_option=runtime_option)
# Predict detection results for the image
im = cv2.imread(args.image)
result = model.predict(im.copy())
print(result)
# Visualize the prediction results
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")

View File

@@ -0,0 +1,59 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
required=True,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", required=True, help="Path of test image file.")
parser.add_argument(
"--device",
type=str,
default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.use_trt:
option.use_trt_backend()
return option
args = parse_arguments()
model_file = os.path.join(args.model_dir, "model.pdmodel")
params_file = os.path.join(args.model_dir, "model.pdiparams")
config_file = os.path.join(args.model_dir, "infer_cfg.yml")
# Configure runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.PaddleYOLOv5(
model_file, params_file, config_file, runtime_option=runtime_option)
# Predict detection results for the image
im = cv2.imread(args.image)
result = model.predict(im.copy())
print(result)
# Visualize the prediction results
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")

View File

@@ -0,0 +1,59 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
required=True,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", required=True, help="Path of test image file.")
parser.add_argument(
"--device",
type=str,
default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.use_trt:
option.use_trt_backend()
return option
args = parse_arguments()
model_file = os.path.join(args.model_dir, "model.pdmodel")
params_file = os.path.join(args.model_dir, "model.pdiparams")
config_file = os.path.join(args.model_dir, "infer_cfg.yml")
# Configure runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.PaddleYOLOv6(
model_file, params_file, config_file, runtime_option=runtime_option)
# Predict detection results for the image
im = cv2.imread(args.image)
result = model.predict(im.copy())
print(result)
# Visualize the prediction results
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")

View File

@@ -0,0 +1,59 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
required=True,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", required=True, help="Path of test image file.")
parser.add_argument(
"--device",
type=str,
default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
default=False,
help="Wether to use tensorrt.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.use_trt:
option.use_trt_backend()
return option
args = parse_arguments()
model_file = os.path.join(args.model_dir, "model.pdmodel")
params_file = os.path.join(args.model_dir, "model.pdiparams")
config_file = os.path.join(args.model_dir, "infer_cfg.yml")
# Configure runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.PaddleYOLOv7(
model_file, params_file, config_file, runtime_option=runtime_option)
# Predict detection results for the image
im = cv2.imread(args.image)
result = model.predict(im.copy())
print(result)
# Visualize the prediction results
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")

View File

@@ -45,8 +45,8 @@ model_path: ./picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet.onnx
output_folder: ./picodet_s_416_coco_lcnet
target_platform: RK3568
normalize:
mean: [[0.485,0.456,0.406],[0,0,0]]
std: [[0.229,0.224,0.225],[0.003921,0.003921]]
mean: [[0.485,0.456,0.406]]
std: [[0.229,0.224,0.225]]
outputs: ['tmp_17','p2o.Concat.9']
```
@@ -113,5 +113,7 @@ Preprocess:
type: Resize
```
## Other Links
- [C++ deployment](./cpp)
- [Python deployment](./python)
- [Prediction results of vision models](../../../../../docs/api/vision_results/)

View File

@@ -15,26 +15,39 @@
#include <string>
#include "fastdeploy/vision.h"
#include <sys/time.h>
double __get_us(struct timeval t) { return (t.tv_sec * 1000000 + t.tv_usec); }
void InferPicodet(const std::string& model_dir, const std::string& image_file);
int main(int argc, char* argv[]) {
if (argc < 3) {
void ONNXInfer(const std::string& model_dir, const std::string& image_file) {
std::string model_file = model_dir + "/picodet_s_416_coco_lcnet.onnx";
std::string params_file;
std::string config_file = model_dir + "/deploy.yaml";
auto option = fastdeploy::RuntimeOption();
option.UseCpu();
auto format = fastdeploy::ModelFormat::ONNX;
auto model = fastdeploy::vision::detection::PicoDet(
model_file, params_file, config_file, option, format);
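// ApplyDecodeAndNMS: the exported model omits box decoding and NMS, so run them in the postprocessor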
model.GetPostprocessor().ApplyDecodeAndNMS();
fastdeploy::TimeCounter tc;
tc.Start();
auto im = cv::imread(image_file);
fastdeploy::vision::DetectionResult res;
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
auto vis_im = fastdeploy::vision::VisDetection(im, res,0.5);
tc.End();
tc.PrintInfo("PPDet in ONNX");
cv::imwrite("infer_onnx.jpg", vis_im);
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./picodet_model_dir ./test.jpeg"
<< "Visualized result saved in ./infer_onnx.jpg"
<< std::endl;
return -1;
}
InferPicodet(argv[1], argv[2]);
return 0;
}
void InferPicodet(const std::string& model_dir, const std::string& image_file) {
struct timeval start_time, stop_time;
auto model_file = model_dir + "/picodet_s_416_coco_lcnet_rk3568.rknn";
void RKNPU2Infer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + "/picodet_s_416_coco_lcnet_rk3588.rknn";
auto params_file = "";
auto config_file = model_dir + "/infer_cfg.yml";
@@ -51,16 +64,31 @@ void InferPicodet(const std::string& model_dir, const std::string& image_file) {
auto im = cv::imread(image_file);
fastdeploy::vision::DetectionResult res;
gettimeofday(&start_time, NULL);
fastdeploy::TimeCounter tc;
tc.Start();
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
gettimeofday(&stop_time, NULL);
printf("infer use %f ms\n", (__get_us(stop_time) - __get_us(start_time)) / 1000);
tc.End();
tc.PrintInfo("PPDet in RKNPU2");
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im, res,0.5);
cv::imwrite("picodet_result.jpg", vis_im);
std::cout << "Visualized result saved in ./picodet_result.jpg" << std::endl;
cv::imwrite("infer_rknpu2.jpg", vis_im);
std::cout << "Visualized result saved in ./infer_rknpu2.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./picodet_model_dir ./test.jpeg"
<< std::endl;
return -1;
}
RKNPU2Infer(argv[1], argv[2]);
//ONNXInfer(argv[1], argv[2]);
return 0;
}

View File

@@ -34,7 +34,7 @@ cp -r 000000014439.jpg images
4. Build the deployment example with the following commands:
```bash
mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/timvx.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx ..
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx -DTARGET_ABI=armhf ..
make -j8
make install
# After a successful build, an install folder is generated, containing the runnable demo and the libraries required for deployment

View File

@@ -53,7 +53,6 @@ int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout << "Usage: infer_demo path/to/quant_model "
"path/to/image "
"run_option, "
"e.g ./infer_demo ./PPYOLOE_L_quant ./test.jpeg"
<< std::endl;
return -1;

View File

@@ -75,7 +75,7 @@ I0928 04:51:15.826578 206 http_server.cc:167] Started Metrics Service at 0.0.0.0
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# Install client dependencies
python3 -m pip install tritonclient\[all\]
python3 -m pip install tritonclient[all]
# Send the request
python3 paddledet_grpc_client.py

View File

@@ -0,0 +1,18 @@
# RKYOLO Model Deployment Preparation
RKYOLO wraps the YOLO models from [rknn_model_zoo](https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo);
currently the RKYOLOV5 series is supported for deployment.
## Supported Model List
* RKYOLOV5
## Model Conversion Example
Please refer to [RKNN_model_convert](https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo/RKNN_model_convert)
## Other Links
- [C++ deployment](./cpp)
- [Python deployment](./python)
- [Prediction results of vision models](../../../../docs/api/vision_results/)

View File

@@ -0,0 +1,37 @@
CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
project(rknpu2_test)
set(CMAKE_CXX_STANDARD 14)
# Specify the path of the downloaded and extracted fastdeploy library
set(FASTDEPLOY_INSTALL_DIR "thirdpartys/fastdeploy-0.0.3")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeployConfig.cmake)
include_directories(${FastDeploy_INCLUDE_DIRS})
add_executable(infer_rkyolo infer_rkyolo.cc)
target_link_libraries(infer_rkyolo ${FastDeploy_LIBS})
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
install(TARGETS infer_rkyolo DESTINATION ./)
install(DIRECTORY model DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
message("${FASTDEPLOY_LIBS}")
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
file(GLOB ONNXRUNTIME_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/onnxruntime/lib/*)
install(PROGRAMS ${ONNXRUNTIME_LIBS} DESTINATION lib)
install(DIRECTORY ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib DESTINATION ./)
file(GLOB PADDLETOONNX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddle2onnx/lib/*)
install(PROGRAMS ${PADDLETOONNX_LIBS} DESTINATION lib)
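# RKNN2_TARGET_SOC selects the SoC-specific runtime libraries; it is assumed
# to be passed on the cmake command line, e.g. -DRKNN2_TARGET_SOC=RK3588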
file(GLOB RKNPU2_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/rknpu2_runtime/${RKNN2_TARGET_SOC}/lib/*)
install(PROGRAMS ${RKNPU2_LIBS} DESTINATION lib)

View File

@@ -0,0 +1,69 @@
# RKYOLO C++ Deployment Example
This directory provides `infer_xxxxx.cc` examples that quickly deploy RKYOLO models on Rockchip boards with acceleration by the 2nd-generation NPU.
Before deployment, confirm the following two steps:
1. The software and hardware environment meets the requirements
2. Download the prebuilt deployment library for your development environment, or build the FastDeploy repository from source
Please refer to [Building the RK 2nd-generation NPU deployment library](../../../../../docs/cn/build_and_install/rknpu2.md) for the steps above
## Generating the Basic Directory Layout
This example consists of the following parts
```text
.
├── CMakeLists.txt
├── build # build folder
├── images # folder for test images
├── infer_rkyolo.cc
├── model # folder for model files
└── thirdpartys # folder for the SDK
```
First, create the directory structure
```bash
mkdir build
mkdir images
mkdir model
mkdir thirdpartys
```
## Build
### Build the SDK and copy it to the thirdpartys folder
Please refer to [Building the RK 2nd-generation NPU deployment library](../../../../../../docs/cn/build_and_install/rknpu2.md) to build the SDK. After the build completes, a
fastdeploy-0.0.3 directory is generated under the build directory; move it into the thirdpartys directory.
### Copy the model and configuration files to the model folder
The Paddle dynamic graph model -> Paddle static graph model -> ONNX model conversion produces an ONNX file and the corresponding yaml configuration file; place the configuration file in the model folder.
The model converted to RKNN also needs to be copied to model.
### Prepare test images in the images folder
```bash
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
cp 000000014439.jpg ./images
```
### Build the example
```bash
cd build
cmake ..
make -j8
make install
```
## Running the Example
```bash
cd ./build/install
./infer_rkyolo model/ images/000000014439.jpg
```
- [Model description](../../)
- [Python deployment](../python)
- [Prediction results of vision models](../../../../../../docs/api/vision_results/)

View File

@@ -0,0 +1,55 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
void RKNPU2Infer(const std::string& model_file, const std::string& image_file) {
auto option = fastdeploy::RuntimeOption();
option.UseRKNPU2();
auto format = fastdeploy::ModelFormat::RKNN;
auto model = fastdeploy::vision::detection::RKYOLOV5(
model_file, option, format);
auto im = cv::imread(image_file);
fastdeploy::vision::DetectionResult res;
fastdeploy::TimeCounter tc;
tc.Start();
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
auto vis_im = fastdeploy::vision::VisDetection(im, res,0.5);
tc.End();
tc.PrintInfo("RKYOLOV5 in RKNN");
std::cout << res.Str() << std::endl;
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./picodet_model_dir ./test.jpeg"
<< std::endl;
return -1;
}
RKNPU2Infer(argv[1], argv[2]);
return 0;
}

View File

@@ -0,0 +1,34 @@
# RKYOLO Python Deployment Example
Before deployment, confirm the following step
- 1. The software and hardware environment meets the requirements; refer to [FastDeploy environment requirements](../../../../../../docs/cn/build_and_install/rknpu2.md)
This directory provides `infer.py`, a quick example of deploying RKYOLO on the RKNPU. Run the following script to complete the deployment
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/detection/rkyolo/python
# Download the test image
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# copy model
cp -r ./model /path/to/FastDeploy/examples/vision/detection/rkyolo/python
# Run inference
python3 infer.py --model_file ./model/ \
--image 000000014439.jpg
```
## Notes
The RKNPU requires model input in NHWC format, and the image normalization step is fused into the model during RKNN conversion; therefore, when deploying with FastDeploy, do not apply mean/std normalization to the image again.
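In practice this means the raw BGR image is passed straight to `predict` with no manual preprocessing on the host side. A minimal sketch based on the `infer.py` in this directory (the model path below is a placeholder for your converted RKNN file):
```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_rknpu2()

# "./model/yolov5s_rk3588.rknn" is a placeholder; use your converted RKNN file.
model = fd.vision.detection.RKYOLOV5(
    "./model/yolov5s_rk3588.rknn",
    runtime_option=option,
    model_format=fd.ModelFormat.RKNN)

# No normalization here: it is already embedded in the RKNN model.
im = cv2.imread("000000014439.jpg")
result = model.predict(im)
print(result)
```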
## Other Documents
- [PaddleDetection model description](..)
- [PaddleDetection C++ deployment](../cpp)
- [Prediction result description](../../../../../../docs/api/vision_results/)
- [Converting PaddleDetection RKNN models](../README.md)

View File

@@ -0,0 +1,53 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_file", required=True, help="Path of rknn model.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
return parser.parse_args()
if __name__ == "__main__":
args = parse_arguments()
model_file = args.model_file
params_file = ""
# Configure runtime and load the model
runtime_option = fd.RuntimeOption()
runtime_option.use_rknpu2()
model = fd.vision.detection.RKYOLOV5(
model_file,
runtime_option=runtime_option,
model_format=fd.ModelFormat.RKNN)
# Predict detection results for the image
im = cv2.imread(args.image)
result = model.predict(im)
print(result)
# Visualize the results
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")

View File

@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -47,7 +46,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -56,7 +54,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -74,7 +72,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -83,7 +80,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

View File

@@ -52,7 +52,7 @@ if args.image is None:
else:
image = args.image
im = cv2.imread(image)
result = model.predict(im.copy())
result = model.predict(im)
print(result)
# Visualize the prediction results

View File

@@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
@@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
@@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) {
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

View File

@@ -54,7 +54,7 @@ else:
image = args.image
im = cv2.imread(image)
result = model.predict(im.copy())
result = model.predict(im)
print(result)
# Visualize the prediction results

View File

@@ -1,8 +1,8 @@
# YOLOv5 Deployment Preparation
- The YOLOv5 v6.0 deployment is implemented based on [YOLOv5](https://github.com/ultralytics/yolov5/tree/v6.0) and its [COCO pretrained models](https://github.com/ultralytics/yolov5/releases/tag/v6.0)
- (1) The *.onnx files provided by the [official repository](https://github.com/ultralytics/yolov5/releases/tag/v6.0) can be deployed directly;
- (2) For YOLOv5 v6.0 models trained on your own data, use `export.py` from [YOLOv5](https://github.com/ultralytics/yolov5) to export an ONNX file and then deploy it.
- The YOLOv5 v7.0 deployment is implemented based on [YOLOv5](https://github.com/ultralytics/yolov5/tree/v7.0) and its [COCO pretrained models](https://github.com/ultralytics/yolov5/releases/tag/v7.0)
- (1) The *.onnx files provided by the [official repository](https://github.com/ultralytics/yolov5/releases/tag/v7.0) can be deployed directly;
- (2) For YOLOv5 v7.0 models trained on your own data, use `export.py` from [YOLOv5](https://github.com/ultralytics/yolov5) to export an ONNX file and then deploy it.
## Download Pretrained ONNX Models
@@ -10,13 +10,11 @@
For developers' convenience, the exported YOLOv5 models of each series are provided below for direct download. The accuracy figures in the table come from the official repository.
| Model | Size | Accuracy |
|:---------------------------------------------------------------- |:----- |:----- |
| [YOLOv5n](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n.onnx) | 7.5MB | 28.4% |
| [YOLOv5s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx) | 28.9MB | 37.2% |
| [YOLOv5m](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5m.onnx) | 84.7MB | 45.2% |
| [YOLOv5l](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5l.onnx) | 186.2MB | 48.8% |
| [YOLOv5x](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5x.onnx) | 346.9MB | 50.7% |
| [YOLOv5n](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n.onnx) | 7.6MB | 28.0% |
| [YOLOv5s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx) | 28MB | 37.4% |
| [YOLOv5m](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5m.onnx) | 82MB | 45.4% |
| [YOLOv5l](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5l.onnx) | 178MB | 49.0% |
| [YOLOv5x](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5x.onnx) | 332MB | 50.7% |
## Detailed Deployment Documents
@@ -27,4 +25,4 @@
## Release Notes
- This document and code are written based on [YOLOv5 v6.0](https://github.com/ultralytics/yolov5/tree/v6.0)
- This document and code are written based on [YOLOv5 v7.0](https://github.com/ultralytics/yolov5/tree/v7.0)

View File

@@ -0,0 +1,11 @@
# Deploying Quantized YOLOv5 Models on the A311D
FastDeploy now supports deploying quantized YOLOv5 models to the A311D based on PaddleLite.
For model quantization and downloading quantized models, refer to: [Model Quantization](../quantize/README.md)
## Detailed Deployment Documents
Only C++ deployment is supported on the A311D.
- [C++ deployment](cpp)

View File

@@ -0,0 +1,37 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Specify the path of the downloaded and extracted fastdeploy library
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# Add FastDeploy dependency headers
include_directories(${FASTDEPLOY_INCS})
include_directories(${FastDeploy_INCLUDE_DIRS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Add the FastDeploy library dependency
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
install(TARGETS infer_demo DESTINATION ./)
install(DIRECTORY models DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
file(GLOB ADB_TOOLS run_with_adb.sh)
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)

View File

@@ -0,0 +1,54 @@
# YOLOv5 Quantized Model C++ Deployment Example
The `infer.cc` in this directory helps users quickly deploy YOLOv5 quantized models on the A311D with accelerated inference.
## Deployment Preparation
### FastDeploy Cross-Compilation Environment
- 1. The software and hardware environment meets the requirements, and the cross-compilation environment is prepared; refer to: [FastDeploy cross-compilation environment preparation](../../../../../../docs/cn/build_and_install/a311d.md#交叉编译环境搭建)
### Quantized Model Preparation
- 1. You can directly deploy the quantized models provided by FastDeploy.
- 2. You can quantize models yourself with the [one-click auto compression tool](../../../../../../tools/common_tools/auto_compression/) provided by FastDeploy, and deploy the resulting quantized models.
- For more information on quantization, see [Model Quantization](../../quantize/README.md)
## Deploying the Quantized YOLOv5 Detection Model on the A311D
Follow these steps to deploy the YOLOv5 quantized model on the A311D:
1. Cross-compile the FastDeploy library; refer to: [Cross-compiling FastDeploy](../../../../../../docs/cn/build_and_install/a311d.md#基于-paddlelite-的-fastdeploy-交叉编译库编译)
2. Copy the compiled library to the current directory with the following command:
```bash
cp -r FastDeploy/build/fastdeploy-tmivx/ FastDeploy/examples/vision/detection/yolov5/a311d/cpp
```
3. Download the model and example image required for deployment into the current directory:
```bash
mkdir models && mkdir images
wget https://bj.bcebos.com/fastdeploy/models/yolov5s_ptq_model.tar.gz
tar -xvf yolov5s_ptq_model.tar.gz
cp -r yolov5s_ptq_model models
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
cp -r 000000014439.jpg images
```
4. Build the deployment example with the following commands:
```bash
mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx -DTARGET_ABI=arm64 ..
make -j8
make install
# After a successful build, an install folder is generated, containing the runnable demo and the libraries required for deployment
```
5. Deploy the YOLOv5 detection model to the Amlogic A311D with adb:
```bash
# Enter the install directory
cd FastDeploy/examples/vision/detection/yolov5/a311d/cpp/build/install/
# Usage: bash run_with_adb.sh <demo_to_run> <model_path> <image_path> <DEVICE_ID>
bash run_with_adb.sh infer_demo yolov5s_ptq_model 000000014439.jpg $DEVICE_ID
```
After successful deployment, the result saved in vis_result.jpg is as follows:
<img width="640" src="https://user-images.githubusercontent.com/30516196/203706969-dd58493c-6635-4ee7-9421-41c2e0c9524b.png">
Note that models deployed on the A311D must be quantized; for model quantization, refer to: [Model Quantization](../../../../../../docs/cn/quantize.md)

View File

@@ -0,0 +1,64 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto subgraph_file = model_dir + sep + "subgraph.txt";
fastdeploy::RuntimeOption option;
option.UseTimVX();
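// subgraph.txt customizes Paddle Lite's subgraph partition (which ops run on the NPU)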
option.SetLiteSubgraphPartitionPath(subgraph_file);
auto model = fastdeploy::vision::detection::YOLOv5(
model_file, params_file, option, fastdeploy::ModelFormat::PADDLE);
assert(model.Initialized());
auto im = cv::imread(image_file);
fastdeploy::vision::DetectionResult res;
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout << "Usage: infer_demo path/to/quant_model "
"path/to/image "
"run_option, "
"e.g ./infer_demo ./yolov5s_quant ./000000014439.jpg"
<< std::endl;
return -1;
}
std::string model_dir = argv[1];
std::string test_image = argv[2];
InitAndInfer(model_dir, test_image);
return 0;
}

View File

@@ -0,0 +1,47 @@
#!/bin/bash
HOST_SPACE=${PWD}
echo ${HOST_SPACE}
WORK_SPACE=/data/local/tmp/test
# The first parameter represents the demo name
DEMO_NAME=image_classification_demo
if [ -n "$1" ]; then
DEMO_NAME=$1
fi
# The second parameter represents the model name
MODEL_NAME=mobilenet_v1_fp32_224
if [ -n "$2" ]; then
MODEL_NAME=$2
fi
# The third parameter indicates the name of the image to be tested
IMAGE_NAME=0001.jpg
if [ -n "$3" ]; then
IMAGE_NAME=$3
fi
# The fourth parameter represents the ID of the device
ADB_DEVICE_NAME=
if [ -n "$4" ]; then
ADB_DEVICE_NAME="-s $4"
fi
# Set the environment variables required during the running process
EXPORT_ENVIRONMENT_VARIABLES="export GLOG_v=5; export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1; export VIV_VX_SET_PER_CHANNEL_ENTROPY=100; export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=300000; export VSI_NN_LOG_LEVEL=5;"
EXPORT_ENVIRONMENT_VARIABLES="${EXPORT_ENVIRONMENT_VARIABLES}export LD_LIBRARY_PATH=${WORK_SPACE}/lib:\$LD_LIBRARY_PATH;"
# Please install adb, and DON'T run this in the docker.
set -e
adb $ADB_DEVICE_NAME shell "rm -rf $WORK_SPACE"
adb $ADB_DEVICE_NAME shell "mkdir -p $WORK_SPACE"
# Upload the demo, libraries, model and test images to the device
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/lib $WORK_SPACE
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/${DEMO_NAME} $WORK_SPACE
adb $ADB_DEVICE_NAME push models $WORK_SPACE
adb $ADB_DEVICE_NAME push images $WORK_SPACE
# Execute the deployment demo
adb $ADB_DEVICE_NAME shell "cd $WORK_SPACE; ${EXPORT_ENVIRONMENT_VARIABLES} chmod +x ./${DEMO_NAME}; ./${DEMO_NAME} ./models/${MODEL_NAME} ./images/$IMAGE_NAME"

examples/vision/detection/yolov5/cpp/CMakeLists.txt Normal file → Executable file
View File

@@ -12,3 +12,7 @@ include_directories(${FASTDEPLOY_INCS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Add the FastDeploy library dependency
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
add_executable(infer_paddle_demo ${PROJECT_SOURCE_DIR}/infer_paddle_model.cc)
# Add the FastDeploy library dependency
target_link_libraries(infer_paddle_demo ${FASTDEPLOY_LIBS})

examples/vision/detection/yolov5/cpp/README.md Normal file → Executable file
View File

@@ -17,11 +17,28 @@ wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
# Download the officially converted yolov5 model file and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
# Download the officially converted yolov5 Paddle model files and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_infer.tar
tar -xvf yolov5s_infer.tar
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# CPU inference
./infer_paddle_demo yolov5s_infer 000000014439.jpg 0
# GPU inference
./infer_paddle_demo yolov5s_infer 000000014439.jpg 1
# TensorRT inference on GPU
./infer_paddle_demo yolov5s_infer 000000014439.jpg 2
# XPU inference
./infer_paddle_demo yolov5s_infer 000000014439.jpg 3
```
The above commands run inference with the Paddle model. To run inference with an ONNX model instead, follow these steps:
```bash
# 1. Download the officially converted yolov5 ONNX model file and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# CPU inference
./infer_demo yolov5s.onnx 000000014439.jpg 0
# GPU inference
@@ -29,7 +46,6 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000
# TensorRT inference on GPU
./infer_demo yolov5s.onnx 000000014439.jpg 2
```
After running, the visualized result is as shown below
<img width="640" src="https://user-images.githubusercontent.com/67993288/184309358-d803347a-8981-44b6-b589-4608021ad0f4.jpg">

Some files were not shown because too many files have changed in this diff