From d14db2629d6b24e555005d4ac487e916f62f0d6d Mon Sep 17 00:00:00 2001
From: Zheng-Bicheng <58363586+Zheng-Bicheng@users.noreply.github.com>
Date: Thu, 16 Mar 2023 22:04:50 +0800
Subject: [PATCH] [Example] Move SOLOv2 jetson example -> cpp (#1600)

* move solov2

* move solov2

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
---
 .../paddledetection/cpp/CMakeLists.txt        |  3 +
 .../{jetson => }/cpp/infer_solov2.cc          |  2 +-
 .../paddledetection/jetson/README.md          | 21 ----
 .../paddledetection/jetson/README_CN.md       | 20 ----
 .../paddledetection/jetson/cpp/CMakeLists.txt | 11 ---
 .../paddledetection/jetson/cpp/README.md      | 28 ------
 .../paddledetection/jetson/cpp/README_CN.md   | 29 ------
 .../paddledetection/jetson/python/README.md   | 96 -------------------
 .../jetson/python/README_CN.md                | 96 -------------------
 .../{jetson => }/python/infer_solov2.py       |  0
 10 files changed, 4 insertions(+), 302 deletions(-)
 rename examples/vision/detection/paddledetection/{jetson => }/cpp/infer_solov2.cc (99%)
 delete mode 100644 examples/vision/detection/paddledetection/jetson/README.md
 delete mode 100644 examples/vision/detection/paddledetection/jetson/README_CN.md
 delete mode 100644 examples/vision/detection/paddledetection/jetson/cpp/CMakeLists.txt
 delete mode 100644 examples/vision/detection/paddledetection/jetson/cpp/README.md
 delete mode 100644 examples/vision/detection/paddledetection/jetson/cpp/README_CN.md
 delete mode 100755 examples/vision/detection/paddledetection/jetson/python/README.md
 delete mode 100644 examples/vision/detection/paddledetection/jetson/python/README_CN.md
 rename examples/vision/detection/paddledetection/{jetson => }/python/infer_solov2.py (100%)

diff --git a/examples/vision/detection/paddledetection/cpp/CMakeLists.txt b/examples/vision/detection/paddledetection/cpp/CMakeLists.txt
index 3eb3af1e9..aa4639bb6 100644
--- a/examples/vision/detection/paddledetection/cpp/CMakeLists.txt
+++ b/examples/vision/detection/paddledetection/cpp/CMakeLists.txt
@@ -71,3 +71,6 @@ target_link_libraries(infer_tood_demo ${FASTDEPLOY_LIBS})
 
 add_executable(infer_gfl_demo ${PROJECT_SOURCE_DIR}/infer_gfl.cc)
 target_link_libraries(infer_gfl_demo ${FASTDEPLOY_LIBS})
+
+add_executable(infer_solov2_demo ${PROJECT_SOURCE_DIR}/infer_solov2.cc)
+target_link_libraries(infer_solov2_demo ${FASTDEPLOY_LIBS})
diff --git a/examples/vision/detection/paddledetection/jetson/cpp/infer_solov2.cc b/examples/vision/detection/paddledetection/cpp/infer_solov2.cc
similarity index 99%
rename from examples/vision/detection/paddledetection/jetson/cpp/infer_solov2.cc
rename to examples/vision/detection/paddledetection/cpp/infer_solov2.cc
index 268dedb1d..74cc1384a 100644
--- a/examples/vision/detection/paddledetection/jetson/cpp/infer_solov2.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_solov2.cc
@@ -93,4 +93,4 @@ int main(int argc, char* argv[]) {
     GpuInfer(argv[1], argv[2]);
   }
   return 0;
-}
+}
\ No newline at end of file
diff --git a/examples/vision/detection/paddledetection/jetson/README.md b/examples/vision/detection/paddledetection/jetson/README.md
deleted file mode 100644
index 5110001a4..000000000
--- a/examples/vision/detection/paddledetection/jetson/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-English | [简体中文](README_CN.md)
-
-# PaddleDetection Model Deployment
-
-FastDeploy supports the SOLOv2 model of [PaddleDetection version 2.6](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.6).
-
-You can run the following commands to get the static graph model of SOLOv2.
-
-```bash
-# install PaddleDetection
-git clone https://github.com/PaddlePaddle/PaddleDetection.git
-cd PaddleDetection
-
-python tools/export_model.py -c configs/solov2/solov2_r50_fpn_1x_coco.yml --output_dir=./inference_model \
-    -o weights=https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_1x_coco.pdparams
-```
-
-## Detailed Deployment Documents
-
-- [Python Deployment](python)
-- [C++ Deployment](cpp)
diff --git a/examples/vision/detection/paddledetection/jetson/README_CN.md b/examples/vision/detection/paddledetection/jetson/README_CN.md
deleted file mode 100644
index 4fcf7e8a4..000000000
--- a/examples/vision/detection/paddledetection/jetson/README_CN.md
+++ /dev/null
@@ -1,20 +0,0 @@
-[English](README.md) | 简体中文
-# PaddleDetection模型部署
-
-FastDeploy支持[PaddleDetection 2.6](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.6)版本的SOLOv2模型,
-
-你可以输入以下命令得到SOLOv2的静态图模型。
-
-```bash
-# install PaddleDetection
-git clone https://github.com/PaddlePaddle/PaddleDetection.git
-cd PaddleDetection
-
-python tools/export_model.py -c configs/solov2/solov2_r50_fpn_1x_coco.yml --output_dir=./inference_model \
-    -o weights=https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_1x_coco.pdparams
-```
-
-## 详细部署文档
-
-- [Python部署](python)
-- [C++部署](cpp)
diff --git a/examples/vision/detection/paddledetection/jetson/cpp/CMakeLists.txt b/examples/vision/detection/paddledetection/jetson/cpp/CMakeLists.txt
deleted file mode 100644
index 0332a5b78..000000000
--- a/examples/vision/detection/paddledetection/jetson/cpp/CMakeLists.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-PROJECT(infer_demo C CXX)
-CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
-
-option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
-
-include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
-
-include_directories(${FASTDEPLOY_INCS})
-
-add_executable(infer_solov2_demo ${PROJECT_SOURCE_DIR}/infer_solov2.cc)
-target_link_libraries(infer_solov2_demo ${FASTDEPLOY_LIBS})
diff --git a/examples/vision/detection/paddledetection/jetson/cpp/README.md b/examples/vision/detection/paddledetection/jetson/cpp/README.md
deleted file mode 100644
index 20a7ea83a..000000000
--- a/examples/vision/detection/paddledetection/jetson/cpp/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-English | [简体中文](README_CN.md)
-# PaddleDetection C++ Deployment Example
-
-This directory provides examples in which `infer_xxx.cc` quickly completes the deployment of PaddleDetection models, including SOLOv2, on CPU/GPU, as well as GPU deployment accelerated by TensorRT.
-
-Before deployment, two steps require confirmation:
-
-- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
-- 2. Download the precompiled deployment library and sample code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
-
-Taking inference on Linux as an example, the compilation test can be completed by executing the following commands in this directory. FastDeploy version 0.7.0 or above (x.x.x>=0.7.0) is required to support this model.
-
-```bash
-mkdir build
-cd build
-
-# Download the FastDeploy precompiled library. Users can choose the appropriate version from the `FastDeploy Precompiled Library` mentioned above
-wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
-tar xvf fastdeploy-linux-x64-x.x.x.tgz
-cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
-make -j
-
-wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
-# CPU inference
-./infer_solov2_demo ./solov2_r50_fpn_1x_coco 000000014439.jpg 0
-# GPU inference
-./infer_solov2_demo ./solov2_r50_fpn_1x_coco 000000014439.jpg 1
-```
diff --git a/examples/vision/detection/paddledetection/jetson/cpp/README_CN.md b/examples/vision/detection/paddledetection/jetson/cpp/README_CN.md
deleted file mode 100644
index 20eaf33e7..000000000
--- a/examples/vision/detection/paddledetection/jetson/cpp/README_CN.md
+++ /dev/null
@@ -1,29 +0,0 @@
-[English](README.md) | 简体中文
-# PaddleDetection C++部署示例
-
-本目录下提供`infer_xxx.cc`快速完成PaddleDetection模型包括SOLOv2在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。
-
-在部署前,需确认以下两个步骤
-
-- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
-- 2. 根据开发环境,下载预编译部署库和examples代码,参考[FastDeploy预编译库](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
-
-以Linux上推理为例,在本目录执行如下命令即可完成编译测试,支持此模型需保证FastDeploy版本0.7.0以上(x.x.x>=0.7.0)
-
-
-```bash
-mkdir build
-cd build
-
-# 下载FastDeploy预编译库,用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用
-wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
-tar xvf fastdeploy-linux-x64-x.x.x.tgz
-cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
-make -j
-
-wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
-# CPU推理
-./infer_solov2_demo ./solov2_r50_fpn_1x_coco 000000014439.jpg 0
-# GPU推理
-./infer_solov2_demo ./solov2_r50_fpn_1x_coco 000000014439.jpg 1
-```
diff --git a/examples/vision/detection/paddledetection/jetson/python/README.md b/examples/vision/detection/paddledetection/jetson/python/README.md
deleted file mode 100755
index 4818114ef..000000000
--- a/examples/vision/detection/paddledetection/jetson/python/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-English | [简体中文](README_CN.md)
-# PaddleDetection Python Deployment Example
-
-Before deployment, two steps require confirmation:
-
-- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
-- 2. Install the FastDeploy Python whl package. Refer to [FastDeploy Python Installation](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
-
-This directory provides examples in which `infer_xxx.py` quickly completes the deployment of PPYOLOE/PicoDet models on CPU/GPU, as well as GPU deployment accelerated by TensorRT. The commands are as follows
-
-```bash
-# Download deployment example code
-git clone https://github.com/PaddlePaddle/FastDeploy.git
-cd FastDeploy/examples/vision/detection/paddledetection/python/
-
-# Download the PPYOLOE model file and test images
-wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
-wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
-tar xvf ppyoloe_crn_l_300e_coco.tgz
-
-# CPU inference
-python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device cpu
-# GPU inference
-python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device gpu
-# TensorRT inference on GPU (Note: model serialization is time-consuming when running TensorRT inference for the first time. Please be patient.)
-python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device gpu --use_trt True
-# Kunlunxin XPU Inference
-python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device kunlunxin
-# Huawei Ascend Inference
-python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device ascend
-```
-
-The visualized result after running is as follows
-
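
After this move, the SOLOv2 C++ example is built by the shared `examples/vision/detection/paddledetection/cpp/CMakeLists.txt` through the new `infer_solov2_demo` target, and `infer_solov2.py` sits next to the other PaddleDetection Python scripts. Below is a minimal sketch of building and running the relocated C++ demo from its new directory, reusing the FastDeploy SDK download, SOLOv2 export, and test image shown in the READMEs above; the `x.x.x` SDK version and the `solov2_r50_fpn_1x_coco` model directory are placeholders for whatever you have downloaded or exported.

```bash
cd examples/vision/detection/paddledetection/cpp
mkdir build && cd build

# Download and extract the FastDeploy precompiled C++ SDK (replace x.x.x with a released version)
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j

# Test image used by the other PaddleDetection examples
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg

# Run the relocated SOLOv2 demo; the last argument selects CPU (0) or GPU (1) inference
./infer_solov2_demo ./solov2_r50_fpn_1x_coco 000000014439.jpg 0
```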