diff --git a/.gitignore b/.gitignore index c1706dee3..c4c9ec0e0 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,6 @@ fastdeploy/core/config.h fastdeploy/pybind/main.cc python/fastdeploy/libs/lib* __pycache__ -build_fastdeploy_android.sh +build_fd_android.sh python/scripts/process_libraries.py .vs \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index d356c36ab..5c0e2c066 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -51,14 +51,14 @@ option(ENABLE_TRT_BACKEND "Whether to enable tensorrt backend." OFF) option(ENABLE_PADDLE_BACKEND "Whether to enable paddle backend." OFF) option(ENABLE_OPENVINO_BACKEND "Whether to enable openvino backend." OFF) option(ENABLE_LITE_BACKEND "Whether to enable paddle lite backend." OFF) -option(CUDA_DIRECTORY "If build tensorrt backend, need to define path of cuda library.") -option(TRT_DIRECTORY "If build tensorrt backend, need to define path of tensorrt library.") option(ENABLE_VISION "Whether to enable vision models usage." OFF) option(ENABLE_VISION_VISUALIZE "Whether to enable visualize vision model result toolbox." ON) option(ENABLE_TEXT "Whether to enable text models usage." OFF) option(WITH_TESTING "Whether to compile with unittest." OFF) -option(OPENCV_DIRECTORY "User can specify the installed opencv directory.") -option(ORT_DIRECTORY "User can specify the installed onnxruntime directory.") +# option(CUDA_DIRECTORY "If build tensorrt backend, need to define path of cuda library.") +# option(TRT_DIRECTORY "If build tensorrt backend, need to define path of tensorrt library.") +# option(OPENCV_DIRECTORY "User can specify the installed opencv directory.") +# option(ORT_DIRECTORY "User can specify the installed onnxruntime directory.") ######################### Options for Android cross compiling #################### option(WITH_OPENCV_STATIC "Use OpenCV static lib for Android." OFF) option(WITH_LITE_STATIC "Use Paddle-Lite static lib for Android." 
OFF) @@ -69,6 +69,13 @@ option(WITH_LITE_STATIC "Use Paddle-Lite static lib for Android." OFF) # Whether to build fastdeploy with vision/text/... examples, only for testings. option(BUILD_EXAMPLES "Whether to build fastdeploy with vision examples" OFF) +######################### Paths to user's custom libraries directory ##################### +set(CUDA_DIRECTORY "" CACHE PATH "If build tensorrt backend, need to define path of cuda library.") +set(TRT_DIRECTORY "" CACHE PATH "If build tensorrt backend, need to define path of tensorrt library.") +set(ORT_DIRECTORY "" CACHE PATH "User can specify the installed onnxruntime directory.") +set(OPENCV_DIRECTORY "" CACHE PATH "User can specify the installed opencv directory.") + + # Whether to build fastdeploy on device Nvidia Jetson # Only support CPU Inference & GPU(TensorRT) Inference Now option(BUILD_ON_JETSON "Whether to build fastdeploy on Nvidia Jetson" OFF) @@ -421,6 +428,7 @@ install( ${PROJECT_SOURCE_DIR}/ThirdPartyNotices.txt ${PROJECT_SOURCE_DIR}/VERSION_NUMBER ${PROJECT_SOURCE_DIR}/FastDeploy.cmake + ${PROJECT_SOURCE_DIR}/cmake/FastDeployConfig.cmake ${PROJECT_SOURCE_DIR}/cmake/utils.cmake DESTINATION ${CMAKE_INSTALL_PREFIX} ) diff --git a/FastDeploy.cmake.in b/FastDeploy.cmake.in index c41fc7ffe..e73fcb313 100644 --- a/FastDeploy.cmake.in +++ b/FastDeploy.cmake.in @@ -2,19 +2,22 @@ CMAKE_MINIMUM_REQUIRED (VERSION 3.12) set(WITH_GPU @WITH_GPU@) set(ENABLE_ORT_BACKEND @ENABLE_ORT_BACKEND@) +set(ENABLE_LITE_BACKEND @ENABLE_LITE_BACKEND@) set(ENABLE_PADDLE_BACKEND @ENABLE_PADDLE_BACKEND@) set(ENABLE_OPENVINO_BACKEND @ENABLE_OPENVINO_BACKEND@) -set(PADDLEINFERENCE_VERSION @PADDLEINFERENCE_VERSION@) -set(OPENVINO_VERSION @OPENVINO_VERSION@) set(ENABLE_TRT_BACKEND @ENABLE_TRT_BACKEND@) set(ENABLE_PADDLE_FRONTEND @ENABLE_PADDLE_FRONTEND@) set(ENABLE_VISION @ENABLE_VISION@) set(ENABLE_TEXT @ENABLE_TEXT@) set(BUILD_ON_JETON @BUILD_ON_JETSON@) +set(PADDLEINFERENCE_VERSION @PADDLEINFERENCE_VERSION@) +set(OPENVINO_VERSION 
@OPENVINO_VERSION@) +set(WITH_LITE_STATIC @WITH_LITE_STATIC@) +set(WITH_OPENCV_STATIC @WITH_OPENCV_STATIC@) # set(ENABLE_OPENCV_CUDA @ENABLE_OPENCV_CUDA@) set(LIBRARY_NAME @LIBRARY_NAME@) -set(OPENCV_DIRECTORY @OPENCV_DIRECTORY@) -set(ORT_DIRECTORY @ORT_DIRECTORY@) +set(OPENCV_DIRECTORY "@OPENCV_DIRECTORY@") +set(ORT_DIRECTORY "@ORT_DIRECTORY@") set(FASTDEPLOY_LIBS "") set(FASTDEPLOY_INCS "") @@ -90,9 +93,27 @@ if(WITH_GPU) endif() if(ENABLE_VISION) - set(OpenCV_DIR @OpenCV_DIR@) if (OPENCV_DIRECTORY) set(OpenCV_DIR ${OPENCV_DIRECTORY}) + else() + if(WIN32) + set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-win-x64-3.4.16/build) + elseif(ANDROID) + # TODO(qiuyanjun): use single 'opencv' path instead of 'opencv-xxx-xxx'. + set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-android-4.6.0/sdk/native/jni) + elseif(APPLE) + if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "arm64") + set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-arm64-3.4.16) + else() + set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-osx-x86_64-3.4.16) + endif() + else() + if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64") + set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-linux-aarch64-3.4.14) + else() + set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-linux-x64-3.4.16) + endif() + endif() endif() message(STATUS "The path of OpenCV is ${OpenCV_DIR}.") find_package(OpenCV REQUIRED PATHS ${OpenCV_DIR}) @@ -128,16 +149,36 @@ message(STATUS " CXX flags : ${CMAKE_CXX_FLAGS}") message(STATUS " WITH_GPU : ${WITH_GPU}") message(STATUS " ENABLE_ORT_BACKEND : ${ENABLE_ORT_BACKEND}") message(STATUS " ENABLE_PADDLE_BACKEND : ${ENABLE_PADDLE_BACKEND}") +message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}") +message(STATUS " ENABLE_TRT_BACKEND : ${ENABLE_TRT_BACKEND}") +message(STATUS " ENABLE_LITE_BACKEND : ${ENABLE_LITE_BACKEND}") if(ENABLE_PADDLE_BACKEND) message(STATUS " Paddle Inference version : 
${PADDLEINFERENCE_VERSION}") endif() -message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}") if(ENABLE_OPENVINO_BACKEND) message(STATUS " OpenVINO version : ${OPENVINO_VERSION}") endif() message(STATUS " ENABLE_TRT_BACKEND : ${ENABLE_TRT_BACKEND}") message(STATUS " ENABLE_VISION : ${ENABLE_VISION}") message(STATUS " ENABLE_TEXT : ${ENABLE_TEXT}") +if(WITH_GPU) + message(STATUS " CUDA_DIRECTORY : ${CUDA_DIRECTORY}") +endif() +if(OPENCV_DIRECTORY) + message(STATUS " OPENCV_DIRECTORY : ${OPENCV_DIRECTORY}") +endif() +if(ORT_DIRECTORY) + message(STATUS " ORT_DIRECTORY : ${ORT_DIRECTORY}") +endif() +if(ANDROID) + message(STATUS " ANDROID_ABI : ${ANDROID_ABI}") + message(STATUS " ANDROID_PLATFORM : ${ANDROID_PLATFORM}") + message(STATUS " ANDROID_NDK : ${ANDROID_NDK}") + message(STATUS " WITH_OPENCV_STATIC : ${WITH_OPENCV_STATIC}") + if(ENABLE_LITE_BACKEND) + message(STATUS " WITH_LITE_STATIC : ${WITH_LITE_STATIC}") + endif() +endif() message(STATUS " DEPENDENCY_LIBS : ${FASTDEPLOY_LIBS}") if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") @@ -146,3 +187,39 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") message(FATAL_ERROR "[ERROR] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.") endif() endif() + +# ------------------------------------------------------------------------------- # +# Utils for FastDeploy users. Install all dynamic libs # +# to a specific location, such as exe or dll dir. 
Usage: # +# install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) # +# ------------------------------------------------------------------------------- # +function(install_fastdeploy_libraries DESTINATION_DIR) + set(DYN_LIB_SUFFIX "*.so*") + if(WIN32) + set(DYN_LIB_SUFFIX "*.dll") + elseif(APPLE) + set(DYN_LIB_SUFFIX "*.dylib*") + endif() + if(FastDeploy_DIR) + file(GLOB_RECURSE ALL_NEED_DYN_LIBS ${FastDeploy_DIR}/${DYN_LIB_SUFFIX}) + elseif(FASTDEPLOY_INSTALL_DIR) + file(GLOB_RECURSE ALL_NEED_DYN_LIBS ${FASTDEPLOY_INSTALL_DIR}/${DYN_LIB_SUFFIX}) + else() + message(FATAL_ERROR "Please set FastDeploy_DIR/FASTDEPLOY_INSTALL_DIR before call install_fastdeploy_libraries.") + endif() + if(ENABLE_VISION) + file(GLOB_RECURSE ALL_OPENCV_DYN_LIBS ${OpenCV_DIR}/${DYN_LIB_SUFFIX}) + list(REMOVE_ITEM ALL_NEED_DYN_LIBS ${ALL_OPENCV_DYN_LIBS}) + # Only Install the necessary libraries + if(WIN32) + file(GLOB OPENCV_DYN_LIBS ${OpenCV_DIR}/x64/vc15/bin/${DYN_LIB_SUFFIX}) + elseif(ANDROID) + file(GLOB OPENCV_DYN_LIBS ${OpenCV_DIR}/libs/${ANDROID_ABI}/${DYN_LIB_SUFFIX}) + else() # linux/mac + file(GLOB OPENCV_DYN_LIBS ${OpenCV_DIR}/lib/${DYN_LIB_SUFFIX}) + endif() + file(INSTALL ${OPENCV_DYN_LIBS} DESTINATION ${DESTINATION_DIR}) + endif() + # Install other libraries + file(INSTALL ${ALL_NEED_DYN_LIBS} DESTINATION ${DESTINATION_DIR}) +endfunction() diff --git a/cmake/FastDeployConfig.cmake b/cmake/FastDeployConfig.cmake new file mode 100644 index 000000000..fdd487ccf --- /dev/null +++ b/cmake/FastDeployConfig.cmake @@ -0,0 +1,10 @@ +# This file will define the following variables for find_package method: +# - FastDeploy_LIBS : The list of libraries to link against. +# - FastDeploy_INCLUDE_DIRS : The FastDeploy include directories. 
+# - FastDeploy_Found : The status of FastDeploy + +include(${CMAKE_CURRENT_LIST_DIR}/FastDeploy.cmake) +# setup FastDeploy cmake variables +set(FastDeploy_LIBS ${FASTDEPLOY_LIBS}) +set(FastDeploy_INCLUDE_DIRS ${FASTDEPLOY_INCS}) +set(FastDeploy_FOUND TRUE) \ No newline at end of file diff --git a/docs/compile/how_to_build_windows.md b/docs/compile/how_to_build_windows.md index ce4cc2d4a..1de10c90f 100644 --- a/docs/compile/how_to_build_windows.md +++ b/docs/compile/how_to_build_windows.md @@ -1,6 +1,22 @@ -# Windows编译 +# FastDeploy Windows SDK 编译 -## 环境依赖 +## 目录 +- [环境依赖](#Environment) +- [命令行方式编译C++ SDK](#CommandLineCpp) + - [编译CPU版本 C++ SDK](#CommandLineCppCPU) + - [编译GPU版本 C++ SDK](#CommandLineCppGPU) +- [命令行方式编译Python Wheel包](#CommandLinePython) + - [编译CPU版本 Python Wheel包](#CommandLinePythonCPU) + - [编译GPU版本 Python Wheel包](#CommandLinePythonGPU) +- [CMake GUI + Visual Studio 2019 IDE方式编译C++ SDK](#CMakeGuiAndVS2019) + - [使用CMake GUI进行基础配置](#CMakeGuiAndVS2019Basic) + - [编译CPU版本 C++ SDK设置](#CMakeGuiAndVS2019CPU) + - [编译GPU版本 C++ SDK设置](#CMakeGuiAndVS2019GPU) + - [使用Visual Studio 2019 IDE进行编译](#CMakeGuiAndVS2019Build) +- [Windows下FastDeploy C++ SDK使用方式](#Usage) + +## 1. 环境依赖 +
- cmake >= 3.12 - Visual Studio 16 2019 @@ -8,7 +24,11 @@ - cudnn >= 8.0 (当WITH_GPU=ON) - TensorRT >= 8.4 (当ENABLE_TRT_BACKEND=ON) -## 编译CPU版本 C++ SDK +## 2. 命令行方式编译C++ SDK + + +### 编译CPU版本 C++ SDK + Windows菜单打开`x64 Native Tools Command Prompt for VS 2019`命令工具,其中`CMAKE_INSTALL_PREFIX`用于指定编译后生成的SDK路径 @@ -23,7 +43,8 @@ msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64 ``` 编译后,FastDeploy CPU C++ SDK即在`D:\Paddle\FastDeploy\build\fastdeploy-win-x64`目录下 -## 编译GPU版本 C++ SDK +### 编译GPU版本 C++ SDK + Windows菜单打开`x64 Native Tools Command Prompt for VS 2019`命令工具,其中`CMAKE_INSTALL_PREFIX`用于指定编译后生成的SDK路径 @@ -44,7 +65,11 @@ msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64 ``` 编译后,FastDeploy GPU C++ SDK即在`D:\Paddle\FastDeploy\build\fastdeploy-win-x64-gpu`目录下 -## 编译CPU版本 Python Wheel包 +## 命令行方式编译Python Wheel包 + + +### 编译CPU版本 Python Wheel包 + Windows菜单打开x64 Native Tools Command Prompt for VS 2019命令工具。Python编译时,通过环境变量获取编译选项,在命令行终端运行以下命令 ```bat @@ -65,7 +90,9 @@ C:\Python38\python.exe setup.py bdist_wheel C:\Python38\python.exe -m pip install dist\fastdeploy_python-0.2.1-cp38-cp38-win_amd64.whl ``` -## 编译GPU版本 Python Wheel包 +### 编译GPU版本 Python Wheel包 + + Windows菜单打开x64 Native Tools Command Prompt for VS 2019命令工具。Python编译时,通过环境变量获取编译选项,在命令行终端运行以下命令 ```bat % 说明:CUDA_DIRECTORY 为用户自己的CUDA目录 以下为示例 % @@ -92,3 +119,139 @@ C:\Python38\python.exe setup.py bdist_wheel C:\Python38\python.exe -m pip install dist\fastdeploy_gpu_python-0.2.1-cp38-cp38-win_amd64.whl ``` 更多编译选项说明参考[编译指南](./README.md) + +## 3. 
CMake GUI + Visual Studio 2019 IDE方式编译C++ SDK + + +### 使用CMake GUI进行基础配置 + + +步骤一:首先,打开CMake GUI,先初始化FastDeploy工程: + + + +步骤二:点击Configure后,在弹窗中设置编译"x64"架构: + + + +初始化完成后,显示如下: + + + +步骤三:由于FastDeploy目前只支持Release版本,因此,先将"CMAKE_CONFIGURATION_TYPES"修改成"Release" + + + +接下来,用户可根据自己实际的开发需求开启对应的编译选项,并生成sln解决方案。以下,针对编译CPU和GPU版本SDK各举一个例子。 + +### 编译CPU版本 C++ SDK设置 + + + +步骤一:勾选CPU版本对应的编译选项。注意CPU版本,请`不要`勾选WITH_GPU和ENABLE_TRT_BACKEND + + + +这个示例中,我们开启ORT、Paddle、OpenVINO等推理后端,并且选择了需要编译TEXT和VISION的API + + +步骤二:自定义设置SDK安装路径,修改CMAKE_INSTALL_PREFIX + + + +由于默认的安装路径是C盘,我们可以修改CMAKE_INSTALL_PREFIX来指定自己的安装路径,这里我们将安装路径修改到`build\fastdeploy-win-x64-0.2.1`目录下。 + + + + + +### 编译GPU版本 C++ SDK设置 + + +步骤一:勾选GPU版本对应的编译选项。注意GPU版本,请`需要`勾选WITH_GPU + + + +这个示例中,我们开启ORT、Paddle、OpenVINO和TRT等推理后端,并且选择了需要编译TEXT和VISION的API。并且,由于开启了GPU和TensorRT,此时需要额外指定CUDA_DIRECTORY和TRT_DIRECTORY,在GUI界面中找到这两个变量,点击右侧的选项框,分别选择您安装CUDA的路径和TensorRT的路径 + + + + + + + + +步骤二:自定义设置SDK安装路径,修改CMAKE_INSTALL_PREFIX + + + + +由于默认的安装路径是C盘,我们可以修改CMAKE_INSTALL_PREFIX来指定自己的安装路径,这里我们将安装路径修改到`build\fastdeploy-win-x64-gpu-0.2.1`目录下。 + + +### 使用Visual Studio 2019 IDE进行编译 + + + +步骤一:点击"Generate",生成sln解决方案,并用Visual Studio 2019打开 + + + +这个过程默认会从下载一些编译需要的资源,cmake的dev警告可以不用管。生成完成之后可以看到以下界面: + +CPU版本SDK: + + + +GPU版本SDK: + + + + +左侧界面,可以看到所有编译需要的include路径和lib路径已经被设置好了,用户可以考虑把这些路径记录下来方便后续的开发。右侧界面,可以看到已经生成fastdeploy.sln解决方案文件。接下来,我们使用Visual Studio 2019打开这个解决方案文件(理论上VS2022也可以编译,但目前建议使用VS2019)。 + + + + +步骤二:在Visual Studio 2019点击"ALL BUILD"->右键点击"生成"开始编译 + + + +CPU版本SDK编译成功! + + + +GPU版本SDK编译成功! + + + + +步骤三:编译完成后,在Visual Studio 2019点击"INSTALL"->右键点击"生成"将编译好的SDK安装到先前指定的目录 + + + + + + +SDK成功安装到指定目录! + +### 编译所有examples(可选) +可以在CMake GUI中勾选BUILD_EXAMPLES选项,连带编译所有的examples,编译完成后所有example的可执行文件保存在build/bin/Release目录下 + + + + + + + +## 4. 
特别提示 + +如果是用户自行编译SDK,理论上支持Windows 10/11,VS 2019/2022,CUDA 11.x 以及 TensorRT 8.x等配置,但建议使用我们推荐的默认配置,即:Windows 10, VS 2019, CUDA 11.2 和 TensorRT 8.4.x版本。另外,如果编译过程中遇到中文字符的编码问题(如UIE example必须传入中文字符进行预测),可以参考Visual Studio的官方文档,设置源字符集为`/utf-8`解决: +- [/utf-8(将源字符集和执行字符集设置为 UTF-8)](https://learn.microsoft.com/zh-cn/cpp/build/reference/utf-8-set-source-and-executable-character-sets-to-utf-8?view=msvc-170) + +## 5. Windows下FastDeploy C++ SDK使用方式 + + + +Windows下FastDeploy C++ SDK使用方式,请参考文档: +- [how_to_use_sdk_on_windows.md](./how_to_use_sdk_on_windows.md) diff --git a/docs/compile/how_to_use_sdk_on_windows.md b/docs/compile/how_to_use_sdk_on_windows.md index 8fa6076c9..7ff313e14 100644 --- a/docs/compile/how_to_use_sdk_on_windows.md +++ b/docs/compile/how_to_use_sdk_on_windows.md @@ -1,13 +1,31 @@ # 在 Windows 使用 FastDeploy C++ SDK -在 Windows 下使用 FastDeploy C++ SDK 与在 Linux 下使用稍有不同。以下以 PPYOLOE 为例进行演示在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 - -在部署前,需确认以下两个步骤 - +在 Windows 下使用 FastDeploy C++ SDK 与在 Linux 下使用稍有不同。以下以 PPYOLOE 为例进行演示在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。在部署前,需确认以下两个步骤: - 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../environment.md) - 2. 
根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../quick_start) -## 环境依赖 +## 目录 +- [环境依赖](#Environment) +- [下载 FastDeploy Windows 10 C++ SDK](#Download) +- [Windows下多种方式使用 C++ SDK 的方式](#CommandLine) + - [方式一:命令行方式使用 C++ SDK](#CommandLine) + - [步骤一:在 Windows 命令行终端 上编译 example](#CommandLine) + - [步骤二:运行可执行文件获得推理结果](#CommandLine) + - [方式二:Visual Studio 2019 IDE 方式使用 C++ SDK](#VisualStudio2019) + - [步骤一:Visual Studio 2019 创建CMake工程项目](#VisualStudio20191) + - [步骤二:在CMakeLists中配置 FastDeploy C++ SDK](#VisualStudio20192) + - [步骤三:生成工程缓存并修改CMakeSetting.json配置](#VisualStudio20193) + - [步骤四:生成可执行文件,运行获取结果](#VisualStudio20194) + - [方式三:CLion IDE 方式使用 C++ SDK](#CLion) + - [方式四:Visual Studio Code IDE 方式使用 C++ SDK](#VisualStudioCode) +- [多种方法配置exe运行时所需的依赖库](#CommandLineDeps1) + - [方式一:修改CMakeLists.txt,一行命令配置(推荐)](#CommandLineDeps1) + - [方式二:命令行设置环境变量](#CommandLineDeps2) + - [方法三:手动拷贝依赖库到exe的目录下](#CommandLineDeps3) + + +## 1. 环境依赖 + - cmake >= 3.12 - Visual Studio 16 2019 @@ -15,19 +33,24 @@ - cudnn >= 8.0 (当WITH_GPU=ON) - TensorRT >= 8.4 (当ENABLE_TRT_BACKEND=ON) -## 下载 FastDeploy Windows 10 C++ SDK +## 2. 下载 FastDeploy Windows 10 C++ SDK + + 可以从以下链接下载编译好的 FastDeploy Windows 10 C++ SDK,SDK中包含了examples代码。 ```text https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.2.1.zip ``` -## 准备模型文件和测试图片 +## 3. 准备模型文件和测试图片 可以从以下链接下载模型文件和测试图片,并解压缩 ```text https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz # (下载后解压缩) https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg ``` -## 在 Windows 上编译 PPYOLOE +## 4. SDK使用方式一:命令行方式使用 C++ SDK + + +### 4.1 在 Windows 上编译 PPYOLOE Windows菜单打开`x64 Native Tools Command Prompt for VS 2019`命令工具,cd到ppyoloe的demo路径 ```bat cd fastdeploy-win-x64-gpu-0.2.0\examples\vision\detection\paddledetection\cpp @@ -40,8 +63,164 @@ cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=%cd%\..\..\. 
```bat msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64 ``` -## 配置依赖库路径 -#### 方式一:命令行设置环境变量 + +### 4.2 运行 demo +```bat +cd Release +infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 # CPU +infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1 # GPU +infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 2 # GPU + TensorRT +``` + +特别说明,exe运行时所需要的依赖库配置方法,请参考章节: [多种方法配置exe运行时所需的依赖库](#CommandLineDeps) + + +## 5. SDK使用方式二:Visual Studio 2019 IDE 方式使用 C++ SDK + + +### 5.1 步骤一:Visual Studio 2019 创建“CMake”工程项目 + + + +(1)打开Visual Studio 2019,点击"创建新项目"->点击"CMake",从而创建CMake工程项目。以PPYOLOE为例,来说明如何在Visual Studio 2019 IDE中使用FastDeploy C++ SDK. + + + + + + + + +(2)打开工程发现,Visual Stuio 2019已经为我们生成了一些基本的文件,其中包括CMakeLists.txt。infer_ppyoloe.h头文件这里实际上用不到,我们可以直接删除。 + + + +### 5.2 步骤二:在CMakeLists中配置 FastDeploy C++ SDK + + + +(1)在工程创建完成后,我们需要添加infer_ppyoloe推理源码,并修改CMakeLists.txt,修改如下: + + + +(2)其中infer_ppyoloe.cpp的代码可以直接从examples中的代码拷贝过来: +- [examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc](../../examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc) + +(3)CMakeLists.txt主要包括配置FastDeploy C++ SDK的路径,如果是GPU版本的SDK,还需要配置CUDA_DIRECTORY为CUDA的安装路径,CMakeLists.txt的配置如下: + +```cmake +project(infer_ppyoloe_demo C CXX) +cmake_minimum_required(VERSION 3.12) + +# Only support "Release" mode now +set(CMAKE_BUILD_TYPE "Release") + +# Set FastDeploy install dir +set(FASTDEPLOY_INSTALL_DIR "D:/qiuyanjun/fastdeploy-win-x64-gpu-0.2.1" + CACHE PATH "Path to downloaded or built fastdeploy sdk.") + +# Set CUDA_DIRECTORY (CUDA 11.x) for GPU SDK +set(CUDA_DIRECTORY "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.7" + CACHE PATH "Path to installed CUDA Toolkit.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_ppyoloe_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe.cpp) +target_link_libraries(infer_ppyoloe_demo ${FASTDEPLOY_LIBS}) + +# Optional: install all DLLs to 
binary dir. +install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) +``` + +### 5.3 步骤三:生成工程缓存并修改CMakeSetting.json配置 + + + +(1)点击"CMakeLists.txt"->右键点击"生成缓存": + + + +发现已经成功生成缓存了,但是由于打开工程时,默认是Debug模式,我们发现exe和缓存保存路径还是Debug模式下的。 我们可以先修改CMake的设置为Release. + +(2)点击"CMakeLists.txt"->右键点击"infer_ppyoloe_demo的cmake设置",进入CMakeSettings.json的设置面板,把其中的Debug设置修改为Release. + + + +同时设置CMake生成器为 "Visual Studio 16 2019 Win64" + + + +(3)点击保存CMake缓存以切换为Release配置: + + + +(4):(4.1)点击"CMakeLists.txt"->右键"CMake缓存仅限x64-Release"->"点击删除缓存";(4.2)点击"CMakeLists.txt"->"生成缓存";(4.3)如果在步骤一发现删除缓存的选项是灰色的可以直接点击"CMakeLists.txt"->"生成",若生成失败则可以重复尝试(4.1)和(4。2) + + + +最终可以看到,配置已经成功生成Relase模式下的CMake缓存了。 + + + + +### 5.4 步骤四:生成可执行文件,运行获取结果。 + + + +(1)点击"CMakeLists.txt"->"生成"。可以发现已经成功生成了infer_ppyoloe_demo.exe,并保存在`out/build/x64-Release/Release`目录下。 + + + +(2)执行可执行文件,获得推理结果。 首先需要拷贝所有的dll到exe所在的目录下,这里我们可以在CMakeLists.txt添加一下命令,可将FastDeploy中所有的dll安装到指定的目录。 + +```cmake +install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) +``` +(3)同时,也需要把ppyoloe的模型文件和测试图片下载解压缩后,拷贝到exe所在的目录。 准备完成后,目录结构如下: + + + +(4)最后,执行以下命令获得推理结果: + +```bat +D:\xxxinfer_ppyoloe\out\build\x64-Release\Release>infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 +[INFO] fastdeploy/runtime.cc(304)::fastdeploy::Runtime::Init Runtime initialized with Backend::OPENVINO in Device::CPU. +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +415.047180,89.311569, 506.009613, 283.863098, 0.950423, 0 +163.665710,81.914932, 198.585342, 166.760895, 0.896433, 0 +581.788635,113.027618, 612.623474, 198.521713, 0.842596, 0 +267.217224,89.777306, 298.796051, 169.361526, 0.837951, 0 +...... +153.301407,123.233757, 177.130539, 164.558350, 0.066697, 60 +505.887604,140.919601, 523.167236, 151.875336, 0.084912, 67 + +Visualized result saved in ./vis_result.jpg +``` + +打开保存的图片查看可视化结果: + +