[docs] update win build docs with cmake-gui+vs2019 (#280)

* Delete redundant Chinese comments

* [docs] update win build docs with cmake-gui+vs2019

* [docs] update win build docs with cmake-gui+vs2019

* [examples] replace some cn comments with en

* [cmake] update FastDeploy.cmake.in

* [docs] update windows c++ sdk usage docs

* [cmake] update FastDeploy.cmake.in

* [docs] update windows sdk usage docs

Co-authored-by: Jason <jiangjiajun@baidu.com>
Author: DefTruth
Date: 2022-09-26 19:25:12 +08:00
Committed by: GitHub
Parent: 355382ad63
Commit: 9e09a55abb
14 changed files with 496 additions and 68 deletions

.gitignore

@@ -20,6 +20,6 @@ fastdeploy/core/config.h
fastdeploy/pybind/main.cc
python/fastdeploy/libs/lib*
__pycache__
build_fastdeploy_android.sh
build_fd_android.sh
python/scripts/process_libraries.py
.vs


@@ -51,14 +51,14 @@ option(ENABLE_TRT_BACKEND "Whether to enable tensorrt backend." OFF)
option(ENABLE_PADDLE_BACKEND "Whether to enable paddle backend." OFF)
option(ENABLE_OPENVINO_BACKEND "Whether to enable openvino backend." OFF)
option(ENABLE_LITE_BACKEND "Whether to enable paddle lite backend." OFF)
option(CUDA_DIRECTORY "If build tensorrt backend, need to define path of cuda library.")
option(TRT_DIRECTORY "If build tensorrt backend, need to define path of tensorrt library.")
option(ENABLE_VISION "Whether to enable vision models usage." OFF)
option(ENABLE_VISION_VISUALIZE "Whether to enable visualize vision model result toolbox." ON)
option(ENABLE_TEXT "Whether to enable text models usage." OFF)
option(WITH_TESTING "Whether to compile with unittest." OFF)
option(OPENCV_DIRECTORY "User can specify the installed opencv directory.")
option(ORT_DIRECTORY "User can specify the installed onnxruntime directory.")
# option(CUDA_DIRECTORY "If build tensorrt backend, need to define path of cuda library.")
# option(TRT_DIRECTORY "If build tensorrt backend, need to define path of tensorrt library.")
# option(OPENCV_DIRECTORY "User can specify the installed opencv directory.")
# option(ORT_DIRECTORY "User can specify the installed onnxruntime directory.")
######################### Options for Android cross compiling ####################
option(WITH_OPENCV_STATIC "Use OpenCV static lib for Android." OFF)
option(WITH_LITE_STATIC "Use Paddle-Lite static lib for Android." OFF)
@@ -69,6 +69,13 @@ option(WITH_LITE_STATIC "Use Paddle-Lite static lib for Android." OFF)
# Whether to build fastdeploy with vision/text/... examples, only for testing.
option(BUILD_EXAMPLES "Whether to build fastdeploy with vision examples" OFF)
######################### Paths to user's custom libraries directory #####################
set(CUDA_DIRECTORY "" CACHE PATH "If build tensorrt backend, need to define path of cuda library.")
set(TRT_DIRECTORY "" CACHE PATH "If build tensorrt backend, need to define path of tensorrt library.")
set(ORT_DIRECTORY "" CACHE PATH "User can specify the installed onnxruntime directory.")
set(OPENCV_DIRECTORY "" CACHE PATH "User can specify the installed opencv directory.")
# Whether to build FastDeploy on an Nvidia Jetson device
# Only CPU inference & GPU (TensorRT) inference are supported for now
option(BUILD_ON_JETSON "Whether to build fastdeploy on Nvidia Jetson" OFF)
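These cache paths default to empty strings and are only consulted when the matching backend is switched on. As a hedged sketch (not FastDeploy's actual check), a configure-time guard for one of them could look like:

```cmake
# Sketch only: fail early if the TensorRT backend is requested without a path.
# BUILD_ON_JETSON is excluded because Jetson ships TensorRT system-wide.
if(ENABLE_TRT_BACKEND AND NOT BUILD_ON_JETSON)
  if(NOT TRT_DIRECTORY)
    message(FATAL_ERROR "ENABLE_TRT_BACKEND=ON requires TRT_DIRECTORY to point at your TensorRT install.")
  endif()
endif()
```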
@@ -421,6 +428,7 @@ install(
${PROJECT_SOURCE_DIR}/ThirdPartyNotices.txt
${PROJECT_SOURCE_DIR}/VERSION_NUMBER
${PROJECT_SOURCE_DIR}/FastDeploy.cmake
${PROJECT_SOURCE_DIR}/cmake/FastDeployConfig.cmake
${PROJECT_SOURCE_DIR}/cmake/utils.cmake
DESTINATION ${CMAKE_INSTALL_PREFIX}
)


@@ -2,19 +2,22 @@ CMAKE_MINIMUM_REQUIRED (VERSION 3.12)
set(WITH_GPU @WITH_GPU@)
set(ENABLE_ORT_BACKEND @ENABLE_ORT_BACKEND@)
set(ENABLE_LITE_BACKEND @ENABLE_LITE_BACKEND@)
set(ENABLE_PADDLE_BACKEND @ENABLE_PADDLE_BACKEND@)
set(ENABLE_OPENVINO_BACKEND @ENABLE_OPENVINO_BACKEND@)
set(PADDLEINFERENCE_VERSION @PADDLEINFERENCE_VERSION@)
set(OPENVINO_VERSION @OPENVINO_VERSION@)
set(ENABLE_TRT_BACKEND @ENABLE_TRT_BACKEND@)
set(ENABLE_PADDLE_FRONTEND @ENABLE_PADDLE_FRONTEND@)
set(ENABLE_VISION @ENABLE_VISION@)
set(ENABLE_TEXT @ENABLE_TEXT@)
set(BUILD_ON_JETSON @BUILD_ON_JETSON@)
set(PADDLEINFERENCE_VERSION @PADDLEINFERENCE_VERSION@)
set(OPENVINO_VERSION @OPENVINO_VERSION@)
set(WITH_LITE_STATIC @WITH_LITE_STATIC@)
set(WITH_OPENCV_STATIC @WITH_OPENCV_STATIC@)
# set(ENABLE_OPENCV_CUDA @ENABLE_OPENCV_CUDA@)
set(LIBRARY_NAME @LIBRARY_NAME@)
set(OPENCV_DIRECTORY @OPENCV_DIRECTORY@)
set(ORT_DIRECTORY @ORT_DIRECTORY@)
set(OPENCV_DIRECTORY "@OPENCV_DIRECTORY@")
set(ORT_DIRECTORY "@ORT_DIRECTORY@")
set(FASTDEPLOY_LIBS "")
set(FASTDEPLOY_INCS "")
@@ -90,9 +93,27 @@ if(WITH_GPU)
endif()
if(ENABLE_VISION)
set(OpenCV_DIR @OpenCV_DIR@)
if (OPENCV_DIRECTORY)
set(OpenCV_DIR ${OPENCV_DIRECTORY})
else()
if(WIN32)
set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-win-x64-3.4.16/build)
elseif(ANDROID)
# TODO(qiuyanjun): use single 'opencv' path instead of 'opencv-xxx-xxx'.
set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-android-4.6.0/sdk/native/jni)
elseif(APPLE)
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "arm64")
set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-arm64-3.4.16)
else()
set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-osx-x86_64-3.4.16)
endif()
else()
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-linux-aarch64-3.4.14)
else()
set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/opencv-linux-x64-3.4.16)
endif()
endif()
endif()
message(STATUS "The path of OpenCV is ${OpenCV_DIR}.")
find_package(OpenCV REQUIRED PATHS ${OpenCV_DIR})
@@ -128,16 +149,36 @@ message(STATUS " CXX flags : ${CMAKE_CXX_FLAGS}")
message(STATUS " WITH_GPU : ${WITH_GPU}")
message(STATUS " ENABLE_ORT_BACKEND : ${ENABLE_ORT_BACKEND}")
message(STATUS " ENABLE_PADDLE_BACKEND : ${ENABLE_PADDLE_BACKEND}")
message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")
message(STATUS " ENABLE_TRT_BACKEND : ${ENABLE_TRT_BACKEND}")
message(STATUS " ENABLE_LITE_BACKEND : ${ENABLE_LITE_BACKEND}")
if(ENABLE_PADDLE_BACKEND)
message(STATUS " Paddle Inference version : ${PADDLEINFERENCE_VERSION}")
endif()
message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")
if(ENABLE_OPENVINO_BACKEND)
message(STATUS " OpenVINO version : ${OPENVINO_VERSION}")
endif()
message(STATUS " ENABLE_TRT_BACKEND : ${ENABLE_TRT_BACKEND}")
message(STATUS " ENABLE_VISION : ${ENABLE_VISION}")
message(STATUS " ENABLE_TEXT : ${ENABLE_TEXT}")
if(WITH_GPU)
message(STATUS " CUDA_DIRECTORY : ${CUDA_DIRECTORY}")
endif()
if(OPENCV_DIRECTORY)
message(STATUS " OPENCV_DIRECTORY : ${OPENCV_DIRECTORY}")
endif()
if(ORT_DIRECTORY)
message(STATUS " ORT_DIRECTORY : ${ORT_DIRECTORY}")
endif()
if(ANDROID)
message(STATUS " ANDROID_ABI : ${ANDROID_ABI}")
message(STATUS " ANDROID_PLATFORM : ${ANDROID_PLATFORM}")
message(STATUS " ANDROID_NDK : ${ANDROID_NDK}")
message(STATUS "  WITH_OPENCV_STATIC : ${WITH_OPENCV_STATIC}")
if(ENABLE_LITE_BACKEND)
message(STATUS " WITH_LITE_STATIC : ${WITH_LITE_STATIC}")
endif()
endif()
message(STATUS " DEPENDENCY_LIBS : ${FASTDEPLOY_LIBS}")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
@@ -146,3 +187,39 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
message(FATAL_ERROR "[ERROR] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.")
endif()
endif()
# ------------------------------------------------------------------------------- #
# Utils for FastDeploy users. Install all dynamic libs #
# to a specific location, such as exe or dll dir. Usage: #
# install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release) #
# ------------------------------------------------------------------------------- #
function(install_fastdeploy_libraries DESTINATION_DIR)
set(DYN_LIB_SUFFIX "*.so*")
if(WIN32)
set(DYN_LIB_SUFFIX "*.dll")
elseif(APPLE)
set(DYN_LIB_SUFFIX "*.dylib*")
endif()
if(FastDeploy_DIR)
file(GLOB_RECURSE ALL_NEED_DYN_LIBS ${FastDeploy_DIR}/${DYN_LIB_SUFFIX})
elseif(FASTDEPLOY_INSTALL_DIR)
file(GLOB_RECURSE ALL_NEED_DYN_LIBS ${FASTDEPLOY_INSTALL_DIR}/${DYN_LIB_SUFFIX})
else()
message(FATAL_ERROR "Please set FastDeploy_DIR/FASTDEPLOY_INSTALL_DIR before call install_fastdeploy_libraries.")
endif()
if(ENABLE_VISION)
file(GLOB_RECURSE ALL_OPENCV_DYN_LIBS ${OpenCV_DIR}/${DYN_LIB_SUFFIX})
list(REMOVE_ITEM ALL_NEED_DYN_LIBS ${ALL_OPENCV_DYN_LIBS})
# Only install the necessary OpenCV libraries
if(WIN32)
file(GLOB OPENCV_DYN_LIBS ${OpenCV_DIR}/x64/vc15/bin/${DYN_LIB_SUFFIX})
elseif(ANDROID)
file(GLOB OPENCV_DYN_LIBS ${OpenCV_DIR}/libs/${ANDROID_ABI}/${DYN_LIB_SUFFIX})
else() # linux/mac
file(GLOB OPENCV_DYN_LIBS ${OpenCV_DIR}/lib/${DYN_LIB_SUFFIX})
endif()
file(INSTALL ${OPENCV_DYN_LIBS} DESTINATION ${DESTINATION_DIR})
endif()
# Install other libraries
file(INSTALL ${ALL_NEED_DYN_LIBS} DESTINATION ${DESTINATION_DIR})
endfunction()
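For reference, a minimal consumer CMakeLists.txt using this helper might look like the sketch below, mirroring the pattern from the Windows usage doc later in this commit (the SDK path and target name are placeholders):

```cmake
cmake_minimum_required(VERSION 3.12)
project(ppyoloe_demo CXX)

# Placeholder: point this at your extracted FastDeploy SDK root.
set(FASTDEPLOY_INSTALL_DIR "C:/fastdeploy-win-x64" CACHE PATH "FastDeploy SDK root.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

include_directories(${FASTDEPLOY_INCS})
add_executable(ppyoloe_demo main.cc)
target_link_libraries(ppyoloe_demo ${FASTDEPLOY_LIBS})

# Copy every runtime DLL next to the built exe (MSVC puts it under Release/).
install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release)
```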


@@ -0,0 +1,10 @@
# This file will define the following variables for find_package method:
# - FastDeploy_LIBS : The list of libraries to link against.
# - FastDeploy_INCLUDE_DIRS : The FastDeploy include directories.
# - FastDeploy_FOUND : Whether FastDeploy was found.
include(${CMAKE_CURRENT_LIST_DIR}/FastDeploy.cmake)
# setup FastDeploy cmake variables
set(FastDeploy_LIBS ${FASTDEPLOY_LIBS})
set(FastDeploy_INCLUDE_DIRS ${FASTDEPLOY_INCS})
set(FastDeploy_FOUND TRUE)
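With this config file installed next to FastDeploy.cmake, a CMake project can consume the SDK through find_package. A minimal sketch (the SDK path is a placeholder; it can also be passed as -DFastDeploy_DIR=... on the command line):

```cmake
cmake_minimum_required(VERSION 3.12)
project(demo CXX)

# Placeholder: the directory that contains FastDeployConfig.cmake (the SDK root).
set(FastDeploy_DIR "C:/fastdeploy-win-x64")
find_package(FastDeploy REQUIRED)

add_executable(demo main.cc)
include_directories(${FastDeploy_INCLUDE_DIRS})
target_link_libraries(demo ${FastDeploy_LIBS})
```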


@@ -1,6 +1,22 @@
# Build on Windows
# Building the FastDeploy Windows SDK
## Environment Requirements
## Contents
- [Environment Requirements](#Environment)
- [Build the C++ SDK from the Command Line](#CommandLineCpp)
- [Build the CPU C++ SDK](#CommandLineCppCPU)
- [Build the GPU C++ SDK](#CommandLineCppGPU)
- [Build the Python Wheel from the Command Line](#CommandLinePython)
- [Build the CPU Python Wheel](#CommandLinePythonCPU)
- [Build the GPU Python Wheel](#CommandLinePythonGPU)
- [Build the C++ SDK with CMake GUI + Visual Studio 2019 IDE](#CMakeGuiAndVS2019)
- [Basic Configuration with CMake GUI](#CMakeGuiAndVS2019Basic)
- [Settings for the CPU C++ SDK](#CMakeGuiAndVS2019CPU)
- [Settings for the GPU C++ SDK](#CMakeGuiAndVS2019GPU)
- [Build with the Visual Studio 2019 IDE](#CMakeGuiAndVS2019Build)
- [Using the FastDeploy C++ SDK on Windows](#Usage)
## 1. Environment Requirements
<div id="Environment"></div>
- cmake >= 3.12
- Visual Studio 16 2019
@@ -8,7 +24,11 @@
- cuDNN >= 8.0 (when WITH_GPU=ON)
- TensorRT >= 8.4 (when ENABLE_TRT_BACKEND=ON)
## Build the CPU C++ SDK
## 2. Build the C++ SDK from the Command Line
<div id="CommandLineCpp"></div>
### Build the CPU C++ SDK
<div id="CommandLineCppCPU"></div>
Open `x64 Native Tools Command Prompt for VS 2019` from the Windows Start menu. `CMAKE_INSTALL_PREFIX` specifies the install path of the generated SDK:
@@ -23,7 +43,8 @@ msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64
```
After compilation, the FastDeploy CPU C++ SDK is located in the `D:\Paddle\FastDeploy\build\fastdeploy-win-x64` directory.
## Build the GPU C++ SDK
### Build the GPU C++ SDK
<div id="CommandLineCppGPU"></div>
Open `x64 Native Tools Command Prompt for VS 2019` from the Windows Start menu. `CMAKE_INSTALL_PREFIX` specifies the install path of the generated SDK:
@@ -44,7 +65,11 @@ msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64
```
After compilation, the FastDeploy GPU C++ SDK is located in the `D:\Paddle\FastDeploy\build\fastdeploy-win-x64-gpu` directory.
## Build the CPU Python Wheel
## Build the Python Wheel from the Command Line
<div id="CommandLinePython"></div>
### Build the CPU Python Wheel
<div id="CommandLinePythonCPU"></div>
Open x64 Native Tools Command Prompt for VS 2019 from the Windows Start menu. The Python build reads its build options from environment variables; run the following commands in the terminal:
```bat
@@ -65,7 +90,9 @@ C:\Python38\python.exe setup.py bdist_wheel
C:\Python38\python.exe -m pip install dist\fastdeploy_python-0.2.1-cp38-cp38-win_amd64.whl
```
## Build the GPU Python Wheel
### Build the GPU Python Wheel
<div id="CommandLinePythonGPU"></div>
Open x64 Native Tools Command Prompt for VS 2019 from the Windows Start menu. The Python build reads its build options from environment variables; run the following commands in the terminal:
```bat
% Note: CUDA_DIRECTORY is your own CUDA directory; the following is just an example %
@@ -92,3 +119,139 @@ C:\Python38\python.exe setup.py bdist_wheel
C:\Python38\python.exe -m pip install dist\fastdeploy_gpu_python-0.2.1-cp38-cp38-win_amd64.whl
```
For more build options, see the [build guide](./README.md).
## 3. Build the C++ SDK with CMake GUI + Visual Studio 2019 IDE
<div id="CMakeGuiAndVS2019"></div>
### Basic Configuration with CMake GUI
<div id="CMakeGuiAndVS2019Basic"></div>
Step 1: Open CMake GUI and initialize the FastDeploy project:
![image](https://user-images.githubusercontent.com/31974251/192094881-c5beb0e5-82ae-4a62-a88c-73f3d80f7936.png)
Step 2: Click Configure and, in the pop-up dialog, set the target architecture to "x64":
![image](https://user-images.githubusercontent.com/31974251/192094951-958a0a22-2090-4ab6-84f5-3573164d0835.png)
After initialization completes, you should see the following:
![image](https://user-images.githubusercontent.com/31974251/192095053-874b9c73-fc0d-4325-b555-ac94ab9a9f38.png)
Step 3: Since FastDeploy currently only supports Release builds, first change "CMAKE_CONFIGURATION_TYPES" to "Release":
![image](https://user-images.githubusercontent.com/31974251/192095175-3aeede95-a633-4b3c-81f8-067f0a0a44a3.png)
Next, enable the build options that match your actual development needs, then generate the .sln solution. Below is one example each for building the CPU and GPU SDKs.
### Settings for the CPU C++ SDK
<div id="CMakeGuiAndVS2019CPU"></div>
Step 1: Check the build options for the CPU version. Note: for the CPU version, do `not` check WITH_GPU or ENABLE_TRT_BACKEND:
![image](https://user-images.githubusercontent.com/31974251/192095848-b3cfdf19-e378-41e0-b44e-5edb49461eeb.png)
In this example we enable the ORT, Paddle, and OpenVINO inference backends, and choose to build the TEXT and VISION APIs.
Step 2: Customize the SDK install path by modifying CMAKE_INSTALL_PREFIX:
![image](https://user-images.githubusercontent.com/31974251/192095961-5f6e348a-c30b-4473-8331-8beefb7cd2e6.png)
Since the default install path is on the C drive, we can modify CMAKE_INSTALL_PREFIX to specify our own install path; here we change it to the `build\fastdeploy-win-x64-0.2.1` directory.
![image](https://user-images.githubusercontent.com/31974251/192096055-8a276a9e-6017-4447-9ded-b95c5579d663.png)
### Settings for the GPU C++ SDK
<div id="CMakeGuiAndVS2019GPU"></div>
Step 1: Check the build options for the GPU version. Note: the GPU version `must` have WITH_GPU checked:
![image](https://user-images.githubusercontent.com/31974251/192099254-9f82abb0-8a29-41ce-a0ce-da6aacf23582.png)
In this example we enable the ORT, Paddle, OpenVINO, and TRT inference backends, and choose to build the TEXT and VISION APIs. Because GPU and TensorRT are enabled, CUDA_DIRECTORY and TRT_DIRECTORY must also be specified: find these two variables in the GUI, click the selection box to the right of each, and choose your CUDA install path and your TensorRT path respectively:
![image](https://user-images.githubusercontent.com/31974251/192098907-9dd9a49c-4a3e-4641-8e68-f25da1cafbba.png)
![image](https://user-images.githubusercontent.com/31974251/192098984-7fefd824-7e3b-4185-abba-bae5d8765e2a.png)
Step 2: Customize the SDK install path by modifying CMAKE_INSTALL_PREFIX:
![image](https://user-images.githubusercontent.com/31974251/192099125-81fc8217-e51f-4039-9421-ba7a09c0027c.png)
Since the default install path is on the C drive, we can modify CMAKE_INSTALL_PREFIX to specify our own install path; here we change it to the `build\fastdeploy-win-x64-gpu-0.2.1` directory.
### Build with the Visual Studio 2019 IDE
<div id="CMakeGuiAndVS2019Build"></div>
Step 1: Click "Generate" to generate the .sln solution, then open it with Visual Studio 2019:
![image](https://user-images.githubusercontent.com/31974251/192096162-c05cbb11-f96e-4c82-afde-c7fc02cddf68.png)
By default this process downloads some resources needed for the build; CMake's dev warnings can be ignored. After generation completes, you should see the following:
CPU SDK:
![image](https://user-images.githubusercontent.com/31974251/192096478-faa570bd-7569-43c3-ad79-cc6be5b605e3.png)
GPU SDK:
![image](https://user-images.githubusercontent.com/31974251/192099583-300e4680-1089-45cf-afaa-d2afda8fd436.png)
On the left you can see that all include and lib paths needed for the build have already been set; you may want to record these paths for later development. On the right you can see that the fastdeploy.sln solution file has been generated. Next, open this solution file with Visual Studio 2019 (in theory VS 2022 can also build it, but VS 2019 is currently recommended):
![image](https://user-images.githubusercontent.com/31974251/192096765-2aeadd68-47fb-4cd6-b083-4a478cf5e584.jpg)
Step 2: In Visual Studio 2019, select "ALL_BUILD" -> right-click and choose "Build" to start compiling:
![image](https://user-images.githubusercontent.com/31974251/192096893-5d6bc428-b824-4ffe-8930-0ec6d4dcfd02.png)
The CPU SDK builds successfully:
![image](https://user-images.githubusercontent.com/31974251/192097020-979bd7a3-1cdd-4fb5-a931-864c5372933d.png)
The GPU SDK builds successfully:
![image](https://user-images.githubusercontent.com/31974251/192099902-4b661f9a-7691-4f7f-b573-92ca9397a890.png)
Step 3: After the build finishes, in Visual Studio 2019 select "INSTALL" -> right-click and choose "Build" to install the compiled SDK into the directory specified earlier:
![image](https://user-images.githubusercontent.com/31974251/192097073-ce5236eb-1ed7-439f-8098-fef7a2d02779.png)
![image](https://user-images.githubusercontent.com/31974251/192097122-d675ae39-35fb-4dbb-9c75-eefb0597ec2e.png)
The SDK is successfully installed into the specified directory.
### Build All the Examples (Optional)
You can check the BUILD_EXAMPLES option in CMake GUI to build all the examples along with the SDK; after compilation, all example executables are saved in the build/bin/Release directory:
![image](https://user-images.githubusercontent.com/31974251/192110769-a4f0940d-dea3-4524-831b-1c2a6ab8e871.png)
![image](https://user-images.githubusercontent.com/31974251/192110930-e7e49bc6-c271-4076-be74-3d103f27bc78.png)
## 4. Special Notes
If you compile the SDK yourself, configurations such as Windows 10/11, VS 2019/2022, CUDA 11.x, and TensorRT 8.x are supported in theory, but we recommend our default configuration: Windows 10, VS 2019, CUDA 11.2, and TensorRT 8.4.x. Also, if you run into Chinese character encoding issues during the build (e.g., the UIE example must take Chinese characters as prediction input), you can follow the official Visual Studio documentation and set the source character set to `/utf-8`:
- [/utf-8 (Set source and execution character sets to UTF-8)](https://learn.microsoft.com/zh-cn/cpp/build/reference/utf-8-set-source-and-executable-character-sets-to-utf-8?view=msvc-170)
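If you drive the affected example's build from CMake rather than from the IDE settings dialog, a minimal sketch of applying this flag (adjust to your own project; this is one common pattern, not FastDeploy's prescribed setup) is:

```cmake
if(MSVC)
  # Compile sources as UTF-8 so Chinese string literals survive MSVC's
  # default locale-dependent source character set.
  add_compile_options("/utf-8")
endif()
```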
## 5. Using the FastDeploy C++ SDK on Windows
<div id="Usage"></div>
For how to use the FastDeploy C++ SDK on Windows, please refer to:
- [how_to_use_sdk_on_windows.md](./how_to_use_sdk_on_windows.md)


@@ -1,13 +1,31 @@
# Using the FastDeploy C++ SDK on Windows
Using the FastDeploy C++ SDK on Windows differs slightly from Linux. The following uses PPYOLOE as an example to demonstrate deployment on CPU/GPU, and on GPU with TensorRT acceleration.
Before deploying, confirm the following two steps
Using the FastDeploy C++ SDK on Windows differs slightly from Linux. The following uses PPYOLOE as an example to demonstrate deployment on CPU/GPU, and on GPU with TensorRT acceleration. Before deploying, confirm the following two steps:
- 1. The software and hardware environment meets the requirements; see [FastDeploy Environment Requirements](../environment.md)
- 2. Download the prebuilt deployment library and samples code for your development environment; see [FastDeploy Prebuilt Libraries](../quick_start)
## Environment Requirements
## Contents
- [Environment Requirements](#Environment)
- [Download the FastDeploy Windows 10 C++ SDK](#Download)
- [Ways to Use the C++ SDK on Windows](#CommandLine)
- [Option 1: Use the C++ SDK from the Command Line](#CommandLine)
- [Step 1: Build the example in a Windows terminal](#CommandLine)
- [Step 2: Run the executable to get inference results](#CommandLine)
- [Option 2: Use the C++ SDK from the Visual Studio 2019 IDE](#VisualStudio2019)
- [Step 1: Create a CMake project in Visual Studio 2019](#VisualStudio20191)
- [Step 2: Configure the FastDeploy C++ SDK in CMakeLists](#VisualStudio20192)
- [Step 3: Generate the project cache and modify the CMakeSettings.json configuration](#VisualStudio20193)
- [Step 4: Build the executable and run it to get results](#VisualStudio20194)
- [Option 3: Use the C++ SDK from the CLion IDE](#CLion)
- [Option 4: Use the C++ SDK from the Visual Studio Code IDE](#VisualStudioCode)
- [Ways to configure the runtime dependencies of the exe](#CommandLineDeps1)
- [Option 1: Modify CMakeLists.txt, one-line configuration (recommended)](#CommandLineDeps1)
- [Option 2: Set environment variables on the command line](#CommandLineDeps2)
- [Option 3: Manually copy the dependency libraries into the exe directory](#CommandLineDeps3)
## 1. Environment Requirements
<div id="Environment"></div>
- cmake >= 3.12
- Visual Studio 16 2019
@@ -15,19 +33,24 @@
- cuDNN >= 8.0 (when WITH_GPU=ON)
- TensorRT >= 8.4 (when ENABLE_TRT_BACKEND=ON)
## Download the FastDeploy Windows 10 C++ SDK
## 2. Download the FastDeploy Windows 10 C++ SDK
<div id="Download"></div>
The prebuilt FastDeploy Windows 10 C++ SDK can be downloaded from the link below; the SDK includes the examples code.
```text
https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.2.1.zip
```
## Prepare the Model Files and Test Image
## 3. Prepare the Model Files and Test Image
The model files and test image can be downloaded from the links below and extracted:
```text
https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz # (extract after downloading)
https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
```
## Build PPYOLOE on Windows
## 4. SDK Usage Option 1: Use the C++ SDK from the Command Line
<div id="CommandLine"></div>
### 4.1 Build PPYOLOE on Windows
Open `x64 Native Tools Command Prompt for VS 2019` from the Windows Start menu and cd into the ppyoloe demo directory:
```bat
cd fastdeploy-win-x64-gpu-0.2.1\examples\vision\detection\paddledetection\cpp
@@ -40,8 +63,164 @@ cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=%cd%\..\..\.
```bat
msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
```
## Configure the Dependency Library Paths
#### Option 1: Set environment variables on the command line
### 4.2 Run the Demo
```bat
cd Release
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 # CPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1 # GPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 2 # GPU + TensorRT
```
Note: for how to configure the dependency libraries required to run the exe, see the section: [Ways to configure the runtime dependencies of the exe](#CommandLineDeps)
## 5. SDK Usage Option 2: Use the C++ SDK from the Visual Studio 2019 IDE
<div id="VisualStudio2019"></div>
### 5.1 Step 1: Create a "CMake" Project in Visual Studio 2019
<div id="VisualStudio20191"></div>
(1) Open Visual Studio 2019 and click "Create a new project" -> "CMake" to create a CMake project. PPYOLOE is used as the example to show how to use the FastDeploy C++ SDK in the Visual Studio 2019 IDE.
![image](https://user-images.githubusercontent.com/31974251/192143543-9f29e4cb-2307-45ca-a61a-bcfba5df19ff.png)
![image](https://user-images.githubusercontent.com/31974251/192143640-39e79c65-8b50-4254-8da6-baa21bb23e3c.png)
![image](https://user-images.githubusercontent.com/31974251/192143713-be2e6490-4cab-4151-8463-8c367dbc451a.png)
(2) Opening the project, you will find that Visual Studio 2019 has already generated some basic files for us, including CMakeLists.txt. The infer_ppyoloe.h header is not actually needed here and can simply be deleted.
![image](https://user-images.githubusercontent.com/31974251/192143930-db1655c2-66ee-448c-82cb-0103ca1ca2a0.png)
### 5.2 Step 2: Configure the FastDeploy C++ SDK in CMakeLists
<div id="VisualStudio20192"></div>
(1) After the project is created, we need to add the infer_ppyoloe inference source and modify CMakeLists.txt as follows:
![image](https://user-images.githubusercontent.com/31974251/192144782-79bccf8f-65d0-4f22-9f41-81751c530319.png)
(2) The code for infer_ppyoloe.cpp can be copied directly from the examples:
- [examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc](../../examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc)
(3) CMakeLists.txt mainly needs to configure the path of the FastDeploy C++ SDK; for the GPU SDK, CUDA_DIRECTORY must also be set to the CUDA install path. The CMakeLists.txt configuration is as follows:
```cmake
cmake_minimum_required(VERSION 3.12)
project(infer_ppyoloe_demo C CXX)
# Only support "Release" mode now
set(CMAKE_BUILD_TYPE "Release")
# Set FastDeploy install dir
set(FASTDEPLOY_INSTALL_DIR "D:/qiuyanjun/fastdeploy-win-x64-gpu-0.2.1"
CACHE PATH "Path to downloaded or built fastdeploy sdk.")
# Set CUDA_DIRECTORY (CUDA 11.x) for GPU SDK
set(CUDA_DIRECTORY "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.7"
CACHE PATH "Path to installed CUDA Toolkit.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
include_directories(${FASTDEPLOY_INCS})
add_executable(infer_ppyoloe_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe.cpp)
target_link_libraries(infer_ppyoloe_demo ${FASTDEPLOY_LIBS})
# Optional: install all DLLs to binary dir.
install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release)
```
### 5.3 Step 3: Generate the Project Cache and Modify the CMakeSettings.json Configuration
<div id="VisualStudio20193"></div>
(1) Select "CMakeLists.txt" -> right-click "Generate Cache":
![image](https://user-images.githubusercontent.com/31974251/192145349-c78b110a-0e41-4ee5-8942-3bf70bd94a75.png)
The cache is generated successfully, but since the project opens in Debug mode by default, the exe and cache output paths are still the Debug ones. We can first change the CMake settings to Release.
(2) Select "CMakeLists.txt" -> right-click "CMake Settings for infer_ppyoloe_demo" to open the CMakeSettings.json settings panel, and change the Debug configuration to Release.
![image](https://user-images.githubusercontent.com/31974251/192145242-01d37b44-e2fa-47df-82c1-c11c2ccbff99.png)
At the same time, set the CMake generator to "Visual Studio 16 2019 Win64":
![image](https://user-images.githubusercontent.com/31974251/192147961-ac46d0f6-7349-4126-a123-914af2b63d95.jpg)
(3) Click to save the CMake cache in order to switch to the Release configuration:
![image](https://user-images.githubusercontent.com/31974251/192145974-b5a63341-9143-49a2-8bfe-94ac641b1670.png)
(4) 4.1) Select "CMakeLists.txt" -> right-click "CMake Cache (x64-Release only)" -> click "Delete Cache"; 4.2) select "CMakeLists.txt" -> "Generate Cache"; 4.3) if the delete-cache option in 4.1 is grayed out, you can click "CMakeLists.txt" -> "Generate" directly; if generation fails, repeat 4.1 and 4.2.
![image](https://user-images.githubusercontent.com/31974251/192146394-51fbf2b8-1cba-41ca-bb45-5f26890f64ce.jpg)
Finally, you can see that the CMake cache for Release mode has been generated successfully.
![image](https://user-images.githubusercontent.com/31974251/192146239-a1eacd9e-034d-4373-a262-65b18ce25b87.png)
### 5.4 Step 4: Build the Executable and Run It to Get Results
<div id="VisualStudio20194"></div>
(1) Select "CMakeLists.txt" -> "Build". You will find that infer_ppyoloe_demo.exe has been built and saved in the `out/build/x64-Release/Release` directory.
![image](https://user-images.githubusercontent.com/31974251/192146852-c64d2252-8c8f-4309-a950-908a5cb258b8.png)
(2) Run the executable to get the inference results. First, all DLLs need to be copied into the directory containing the exe. Here we can add the following command to CMakeLists.txt to install all of FastDeploy's DLLs into the specified directory:
```cmake
install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release)
```
(3) You also need to download and extract the ppyoloe model files and the test image, then copy them into the exe directory. Once everything is ready, the directory structure looks like this:
![image](https://user-images.githubusercontent.com/31974251/192147505-054edb77-564b-405e-89ee-fd0d2e413e78.png)
(4) Finally, run the following command to get the inference results:
```bat
D:\xxxinfer_ppyoloe\out\build\x64-Release\Release>infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0
[INFO] fastdeploy/runtime.cc(304)::fastdeploy::Runtime::Init Runtime initialized with Backend::OPENVINO in Device::CPU.
DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
415.047180,89.311569, 506.009613, 283.863098, 0.950423, 0
163.665710,81.914932, 198.585342, 166.760895, 0.896433, 0
581.788635,113.027618, 612.623474, 198.521713, 0.842596, 0
267.217224,89.777306, 298.796051, 169.361526, 0.837951, 0
......
153.301407,123.233757, 177.130539, 164.558350, 0.066697, 60
505.887604,140.919601, 523.167236, 151.875336, 0.084912, 67
Visualized result saved in ./vis_result.jpg
```
Open the saved image to view the visualization result:
<div align="center">
<img src="https://user-images.githubusercontent.com/19339784/184326520-7075e907-10ed-4fad-93f8-52d0e35d4964.jpg", width=480px, height=320px />
</div>
Note: for how to configure the dependency libraries required to run the exe, see the section: [Ways to configure the runtime dependencies of the exe](#CommandLineDeps)
## 6. Ways to Configure the Runtime Dependencies of the exe
<div id="CommandLineDeps"></div>
### 6.1 Option 1: Modify CMakeLists.txt, One-Line Configuration (Recommended)
<div id="CommandLineDeps1"></div>
Given the peculiarities of C++ development on Windows, such as frequently needing to copy all lib or dll files into a specified directory, FastDeploy provides the `install_fastdeploy_libraries` CMake function so users can quickly configure all the DLLs. Modify the ppyoloe CMakeLists.txt to add:
```cmake
install_fastdeploy_libraries(${CMAKE_CURRENT_BINARY_DIR}/Release)
```
### 6.2 Option 2: Set Environment Variables on the Command Line
<div id="CommandLineDeps2"></div>
The compiled exe is saved in the Release directory; before running the demo, the model and test image need to be copied into that directory. In addition, the DLL search path needs to be specified in the terminal. Run the following commands in the build directory.
```bat
set FASTDEPLOY_HOME=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.1
@@ -58,17 +237,25 @@ set PATH=%FASTDEPLOY_HOME%\third_libs\install\faster_tokenizer\third_party\lib;%
set PATH=%FASTDEPLOY_HOME%\third_libs\install\yaml-cpp\lib;%PATH%
set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\bin;%PATH%
set PATH=%FASTDEPLOY_HOME%\third_libs\install\openvino\3rdparty\tbb\bin;%PATH%
```
Note: onnxruntime.dll needs to be copied into the directory where the exe resides.
```bat
copy /Y %FASTDEPLOY_HOME%\third_libs\install\onnxruntime\lib\onnxruntime* Release\
```
Newer versions of Windows ship onnxruntime.dll in the System32 system directory, so even with PATH set, the system can still run into an onnxruntime loading conflict. You therefore need to copy the onnxruntime.dll used by the demo into the exe directory first, e.g.:
```bat
where onnxruntime.dll
C:\Windows\System32\onnxruntime.dll # the onnxruntime.dll that ships with Windows
```
You can copy the commands above (including the onnxruntime copy) into a bat script under the build directory, e.g. `setup_fastdeploy_dll.bat`, for repeated use.
```bat
setup_fastdeploy_dll.bat
```
#### Option 2: Copy the dependency libraries into the exe directory
### 6.3 Option 3: Manually Copy the Dependency Libraries into the exe Directory
<div id="CommandLineDeps3"></div>
Copy them manually, or run the following commands in the build directory:
```bat
set FASTDEPLOY_HOME=%cd%\..\..\..\..\..\..\..\fastdeploy-win-x64-gpu-0.2.1
@@ -87,11 +274,21 @@ copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\bin\*.dll Release\
copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\bin\*.xml Release\
copy /Y %FASTDEPLOY_HOME%\third_libs\install\openvino\3rdparty\tbb\bin\*.dll Release\
```
## Run the Demo
```bat
cd Release
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 # CPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1 # GPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 2 # GPU + TensorRT
```
You can copy the commands above into a bat script under the build directory, e.g. `copy_fastdeploy_dll.bat`, for repeated use:
```bat
copy_fastdeploy_dll.bat
```
Note: the dependency library paths in the set and copy commands above should be adjusted to match the dependencies of the SDK you are actually using; for example, the CPU SDK does not need any TensorRT-related settings.
## 7. Use the C++ SDK from the CLion 2022 IDE
<div id="CLion"></div>
- TODO
## 8. Use the C++ SDK from the Visual Studio Code IDE
<div id="VisualStudioCode"></div>
- TODO


@@ -1,4 +1,4 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.


@@ -37,12 +37,10 @@ void CpuInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
@@ -76,12 +74,10 @@ void GpuInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
@@ -117,12 +113,10 @@ void TrtInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(


@@ -37,12 +37,10 @@ void CpuInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
@@ -76,12 +74,10 @@ void GpuInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
@@ -117,12 +113,10 @@ void TrtInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(


@@ -37,12 +37,10 @@ void CpuInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
@@ -76,12 +74,10 @@ void GpuInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
@@ -117,12 +113,10 @@ void TrtInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(


@@ -37,12 +37,10 @@ void CpuInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
@@ -76,12 +74,10 @@ void GpuInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
@@ -117,12 +113,10 @@ void TrtInfer(const std::string& model_file,
std::cout << "Prediction Done!" << std::endl;
// Print the prediction results
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
// Compute the cosine similarity
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(


@@ -21,7 +21,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file,
std::cerr << "Failed to initialize." << std::endl;
return;
}
// Set the inference size; it must match the model file
model.size = {256, 256};
auto im = cv::imread(image_file);
auto im_bak = im.clone();
@@ -52,7 +51,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file,
std::cerr << "Failed to initialize." << std::endl;
return;
}
// Set the inference size; it must match the model file
model.size = {256, 256};
auto im = cv::imread(image_file);
@@ -86,7 +84,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file,
std::cerr << "Failed to initialize." << std::endl;
return;
}
// Set the inference size; it must match the model file
model.size = {256, 256};
auto im = cv::imread(image_file);
auto im_bak = im.clone();


@@ -36,8 +36,8 @@ void InitAndInfer(const std::string& det_model_dir, const std::string& cls_model
assert(det_model.Initialized());
assert(cls_model.Initialized());
assert(rec_model.Initialized());
// The classification model is optional, so the OCR system can also be connected in series as follows
// auto ocr_system_v2 = fastdeploy::application::ocrsystem::PPOCRSystemv2(&det_model, &rec_model);
auto ocr_system_v2 = fastdeploy::application::ocrsystem::PPOCRSystemv2(&det_model, &cls_model, &rec_model);


@@ -37,7 +37,7 @@ void InitAndInfer(const std::string& det_model_dir, const std::string& cls_model
assert(cls_model.Initialized());
assert(rec_model.Initialized());
// The classification model is optional, so the OCR system can also be connected in series as follows
// auto ocr_system_v3 = fastdeploy::application::ocrsystem::PPOCRSystemv3(&det_model, &rec_model);
auto ocr_system_v3 = fastdeploy::application::ocrsystem::PPOCRSystemv3(&det_model, &cls_model, &rec_model);