Merge branch 'develop' of https://github.com/PaddlePaddle/FastDeploy into huawei
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -4,7 +4,7 @@
### PR types(PR类型)

<!-- One of PR types [ Model | Backend | Serving | Quantization | Doc | Bug Fix | Other] -->

### Describe
### Description

<!-- Describe what this PR does -->
@@ -37,6 +37,7 @@ include(${PROJECT_SOURCE_DIR}/cmake/utils.cmake)
if(NOT MSVC)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_FLAGS "-Wno-format")
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1)
endif(NOT MSVC)

if(UNIX AND (NOT APPLE) AND (NOT ANDROID) AND (NOT ENABLE_TIMVX))
@@ -434,13 +435,6 @@ if(ENABLE_VISION)
endif()
endif()

if(ANDROID OR IOS)
if(ENABLE_TEXT)
set(ENABLE_TEXT OFF CACHE BOOL "Force ENABLE_TEXT OFF" FORCE)
message(STATUS "Found Android or IOS, force ENABLE_TEXT OFF. We do not support fast_tokenizer with Android/IOS now.")
endif()
endif()

if(ENABLE_TEXT)
add_definitions(-DENABLE_TEXT)
list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_TEXT_SRCS})
@@ -35,9 +35,12 @@ list(APPEND FASTDEPLOY_INCS ${CMAKE_CURRENT_LIST_DIR}/include)
|
||||
# Note(zhoushunjie): include some useful utils function
|
||||
include(${CMAKE_CURRENT_LIST_DIR}/utils.cmake)
|
||||
|
||||
if(NOT CMAKE_CXX_STANDARD)
|
||||
# Set C++11 as standard for the whole project
|
||||
if(NOT MSVC)
|
||||
set(CMAKE_CXX_STANDARD 11)
|
||||
endif()
|
||||
set(CMAKE_CXX_FLAGS "-Wno-format")
|
||||
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1)
|
||||
endif(NOT MSVC)
|
||||
|
||||
if(ANDROID)
|
||||
add_library(fastdeploy STATIC IMPORTED GLOBAL)
|
||||
@@ -210,11 +213,18 @@ endif()
|
||||
|
||||
if (ENABLE_TEXT)
|
||||
if(ANDROID)
|
||||
message(FATAL_ERROR "Not support fastdeploy text APIs with Android now!")
|
||||
if(NOT ANDROID_TOOLCHAIN MATCHES "clang")
|
||||
message(FATAL_ERROR "Currently, only support clang toolchain while cross compiling FastDeploy for Android with FastTokenizer, but found ${ANDROID_TOOLCHAIN}.")
|
||||
endif()
|
||||
add_library(core_tokenizers STATIC IMPORTED GLOBAL)
|
||||
set_property(TARGET core_tokenizers PROPERTY IMPORTED_LOCATION
|
||||
${CMAKE_CURRENT_LIST_DIR}/third_libs/install/fast_tokenizer/lib/${ANDROID_ABI}/libcore_tokenizers.so)
|
||||
list(APPEND FASTDEPLOY_LIBS core_tokenizers)
|
||||
else()
|
||||
# Add dependency libs later: Linux/Mac/Win/...
|
||||
find_library(FAST_TOKENIZER_LIB core_tokenizers ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/fast_tokenizer/lib NO_DEFAULT_PATH)
|
||||
list(APPEND FASTDEPLOY_LIBS ${FAST_TOKENIZER_LIB})
|
||||
endif()
|
||||
# Add dependency libs later
|
||||
find_library(FAST_TOKENIZER_LIB core_tokenizers ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/fast_tokenizer/lib NO_DEFAULT_PATH)
|
||||
list(APPEND FASTDEPLOY_LIBS ${FAST_TOKENIZER_LIB})
|
||||
list(APPEND FASTDEPLOY_INCS ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/fast_tokenizer/include)
|
||||
list(APPEND FASTDEPLOY_INCS ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/fast_tokenizer/third_party/include)
|
||||
endif()
|
||||
|
||||
0
README_CN.md
Normal file → Executable file
0
README_EN.md
Normal file → Executable file
@@ -24,9 +24,16 @@ set(FASTTOKENIZER_INC_DIR
|
||||
"${FASTTOKENIZER_INSTALL_DIR}/include"
|
||||
"${FASTTOKENIZER_INSTALL_DIR}/third_party/include"
|
||||
CACHE PATH "fast_tokenizer include directory." FORCE)
|
||||
set(FASTTOKENIZER_LIB_DIR
|
||||
"${FASTTOKENIZER_INSTALL_DIR}/lib/"
|
||||
CACHE PATH "fast_tokenizer lib directory." FORCE)
|
||||
if(ANDROID)
|
||||
set(FASTTOKENIZER_LIB_DIR
|
||||
"${FASTTOKENIZER_INSTALL_DIR}/lib/${ANDROID_ABI}"
|
||||
CACHE PATH "fast_tokenizer lib directory." FORCE)
|
||||
else()
|
||||
set(FASTTOKENIZER_LIB_DIR
|
||||
"${FASTTOKENIZER_INSTALL_DIR}/lib/"
|
||||
CACHE PATH "fast_tokenizer lib directory." FORCE)
|
||||
endif()
|
||||
|
||||
set(FASTTOKENIZER_THIRD_LIB_DIR
|
||||
"${FASTTOKENIZER_INSTALL_DIR}/third_party/lib/"
|
||||
CACHE PATH "fast_tokenizer lib directory." FORCE)
|
||||
@@ -37,21 +44,21 @@ include_directories(${FASTTOKENIZER_INC_DIR})
|
||||
|
||||
# Set lib path
|
||||
if(WIN32)
|
||||
set(FASTTOKENIZER_COMPILE_LIB "${FASTTOKENIZER_LIB_DIR}/core_tokenizers.lib"
|
||||
CACHE FILEPATH "fast_tokenizer compile library." FORCE)
|
||||
message("FASTTOKENIZER_COMPILE_LIB = ${FASTTOKENIZER_COMPILE_LIB}")
|
||||
set(ICUDT_LIB "${FASTTOKENIZER_THIRD_LIB_DIR}/icudt.lib")
|
||||
set(ICUUC_LIB "${FASTTOKENIZER_THIRD_LIB_DIR}/icuuc.lib")
|
||||
|
||||
set(FASTTOKENIZER_COMPILE_LIB "${FASTTOKENIZER_LIB_DIR}/core_tokenizers.lib"
|
||||
CACHE FILEPATH "fast_tokenizer compile library." FORCE)
|
||||
set(ICUDT_LIB "${FASTTOKENIZER_THIRD_LIB_DIR}/icudt.lib")
|
||||
set(ICUUC_LIB "${FASTTOKENIZER_THIRD_LIB_DIR}/icuuc.lib")
|
||||
elseif(APPLE)
|
||||
set(FASTTOKENIZER_COMPILE_LIB "${FASTTOKENIZER_LIB_DIR}/libcore_tokenizers.dylib"
|
||||
CACHE FILEPATH "fast_tokenizer compile library." FORCE)
|
||||
set(FASTTOKENIZER_COMPILE_LIB "${FASTTOKENIZER_LIB_DIR}/libcore_tokenizers.dylib"
|
||||
CACHE FILEPATH "fast_tokenizer compile library." FORCE)
|
||||
elseif(ANDROID)
|
||||
set(FASTTOKENIZER_COMPILE_LIB "${FASTTOKENIZER_LIB_DIR}/libcore_tokenizers.so"
|
||||
CACHE FILEPATH "fast_tokenizer compile library." FORCE)
|
||||
else()
|
||||
|
||||
set(FASTTOKENIZER_COMPILE_LIB "${FASTTOKENIZER_LIB_DIR}/libcore_tokenizers.so"
|
||||
CACHE FILEPATH "fast_tokenizer compile library." FORCE)
|
||||
message("FASTTOKENIZER_COMPILE_LIB = ${FASTTOKENIZER_COMPILE_LIB}")
|
||||
set(FASTTOKENIZER_COMPILE_LIB "${FASTTOKENIZER_LIB_DIR}/libcore_tokenizers.so"
|
||||
CACHE FILEPATH "fast_tokenizer compile library." FORCE)
|
||||
endif(WIN32)
|
||||
message("FASTTOKENIZER_COMPILE_LIB = ${FASTTOKENIZER_COMPILE_LIB}")
|
||||
|
||||
set(FASTTOKENIZER_URL_BASE "https://bj.bcebos.com/paddlenlp/fast_tokenizer/")
|
||||
set(FASTTOKENIZER_VERSION "1.0.0")
|
||||
@@ -68,6 +75,15 @@ elseif(APPLE)
|
||||
else()
|
||||
set(FASTTOKENIZER_FILE "fast_tokenizer-osx-x86_64-${FASTTOKENIZER_VERSION}.tgz")
|
||||
endif()
|
||||
elseif(ANDROID)
|
||||
# check ABI, toolchain
|
||||
if((NOT ANDROID_ABI MATCHES "armeabi-v7a") AND (NOT ANDROID_ABI MATCHES "arm64-v8a"))
|
||||
message(FATAL_ERROR "FastDeploy with FastTokenizer on Android only support armeabi-v7a, arm64-v8a now.")
|
||||
endif()
|
||||
if(NOT ANDROID_TOOLCHAIN MATCHES "clang")
|
||||
message(FATAL_ERROR "Currently, only support clang toolchain while cross compiling FastDeploy for Android with FastTokenizer, but found ${ANDROID_TOOLCHAIN}.")
|
||||
endif()
|
||||
set(FASTTOKENIZER_FILE "fast_tokenizer-android-${ANDROID_ABI}-${FASTTOKENIZER_VERSION}.tgz")
|
||||
else()
|
||||
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
|
||||
set(FASTTOKENIZER_FILE "fast_tokenizer-linux-aarch64-${FASTTOKENIZER_VERSION}.tgz")
|
||||
@@ -77,18 +93,39 @@ else()
|
||||
endif()
|
||||
set(FASTTOKENIZER_URL "${FASTTOKENIZER_URL_BASE}${FASTTOKENIZER_FILE}")
|
||||
|
||||
ExternalProject_Add(
|
||||
${FASTTOKENIZER_PROJECT}
|
||||
${EXTERNAL_PROJECT_LOG_ARGS}
|
||||
URL ${FASTTOKENIZER_URL}
|
||||
PREFIX ${FASTTOKENIZER_PREFIX_DIR}
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
UPDATE_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E copy_directory ${FASTTOKENIZER_SOURCE_DIR} ${FASTTOKENIZER_INSTALL_DIR}
|
||||
BUILD_BYPRODUCTS ${FASTTOKENIZER_COMPILE_LIB})
|
||||
if(ANDROID)
|
||||
ExternalProject_Add(
|
||||
${FASTTOKENIZER_PROJECT}
|
||||
${EXTERNAL_PROJECT_LOG_ARGS}
|
||||
URL ${FASTTOKENIZER_URL}
|
||||
PREFIX ${FASTTOKENIZER_PREFIX_DIR}
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
UPDATE_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E remove_directory ${FASTTOKENIZER_INSTALL_DIR} &&
|
||||
${CMAKE_COMMAND} -E make_directory ${FASTTOKENIZER_INSTALL_DIR} &&
|
||||
${CMAKE_COMMAND} -E make_directory ${FASTTOKENIZER_INSTALL_DIR}/lib &&
|
||||
${CMAKE_COMMAND} -E make_directory ${FASTTOKENIZER_INSTALL_DIR}/third_party &&
|
||||
${CMAKE_COMMAND} -E rename ${FASTTOKENIZER_SOURCE_DIR}/lib/ ${FASTTOKENIZER_INSTALL_DIR}/lib/${ANDROID_ABI} &&
|
||||
${CMAKE_COMMAND} -E copy_directory ${FASTTOKENIZER_SOURCE_DIR}/include ${FASTTOKENIZER_INSTALL_DIR}/include &&
|
||||
${CMAKE_COMMAND} -E copy_directory ${FASTTOKENIZER_SOURCE_DIR}/third_party/include ${FASTTOKENIZER_INSTALL_DIR}/third_party/include
|
||||
BUILD_BYPRODUCTS ${FASTTOKENIZER_COMPILE_LIB})
|
||||
else()
|
||||
ExternalProject_Add(
|
||||
${FASTTOKENIZER_PROJECT}
|
||||
${EXTERNAL_PROJECT_LOG_ARGS}
|
||||
URL ${FASTTOKENIZER_URL}
|
||||
PREFIX ${FASTTOKENIZER_PREFIX_DIR}
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
UPDATE_COMMAND ""
|
||||
INSTALL_COMMAND
|
||||
${CMAKE_COMMAND} -E copy_directory ${FASTTOKENIZER_SOURCE_DIR} ${FASTTOKENIZER_INSTALL_DIR}
|
||||
BUILD_BYPRODUCTS ${FASTTOKENIZER_COMPILE_LIB})
|
||||
endif()
|
||||
|
||||
add_library(fast_tokenizer STATIC IMPORTED GLOBAL)
|
||||
set_property(TARGET fast_tokenizer PROPERTY IMPORTED_LOCATION ${FASTTOKENIZER_COMPILE_LIB})
|
||||
|
||||
@@ -41,10 +41,12 @@ elseif(IOS)
|
||||
else()
|
||||
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
|
||||
set(OPENCV_FILENAME "opencv-linux-aarch64-3.4.14")
|
||||
elseif(TARGET_ABI MATCHES "armhf")
|
||||
set(OPENCV_FILENAME "opencv-armv7hf")
|
||||
else()
|
||||
set(OPENCV_FILENAME "opencv-linux-x64-3.4.16")
|
||||
if(ENABLE_TIMVX)
|
||||
set(OPENCV_FILENAME "opencv-armv7hf")
|
||||
else()
|
||||
set(OPENCV_FILENAME "opencv-linux-x64-3.4.16")
|
||||
endif()
|
||||
endif()
|
||||
if(ENABLE_OPENCV_CUDA)
|
||||
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
|
||||
@@ -57,7 +59,7 @@ endif()
|
||||
set(OPENCV_INSTALL_DIR ${THIRD_PARTY_PATH}/install/)
|
||||
if(ANDROID)
|
||||
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/fastdeploy/third_libs")
|
||||
elseif(TARGET_ABI MATCHES "armhf")
|
||||
elseif(ENABLE_TIMVX)
|
||||
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/fastdeploy/test")
|
||||
else() # TODO: use fastdeploy/third_libs instead.
|
||||
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/paddle2onnx/libs")
|
||||
@@ -185,7 +187,7 @@ else()
|
||||
file(RENAME ${THIRD_PARTY_PATH}/install/${OPENCV_FILENAME}/ ${THIRD_PARTY_PATH}/install/opencv)
|
||||
set(OPENCV_FILENAME opencv)
|
||||
set(OpenCV_DIR ${THIRD_PARTY_PATH}/install/${OPENCV_FILENAME})
|
||||
if(TARGET_ABI MATCHES "armhf")
|
||||
if(ENABLE_TIMVX)
|
||||
set(OpenCV_DIR ${OpenCV_DIR}/lib/cmake/opencv4)
|
||||
endif()
|
||||
if (WIN32)
|
||||
|
||||
@@ -43,7 +43,7 @@ else()
endif(WIN32)

set(PADDLE2ONNX_URL_BASE "https://bj.bcebos.com/fastdeploy/third_libs/")
set(PADDLE2ONNX_VERSION "1.0.4rc0")
set(PADDLE2ONNX_VERSION "1.0.4")
if(WIN32)
set(PADDLE2ONNX_FILE "paddle2onnx-win-x64-${PADDLE2ONNX_VERSION}.zip")
if(NOT CMAKE_CL_64)

@@ -48,7 +48,7 @@ endif(WIN32)

set(PADDLEINFERENCE_URL_BASE "https://bj.bcebos.com/fastdeploy/third_libs/")
set(PADDLEINFERENCE_VERSION "2.4-dev3")
set(PADDLEINFERENCE_VERSION "2.4-dev4")
if(WIN32)
if (WITH_GPU)
set(PADDLEINFERENCE_FILE "paddle_inference-win-x64-gpu-trt-${PADDLEINFERENCE_VERSION}.zip")
@@ -62,11 +62,12 @@ else() # Linux
|
||||
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_arm64_huawei_ascend_npu_1121.tgz")
|
||||
else()
|
||||
set(PADDLELITE_URL "${PADDLELITE_URL_PREFIX}/lite-linux-arm64-20220920.tgz")
|
||||
else()
|
||||
if(ENABLE_TIMVX)
|
||||
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_armhf_1130.tgz")
|
||||
else()
|
||||
message(FATAL_ERROR "Only support Linux aarch64 or ENABLE_TIMVX now, x64 is not supported with backend Paddle Lite.")
|
||||
endif()
|
||||
elseif(TARGET_ABI MATCHES "armhf")
|
||||
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/test/lite-linux_armhf_1101.tgz")
|
||||
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64")
|
||||
message(FATAL_ERROR "There is no need to compile on x86_64 for now.")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
if (NOT DEFINED TARGET_ABI)
|
||||
if (NOT DEFINED CMAKE_SYSTEM_PROCESSOR)
|
||||
set(CMAKE_SYSTEM_NAME Linux)
|
||||
set(CMAKE_SYSTEM_PROCESSOR arm)
|
||||
set(CMAKE_C_COMPILER "arm-linux-gnueabihf-gcc")
|
||||
set(CMAKE_CXX_COMPILER "arm-linux-gnueabihf-g++")
|
||||
set(CMAKE_CXX_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_CXX_FLAGS}")
|
||||
set(CMAKE_C_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_C_FLAGS}" )
|
||||
set(TARGET_ABI armhf)
|
||||
set(CMAKE_BUILD_TYPE MinSizeRel)
|
||||
else()
|
||||
if(NOT ${ENABLE_LITE_BACKEND})
|
||||
|
||||
@@ -8,10 +8,10 @@
## Build and Install from Source
- [GPU deployment environment](gpu.md)
- [CPU deployment environment](cpu.md)
- [CPU deployment environment](ipu.md)
- [IPU deployment environment](ipu.md)
- [Jetson deployment environment](jetson.md)
- [Android deployment environment](android.md)
- [Rockchip RK1126 deployment environment](rk1126.md)
- [Rockchip RV1126 deployment environment](rv1126.md)

## FastDeploy Build Options
@@ -22,6 +22,7 @@
| ENABLE_PADDLE_BACKEND | OFF by default; whether to build with the Paddle Inference backend (recommended ON for CPU/GPU) |
| ENABLE_LITE_BACKEND | OFF by default; whether to build with the Paddle Lite backend (must be ON when building the Android library) |
| ENABLE_RKNPU2_BACKEND | OFF by default; whether to build with the RKNPU2 backend (recommended ON for RK3588/RK3568/RK3566) |
| ENABLE_TIMVX | OFF by default; set to ON when deploying on RV1126/RV1109 |
| ENABLE_TRT_BACKEND | OFF by default; whether to build with the TensorRT backend (recommended ON for GPU) |
| ENABLE_OPENVINO_BACKEND | OFF by default; whether to build with the OpenVINO backend (recommended ON for CPU) |
| ENABLE_VISION | OFF by default; whether to build the vision model deployment module |
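As a rough illustration of how these options are typically combined, here is a minimal configure-and-build sketch for a Linux CPU/GPU host build. It only uses flags listed in this table or shown in the build documents elsewhere in this commit; the particular combination is an example, not a required configuration.

```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy && mkdir build && cd build
# Turn on only the backends/modules you need; every ENABLE_* option defaults to OFF.
cmake .. -DENABLE_ORT_BACKEND=ON \
         -DENABLE_PADDLE_BACKEND=ON \
         -DENABLE_OPENVINO_BACKEND=ON \
         -DENABLE_VISION=ON \
         -DCMAKE_INSTALL_PREFIX=${PWD}/installed_fastdeploy
make -j8
make install
```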
@@ -1,63 +0,0 @@
|
||||
# 瑞芯微 RK1126 部署环境编译安装
|
||||
|
||||
FastDeploy基于 Paddle-Lite 后端支持在瑞芯微(Rockchip)Soc 上进行部署推理。
|
||||
更多详细的信息请参考:[PaddleLite部署示例](https://paddle-lite.readthedocs.io/zh/develop/demo_guides/verisilicon_timvx.html)。
|
||||
|
||||
本文档介绍如何编译基于 PaddleLite 的 C++ FastDeploy 交叉编译库。
|
||||
|
||||
相关编译选项说明如下:
|
||||
|编译选项|默认值|说明|备注|
|
||||
|:---|:---|:---|:---|
|
||||
|ENABLE_LITE_BACKEND|OFF|编译RK库时需要设置为ON| - |
|
||||
|
||||
更多编译选项请参考[FastDeploy编译选项说明](./README.md)
|
||||
|
||||
## 交叉编译环境搭建
|
||||
|
||||
### 宿主机环境需求
|
||||
- os:Ubuntu == 16.04
|
||||
- cmake: version >= 3.10.0
|
||||
|
||||
### 环境搭建
|
||||
```bash
|
||||
# 1. Install basic software
|
||||
apt update
|
||||
apt-get install -y --no-install-recommends \
|
||||
gcc g++ git make wget python unzip
|
||||
|
||||
# 2. Install arm gcc toolchains
|
||||
apt-get install -y --no-install-recommends \
|
||||
g++-arm-linux-gnueabi gcc-arm-linux-gnueabi \
|
||||
g++-arm-linux-gnueabihf gcc-arm-linux-gnueabihf \
|
||||
gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
|
||||
|
||||
# 3. Install cmake 3.10 or above
|
||||
wget -c https://mms-res.cdn.bcebos.com/cmake-3.10.3-Linux-x86_64.tar.gz && \
|
||||
tar xzf cmake-3.10.3-Linux-x86_64.tar.gz && \
|
||||
mv cmake-3.10.3-Linux-x86_64 /opt/cmake-3.10 && \
|
||||
ln -s /opt/cmake-3.10/bin/cmake /usr/bin/cmake && \
|
||||
ln -s /opt/cmake-3.10/bin/ccmake /usr/bin/ccmake
|
||||
```
|
||||
|
||||
## 基于 PaddleLite 的 FastDeploy 交叉编译库编译
|
||||
搭建好交叉编译环境之后,编译命令如下:
|
||||
```bash
|
||||
# Download the latest source code
|
||||
git clone https://github.com/PaddlePaddle/FastDeploy.git
|
||||
cd FastDeploy
|
||||
mkdir build && cd build
|
||||
|
||||
# CMake configuration with RK toolchain
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE=./../cmake/timvx.cmake \
|
||||
-DENABLE_TIMVX=ON \
|
||||
-DCMAKE_INSTALL_PREFIX=fastdeploy-tmivx \
|
||||
-DENABLE_VISION=ON \ # 是否编译集成视觉模型的部署模块,可选择开启
|
||||
-Wno-dev ..
|
||||
|
||||
# Build FastDeploy RK1126 C++ SDK
|
||||
make -j8
|
||||
make install
|
||||
```
|
||||
编译完成之后,会生成 fastdeploy-tmivx 目录,表示基于 PadddleLite TIM-VX 的 FastDeploy 库编译完成。
|
||||
|
||||
RK1126 上部署 PaddleClas 分类模型请参考:[PaddleClas RK1126开发板 C++ 部署示例](../../../examples/vision/classification/paddleclas/rk1126/README.md)
|
||||
100
docs/cn/build_and_install/rv1126.md
Executable file
@@ -0,0 +1,100 @@
|
||||
# 瑞芯微 RV1126 部署环境编译安装
|
||||
|
||||
FastDeploy基于 Paddle-Lite 后端支持在瑞芯微(Rockchip)Soc 上进行部署推理。
|
||||
更多详细的信息请参考:[PaddleLite部署示例](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html)。
|
||||
|
||||
本文档介绍如何编译基于 PaddleLite 的 C++ FastDeploy 交叉编译库。
|
||||
|
||||
相关编译选项说明如下:
|
||||
|编译选项|默认值|说明|备注|
|
||||
|:---|:---|:---|:---|
|
||||
|ENABLE_LITE_BACKEND|OFF|编译RK库时需要设置为ON| - |
|
||||
|ENABLE_TIMVX|OFF|编译RK库时需要设置为ON| - |
|
||||
|
||||
更多编译选项请参考[FastDeploy编译选项说明](./README.md)
|
||||
|
||||
## 交叉编译环境搭建
|
||||
|
||||
### 宿主机环境需求
|
||||
- os:Ubuntu == 16.04
|
||||
- cmake: version >= 3.10.0
|
||||
|
||||
### 环境搭建
|
||||
```bash
|
||||
# 1. Install basic software
|
||||
apt update
|
||||
apt-get install -y --no-install-recommends \
|
||||
gcc g++ git make wget python unzip
|
||||
|
||||
# 2. Install arm gcc toolchains
|
||||
apt-get install -y --no-install-recommends \
|
||||
g++-arm-linux-gnueabi gcc-arm-linux-gnueabi \
|
||||
g++-arm-linux-gnueabihf gcc-arm-linux-gnueabihf \
|
||||
gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
|
||||
|
||||
# 3. Install cmake 3.10 or above
|
||||
wget -c https://mms-res.cdn.bcebos.com/cmake-3.10.3-Linux-x86_64.tar.gz && \
|
||||
tar xzf cmake-3.10.3-Linux-x86_64.tar.gz && \
|
||||
mv cmake-3.10.3-Linux-x86_64 /opt/cmake-3.10 && \
|
||||
ln -s /opt/cmake-3.10/bin/cmake /usr/bin/cmake && \
|
||||
ln -s /opt/cmake-3.10/bin/ccmake /usr/bin/ccmake
|
||||
```
|
||||
|
||||
## 基于 PaddleLite 的 FastDeploy 交叉编译库编译
|
||||
搭建好交叉编译环境之后,编译命令如下:
|
||||
```bash
|
||||
# Download the latest source code
|
||||
git clone https://github.com/PaddlePaddle/FastDeploy.git
|
||||
cd FastDeploy
|
||||
mkdir build && cd build
|
||||
|
||||
# CMake configuration with RK toolchain
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE=./../cmake/timvx.cmake \
|
||||
-DENABLE_TIMVX=ON \
|
||||
-DCMAKE_INSTALL_PREFIX=fastdeploy-tmivx \
|
||||
-DENABLE_VISION=ON \ # 是否编译集成视觉模型的部署模块,可选择开启
|
||||
-Wno-dev ..
|
||||
|
||||
# Build FastDeploy RV1126 C++ SDK
|
||||
make -j8
|
||||
make install
|
||||
```
|
||||
编译完成之后,会生成 fastdeploy-tmivx 目录,表示基于 PadddleLite TIM-VX 的 FastDeploy 库编译完成。
|
||||
|
||||
## Prepare the Device Runtime Environment
Before deployment, make sure the version of the VeriSilicon Linux kernel NPU driver galcore.so matches the target chip and the dependency libraries. Log in to the board and run the command below to query the NPU driver version; the driver version recommended by Rockchip is 6.4.6.5.
```bash
dmesg | grep Galcore
```

If the current version does not match, read the following carefully to make sure the underlying NPU driver environment is correct.

There are two ways to change the current NPU driver version:
1. Manually replace the NPU driver. (Recommended)
2. Reflash the board with firmware that ships the required NPU driver version.

### Manually Replace the NPU Driver
1. Download and extract the PaddleLite demo package, which provides a ready-made driver file:
```bash
wget https://paddlelite-demo.bj.bcebos.com/devices/generic/PaddleLite-generic-demo.tar.gz
tar -xf PaddleLite-generic-demo.tar.gz
```
2. Run `uname -a` to check the `Linux Kernel` version and confirm the `Linux` system is at version 4.19.111.
3. Upload the `galcore.ko` under `PaddleLite-generic-demo/libs/PaddleLite/linux/armhf/lib/verisilicon_timvx/viv_sdk_6_4_6_5/lib/1126/4.19.111/` to the board.
4. Log in to the board and run `sudo rmmod galcore` to unload the original driver, then `sudo insmod galcore.ko` to load the uploaded driver. (Whether `sudo` is needed depends on the board; for some adb-connected devices, run `adb root` first.) If this step fails, switch to method 2.
5. On the board, run `dmesg | grep Galcore` and confirm the NPU driver version is now 6.4.6.5. The commands for steps 2–5 are collected in the sketch below.
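The on-board part of steps 2–5, gathered into one hedged sketch. The `galcore.ko` path is the one from the demo package above; the use of `adb push` and the `/tmp/` destination are assumptions about your setup, so adapt the transfer step to however you reach the board.

```bash
# Host side: copy the matching driver to the board (transfer method and
# destination directory are assumptions; scp works equally well).
adb push PaddleLite-generic-demo/libs/PaddleLite/linux/armhf/lib/verisilicon_timvx/viv_sdk_6_4_6_5/lib/1126/4.19.111/galcore.ko /tmp/

# Board side: verify the kernel, swap the NPU driver, then re-check the version.
uname -a               # expect Linux kernel 4.19.111
cd /tmp
sudo rmmod galcore     # unload the original driver
sudo insmod galcore.ko # load the uploaded 6.4.6.5 driver
dmesg | grep Galcore   # should now report 6.4.6.5
```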
### Reflash the Firmware
Depending on the specific board model, ask the board vendor or official customer support for firmware that ships the 6.4.6.5 NPU driver and for the corresponding reflashing procedure.

For more details, see: [PaddleLite: Prepare the Device Environment](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html#zhunbeishebeihuanjing)

## FastDeploy Deployment Examples on RV1126
1. Deploying PaddleClas classification models on RV1126: [PaddleClas classification C++ deployment example on RV1126](../../../examples/vision/classification/paddleclas/rv1126/README.md)
2. Deploying PPYOLOE detection models on RV1126: [PPYOLOE detection C++ deployment example on RV1126](../../../examples/vision/detection/paddledetection/rv1126/README.md)
3. Deploying YOLOv5 detection models on RV1126: [YOLOv5 detection C++ deployment example on RV1126](../../../examples/vision/detection/yolov5/rv1126/README.md)
4. Deploying PP-LiteSeg segmentation models on RV1126: [PP-LiteSeg segmentation C++ deployment example on RV1126](../../../examples/vision/segmentation/paddleseg/rv1126/README.md)
35
docs/cn/faq/custom_opencv.md
Normal file
@@ -0,0 +1,35 @@
[English](../../en/faq/custom_opencv.md) | 中文

# Use a Custom OpenCV Version

Due to platform constraints, the OpenCV bundled with the prebuilt FastDeploy packages on **Linux** cannot read video files or call operations such as `imshow`. Developers who need these features can build FastDeploy themselves by following this document.

FastDeploy supports `-DOPENCV_DIRECTORY` to point the build at the OpenCV installed in your environment. Taking Ubuntu as an example, build and install as follows.

## CPU C++ SDK

### 1. Install OpenCV
```
sudo apt-get install libopencv-dev
```

### 2. Build FastDeploy Against the Specified OpenCV
```
git clone https://github.com/PaddlePaddle/FastDeploy
cd FastDeploy
mkdir build && cd build
cmake .. -DENABLE_ORT_BACKEND=ON \
         -DENABLE_PADDLE_BACKEND=ON \
         -DENABLE_OPENVINO_BACKEND=ON \
         -DENABLE_VISION=ON \
         -DCMAKE_INSTALL_PREFIX=${PWD}/installed_fastdeploy \
         -DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
make -j8
make install
```
The resulting C++ SDK is the `installed_fastdeploy` directory under the current directory; use this new SDK instead of the prebuilt one.

Building for other deployment hardware works the same way: pass `-DOPENCV_DIRECTORY` to point at the OpenCV in your environment. Note that the path `/usr/lib/x86_64-linux-gnu/cmake/opencv4` must be adjusted to your actual environment; the directory should contain files such as `OpenCVConfig-version.cmake` and `OpenCVConfig.cmake`. A quick way to locate this directory is sketched below.
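If you are unsure where your distribution installed the OpenCV CMake package files, one way to locate the directory to pass to `-DOPENCV_DIRECTORY` (a generic sketch, not FastDeploy-specific) is:

```bash
# Look for OpenCV's CMake package configuration; pass the directory that
# contains OpenCVConfig.cmake to -DOPENCV_DIRECTORY.
find /usr -name "OpenCVConfig.cmake" 2>/dev/null
# On Ubuntu with libopencv-dev this typically prints something like:
#   /usr/lib/x86_64-linux-gnu/cmake/opencv4/OpenCVConfig.cmake
```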
- [Building FastDeploy for more deployment environments](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/README_CN.md)
10
docs/cn/quantize.md
Normal file → Executable file
@@ -36,7 +36,7 @@ FastDeploy, based on PaddleSlim's Auto Compression Toolkit (ACT), provides users with
Currently, the Runtime Benchmark and end-to-end Benchmark results for the models that FastDeploy supports with automatic compression and has verified for deployment are listed below.

Notes on the benchmark tables:
- Rtuntime latency is the model's inference latency on each Runtime, including CPU->GPU data copy, GPU inference, and GPU->CPU data copy. It does not include each model's pre/post-processing time.
- Runtime latency is the model's inference latency on each Runtime, including CPU->GPU data copy, GPU inference, and GPU->CPU data copy. It does not include each model's pre/post-processing time.
- End-to-end latency is the model's latency in a real inference scenario, including pre/post-processing.
- All reported latencies are averages over 1000 inference runs, in milliseconds.
- INT8 + FP16 means the FP16 inference option is enabled on the Runtime while running the INT8 quantized model.
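To make the last note concrete, "enabling the FP16 inference option on the Runtime" corresponds to a RuntimeOption configured roughly as below. This is only a sketch assembled from the RuntimeOption calls that appear in the StableDiffusion C++ example later in this commit; the model file names are placeholders for an INT8 quantized model, and the header path is assumed.

```cpp
#include "fastdeploy/runtime.h"  // assumed header for the Runtime/RuntimeOption API

// Sketch: run an INT8 quantized Paddle model with TensorRT and FP16 enabled,
// as in the GPU "INT8 + FP16" benchmark columns.
fastdeploy::RuntimeOption MakeInt8Fp16Option() {
  fastdeploy::RuntimeOption option;
  option.SetModelPath("model.pdmodel", "model.pdiparams",  // placeholder paths
                      fastdeploy::ModelFormat::PADDLE);
  option.UseGpu();          // benchmarks above that use TensorRT run on GPU
  option.UseTrtBackend();   // TensorRT backend
  option.EnableTrtFP16();   // the "FP16 inference option"
  return option;
}
```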
@@ -63,7 +63,7 @@ Benchmark表格说明:
|
||||
| [YOLOv7](../../examples/vision/detection/yolov7/quantize/) | Paddle Inference | CPU | 995.85 | 477.93|None|None | 2.08 |51.1 | 46.2|量化蒸馏训练 |
|
||||
|
||||
#### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [YOLOv5s](../../examples/vision/detection/yolov5/quantize/) | TensorRT | GPU | 24.61 | 21.20 | 20.78 | 20.94 | 1.18 | 37.6 | 36.7 | 量化蒸馏训练 |
|
||||
| [YOLOv5s](../../examples/vision/detection/yolov5/quantize/) | Paddle-TensorRT | GPU | 23.53 | None | 21.98 | 19.84 | 1.28 | 37.6 | 36.8 | 量化蒸馏训练 |
|
||||
@@ -94,7 +94,7 @@ Benchmark表格说明:
|
||||
| [MobileNetV1_ssld](../../examples/vision/classification/paddleclas/quantize/) | Paddle Inference | CPU | 12.29 | 4.68 | None|None|2.62 |77.89 | 71.36 |离线量化 |
|
||||
|
||||
#### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 Top1 | INT8 Top1 | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 Top1 | INT8 Top1 | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [ResNet50_vd](../../examples/vision/classification/paddleclas/quantize/) | TensorRT | GPU | 4.92| 2.28|2.24|2.23 | 2.21 | 79.12 | 79.06 | 离线量化 |
|
||||
| [ResNet50_vd](../../examples/vision/classification/paddleclas/quantize/) | Paddle-TensorRT | GPU | 4.48|None |2.09|2.10 | 2.14 | 79.12 | 79.06 | 离线量化 |
|
||||
@@ -119,7 +119,7 @@ NOTE:
|
||||
- TensorRT比Paddle-TensorRT快的原因是在runtime移除了multiclass_nms3算子
|
||||
|
||||
#### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [ppyoloe_crn_l_300e_coco](../../examples/vision/detection/paddledetection/quantize ) | TensorRT | GPU | 35.75 | 15.42 |20.70|20.85 | 2.32 | 51.4 | 50.7 | 量化蒸馏训练 |
|
||||
| [ppyoloe_crn_l_300e_coco](../../examples/vision/detection/paddledetection/quantize ) | Paddle-TensorRT | GPU | 33.48 |None | 18.47 |18.03 | 1.81 | 51.4 | 50.5| 量化蒸馏训练 |
|
||||
@@ -134,6 +134,6 @@ NOTE:
|
||||
| [PP-LiteSeg-T(STDC1)-cityscapes](../../examples/vision/segmentation/paddleseg/quantize) | Paddle Inference | CPU | 1138.04| 602.62 |None|None | 1.89 |77.37 | 71.62 |量化蒸馏训练 |
|
||||
|
||||
#### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 mIoU | INT8 mIoU | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 mIoU | INT8 mIoU | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [PP-LiteSeg-T(STDC1)-cityscapes](../../examples/vision/segmentation/paddleseg/quantize) | Paddle Inference | CPU | 4726.65| 4134.91|None|None | 1.14 |77.37 | 71.62 |量化蒸馏训练 |
|
||||
|
||||
37
docs/en/faq/custom_opencv.md
Normal file
@@ -0,0 +1,37 @@
English | [中文](../../cn/faq/custom_opencv.md)

# Use Your Own OpenCV Library

The prebuilt FastDeploy library ships with a built-in OpenCV that cannot read video files or call `imshow`, because the prebuilt packages have to be built against manylinux. If you need video reading or other functions provided by OpenCV, this document shows how to build FastDeploy with the OpenCV in your own environment.

FastDeploy provides the flag `-DOPENCV_DIRECTORY` to set the path of the OpenCV library; the following steps show how to build the CPU C++ SDK on Ubuntu.

## CPU C++ SDK

### 1. Install OpenCV

```
sudo apt-get install libopencv-dev
```

### 2. Build FastDeploy

```
git clone https://github.com/PaddlePaddle/FastDeploy
cd FastDeploy
mkdir build && cd build
cmake .. -DENABLE_ORT_BACKEND=ON \
         -DENABLE_PADDLE_BACKEND=ON \
         -DENABLE_OPENVINO_BACKEND=ON \
         -DENABLE_VISION=ON \
         -DCMAKE_INSTALL_PREFIX=${PWD}/installed_fastdeploy \
         -DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4
make -j8
make install
```

Now the C++ SDK is in the current directory as `installed_fastdeploy`; this library can use all the functions from your own OpenCV build.

This document also applies to other hardware deployments (GPU/IPU/XPU, ...) on the Linux platform.

- [More options to build FastDeploy](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/README_EN.md)
12
examples/multimodal/stable_diffusion/cpp/README.md
Normal file
@@ -0,0 +1,12 @@
# StableDiffusion C++ Deployment Example

Before deployment, confirm the following two steps:

- 1. The software and hardware environment meets the requirements; see [FastDeploy environment requirements](../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the prebuilt deployment library and sample code for your development environment; see [FastDeploy prebuilt libraries](../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

This directory provides `*_infer.cc` examples for quickly deploying the various StableDiffusion tasks in C++.

## Inpaint Task

The StableDiffusion Inpaint task completes an image according to a text prompt: given a prompt, an original image, and a mask image of the original, it outputs the completed image. The core call is sketched below.
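To make the inputs and outputs concrete, this is a trimmed sketch of the inpaint call taken from the `*_infer.cc` diff below; `pipe` is a `fastdeploy::StableDiffusionInpaintPipeline` constructed as in that example, and the file names are the sample assets used there, so this fragment is illustrative rather than standalone.

```cpp
// Prompt, original image and mask image in, completed image out.
std::vector<std::string> prompts = {
    "Face of a yellow cat, high resolution, sitting on a park bench"};
auto image = cv::imread("overture-creations.png");
auto mask_image = cv::imread("overture-creations-mask.png");

std::vector<fastdeploy::FDTensor> outputs;
pipe.Predict(prompts, image, mask_image, &outputs,
             /* height = */ 512, /* width = */ 512,
             /* num_inference_steps = */ 50);
```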
@@ -12,16 +12,23 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "dpm_solver_multistep_scheduler.h"
|
||||
#include "fastdeploy/vision/common/processors/mat.h"
|
||||
#include "./dpm_solver_multistep_scheduler.h"
|
||||
#include "./pipeline_stable_diffusion_inpaint.h"
|
||||
#include "fastdeploy/utils/perf.h"
|
||||
#include "fastdeploy/vision/common/processors/mat.h"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
#include "pipeline_stable_diffusion_inpaint.h"
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
|
||||
#ifdef WIN32
|
||||
const char sep = '\\';
|
||||
#else
|
||||
const char sep = '/';
|
||||
#endif
|
||||
|
||||
template <typename T> std::string Str(const T* value, int size) {
|
||||
std::ostringstream oss;
|
||||
@@ -33,17 +40,40 @@ template <typename T> std::string Str(const T* value, int size) {
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
std::unique_ptr<fastdeploy::Runtime>
|
||||
CreateRuntime(const std::string& model_file, const std::string& params_file,
|
||||
bool use_paddle_backend = true) {
|
||||
std::unique_ptr<fastdeploy::Runtime> CreateRuntime(
|
||||
const std::string& model_file, const std::string& params_file,
|
||||
bool use_trt_backend = false, bool use_fp16 = false,
|
||||
const std::unordered_map<std::string, std::vector<std::vector<int>>>&
|
||||
dynamic_shapes = {},
|
||||
const std::vector<std::string>& disable_paddle_trt_ops = {}) {
|
||||
fastdeploy::RuntimeOption runtime_option;
|
||||
runtime_option.SetModelPath(model_file, params_file,
|
||||
fastdeploy::ModelFormat::PADDLE);
|
||||
runtime_option.UseGpu();
|
||||
if (use_paddle_backend) {
|
||||
if (!use_trt_backend) {
|
||||
runtime_option.UsePaddleBackend();
|
||||
} else {
|
||||
runtime_option.UseOrtBackend();
|
||||
runtime_option.UseTrtBackend();
|
||||
runtime_option.EnablePaddleToTrt();
|
||||
for (auto it = dynamic_shapes.begin(); it != dynamic_shapes.end(); ++it) {
|
||||
if (it->second.size() != 3) {
|
||||
std::cerr << "The size of dynamic_shapes of input `" << it->first
|
||||
<< "` should be 3, but receive " << it->second.size()
|
||||
<< std::endl;
|
||||
continue;
|
||||
}
|
||||
std::vector<int> min_shape = (it->second)[0];
|
||||
std::vector<int> opt_shape = (it->second)[1];
|
||||
std::vector<int> max_shape = (it->second)[2];
|
||||
runtime_option.SetTrtInputShape(it->first, min_shape, opt_shape,
|
||||
max_shape);
|
||||
}
|
||||
runtime_option.SetTrtCacheFile("paddle.trt");
|
||||
runtime_option.EnablePaddleTrtCollectShape();
|
||||
runtime_option.DisablePaddleTrtOPs(disable_paddle_trt_ops);
|
||||
if (use_fp16) {
|
||||
runtime_option.EnableTrtFP16();
|
||||
}
|
||||
}
|
||||
std::unique_ptr<fastdeploy::Runtime> runtime =
|
||||
std::unique_ptr<fastdeploy::Runtime>(new fastdeploy::Runtime());
|
||||
@@ -59,6 +89,20 @@ CreateRuntime(const std::string& model_file, const std::string& params_file,
|
||||
}
|
||||
|
||||
int main() {
|
||||
// 0. Init all configs
|
||||
std::string model_dir = "sd15_inpaint";
|
||||
int max_length = 77;
|
||||
bool use_trt_backend = true;
|
||||
bool use_fp16 = true;
|
||||
int batch_size = 1;
|
||||
int num_images_per_prompt = 1;
|
||||
int num_inference_steps = 50;
|
||||
|
||||
int height = 512;
|
||||
int width = 512;
|
||||
constexpr int unet_inpaint_channels = 9;
|
||||
constexpr int latents_channels = 4;
|
||||
|
||||
// 1. Init scheduler
|
||||
std::unique_ptr<fastdeploy::Scheduler> dpm(
|
||||
new fastdeploy::DPMSolverMultistepScheduler(
|
||||
@@ -77,54 +121,116 @@ int main() {
|
||||
/* lower_order_final = */ true));
|
||||
|
||||
// 2. Init text encoder runtime
|
||||
std::string text_model_file = "sd15_inpaint/text_encoder/inference.pdmodel";
|
||||
std::string text_params_file =
|
||||
"sd15_inpaint/text_encoder/inference.pdiparams";
|
||||
std::unordered_map<std::string, std::vector<std::vector<int>>>
|
||||
text_dynamic_shape = {{"input_ids",
|
||||
{/* min_shape */ {1, max_length},
|
||||
/* opt_shape */ {batch_size, max_length},
|
||||
/* max_shape */ {2 * batch_size, max_length}}}};
|
||||
std::string text_model_dir = model_dir + sep + "text_encoder";
|
||||
std::string text_model_file = text_model_dir + sep + "inference.pdmodel";
|
||||
std::string text_params_file = text_model_dir + sep + "inference.pdiparams";
|
||||
std::unique_ptr<fastdeploy::Runtime> text_encoder_runtime =
|
||||
CreateRuntime(text_model_file, text_params_file, false);
|
||||
CreateRuntime(text_model_file, text_params_file, use_trt_backend,
|
||||
use_fp16, text_dynamic_shape);
|
||||
|
||||
// 3. Init vae encoder runtime
|
||||
std::unordered_map<std::string, std::vector<std::vector<int>>>
|
||||
vae_encoder_dynamic_shape = {
|
||||
{"sample",
|
||||
{/* min_shape */ {1, 3, height, width},
|
||||
/* opt_shape */ {2 * batch_size, 3, height, width},
|
||||
/* max_shape */ {2 * batch_size, 3, height, width}}}};
|
||||
std::string vae_encoder_model_dir = model_dir + sep + "vae_encoder";
|
||||
std::string vae_encoder_model_file =
|
||||
"sd15_inpaint/vae_encoder/inference.pdmodel";
|
||||
vae_encoder_model_dir + sep + "inference.pdmodel";
|
||||
std::string vae_encoder_params_file =
|
||||
"sd15_inpaint/vae_encoder/inference.pdiparams";
|
||||
vae_encoder_model_dir + sep + "inference.pdiparams";
|
||||
std::unique_ptr<fastdeploy::Runtime> vae_encoder_runtime =
|
||||
CreateRuntime(vae_encoder_model_file, vae_encoder_params_file);
|
||||
CreateRuntime(vae_encoder_model_file, vae_encoder_params_file,
|
||||
use_trt_backend, use_fp16, vae_encoder_dynamic_shape);
|
||||
|
||||
// 4. Init vae decoder runtime
|
||||
std::unordered_map<std::string, std::vector<std::vector<int>>>
|
||||
vae_decoder_dynamic_shape = {
|
||||
{"latent_sample",
|
||||
{/* min_shape */ {1, latents_channels, height / 8, width / 8},
|
||||
/* opt_shape */
|
||||
{2 * batch_size, latents_channels, height / 8, width / 8},
|
||||
/* max_shape */
|
||||
{2 * batch_size, latents_channels, height / 8, width / 8}}}};
|
||||
std::string vae_decoder_model_dir = model_dir + sep + "vae_decoder";
|
||||
std::string vae_decoder_model_file =
|
||||
"sd15_inpaint/vae_decoder/inference.pdmodel";
|
||||
vae_decoder_model_dir + sep + "inference.pdmodel";
|
||||
std::string vae_decoder_params_file =
|
||||
"sd15_inpaint/vae_decoder/inference.pdiparams";
|
||||
vae_decoder_model_dir + sep + "inference.pdiparams";
|
||||
std::unique_ptr<fastdeploy::Runtime> vae_decoder_runtime =
|
||||
CreateRuntime(vae_decoder_model_file, vae_decoder_params_file);
|
||||
CreateRuntime(vae_decoder_model_file, vae_decoder_params_file,
|
||||
use_trt_backend, use_fp16, vae_decoder_dynamic_shape);
|
||||
|
||||
// 5. Init unet runtime
|
||||
std::string unet_model_file = "sd15_inpaint/unet/inference.pdmodel";
|
||||
std::string unet_params_file = "sd15_inpaint/unet/inference.pdiparams";
|
||||
std::unordered_map<std::string, std::vector<std::vector<int>>>
|
||||
unet_dynamic_shape = {
|
||||
{"sample",
|
||||
{/* min_shape */ {1, unet_inpaint_channels, height / 8, width / 8},
|
||||
/* opt_shape */
|
||||
{2 * batch_size, unet_inpaint_channels, height / 8, width / 8},
|
||||
/* max_shape */
|
||||
{2 * batch_size, unet_inpaint_channels, height / 8, width / 8}}},
|
||||
{"timesteps", {{1}, {1}, {1}}},
|
||||
{"encoder_hidden_states",
|
||||
{{1, max_length, 768},
|
||||
{2 * batch_size, max_length, 768},
|
||||
{2 * batch_size, max_length, 768}}}};
|
||||
std::vector<std::string> unet_disable_paddle_trt_ops = {"sin", "cos"};
|
||||
std::string unet_model_dir = model_dir + sep + "unet";
|
||||
std::string unet_model_file = unet_model_dir + sep + "inference.pdmodel";
|
||||
std::string unet_params_file = unet_model_dir + sep + "inference.pdiparams";
|
||||
std::unique_ptr<fastdeploy::Runtime> unet_runtime =
|
||||
CreateRuntime(unet_model_file, unet_params_file);
|
||||
CreateRuntime(unet_model_file, unet_params_file, use_trt_backend,
|
||||
use_fp16, unet_dynamic_shape, unet_disable_paddle_trt_ops);
|
||||
|
||||
// 6. Init fast tokenizer
|
||||
paddlenlp::fast_tokenizer::tokenizers_impl::ClipFastTokenizer tokenizer(
|
||||
"clip/vocab.json", "clip/merges.txt", /* max_length = */ 77);
|
||||
"clip/vocab.json", "clip/merges.txt", /* max_length = */ max_length);
|
||||
fastdeploy::StableDiffusionInpaintPipeline pipe(
|
||||
std::move(vae_encoder_runtime), std::move(vae_decoder_runtime),
|
||||
std::move(text_encoder_runtime), std::move(unet_runtime),
|
||||
/* scheduler = */ std::move(dpm), tokenizer);
|
||||
/* vae_encoder = */ std::move(vae_encoder_runtime),
|
||||
/* vae_decoder = */ std::move(vae_decoder_runtime),
|
||||
/* text_encoder = */ std::move(text_encoder_runtime),
|
||||
/* unet = */ std::move(unet_runtime),
|
||||
/* scheduler = */ std::move(dpm),
|
||||
/* tokenizer = */ tokenizer);
|
||||
|
||||
// 7. Read images
|
||||
auto image = cv::imread("overture-creations.png");
|
||||
auto mask_image = cv::imread("overture-creations-mask.png");
|
||||
|
||||
// 8. Predict
|
||||
/*
|
||||
* One may need to pass the initial noise to predict api.
|
||||
* There's an example:
|
||||
* std::vector<float> latents_data = {xxxx};
|
||||
* fastdeploy::FDTensor latents;
|
||||
* latents.SetExternalData({batch_size * num_images_per_prompt, latents_channels, height / 8, width / 8},fastdeploy::FDDataType::FP32, latents_data.data());
|
||||
* pipe.Predict(..., /* latents = *\/ &latents, ....);
|
||||
*/
|
||||
std::vector<std::string> prompts = {
|
||||
"Face of a yellow cat, high resolution, sitting on a park bench"};
|
||||
std::vector<fastdeploy::FDTensor> outputs;
|
||||
fastdeploy::TimeCounter tc;
|
||||
tc.Start();
|
||||
pipe.Predict(prompts, image, mask_image, &outputs, /* height = */ 512,
|
||||
/* width = */ 512, /* num_inference_steps = */ 50);
|
||||
pipe.Predict(prompts, image, mask_image, &outputs,
|
||||
/* height = */ height,
|
||||
/* width = */ width,
|
||||
/* num_inference_steps = */ num_inference_steps,
|
||||
/* guidance_scale = */ 7.5,
|
||||
/* negative_prompt = */ {},
|
||||
/* num_images_per_prompt = */ num_images_per_prompt,
|
||||
/* eta = */ 1.0,
|
||||
/* max_length = */ max_length,
|
||||
/* latents = */ nullptr,
|
||||
/* output_cv_mat = */ true,
|
||||
/* callback = */ nullptr,
|
||||
/* callback_steps = */ 1);
|
||||
tc.End();
|
||||
tc.PrintInfo();
|
||||
fastdeploy::vision::FDMat mat = fastdeploy::vision::FDMat::Create(outputs[0]);
|
||||
|
||||
@@ -49,7 +49,8 @@ void StableDiffusionInpaintPipeline::PrepareMaskAndMaskedImage(
|
||||
float_mask[i] = 1;
|
||||
}
|
||||
}
|
||||
image_mask.SetExternalData({1, 1, shape[1] * 8, shape[0] * 8},
|
||||
// NCHW format
|
||||
image_mask.SetExternalData({1, 1, shape[0] * 8, shape[1] * 8},
|
||||
FDDataType::FP32, float_mask.data());
|
||||
|
||||
// Set mask_image
|
||||
@@ -314,9 +315,6 @@ void StableDiffusionInpaintPipeline::Predict(
|
||||
vision::FDMat mask_fdmat_t = vision::FDMat::Create((*output_images)[i]);
|
||||
vision::RGB2BGR::Run(&mask_fdmat_t, vision::ProcLib::OPENCV);
|
||||
mask_fdmat_t.CopyToTensor(&(*output_images)[i]);
|
||||
FDTensor sum;
|
||||
function::Sum((*output_images)[i], &sum, {}, false, true);
|
||||
FDINFO << "sum = " << ((float*)sum.Data())[0] << std::endl;
|
||||
}
|
||||
}
|
||||
} // namespace fastdeploy
|
||||
|
||||
6
examples/vision/classification/paddleclas/quantize/README.md
Normal file → Executable file
@@ -4,14 +4,14 @@ FastDeploy supports deploying quantized models and provides a one-click automatic model compression tool
## FastDeploy One-Click Automatic Model Compression Tool
FastDeploy provides a one-click automatic model compression tool that can quantize a model simply from a single configuration file.
For a detailed tutorial, see: [One-click automatic model compression tool](../../../../../tools/auto_compression/)
For a detailed tutorial, see: [One-click automatic model compression tool](../../../../../tools/common_tools/auto_compression/)
Note: running inference with a quantized classification model still requires the inference_cls.yaml file from the FP32 model folder. A model folder you quantized yourself does not contain this yaml file; simply copy it from the FP32 model folder into the quantized model folder, for example as shown below.
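A hedged illustration of that copy step; the directory names are placeholders, so substitute your actual FP32 and quantized model folders.

```bash
# Copy the preprocessing config the quantized model still needs at inference time.
cp ResNet50_vd_infer/inference_cls.yaml ResNet50_vd_quantized/
```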
|
||||
## 下载量化完成的PaddleClas模型
|
||||
用户也可以直接下载下表中的量化模型进行部署.
|
||||
|
||||
Benchmark表格说明:
|
||||
- Rtuntime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- Runtime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- 端到端时延为模型在实际推理场景中的时延, 包含模型的前后处理.
|
||||
- 所测时延均为推理1000次后求得的平均值, 单位是毫秒.
|
||||
- INT8 + FP16 为在推理INT8量化模型的同时, 给Runtime 开启FP16推理选项
|
||||
@@ -33,7 +33,7 @@ Benchmark表格说明:
|
||||
| [MobileNetV1_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/mobilenetv1_ssld_ptq.tar) | Paddle Inference | CPU | 12.29 | 4.68 | None|None|2.62 |77.89 | 71.36 |离线量化 |
|
||||
|
||||
### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 Top1 | INT8 Top1 | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 Top1 | INT8 Top1 | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [ResNet50_vd](https://bj.bcebos.com/paddlehub/fastdeploy/resnet50_vd_ptq.tar) | TensorRT | GPU | 4.92| 2.28|2.24|2.23 | 2.21 | 79.12 | 79.06 | 离线量化 |
|
||||
| [ResNet50_vd](https://bj.bcebos.com/paddlehub/fastdeploy/resnet50_vd_ptq.tar) | Paddle-TensorRT | GPU | 4.48|None |2.09|2.10 | 2.14 | 79.12 | 79.06 | 离线量化 |
|
||||
|
||||
2
examples/vision/classification/paddleclas/quantize/cpp/README.md
Normal file → Executable file
@@ -8,7 +8,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的inference_cls.yaml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的inference_cls.yaml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
|
||||
## 以量化后的ResNet50_Vd模型为例, 进行部署,支持此模型需保证FastDeploy版本0.7.0以上(x.x.x>=0.7.0)
|
||||
在本目录执行如下命令即可完成编译,以及量化模型部署.
|
||||
|
||||
2
examples/vision/classification/paddleclas/quantize/python/README.md
Normal file → Executable file
@@ -8,7 +8,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的inference_cls.yaml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的inference_cls.yaml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
|
||||
|
||||
## 以量化后的ResNet50_Vd模型为例, 进行部署
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# PaddleClas 量化模型在 RK1126 上的部署
|
||||
目前 FastDeploy 已经支持基于 PaddleLite 部署 PaddleClas 量化模型到 RK1126 上。
|
||||
# PaddleClas 量化模型在 RV1126 上的部署
|
||||
目前 FastDeploy 已经支持基于 PaddleLite 部署 PaddleClas 量化模型到 RV1126 上。
|
||||
|
||||
模型的量化和量化模型的下载请参考:[模型量化](../quantize/README.md)
|
||||
|
||||
|
||||
## 详细部署文档
|
||||
|
||||
在 RK1126 上只支持 C++ 的部署。
|
||||
在 RV1126 上只支持 C++ 的部署。
|
||||
|
||||
- [C++部署](cpp)
|
||||
@@ -1,22 +1,22 @@
|
||||
# PaddleClas RK1126开发板 C++ 部署示例
|
||||
本目录下提供的 `infer.cc`,可以帮助用户快速完成 PaddleClas 量化模型在 RK1126 上的部署推理加速。
|
||||
# PaddleClas RV1126 开发板 C++ 部署示例
|
||||
本目录下提供的 `infer.cc`,可以帮助用户快速完成 PaddleClas 量化模型在 RV1126 上的部署推理加速。
|
||||
|
||||
## 部署准备
|
||||
### FastDeploy 交叉编译环境准备
|
||||
- 1. 软硬件环境满足要求,以及交叉编译环境的准备,请参考:[FastDeploy 交叉编译环境准备](../../../../../../docs/cn/build_and_install/rk1126.md#交叉编译环境搭建)
|
||||
- 1. 软硬件环境满足要求,以及交叉编译环境的准备,请参考:[FastDeploy 交叉编译环境准备](../../../../../../docs/cn/build_and_install/rv1126.md#交叉编译环境搭建)
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由 FastDeploy 提供的量化模型进行部署。
|
||||
- 2. 用户可以使用 FastDeploy 提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署。(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的inference_cls.yaml文件, 自行量化的模型文件夹内不包含此 yaml 文件, 用户从 FP32 模型文件夹下复制此 yaml 文件到量化后的模型文件夹内即可.)
|
||||
- 2. 用户可以使用 FastDeploy 提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署。(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的inference_cls.yaml文件, 自行量化的模型文件夹内不包含此 yaml 文件, 用户从 FP32 模型文件夹下复制此 yaml 文件到量化后的模型文件夹内即可.)
|
||||
- 更多量化相关相关信息可查阅[模型量化](../../quantize/README.md)
|
||||
|
||||
## 在 RK1126 上部署量化后的 ResNet50_Vd 分类模型
|
||||
请按照以下步骤完成在 RK1126 上部署 ResNet50_Vd 量化模型:
|
||||
1. 交叉编译编译 FastDeploy 库,具体请参考:[交叉编译 FastDeploy](../../../../../../docs/cn/build_and_install/rk1126.md#基于-paddlelite-的-fastdeploy-交叉编译库编译)
|
||||
## 在 RV1126 上部署量化后的 ResNet50_Vd 分类模型
|
||||
请按照以下步骤完成在 RV1126 上部署 ResNet50_Vd 量化模型:
|
||||
1. 交叉编译编译 FastDeploy 库,具体请参考:[交叉编译 FastDeploy](../../../../../../docs/cn/build_and_install/rv1126.md#基于-paddlelite-的-fastdeploy-交叉编译库编译)
|
||||
|
||||
2. 将编译后的库拷贝到当前目录,可使用如下命令:
|
||||
```bash
|
||||
cp -r FastDeploy/build/fastdeploy-tmivx/ FastDeploy/examples/vision/classification/paddleclas/rk1126/cpp/
|
||||
cp -r FastDeploy/build/fastdeploy-tmivx/ FastDeploy/examples/vision/classification/paddleclas/rv1126/cpp/
|
||||
```
|
||||
|
||||
3. 在当前路径下载部署所需的模型和示例图片:
|
||||
@@ -32,7 +32,7 @@ cp -r ILSVRC2012_val_00000010.jpeg images
|
||||
4. 编译部署示例,可使入如下命令:
|
||||
```bash
|
||||
mkdir build && cd build
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE=../fastdeploy-tmivx/timvx.cmake -DFASTDEPLOY_INSTALL_DIR=fastdeploy-tmivx ..
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/timvx.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx ..
|
||||
make -j8
|
||||
make install
|
||||
# 成功编译之后,会生成 install 文件夹,里面有一个运行 demo 和部署所需的库
|
||||
@@ -41,7 +41,7 @@ make install
|
||||
5. 基于 adb 工具部署 ResNet50_vd 分类模型到 Rockchip RV1126,可使用如下命令:
|
||||
```bash
|
||||
# 进入 install 目录
|
||||
cd FastDeploy/examples/vision/classification/paddleclas/rk1126/cpp/build/install/
|
||||
cd FastDeploy/examples/vision/classification/paddleclas/rv1126/cpp/build/install/
|
||||
# 如下命令表示:bash run_with_adb.sh 需要运行的demo 模型路径 图片路径 设备的DEVICE_ID
|
||||
bash run_with_adb.sh infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg $DEVICE_ID
|
||||
```
|
||||
@@ -50,4 +50,4 @@ bash run_with_adb.sh infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg $
|
||||
|
||||
<img width="640" src="https://user-images.githubusercontent.com/30516196/200767389-26519e50-9e4f-4fe1-8d52-260718f73476.png">
|
||||
|
||||
需要特别注意的是,在 RK1126 上部署的模型需要是量化后的模型,模型的量化请参考:[模型量化](../../../../../../docs/cn/quantize.md)
|
||||
需要特别注意的是,在 RV1126 上部署的模型需要是量化后的模型,模型的量化请参考:[模型量化](../../../../../../docs/cn/quantize.md)
|
||||
6
examples/vision/detection/paddledetection/quantize/README.md
Normal file → Executable file
@@ -4,14 +4,14 @@ FastDeploy已支持部署量化模型,并提供一键模型自动化压缩的工
|
||||
|
||||
## FastDeploy一键模型自动化压缩工具
|
||||
FastDeploy 提供了一键模型自动化压缩工具, 能够简单地通过输入一个配置文件, 对模型进行量化.
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/auto_compression/)
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/common_tools/auto_compression/)
|
||||
|
||||
## 下载量化完成的PP-YOLOE-l模型
|
||||
用户也可以直接下载下表中的量化模型进行部署.(点击模型名字即可下载)
|
||||
|
||||
|
||||
Benchmark表格说明:
|
||||
- Rtuntime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- Runtime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- 端到端时延为模型在实际推理场景中的时延, 包含模型的前后处理.
|
||||
- 所测时延均为推理1000次后求得的平均值, 单位是毫秒.
|
||||
- INT8 + FP16 为在推理INT8量化模型的同时, 给Runtime 开启FP16推理选项
|
||||
@@ -32,7 +32,7 @@ NOTE:
|
||||
- TensorRT比Paddle-TensorRT快的原因是在runtime移除了multiclass_nms3算子
|
||||
|
||||
#### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [ppyoloe_crn_l_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco_qat.tar ) | TensorRT | GPU | 35.75 | 15.42 |20.70|20.85 | 2.32 | 51.4 | 50.7 | 量化蒸馏训练 |
|
||||
| [ppyoloe_crn_l_300e_coco](https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco_qat.tar ) | Paddle-TensorRT | GPU | 33.48 |None | 18.47 |18.03 | 1.81 | 51.4 | 50.5 | 量化蒸馏训练 |
|
||||
|
||||
2
examples/vision/detection/paddledetection/quantize/cpp/README.md
Normal file → Executable file
@@ -9,7 +9,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的infer_cfg.yml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的检测模型仍然需要FP32模型文件夹下的infer_cfg.yml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
|
||||
## 以量化后的PP-YOLOE-l模型为例, 进行部署。支持此模型需保证FastDeploy版本0.7.0以上(x.x.x>=0.7.0)
|
||||
在本目录执行如下命令即可完成编译,以及量化模型部署.
|
||||
|
||||
2
examples/vision/detection/paddledetection/quantize/python/README.md
Normal file → Executable file
@@ -8,7 +8,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的infer_cfg.yml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的infer_cfg.yml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
|
||||
|
||||
## 以量化后的PP-YOLOE-l模型为例, 进行部署
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
DETECTION_MODEL_DIR="$(pwd)/picodet_detection/models"
|
||||
LIBS_DIR="$(pwd)"
|
||||
|
||||
DETECTION_MODEL_URL="https://paddlelite-demo.bj.bcebos.com/Paddle-Lite-Demo/models/picodetv2_relu6_coco_no_fuse.tar.gz"
|
||||
LIBS_URL="https://paddlelite-demo.bj.bcebos.com/Paddle-Lite-Demo/Paddle-Lite-libs.tar.gz"
|
||||
|
||||
download_and_uncompress() {
|
||||
local url="$1"
|
||||
local dir="$2"
|
||||
|
||||
echo "Start downloading ${url}"
|
||||
curl -L ${url} > ${dir}/download.tar.gz
|
||||
cd ${dir}
|
||||
tar -zxvf download.tar.gz
|
||||
rm -f download.tar.gz
|
||||
}
|
||||
|
||||
download_and_uncompress "${DETECTION_MODEL_URL}" "${DETECTION_MODEL_DIR}"
|
||||
download_and_uncompress "${LIBS_URL}" "${LIBS_DIR}"
|
||||
|
||||
echo "Download successful!"
|
||||
@@ -1,62 +0,0 @@
|
||||
cmake_minimum_required(VERSION 3.10)
|
||||
set(CMAKE_SYSTEM_NAME Linux)
|
||||
if(TARGET_ARCH_ABI STREQUAL "armv8")
|
||||
set(CMAKE_SYSTEM_PROCESSOR aarch64)
|
||||
set(CMAKE_C_COMPILER "aarch64-linux-gnu-gcc")
|
||||
set(CMAKE_CXX_COMPILER "aarch64-linux-gnu-g++")
|
||||
elseif(TARGET_ARCH_ABI STREQUAL "armv7hf")
|
||||
set(CMAKE_SYSTEM_PROCESSOR arm)
|
||||
set(CMAKE_C_COMPILER "arm-linux-gnueabihf-gcc")
|
||||
set(CMAKE_CXX_COMPILER "arm-linux-gnueabihf-g++")
|
||||
else()
|
||||
message(FATAL_ERROR "Unknown arch abi ${TARGET_ARCH_ABI}, only support armv8 and armv7hf.")
|
||||
return()
|
||||
endif()
|
||||
|
||||
project(object_detection_demo)
|
||||
|
||||
message(STATUS "TARGET ARCH ABI: ${TARGET_ARCH_ABI}")
|
||||
message(STATUS "PADDLE LITE DIR: ${PADDLE_LITE_DIR}")
|
||||
include_directories(${PADDLE_LITE_DIR}/include)
|
||||
link_directories(${PADDLE_LITE_DIR}/libs/${TARGET_ARCH_ABI})
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
|
||||
if(TARGET_ARCH_ABI STREQUAL "armv8")
|
||||
set(CMAKE_CXX_FLAGS "-march=armv8-a ${CMAKE_CXX_FLAGS}")
|
||||
set(CMAKE_C_FLAGS "-march=armv8-a ${CMAKE_C_FLAGS}")
|
||||
elseif(TARGET_ARCH_ABI STREQUAL "armv7hf")
|
||||
set(CMAKE_CXX_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_CXX_FLAGS}")
|
||||
set(CMAKE_C_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_C_FLAGS}" )
|
||||
endif()
|
||||
|
||||
include_directories(${PADDLE_LITE_DIR}/libs/${TARGET_ARCH_ABI}/third_party/yaml-cpp/include)
|
||||
link_directories(${PADDLE_LITE_DIR}/libs/${TARGET_ARCH_ABI}/third_party/yaml-cpp)
|
||||
|
||||
find_package(OpenMP REQUIRED)
|
||||
if(OpenMP_FOUND OR OpenMP_CXX_FOUND)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
|
||||
message(STATUS "Found OpenMP ${OpenMP_VERSION} ${OpenMP_CXX_VERSION}")
|
||||
message(STATUS "OpenMP C flags: ${OpenMP_C_FLAGS}")
|
||||
message(STATUS "OpenMP CXX flags: ${OpenMP_CXX_FLAGS}")
|
||||
message(STATUS "OpenMP OpenMP_CXX_LIB_NAMES: ${OpenMP_CXX_LIB_NAMES}")
|
||||
message(STATUS "OpenMP OpenMP_CXX_LIBRARIES: ${OpenMP_CXX_LIBRARIES}")
|
||||
else()
|
||||
message(FATAL_ERROR "Could not found OpenMP!")
|
||||
return()
|
||||
endif()
|
||||
find_package(OpenCV REQUIRED)
|
||||
if(OpenCV_FOUND OR OpenCV_CXX_FOUND)
|
||||
include_directories(${OpenCV_INCLUDE_DIRS})
|
||||
message(STATUS "OpenCV library status:")
|
||||
message(STATUS " version: ${OpenCV_VERSION}")
|
||||
message(STATUS " libraries: ${OpenCV_LIBS}")
|
||||
message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}")
|
||||
else()
|
||||
message(FATAL_ERROR "Could not found OpenCV!")
|
||||
return()
|
||||
endif()
|
||||
|
||||
|
||||
add_executable(object_detection_demo object_detection_demo.cc)
|
||||
|
||||
target_link_libraries(object_detection_demo paddle_full_api_shared dl ${OpenCV_LIBS} yaml-cpp)
|
||||
@@ -1,343 +0,0 @@
|
||||
# 目标检测 C++ API Demo 使用指南
|
||||
|
||||
在 ARMLinux 上实现实时的目标检测功能,此 Demo 有较好的的易用性和扩展性,如在 Demo 中跑自己训练好的模型等。
|
||||
- 如果该开发板使用搭载了芯原 NPU (瑞芯微、晶晨、JQL、恩智浦)的 Soc,将有更好的加速效果。
|
||||
|
||||
## 如何运行目标检测 Demo
|
||||
|
||||
### 环境准备
|
||||
|
||||
* 准备 ARMLiunx 开发版,将系统刷为 Ubuntu,用于 Demo 编译和运行。请注意,本 Demo 是使用板上编译,而非交叉编译,因此需要图形界面的开发板操作系统。
|
||||
* 如果需要使用 芯原 NPU 的计算加速,对 NPU 驱动版本有严格要求,请务必注意事先参考 [芯原 TIM-VX 部署示例](https://paddle-lite.readthedocs.io/zh/develop/demo_guides/verisilicon_timvx.html#id6),将 NPU 驱动改为要求的版本。
|
||||
* Paddle Lite 当前已验证的开发板为 Khadas VIM3(芯片为 Amlogic A311d)、荣品 RV1126、荣品RV1109,其它平台用户可自行尝试;
|
||||
- Khadas VIM3:由于 VIM3 出厂自带 Android 系统,请先刷成 Ubuntu 系统,在此提供刷机教程:[VIM3/3L Linux 文档](https://docs.khadas.com/linux/zh-cn/vim3),其中有详细描述刷机方法。以及系统镜像:VIM3 Linux:VIM3_Ubuntu-gnome-focal_Linux-4.9_arm64_EMMC_V1.0.7-210625:[官方链接](http://dl.khadas.com/firmware/VIM3/Ubuntu/EMMC/VIM3_Ubuntu-gnome-focal_Linux-4.9_arm64_EMMC_V1.0.7-210625.img.xz);[百度云备用链接](https://paddlelite-demo.bj.bcebos.com/devices/verisilicon/firmware/khadas/vim3/VIM3_Ubuntu-gnome-focal_Linux-4.9_arm64_EMMC_V1.0.7-210625.img.xz)
|
||||
- 荣品 RV1126、1109:由于出场自带 buildroot 系统,如果使用 GUI 界面的 demo,请先刷成 Ubuntu 系统,在此提供刷机教程:[RV1126/1109 教程](https://paddlelite-demo.bj.bcebos.com/Paddle-Lite-Demo/os_img/rockchip/RV1126-RV1109%E4%BD%BF%E7%94%A8%E6%8C%87%E5%AF%BC%E6%96%87%E6%A1%A3-V3.0.pdf),[刷机工具](https://paddlelite-demo.bj.bcebos.com/Paddle-Lite-Demo/os_img/rockchip/RKDevTool_Release.zip),以及镜像:[1126镜像](https://paddlelite-demo.bj.bcebos.com/Paddle-Lite-Demo/os_img/update-pro-rv1126-ubuntu20.04-5-720-1280-v2-20220505.img),[1109镜像](https://paddlelite-demo.bj.bcebos.com/Paddle-Lite-Demo/os_img/update-pro-rv1109-ubuntu20.04-5.5-720-1280-v2-20220429.img)。完整的文档和各种镜像请参考[百度网盘链接](https://pan.baidu.com/s/1Id0LMC0oO2PwR2YcYUAaiQ#list/path=%2F&parentPath=%2Fsharelink2521613171-184070898837664),密码:2345。
|
||||
* 准备 usb camera,注意使用 openCV capture 图像时,请注意 usb camera 的 video序列号作为入参。
|
||||
* 请注意,瑞芯微芯片不带有 HDMI 接口,图像显示是依赖 MIPI DSI,所以请准备好 MIPI 显示屏(我们提供的镜像是 720*1280 分辨率,网盘中有更多分辨率选择,注意:请选择 camera-gc2093x2 的镜像)。
|
||||
* 配置开发板的网络。如果是办公网络红区,可以将开发板和PC用以太网链接,然后PC共享网络给开发板。
|
||||
* gcc g++ opencv cmake 的安装(以下所有命令均在设备上操作)
|
||||
|
||||
```bash
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install gcc g++ make wget unzip libopencv-dev pkg-config
|
||||
$ wget https://www.cmake.org/files/v3.10/cmake-3.10.3.tar.gz
|
||||
$ tar -zxvf cmake-3.10.3.tar.gz
|
||||
$ cd cmake-3.10.3
|
||||
$ ./configure
|
||||
$ make
|
||||
$ sudo make install
|
||||
```
|
||||
|
||||
### 部署步骤
|
||||
|
||||
1. 将本 repo 上传至 VIM3 开发板,或者直接开发板上下载或者 git clone 本 repo
|
||||
2. 目标检测 Demo 位于 `Paddle-Lite-Demo/object_detection/linux/picodet_detection` 目录
|
||||
3. 进入 `Paddle-Lite-Demo/object_detection/linux` 目录, 终端中执行 `download_models_and_libs.sh` 脚本自动下载模型和 Paddle Lite 预测库
|
||||
|
||||
```shell
|
||||
cd Paddle-Lite-Demo/object_detection/linux # 1. 终端中进入 Paddle-Lite-Demo/object_detection/linux
|
||||
sh download_models_and_libs.sh # 2. 执行脚本下载依赖项 (需要联网)
|
||||
```
|
||||
|
||||
下载完成后会出现提示: `Download successful!`
|
||||
4. 执行用例(保证 ARMLinux 环境准备完成)
|
||||
|
||||
```shell
|
||||
cd picodet_detection # 1. 终端中进入
|
||||
sh build.sh armv8 # 2. 编译 Demo 可执行程序,默认编译 armv8,如果是 32bit 环境,则改成 sh build.sh armv7hf。
|
||||
sh run.sh armv8 # 3. 执行物体检测(picodet 模型) demo,会直接开启摄像头,启动图形界面并呈现检测结果。如果是 32bit 环境,则改成 sh run.sh armv7hf
|
||||
```
|
||||
|
||||
### Demo 结果如下:(注意,示例的 picodet 仅使用 coco 数据集,在实际场景中效果一般,请使用实际业务场景重新训练)
|
||||
|
||||
<img src="https://paddlelite-demo.bj.bcebos.com/Paddle-Lite-Demo/demo_view.jpg" alt="demo_view" style="zoom: 10%;" />
|
||||
|
||||
## 更新预测库
|
||||
|
||||
* Paddle Lite 项目:https://github.com/PaddlePaddle/Paddle-Lite
|
||||
* 参考 [芯原 TIM-VX 部署示例](https://paddle-lite.readthedocs.io/zh/develop/demo_guides/verisilicon_timvx.html#tim-vx),编译预测库
|
||||
* 编译最终产物位于 `build.lite.xxx.xxx.xxx` 下的 `inference_lite_lib.xxx.xxx`
|
||||
* 替换 c++ 库
|
||||
* 头文件
|
||||
将生成的 `build.lite.linux.armv8.gcc/inference_lite_lib.armlinux.armv8.nnadapter/cxx/include` 文件夹替换 Demo 中的 `Paddle-Lite-Demo/object_detection/linux/Paddle-Lite/include`
|
||||
* armv8
|
||||
将生成的 `build.lite.linux.armv8.gcc/inference_lite_lib.armlinux.armv8.nnadapter/cxx/libs/libpaddle_full_api_shared.so、libnnadapter.so、libtim-vx.so、libverisilicon_timvx.so` 库替换 Demo 中的 `Paddle-Lite-Demo/object_detection/linux/Paddle-Lite/libs/armv8/` 目录下同名 so
|
||||
* armv7hf
|
||||
将生成的 `build.lite.linux.armv7hf.gcc/inference_lite_lib.armlinux.armv7hf.nnadapter/cxx/libs/libpaddle_full_api_shared.so、libnnadapter.so、libtim-vx.so、libverisilicon_timvx.so` 库替换 Demo 中的 `Paddle-Lite-Demo/object_detection/linux/Paddle-Lite/libs/armv7hf/` 目录下同名 so
|
||||
|
||||
## Demo 内容介绍
|
||||
|
||||
先整体介绍下目标检测 Demo 的代码结构,然后再简要地介绍 Demo 每部分功能.
|
||||
|
||||
1. `object_detection_demo.cc`: C++ 预测代码
|
||||
|
||||
```shell
|
||||
# 位置:
|
||||
Paddle-Lite-Demo/object_detection/linux/picodet_detection/object_detection_demo.cc
|
||||
```
|
||||
|
||||
2. `models` : 模型文件夹 (执行 download_models_and_libs.sh 后会下载 picodet Paddle 模型), label 使用 Paddle-Lite-Demo/object_detection/assets/labels 目录下 coco_label_list.txt
|
||||
|
||||
```shell
|
||||
# 位置:
|
||||
Paddle-Lite-Demo/object_detection/linux/picodet_detection/models/picodetv2_relu6_coco_no_fuse
|
||||
Paddle-Lite-Demo/object_detection/assets/labels/coco_label_list.txt
|
||||
```
|
||||
|
||||
3. `Paddle-Lite`:内含 Paddle-Lite 头文件和 动态库,默认带有 timvx 加速库,以及第三方库 yaml-cpp 用于解析 yml 配置文件(执行 download_models_and_libs.sh 后会下载)
|
||||
|
||||
```shell
|
||||
# 位置
|
||||
# 如果要替换动态库 so,则将新的动态库 so 更新到此目录下
|
||||
Paddle-Lite-Demo/object_detection/linux/Paddle-Lite/libs/armv8
|
||||
Paddle-Lite-Demo/object_detection/linux/Paddle-Lite/include
|
||||
```
|
||||
|
||||
4. `CMakeLists.txt` : C++ 预测代码的编译脚本,用于生成可执行文件
|
||||
|
||||
```shell
|
||||
# 位置
|
||||
Paddle-Lite-Demo/object_detection/linux/picodet_detection/CMakeLists.txt
|
||||
# 如果有cmake 编译选项更新,可以在 CMakeLists.txt 进行修改即可,默认编译 armv8 可执行文件;
|
||||
```
|
||||
|
||||
5. `build.sh` : 编译脚本
|
||||
|
||||
```shell
|
||||
# 位置
|
||||
Paddle-Lite-Demo/object_detection/linux/picodet_detection/build.sh
|
||||
```
|
||||
|
||||
6. `run.sh` : 运行脚本,请注意设置目标架构(armv8 或者 armv7hf),默认为 armv8
|
||||
|
||||
```shell
|
||||
# 位置
|
||||
Paddle-Lite-Demo/object_detection/linux/picodet_detection/run.sh
|
||||
```
|
||||
- 请注意,运行需要5个元素:测试程序、模型、label 文件、异构配置、yaml 文件。
|
||||
|
||||
## 代码讲解 (使用 Paddle Lite `C++ API` 执行预测)
|
||||
|
||||
ARMLinux 示例基于 C++ API 开发,调用 Paddle Lite `C++ API` 主要包括以下几个步骤。更详细的 `API` 描述参考:[Paddle Lite C++ API ](https://paddle-lite.readthedocs.io/zh/latest/api_reference/cxx_api_doc.html)。
|
||||
|
||||
```c++
|
||||
#include <iostream>
|
||||
// 引入 C++ API
|
||||
#include "include/paddle_api.h"
|
||||
#include "include/paddle_use_ops.h"
|
||||
#include "include/paddle_use_kernels.h"
|
||||
|
||||
// 使用在线编译模型的方式(等价于使用 opt 工具)
|
||||
|
||||
// 1. 设置 CxxConfig
|
||||
paddle::lite_api::CxxConfig cxx_config;
|
||||
std::vector<paddle::lite_api::Place> valid_places;
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
|
||||
// 如果只需要 cpu 计算,那到此结束即可,下面是设置 NPU 的代码段
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kInt8)});
|
||||
cxx_config.set_valid_places(valid_places);
|
||||
std::string device = "verisilicon_timvx";
|
||||
cxx_config.set_nnadapter_device_names({device});
|
||||
// 设置定制化的异构策略 (如需要)
|
||||
cxx_config.set_nnadapter_subgraph_partition_config_buffer(
|
||||
nnadapter_subgraph_partition_config_string);
|
||||
|
||||
// 2. 生成 nb 模型 (等价于 opt 工具的产出)
|
||||
std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor = nullptr;
|
||||
predictor = paddle::lite_api::CreatePaddlePredictor(cxx_config);
|
||||
predictor->SaveOptimizedModel(
|
||||
model_path, paddle::lite_api::LiteModelType::kNaiveBuffer);
|
||||
|
||||
// 3. 设置 MobileConfig
|
||||
MobileConfig config;
|
||||
config.set_model_from_file(modelPath); // 设置 NaiveBuffer 格式模型路径
|
||||
config.set_power_mode(LITE_POWER_NO_BIND); // 设置 CPU 运行模式
|
||||
config.set_threads(4); // 设置工作线程数
|
||||
|
||||
// 4. 创建 PaddlePredictor
|
||||
predictor = CreatePaddlePredictor<MobileConfig>(config);
|
||||
|
||||
// 5. 设置输入数据,注意,如果是带后处理的 picodet ,则是有两个输入
|
||||
std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
|
||||
input_tensor->Resize({1, 3, 416, 416});
|
||||
auto* data = input_tensor->mutable_data<float>();
|
||||
// scale_factor tensor
|
||||
auto scale_factor_tensor = predictor->GetInput(1);
|
||||
scale_factor_tensor->Resize({1, 2});
|
||||
auto scale_factor_data = scale_factor_tensor->mutable_data<float>();
|
||||
scale_factor_data[0] = 1.0f;
|
||||
scale_factor_data[1] = 1.0f;
|
||||
|
||||
// 6. 执行预测
|
||||
predictor->run();
|
||||
|
||||
// 7. 获取输出数据
|
||||
std::unique_ptr<const Tensor> output_tensor(std::move(predictor->GetOutput(0)));
|
||||
|
||||
```
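
上述第 7 步拿到输出后,可以按照本文后处理的输出布局(每 6 个 float 表示一个检测框:类别、得分、左、上、右、下)做最简单的解析。以下仅为示意代码,沿用第 7 步得到的 `output_tensor`,并假设输出布局与本文 `postprocess` 一致:

```c++
// 仅为示意:按每 6 个 float 一个检测框的布局遍历输出(沿用上文第 7 步的 output_tensor)
int64_t output_size = 1;
for (auto dim : output_tensor->shape()) {
  output_size *= dim;
}
const float *output_data = output_tensor->data<float>();
for (int64_t i = 0; i < output_size; i += 6) {
  int class_id = static_cast<int>(output_data[i]);
  float score = output_data[i + 1];
  if (score < 0.35f) continue;  // 得分阈值,可按业务需要调整
  // output_data[i + 2] ~ output_data[i + 5] 为框坐标(相对 416x416 的输入尺寸)
  printf("class=%d score=%f\n", class_id, score);
}
```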
|
||||
|
||||
## 如何更新模型和输入/输出预处理
|
||||
|
||||
### 更新模型
|
||||
1. 请参考 PaddleDetection 中 [picodet 重训和全量化文档](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/picodet/FULL_QUANTIZATION.md),基于用户自己数据集重训并且重新全量化
|
||||
2. 将模型存放到目录 `object_detection_demo/models/` 下;
|
||||
3. 模型名字跟工程中模型名字一模一样,即均是使用 `model`、`params`;
|
||||
|
||||
```shell
|
||||
# shell 脚本 `object_detection_demo/run.sh`
|
||||
TARGET_ABI=armv8 # for 64bit, such as Amlogic A311D
|
||||
#TARGET_ABI=armv7hf # for 32bit, such as Rockchip 1109/1126
|
||||
if [ -n "$1" ]; then
|
||||
TARGET_ABI=$1
|
||||
fi
|
||||
export LD_LIBRARY_PATH=../Paddle-Lite/libs/$TARGET_ABI/
|
||||
export GLOG_v=0 # Paddle-Lite 日志等级
|
||||
export VSI_NN_LOG_LEVEL=0 # TIM-VX 日志等级
|
||||
export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1 # NPU 开启 perchannel 量化模型
|
||||
export VIV_VX_SET_PER_CHANNEL_ENTROPY=100 # 同上
|
||||
build/object_detection_demo models/picodetv2_relu6_coco_no_fuse ../../assets/labels/coco_label_list.txt models/picodetv2_relu6_coco_no_fuse/subgraph.txt models/picodetv2_relu6_coco_no_fuse/picodet.yml # 执行 Demo 程序,4个 arg 分别为:模型、 label 文件、 自定义异构配置、 yaml
|
||||
```
|
||||
|
||||
- 如果需要更新 `label_list` 或者 `yaml` 文件,则修改 `object_detection_demo/run.sh` 中执行命令的第二个和第四个 arg 指定为新的 label 文件和 yaml 配置文件;
|
||||
|
||||
```shell
|
||||
# 代码文件 `object_detection_demo/run.sh`
|
||||
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${PADDLE_LITE_DIR}/libs/${TARGET_ARCH_ABI}
|
||||
build/object_detection_demo {模型} {label} {自定义异构配置文件} {yaml}
|
||||
```
|
||||
|
||||
### 更新输入/输出预处理
|
||||
|
||||
1. 更新输入预处理
|
||||
预处理完全根据 yaml 文件来,如果完全按照 PaddleDetection 中 picodet 重训,只需要替换 yaml 文件即可
|
||||
|
||||
2. 更新输出预处理
|
||||
此处需要更新 `object_detection_demo/object_detection_demo.cc` 中的 `postprocess` 方法
|
||||
|
||||
```c++
|
||||
std::vector<RESULT> postprocess(const float *output_data, int64_t output_size,
|
||||
const std::vector<std::string> &word_labels,
|
||||
const float score_threshold,
|
||||
cv::Mat &output_image, double time) {
|
||||
std::vector<RESULT> results;
|
||||
std::vector<cv::Scalar> colors = {
|
||||
cv::Scalar(237, 189, 101), cv::Scalar(0, 0, 255),
|
||||
cv::Scalar(102, 153, 153), cv::Scalar(255, 0, 0),
|
||||
cv::Scalar(9, 255, 0), cv::Scalar(0, 0, 0),
|
||||
cv::Scalar(51, 153, 51)};
|
||||
for (int64_t i = 0; i < output_size; i += 6) {
|
||||
if (output_data[i + 1] < score_threshold) {
|
||||
continue;
|
||||
}
|
||||
int class_id = static_cast<int>(output_data[i]);
|
||||
float score = output_data[i + 1];
|
||||
RESULT result;
|
||||
std::string class_name = "Unknown";
|
||||
if (word_labels.size() > 0 && class_id >= 0 &&
|
||||
class_id < word_labels.size()) {
|
||||
class_name = word_labels[class_id];
|
||||
}
|
||||
result.class_name = class_name;
|
||||
result.score = score;
|
||||
result.left = output_data[i + 2] / 416; // 此处416根据输入的 HW 得来
|
||||
result.top = output_data[i + 3] / 416;
|
||||
result.right = output_data[i + 4] / 416;
|
||||
result.bottom = output_data[i + 5] / 416;
|
||||
int lx = static_cast<int>(result.left * output_image.cols);
|
||||
int ly = static_cast<int>(result.top * output_image.rows);
|
||||
int w = static_cast<int>(result.right * output_image.cols) - lx;
|
||||
int h = static_cast<int>(result.bottom * output_image.rows) - ly;
|
||||
cv::Rect bounding_box =
|
||||
cv::Rect(lx, ly, w, h) &
|
||||
cv::Rect(0, 0, output_image.cols, output_image.rows);
|
||||
if (w > 0 && h > 0 && score <= 1) {
|
||||
cv::Scalar color = colors[results.size() % colors.size()];
|
||||
cv::rectangle(output_image, bounding_box, color);
|
||||
cv::rectangle(output_image, cv::Point2d(lx, ly),
|
||||
cv::Point2d(lx + w, ly - 10), color, -1);
|
||||
cv::putText(output_image, std::to_string(results.size()) + "." +
|
||||
class_name + ":" + std::to_string(score),
|
||||
cv::Point2d(lx, ly), cv::FONT_HERSHEY_PLAIN, 1,
|
||||
cv::Scalar(255, 255, 255));
|
||||
results.push_back(result);
|
||||
}
|
||||
}
|
||||
return results;
|
||||
}
|
||||
```
|
||||
|
||||
## 更新模型后,自定义 NPU-CPU 异构配置(如需使用 NPU 加速)
|
||||
由于使用芯原 NPU 在 8bit 量化的情况下有最优的性能,因此部署时,我们往往会考虑量化
|
||||
- 由于量化可能会引入一定程度的精度问题,所以我们可以通过自定义的异构定制,来将部分有精度问题的 layer 异构至cpu,从而达到最优的精度
|
||||
|
||||
### 第一步,确定模型量化后在 arm cpu 上的精度
|
||||
如果在 arm cpu 上,精度都无法满足,那量化本身就是失败的,此时可以考虑修改训练集或者预处理。
|
||||
- 修改 Demo 程序,仅用 arm cpu 计算
|
||||
```c++
|
||||
paddle::lite_api::CxxConfig cxx_config;
|
||||
std::vector<paddle::lite_api::Place> valid_places;
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
|
||||
// 仅用 arm cpu 计算, 注释如下代码即可
|
||||
/*
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kInt8)});
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kFloat)});
|
||||
*/
|
||||
```
|
||||
如果 arm cpu 计算结果精度达标,则继续
|
||||
|
||||
### 第二步,验证 NPU 加速下的精度
|
||||
- 回退第一步的修改,不再注释,使用 NPU 加速
|
||||
- 运行 Demo,如果此时精度良好,则无需参考后面步骤,模型部署和替换非常顺利,enjoy it。
|
||||
- 如果精度不行,请参考后续步骤。
|
||||
|
||||
### 第三步,获取整网拓扑信息
|
||||
- 回退第一步的修改,仍然使用 NPU 加速
|
||||
- 修改 run.sh ,将其中 export GLOG_v=0 改为 export GLOG_v=5
|
||||
- 运行 Demo,等摄像头启动,即可 ctrl+c 关闭 Demo
|
||||
- 收集日志,搜索关键字 "subgraph operators" 随后那一段,便是整个模型的拓扑信息,其格式如下:
|
||||
- 每行记录由『算子类型:输入张量名列表:输出张量名列表』组成(即以分号分隔算子类型、输入和输出张量名列表),以逗号分隔输入、输出张量名列表中的每个张量名;
|
||||
- 示例说明:
|
||||
```
|
||||
op_type0:var_name0,var_name1:var_name2 表示将算子类型为 op_type0、输入张量为var_name0 和 var_name1、输出张量为 var_name2 的节点强制运行在 ARM CPU 上
|
||||
```
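
拿到需要异构至 arm cpu 的记录后,可以把它们逐行写入 subgraph.txt,再按本文 Demo 中 `read_file` 的思路读入并传给 `CxxConfig`。以下仅为示意代码,文件路径为示例:

```c++
// 仅为示意:读取 subgraph.txt,并将其内容设置为自定义异构(强制在 arm cpu 上计算)配置
#include <fstream>
#include <string>
#include "paddle_api.h"

void set_custom_partition(paddle::lite_api::CxxConfig &cxx_config,
                          const std::string &subgraph_path) {
  std::ifstream fin(subgraph_path);  // 例如 models/picodetv2_relu6_coco_no_fuse/subgraph.txt
  std::string line;
  std::string config;
  while (std::getline(fin, line)) {
    config += line + "\n";  // 每行一条『算子类型:输入张量名列表:输出张量名列表』记录
  }
  cxx_config.set_nnadapter_subgraph_partition_config_buffer(config);
}
```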
|
||||
|
||||
### 第四步,修改异构配置文件
|
||||
- 首先查看示例 Demo 中 Paddle-Lite-Demo/object_detection/linux/picodet_detection/models/picodetv2_relu6_coco_no_fuse 目录下的 subgraph.txt 文件。(feed 和 fetch 分别代表整个模型的输入和输出)
|
||||
```
|
||||
feed:feed:scale_factor
|
||||
feed:feed:image
|
||||
|
||||
sqrt:tmp_3:sqrt_0.tmp_0
|
||||
reshape2:sqrt_0.tmp_0:reshape2_0.tmp_0,reshape2_0.tmp_1
|
||||
|
||||
matmul_v2:softmax_0.tmp_0,auto_113_:linear_0.tmp_0
|
||||
reshape2:linear_0.tmp_0:reshape2_2.tmp_0,reshape2_2.tmp_1
|
||||
|
||||
sqrt:tmp_6:sqrt_1.tmp_0
|
||||
reshape2:sqrt_1.tmp_0:reshape2_3.tmp_0,reshape2_3.tmp_1
|
||||
|
||||
matmul_v2:softmax_1.tmp_0,auto_113_:linear_1.tmp_0
|
||||
reshape2:linear_1.tmp_0:reshape2_5.tmp_0,reshape2_5.tmp_1
|
||||
|
||||
sqrt:tmp_9:sqrt_2.tmp_0
|
||||
reshape2:sqrt_2.tmp_0:reshape2_6.tmp_0,reshape2_6.tmp_1
|
||||
|
||||
matmul_v2:softmax_2.tmp_0,auto_113_:linear_2.tmp_0
|
||||
...
|
||||
```
|
||||
- 在 txt 中的都是需要异构至 cpu 计算的 layer,在示例 Demo 中,我们把 picodet 后处理的部分异构至 arm cpu 做计算,不必担心,Paddle-Lite 的 arm kernel 性能也是非常卓越。
|
||||
- 如果新训练的模型没有额外修改 layer,则直接复制使用示例 Demo 中的 subgraph.txt 即可
|
||||
- 此时 ./run.sh 看看精度是否符合预期,如果精度符合预期,恭喜,可以跳过本章节,enjoy it。
|
||||
- 如果精度不符合预期,则将上文『第三步,获取整网拓扑信息』中获取的拓扑信息,从 "feed" 之后第一行,直到 "sqrt" 之前,都复制进 subgraph.txt。这一步代表把大量 backbone 部分的算子放到 arm cpu 上计算。
|
||||
- 此时 ./run.sh 看看精度是否符合预期,如果精度达标,那说明 backbone 中确实存在引入 NPU 精度异常的层(再次重申,写入 subgraph.txt 的 layer 代表强制在 arm cpu 上计算)。
|
||||
- 逐行删除、成片删除、二分法,发挥开发人员的耐心,找到引入 NPU 精度异常的 layer,将其留在 subgraph.txt 中,按照经验,如果有 NPU 精度问题,可能会有 1~5 层conv layer 需要异构。
|
||||
- 剩余没有精度问题的 layer 在 subgraph.txt 中删除即可
|
||||
@@ -1,17 +0,0 @@
|
||||
#!/bin/bash
|
||||
USE_FULL_API=TRUE
|
||||
# configure
|
||||
TARGET_ARCH_ABI=armv8 # for RK3399, set to default arch abi
|
||||
#TARGET_ARCH_ABI=armv7hf # for Raspberry Pi 3B
|
||||
PADDLE_LITE_DIR=../Paddle-Lite
|
||||
THIRD_PARTY_DIR=./third_party
|
||||
if [ "x$1" != "x" ]; then
|
||||
TARGET_ARCH_ABI=$1
|
||||
fi
|
||||
|
||||
# build
|
||||
rm -rf build
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -DPADDLE_LITE_DIR=${PADDLE_LITE_DIR} -DTARGET_ARCH_ABI=${TARGET_ARCH_ABI} -DTHIRD_PARTY_DIR=${THIRD_PARTY_DIR} ..
|
||||
make
|
||||
@@ -1,411 +0,0 @@
|
||||
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "paddle_api.h"
|
||||
#include "yaml-cpp/yaml.h"
|
||||
#include <arm_neon.h>
|
||||
#include <fstream>
|
||||
#include <limits>
|
||||
#include <opencv2/core.hpp>
|
||||
#include <opencv2/highgui.hpp>
|
||||
#include <opencv2/opencv.hpp>
|
||||
#include <stdio.h>
|
||||
#include <sys/time.h>
|
||||
#include <unistd.h>
|
||||
#include <vector>
|
||||
|
||||
int WARMUP_COUNT = 0;
|
||||
int REPEAT_COUNT = 1;
|
||||
const int CPU_THREAD_NUM = 2;
|
||||
const paddle::lite_api::PowerMode CPU_POWER_MODE =
|
||||
paddle::lite_api::PowerMode::LITE_POWER_HIGH;
|
||||
const std::vector<int64_t> INPUT_SHAPE = {1, 3, 416, 416};
|
||||
std::vector<float> INPUT_MEAN = {0.f, 0.f, 0.f};
|
||||
std::vector<float> INPUT_STD = {1.f, 1.f, 1.f};
|
||||
float INPUT_SCALE = 1 / 255.f;
|
||||
const float SCORE_THRESHOLD = 0.35f;
|
||||
|
||||
struct RESULT {
|
||||
std::string class_name;
|
||||
float score;
|
||||
float left;
|
||||
float top;
|
||||
float right;
|
||||
float bottom;
|
||||
};
|
||||
|
||||
inline int64_t get_current_us() {
|
||||
struct timeval time;
|
||||
gettimeofday(&time, NULL);
|
||||
return 1000000LL * (int64_t)time.tv_sec + (int64_t)time.tv_usec;
|
||||
}
|
||||
|
||||
bool read_file(const std::string &filename, std::vector<char> *contents,
|
||||
bool binary = true) {
|
||||
FILE *fp = fopen(filename.c_str(), binary ? "rb" : "r");
|
||||
if (!fp)
|
||||
return false;
|
||||
fseek(fp, 0, SEEK_END);
|
||||
size_t size = ftell(fp);
|
||||
fseek(fp, 0, SEEK_SET);
|
||||
contents->clear();
|
||||
contents->resize(size);
|
||||
size_t offset = 0;
|
||||
char *ptr = reinterpret_cast<char *>(&(contents->at(0)));
|
||||
while (offset < size) {
|
||||
size_t already_read = fread(ptr, 1, size - offset, fp);
|
||||
offset += already_read;
|
||||
ptr += already_read;
|
||||
}
|
||||
fclose(fp);
|
||||
return true;
|
||||
}
|
||||
|
||||
std::vector<std::string> load_labels(const std::string &path) {
|
||||
std::ifstream file;
|
||||
std::vector<std::string> labels;
|
||||
file.open(path);
|
||||
while (file) {
|
||||
std::string line;
|
||||
std::getline(file, line);
|
||||
labels.push_back(line);
|
||||
}
|
||||
file.clear();
|
||||
file.close();
|
||||
return labels;
|
||||
}
|
||||
|
||||
bool load_yaml_config(std::string yaml_path) {
|
||||
YAML::Node cfg;
|
||||
try {
|
||||
std::cout << "before loadFile" << std::endl;
|
||||
cfg = YAML::LoadFile(yaml_path);
|
||||
} catch (YAML::BadFile &e) {
|
||||
std::cout << "Failed to load yaml file " << yaml_path
|
||||
<< ", maybe you should check this file." << std::endl;
|
||||
return false;
|
||||
}
|
||||
auto preprocess_cfg = cfg["TestReader"]["sample_transforms"];
|
||||
for (const auto &op : preprocess_cfg) {
|
||||
if (!op.IsMap()) {
|
||||
std::cout << "Require the transform information in yaml be Map type."
|
||||
<< std::endl;
|
||||
std::abort();
|
||||
}
|
||||
auto op_name = op.begin()->first.as<std::string>();
|
||||
if (op_name == "NormalizeImage") {
|
||||
INPUT_MEAN = op.begin()->second["mean"].as<std::vector<float>>();
|
||||
INPUT_STD = op.begin()->second["std"].as<std::vector<float>>();
|
||||
INPUT_SCALE = op.begin()->second["scale"].as<float>();
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void preprocess(cv::Mat &input_image, std::vector<float> &input_mean,
|
||||
std::vector<float> &input_std, float input_scale,
|
||||
int input_width, int input_height, float *input_data) {
|
||||
cv::Mat resize_image;
|
||||
cv::resize(input_image, resize_image, cv::Size(input_width, input_height), 0,
|
||||
0);
|
||||
if (resize_image.channels() == 4) {
|
||||
cv::cvtColor(resize_image, resize_image, cv::COLOR_BGRA2RGB);
|
||||
}
|
||||
cv::Mat norm_image;
|
||||
resize_image.convertTo(norm_image, CV_32FC3, input_scale);
|
||||
// NHWC->NCHW
|
||||
int image_size = input_height * input_width;
|
||||
const float *image_data = reinterpret_cast<const float *>(norm_image.data);
|
||||
float32x4_t vmean0 = vdupq_n_f32(input_mean[0]);
|
||||
float32x4_t vmean1 = vdupq_n_f32(input_mean[1]);
|
||||
float32x4_t vmean2 = vdupq_n_f32(input_mean[2]);
|
||||
float32x4_t vscale0 = vdupq_n_f32(1.0f / input_std[0]);
|
||||
float32x4_t vscale1 = vdupq_n_f32(1.0f / input_std[1]);
|
||||
float32x4_t vscale2 = vdupq_n_f32(1.0f / input_std[2]);
|
||||
float *input_data_c0 = input_data;
|
||||
float *input_data_c1 = input_data + image_size;
|
||||
float *input_data_c2 = input_data + image_size * 2;
|
||||
int i = 0;
|
||||
for (; i < image_size - 3; i += 4) {
|
||||
float32x4x3_t vin3 = vld3q_f32(image_data);
|
||||
float32x4_t vsub0 = vsubq_f32(vin3.val[0], vmean0);
|
||||
float32x4_t vsub1 = vsubq_f32(vin3.val[1], vmean1);
|
||||
float32x4_t vsub2 = vsubq_f32(vin3.val[2], vmean2);
|
||||
float32x4_t vs0 = vmulq_f32(vsub0, vscale0);
|
||||
float32x4_t vs1 = vmulq_f32(vsub1, vscale1);
|
||||
float32x4_t vs2 = vmulq_f32(vsub2, vscale2);
|
||||
vst1q_f32(input_data_c0, vs0);
|
||||
vst1q_f32(input_data_c1, vs1);
|
||||
vst1q_f32(input_data_c2, vs2);
|
||||
image_data += 12;
|
||||
input_data_c0 += 4;
|
||||
input_data_c1 += 4;
|
||||
input_data_c2 += 4;
|
||||
}
|
||||
for (; i < image_size; i++) {
|
||||
*(input_data_c0++) = (*(image_data++) - input_mean[0]) / input_std[0];
|
||||
*(input_data_c1++) = (*(image_data++) - input_mean[1]) / input_std[1];
|
||||
*(input_data_c2++) = (*(image_data++) - input_mean[2]) / input_std[2];
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<RESULT> postprocess(const float *output_data, int64_t output_size,
|
||||
const std::vector<std::string> &word_labels,
|
||||
const float score_threshold,
|
||||
cv::Mat &output_image, double time) {
|
||||
std::vector<RESULT> results;
|
||||
std::vector<cv::Scalar> colors = {
|
||||
cv::Scalar(237, 189, 101), cv::Scalar(0, 0, 255),
|
||||
cv::Scalar(102, 153, 153), cv::Scalar(255, 0, 0),
|
||||
cv::Scalar(9, 255, 0), cv::Scalar(0, 0, 0),
|
||||
cv::Scalar(51, 153, 51)};
|
||||
for (int64_t i = 0; i < output_size; i += 6) {
|
||||
if (output_data[i + 1] < score_threshold) {
|
||||
continue;
|
||||
}
|
||||
int class_id = static_cast<int>(output_data[i]);
|
||||
float score = output_data[i + 1];
|
||||
RESULT result;
|
||||
std::string class_name = "Unknown";
|
||||
if (word_labels.size() > 0 && class_id >= 0 &&
|
||||
class_id < word_labels.size()) {
|
||||
class_name = word_labels[class_id];
|
||||
}
|
||||
result.class_name = class_name;
|
||||
result.score = score;
|
||||
result.left = output_data[i + 2] / 416;
|
||||
result.top = output_data[i + 3] / 416;
|
||||
result.right = output_data[i + 4] / 416;
|
||||
result.bottom = output_data[i + 5] / 416;
|
||||
int lx = static_cast<int>(result.left * output_image.cols);
|
||||
int ly = static_cast<int>(result.top * output_image.rows);
|
||||
int w = static_cast<int>(result.right * output_image.cols) - lx;
|
||||
int h = static_cast<int>(result.bottom * output_image.rows) - ly;
|
||||
cv::Rect bounding_box =
|
||||
cv::Rect(lx, ly, w, h) &
|
||||
cv::Rect(0, 0, output_image.cols, output_image.rows);
|
||||
if (w > 0 && h > 0 && score <= 1) {
|
||||
cv::Scalar color = colors[results.size() % colors.size()];
|
||||
cv::rectangle(output_image, bounding_box, color);
|
||||
cv::rectangle(output_image, cv::Point2d(lx, ly),
|
||||
cv::Point2d(lx + w, ly - 10), color, -1);
|
||||
cv::putText(output_image, std::to_string(results.size()) + "." +
|
||||
class_name + ":" + std::to_string(score),
|
||||
cv::Point2d(lx, ly), cv::FONT_HERSHEY_PLAIN, 1,
|
||||
cv::Scalar(255, 255, 255));
|
||||
results.push_back(result);
|
||||
}
|
||||
}
|
||||
return results;
|
||||
}
|
||||
|
||||
cv::Mat process(cv::Mat &input_image, std::vector<std::string> &word_labels,
|
||||
std::shared_ptr<paddle::lite_api::PaddlePredictor> &predictor) {
|
||||
// Preprocess image and fill the data of input tensor
|
||||
std::unique_ptr<paddle::lite_api::Tensor> input_tensor(
|
||||
std::move(predictor->GetInput(0)));
|
||||
input_tensor->Resize(INPUT_SHAPE);
|
||||
int input_width = INPUT_SHAPE[3];
|
||||
int input_height = INPUT_SHAPE[2];
|
||||
auto *input_data = input_tensor->mutable_data<float>();
|
||||
#if 1
|
||||
// scale_factor tensor
|
||||
auto scale_factor_tensor = predictor->GetInput(1);
|
||||
scale_factor_tensor->Resize({1, 2});
|
||||
auto scale_factor_data = scale_factor_tensor->mutable_data<float>();
|
||||
scale_factor_data[0] = 1.0f;
|
||||
scale_factor_data[1] = 1.0f;
|
||||
#endif
|
||||
|
||||
double preprocess_start_time = get_current_us();
|
||||
preprocess(input_image, INPUT_MEAN, INPUT_STD, INPUT_SCALE, input_width,
|
||||
input_height, input_data);
|
||||
double preprocess_end_time = get_current_us();
|
||||
double preprocess_time =
|
||||
(preprocess_end_time - preprocess_start_time) / 1000.0f;
|
||||
|
||||
double prediction_time;
|
||||
// Run predictor
|
||||
// warm up to skip the first inference and get more stable time, remove it in
|
||||
// actual products
|
||||
for (int i = 0; i < WARMUP_COUNT; i++) {
|
||||
predictor->Run();
|
||||
}
|
||||
// repeat to obtain the average time, set REPEAT_COUNT=1 in actual products
|
||||
double max_time_cost = 0.0f;
|
||||
double min_time_cost = std::numeric_limits<float>::max();
|
||||
double total_time_cost = 0.0f;
|
||||
for (int i = 0; i < REPEAT_COUNT; i++) {
|
||||
auto start = get_current_us();
|
||||
predictor->Run();
|
||||
auto end = get_current_us();
|
||||
double cur_time_cost = (end - start) / 1000.0f;
|
||||
if (cur_time_cost > max_time_cost) {
|
||||
max_time_cost = cur_time_cost;
|
||||
}
|
||||
if (cur_time_cost < min_time_cost) {
|
||||
min_time_cost = cur_time_cost;
|
||||
}
|
||||
total_time_cost += cur_time_cost;
|
||||
prediction_time = total_time_cost / REPEAT_COUNT;
|
||||
printf("iter %d cost: %f ms\n", i, cur_time_cost);
|
||||
}
|
||||
printf("warmup: %d repeat: %d, average: %f ms, max: %f ms, min: %f ms\n",
|
||||
WARMUP_COUNT, REPEAT_COUNT, prediction_time, max_time_cost,
|
||||
min_time_cost);
|
||||
|
||||
// Get the data of output tensor and postprocess to output detected objects
|
||||
std::unique_ptr<const paddle::lite_api::Tensor> output_tensor(
|
||||
std::move(predictor->GetOutput(0)));
|
||||
const float *output_data = output_tensor->mutable_data<float>();
|
||||
int64_t output_size = 1;
|
||||
for (auto dim : output_tensor->shape()) {
|
||||
output_size *= dim;
|
||||
}
|
||||
cv::Mat output_image = input_image.clone();
|
||||
double postprocess_start_time = get_current_us();
|
||||
std::vector<RESULT> results =
|
||||
postprocess(output_data, output_size, word_labels, SCORE_THRESHOLD,
|
||||
output_image, prediction_time);
|
||||
double postprocess_end_time = get_current_us();
|
||||
double postprocess_time =
|
||||
(postprocess_end_time - postprocess_start_time) / 1000.0f;
|
||||
|
||||
printf("results: %d\n", results.size());
|
||||
for (int i = 0; i < results.size(); i++) {
|
||||
printf("[%d] %s - %f %f,%f,%f,%f\n", i, results[i].class_name.c_str(),
|
||||
results[i].score, results[i].left, results[i].top, results[i].right,
|
||||
results[i].bottom);
|
||||
}
|
||||
printf("Preprocess time: %f ms\n", preprocess_time);
|
||||
printf("Prediction time: %f ms\n", prediction_time);
|
||||
printf("Postprocess time: %f ms\n\n", postprocess_time);
|
||||
|
||||
return output_image;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
if (argc < 5 || argc == 6) {
|
||||
printf("Usage: \n"
|
||||
"./object_detection_demo model_dir label_path [input_image_path] "
|
||||
"[output_image_path]"
|
||||
"use images from camera if input_image_path and input_image_path "
|
||||
"isn't provided.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
std::string model_path = argv[1];
|
||||
std::string label_path = argv[2];
|
||||
std::vector<std::string> word_labels = load_labels(label_path);
|
||||
std::string nnadapter_subgraph_partition_config_path = argv[3];
|
||||
|
||||
std::string yaml_path = argv[4];
|
||||
if (yaml_path != "null") {
|
||||
load_yaml_config(yaml_path);
|
||||
}
|
||||
|
||||
// Run inference by using full api with CxxConfig
|
||||
paddle::lite_api::CxxConfig cxx_config;
|
||||
if (1) { // combined model
|
||||
cxx_config.set_model_file(model_path + "/model");
|
||||
cxx_config.set_param_file(model_path + "/params");
|
||||
} else {
|
||||
cxx_config.set_model_dir(model_path);
|
||||
}
|
||||
cxx_config.set_threads(CPU_THREAD_NUM);
|
||||
cxx_config.set_power_mode(CPU_POWER_MODE);
|
||||
|
||||
std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor = nullptr;
|
||||
std::vector<paddle::lite_api::Place> valid_places;
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kInt8)});
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kFloat)});
|
||||
cxx_config.set_valid_places(valid_places);
|
||||
std::string device = "verisilicon_timvx";
|
||||
cxx_config.set_nnadapter_device_names({device});
|
||||
// cxx_config.set_nnadapter_context_properties(nnadapter_context_properties);
|
||||
|
||||
// cxx_config.set_nnadapter_model_cache_dir(nnadapter_model_cache_dir);
|
||||
// Set the subgraph custom partition configuration file
|
||||
|
||||
if (!nnadapter_subgraph_partition_config_path.empty()) {
|
||||
std::vector<char> nnadapter_subgraph_partition_config_buffer;
|
||||
if (read_file(nnadapter_subgraph_partition_config_path,
|
||||
&nnadapter_subgraph_partition_config_buffer, false)) {
|
||||
if (!nnadapter_subgraph_partition_config_buffer.empty()) {
|
||||
std::string nnadapter_subgraph_partition_config_string(
|
||||
nnadapter_subgraph_partition_config_buffer.data(),
|
||||
nnadapter_subgraph_partition_config_buffer.size());
|
||||
cxx_config.set_nnadapter_subgraph_partition_config_buffer(
|
||||
nnadapter_subgraph_partition_config_string);
|
||||
}
|
||||
} else {
|
||||
printf("Failed to load the subgraph custom partition configuration file "
|
||||
"%s\n",
|
||||
nnadapter_subgraph_partition_config_path.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
predictor = paddle::lite_api::CreatePaddlePredictor(cxx_config);
|
||||
predictor->SaveOptimizedModel(
|
||||
model_path, paddle::lite_api::LiteModelType::kNaiveBuffer);
|
||||
} catch (std::exception e) {
|
||||
printf("An internal error occurred in PaddleLite(cxx config).\n");
|
||||
}
|
||||
|
||||
paddle::lite_api::MobileConfig config;
|
||||
config.set_model_from_file(model_path + ".nb");
|
||||
config.set_threads(CPU_THREAD_NUM);
|
||||
config.set_power_mode(CPU_POWER_MODE);
|
||||
config.set_nnadapter_device_names({device});
|
||||
predictor =
|
||||
paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::MobileConfig>(
|
||||
config);
|
||||
if (argc > 5) {
|
||||
WARMUP_COUNT = 1;
|
||||
REPEAT_COUNT = 5;
|
||||
std::string input_image_path = argv[5];
|
||||
std::string output_image_path = argv[6];
|
||||
cv::Mat input_image = cv::imread(input_image_path);
|
||||
cv::Mat output_image = process(input_image, word_labels, predictor);
|
||||
cv::imwrite(output_image_path, output_image);
|
||||
cv::imshow("Object Detection Demo", output_image);
|
||||
cv::waitKey(0);
|
||||
} else {
|
||||
cv::VideoCapture cap(1);
|
||||
cap.set(cv::CAP_PROP_FRAME_WIDTH, 640);
|
||||
cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);
|
||||
if (!cap.isOpened()) {
|
||||
return -1;
|
||||
}
|
||||
while (1) {
|
||||
cv::Mat input_image;
|
||||
cap >> input_image;
|
||||
cv::Mat output_image = process(input_image, word_labels, predictor);
|
||||
cv::imshow("Object Detection Demo", output_image);
|
||||
if (cv::waitKey(1) == char('q')) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
cap.release();
|
||||
cv::destroyAllWindows();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
#run
|
||||
|
||||
TARGET_ABI=armv8 # for 64bit
|
||||
#TARGET_ABI=armv7hf # for 32bit
|
||||
if [ -n "$1" ]; then
|
||||
TARGET_ABI=$1
|
||||
fi
|
||||
export LD_LIBRARY_PATH=../Paddle-Lite/libs/$TARGET_ABI/
|
||||
export GLOG_v=0
|
||||
export VSI_NN_LOG_LEVEL=0
|
||||
export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1
|
||||
export VIV_VX_SET_PER_CHANNEL_ENTROPY=100
|
||||
export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=30000
|
||||
build/object_detection_demo models/picodetv2_relu6_coco_no_fuse ../../assets/labels/coco_label_list.txt models/picodetv2_relu6_coco_no_fuse/subgraph.txt models/picodetv2_relu6_coco_no_fuse/picodet.yml
|
||||
11
examples/vision/detection/paddledetection/rv1126/README.md
Executable file
11
examples/vision/detection/paddledetection/rv1126/README.md
Executable file
@@ -0,0 +1,11 @@
|
||||
# PP-YOLOE 量化模型在 RV1126 上的部署
|
||||
目前 FastDeploy 已经支持基于 PaddleLite 部署 PP-YOLOE 量化模型到 RV1126 上。
|
||||
|
||||
模型的量化和量化模型的下载请参考:[模型量化](../quantize/README.md)
|
||||
|
||||
|
||||
## 详细部署文档
|
||||
|
||||
在 RV1126 上只支持 C++ 的部署。
|
||||
|
||||
- [C++部署](cpp)
|
||||
38
examples/vision/detection/paddledetection/rv1126/cpp/CMakeLists.txt
Executable file
38
examples/vision/detection/paddledetection/rv1126/cpp/CMakeLists.txt
Executable file
@@ -0,0 +1,38 @@
|
||||
PROJECT(infer_demo C CXX)
|
||||
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
|
||||
|
||||
# 指定下载解压后的fastdeploy库路径
|
||||
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
|
||||
|
||||
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
|
||||
|
||||
# 添加FastDeploy依赖头文件
|
||||
include_directories(${FASTDEPLOY_INCS})
|
||||
include_directories(${FastDeploy_INCLUDE_DIRS})
|
||||
|
||||
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe.cc)
|
||||
# 添加FastDeploy库依赖
|
||||
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
|
||||
|
||||
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
|
||||
|
||||
install(TARGETS infer_demo DESTINATION ./)
|
||||
|
||||
install(DIRECTORY models DESTINATION ./)
|
||||
install(DIRECTORY images DESTINATION ./)
|
||||
# install(DIRECTORY run_with_adb.sh DESTINATION ./)
|
||||
|
||||
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
|
||||
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
|
||||
install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
|
||||
install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
|
||||
install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB ADB_TOOLS run_with_adb.sh)
|
||||
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)
|
||||
55
examples/vision/detection/paddledetection/rv1126/cpp/README.md
Executable file
55
examples/vision/detection/paddledetection/rv1126/cpp/README.md
Executable file
@@ -0,0 +1,55 @@
|
||||
# PP-YOLOE 量化模型 C++ 部署示例
|
||||
|
||||
本目录下提供的 `infer_ppyoloe.cc`,可以帮助用户快速完成 PP-YOLOE 量化模型在 RV1126 上的部署推理加速。
|
||||
|
||||
## 部署准备
|
||||
### FastDeploy 交叉编译环境准备
|
||||
- 1. 软硬件环境满足要求,以及交叉编译环境的准备,请参考:[FastDeploy 交叉编译环境准备](../../../../../../docs/cn/build_and_install/rv1126.md#交叉编译环境搭建)
|
||||
|
||||
### 模型准备
|
||||
- 1. 用户可以直接使用由 FastDeploy 提供的量化模型进行部署。
|
||||
- 2. 用户可以先使用 PaddleDetection 自行导出 Float32 模型,注意导出模型时设置参数:use_shared_conv=False,更多细节请参考:[PP-YOLOE](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/ppyoloe)
|
||||
- 3. 用户可以使用 FastDeploy 提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署。(注意: 推理量化后的检测模型仍然需要FP32模型文件夹下的 infer_cfg.yml 文件,自行量化的模型文件夹内不包含此 yaml 文件,用户从 FP32 模型文件夹下复制此yaml文件到量化后的模型文件夹内即可。)
|
||||
- 更多量化相关信息可查阅[模型量化](../../quantize/README.md)
|
||||
|
||||
## 在 RV1126 上部署量化后的 PP-YOLOE 检测模型
|
||||
请按照以下步骤完成在 RV1126 上部署 PP-YOLOE 量化模型:
|
||||
1. 交叉编译 FastDeploy 库,具体请参考:[交叉编译 FastDeploy](../../../../../../docs/cn/build_and_install/rv1126.md#基于-paddlelite-的-fastdeploy-交叉编译库编译)
|
||||
|
||||
2. 将编译后的库拷贝到当前目录,可使用如下命令:
|
||||
```bash
|
||||
cp -r FastDeploy/build/fastdeploy-tmivx/ FastDeploy/examples/vision/detection/paddledetection/rv1126/cpp
|
||||
```
|
||||
|
||||
3. 在当前路径下载部署所需的模型和示例图片:
|
||||
```bash
|
||||
mkdir models && mkdir images
|
||||
wget https://bj.bcebos.com/fastdeploy/models/ppyoloe_noshare_qat.tar.gz
|
||||
tar -xvf ppyoloe_noshare_qat.tar.gz
|
||||
cp -r ppyoloe_noshare_qat models
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
cp -r 000000014439.jpg images
|
||||
```
|
||||
|
||||
4. 编译部署示例,可使用如下命令:
|
||||
```bash
|
||||
mkdir build && cd build
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/timvx.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx ..
|
||||
make -j8
|
||||
make install
|
||||
# 成功编译之后,会生成 install 文件夹,里面有一个运行 demo 和部署所需的库
|
||||
```
|
||||
|
||||
5. 基于 adb 工具部署 PP-YOLOE 检测模型到 Rockchip RV1126,可使用如下命令:
|
||||
```bash
|
||||
# 进入 install 目录
|
||||
cd FastDeploy/examples/vision/detection/paddledetection/rv1126/cpp/build/install/
|
||||
# 如下命令表示:bash run_with_adb.sh 需要运行的demo 模型路径 图片路径 设备的DEVICE_ID
|
||||
bash run_with_adb.sh infer_demo ppyoloe_noshare_qat 000000014439.jpg $DEVICE_ID
|
||||
```
|
||||
|
||||
部署成功后运行结果如下:
|
||||
|
||||
<img width="640" src="https://user-images.githubusercontent.com/30516196/203708564-43c49485-9b48-4eb2-8fe7-0fa517979fff.png">
|
||||
|
||||
需要特别注意的是,在 RV1126 上部署的模型需要是量化后的模型,模型的量化请参考:[模型量化](../../../../../../docs/cn/quantize.md)
|
||||
66
examples/vision/detection/paddledetection/rv1126/cpp/infer_ppyoloe.cc
Executable file
66
examples/vision/detection/paddledetection/rv1126/cpp/infer_ppyoloe.cc
Executable file
@@ -0,0 +1,66 @@
|
||||
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "fastdeploy/vision.h"
|
||||
#ifdef WIN32
|
||||
const char sep = '\\';
|
||||
#else
|
||||
const char sep = '/';
|
||||
#endif
|
||||
|
||||
void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
|
||||
auto model_file = model_dir + sep + "model.pdmodel";
|
||||
auto params_file = model_dir + sep + "model.pdiparams";
|
||||
auto config_file = model_dir + sep + "infer_cfg.yml";
|
||||
auto subgraph_file = model_dir + sep + "subgraph.txt";
|
||||
|
||||
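// 配置 RuntimeOption:使用 TIM-VX(芯原 NPU)后端,并通过 subgraph.txt 指定需要回退到 ARM CPU 计算的算子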
fastdeploy::RuntimeOption option;
|
||||
option.UseTimVX();
|
||||
option.SetLiteSubgraphPartitionPath(subgraph_file);
|
||||
|
||||
auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file,
|
||||
config_file, option);
|
||||
assert(model.Initialized());
|
||||
|
||||
auto im = cv::imread(image_file);
|
||||
|
||||
fastdeploy::vision::DetectionResult res;
|
||||
if (!model.Predict(im, &res)) {
|
||||
std::cerr << "Failed to predict." << std::endl;
|
||||
return;
|
||||
}
|
||||
|
||||
std::cout << res.Str() << std::endl;
|
||||
|
||||
auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
|
||||
cv::imwrite("vis_result.jpg", vis_im);
|
||||
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
|
||||
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
if (argc < 3) {
|
||||
std::cout << "Usage: infer_demo path/to/quant_model "
|
||||
"path/to/image "
|
||||
"run_option, "
|
||||
"e.g ./infer_demo ./PPYOLOE_L_quant ./test.jpeg"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
std::string model_dir = argv[1];
|
||||
std::string test_image = argv[2];
|
||||
InitAndInfer(model_dir, test_image);
|
||||
return 0;
|
||||
}
|
||||
47
examples/vision/detection/paddledetection/rv1126/cpp/run_with_adb.sh
Executable file
47
examples/vision/detection/paddledetection/rv1126/cpp/run_with_adb.sh
Executable file
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
HOST_SPACE=${PWD}
|
||||
echo ${HOST_SPACE}
|
||||
WORK_SPACE=/data/local/tmp/test
|
||||
|
||||
# The first parameter represents the demo name
|
||||
DEMO_NAME=image_classification_demo
|
||||
if [ -n "$1" ]; then
|
||||
DEMO_NAME=$1
|
||||
fi
|
||||
|
||||
# The second parameter represents the model name
|
||||
MODEL_NAME=mobilenet_v1_fp32_224
|
||||
if [ -n "$2" ]; then
|
||||
MODEL_NAME=$2
|
||||
fi
|
||||
|
||||
# The third parameter indicates the name of the image to be tested
|
||||
IMAGE_NAME=0001.jpg
|
||||
if [ -n "$3" ]; then
|
||||
IMAGE_NAME=$3
|
||||
fi
|
||||
|
||||
# The fourth parameter represents the ID of the device
|
||||
ADB_DEVICE_NAME=
|
||||
if [ -n "$4" ]; then
|
||||
ADB_DEVICE_NAME="-s $4"
|
||||
fi
|
||||
|
||||
# Set the environment variables required during the running process
|
||||
EXPORT_ENVIRONMENT_VARIABLES="export GLOG_v=5; export SUBGRAPH_ONLINE_MODE=true; export RKNPU_LOGLEVEL=5; export RKNN_LOG_LEVEL=5; ulimit -c unlimited; export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1; export VIV_VX_SET_PER_CHANNEL_ENTROPY=100; export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=300000; export VSI_NN_LOG_LEVEL=5;"
|
||||
|
||||
EXPORT_ENVIRONMENT_VARIABLES="${EXPORT_ENVIRONMENT_VARIABLES}export LD_LIBRARY_PATH=${WORK_SPACE}/lib:\$LD_LIBRARY_PATH;"
|
||||
|
||||
# Please install adb, and DON'T run this in the docker.
|
||||
set -e
|
||||
adb $ADB_DEVICE_NAME shell "rm -rf $WORK_SPACE"
|
||||
adb $ADB_DEVICE_NAME shell "mkdir -p $WORK_SPACE"
|
||||
|
||||
# Upload the demo, librarys, model and test images to the device
|
||||
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/lib $WORK_SPACE
|
||||
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/${DEMO_NAME} $WORK_SPACE
|
||||
adb $ADB_DEVICE_NAME push models $WORK_SPACE
|
||||
adb $ADB_DEVICE_NAME push images $WORK_SPACE
|
||||
|
||||
# Execute the deployment demo
|
||||
adb $ADB_DEVICE_NAME shell "cd $WORK_SPACE; ${EXPORT_ENVIRONMENT_VARIABLES} chmod +x ./${DEMO_NAME}; ./${DEMO_NAME} ./models/${MODEL_NAME} ./images/$IMAGE_NAME"
|
||||
6
examples/vision/detection/yolov5/quantize/README.md
Normal file → Executable file
6
examples/vision/detection/yolov5/quantize/README.md
Normal file → Executable file
@@ -4,13 +4,13 @@ FastDeploy已支持部署量化模型,并提供一键模型自动化压缩的工
|
||||
|
||||
## FastDeploy一键模型自动化压缩工具
|
||||
FastDeploy 提供了一键模型自动化压缩工具, 能够简单地通过输入一个配置文件, 对模型进行量化.
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/auto_compression/)
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/common_tools/auto_compression/)
|
||||
|
||||
## 下载量化完成的YOLOv5s模型
|
||||
用户也可以直接下载下表中的量化模型进行部署.(点击模型名字即可下载)
|
||||
|
||||
Benchmark表格说明:
|
||||
- Rtuntime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- Runtime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- 端到端时延为模型在实际推理场景中的时延, 包含模型的前后处理.
|
||||
- 所测时延均为推理1000次后求得的平均值, 单位是毫秒.
|
||||
- INT8 + FP16 为在推理INT8量化模型的同时, 给Runtime 开启FP16推理选项
|
||||
@@ -29,7 +29,7 @@ Benchmark表格说明:
|
||||
| [YOLOv5s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_quant.tar) | Paddle Inference| CPU | 213.73 | 130.19 | None | None | 1.64 |37.6 | 35.2 | 量化蒸馏训练 |
|
||||
|
||||
#### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [YOLOv5s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_quant.tar) | TensorRT | GPU | 24.61 | 21.20 | 20.78 | 20.94 | 1.18 | 37.6 | 36.7 | 量化蒸馏训练 |
|
||||
| [YOLOv5s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_quant.tar) | Paddle-TensorRT | GPU | 23.53 | None | 21.98 | 19.84 | 1.28 | 37.6 | 36.8 | 量化蒸馏训练 |
|
||||
|
||||
2
examples/vision/detection/yolov5/quantize/README_EN.md
Normal file → Executable file
2
examples/vision/detection/yolov5/quantize/README_EN.md
Normal file → Executable file
@@ -6,7 +6,7 @@ Users can use the one-click model quantization tool to quantize and deploy the m
|
||||
## FastDeploy One-Click Model Quantization Tool
|
||||
|
||||
FastDeploy provides a one-click quantization tool that allows users to quantize a model simply with a configuration file.
|
||||
For a detailed tutorial, please refer to: [One-Click Model Quantization Tool](../../../../../tools/auto_compression/)
|
||||
For a detailed tutorial, please refer to: [One-Click Model Quantization Tool](../../../../../tools/common_tools/auto_compression/)
|
||||
|
||||
## Download Quantized YOLOv5s Model
|
||||
|
||||
|
||||
2
examples/vision/detection/yolov5/quantize/cpp/README.md
Normal file → Executable file
2
examples/vision/detection/yolov5/quantize/cpp/README.md
Normal file → Executable file
@@ -9,7 +9,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
|
||||
## 以量化后的YOLOv5s模型为例, 进行部署
|
||||
在本目录执行如下命令即可完成编译,以及量化模型部署.支持此模型需保证FastDeploy版本0.7.0以上(x.x.x>=0.7.0)
|
||||
|
||||
2
examples/vision/detection/yolov5/quantize/python/README.md
Normal file → Executable file
2
examples/vision/detection/yolov5/quantize/python/README.md
Normal file → Executable file
@@ -8,7 +8,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
|
||||
|
||||
## 以量化后的YOLOv5s模型为例, 进行部署
|
||||
|
||||
11
examples/vision/detection/yolov5/rv1126/README.md
Executable file
11
examples/vision/detection/yolov5/rv1126/README.md
Executable file
@@ -0,0 +1,11 @@
|
||||
# YOLOv5 量化模型在 RV1126 上的部署
|
||||
目前 FastDeploy 已经支持基于 PaddleLite 部署 YOLOv5 量化模型到 RV1126 上。
|
||||
|
||||
模型的量化和量化模型的下载请参考:[模型量化](../quantize/README.md)
|
||||
|
||||
|
||||
## 详细部署文档
|
||||
|
||||
在 RV1126 上只支持 C++ 的部署。
|
||||
|
||||
- [C++部署](cpp)
|
||||
37
examples/vision/detection/yolov5/rv1126/cpp/CMakeLists.txt
Executable file
37
examples/vision/detection/yolov5/rv1126/cpp/CMakeLists.txt
Executable file
@@ -0,0 +1,37 @@
|
||||
PROJECT(infer_demo C CXX)
|
||||
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
|
||||
|
||||
# 指定下载解压后的fastdeploy库路径
|
||||
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
|
||||
|
||||
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
|
||||
|
||||
# 添加FastDeploy依赖头文件
|
||||
include_directories(${FASTDEPLOY_INCS})
|
||||
include_directories(${FastDeploy_INCLUDE_DIRS})
|
||||
|
||||
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
|
||||
# 添加FastDeploy库依赖
|
||||
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
|
||||
|
||||
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
|
||||
|
||||
install(TARGETS infer_demo DESTINATION ./)
|
||||
|
||||
install(DIRECTORY models DESTINATION ./)
|
||||
install(DIRECTORY images DESTINATION ./)
|
||||
|
||||
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
|
||||
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
|
||||
install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
|
||||
install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
|
||||
install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB ADB_TOOLS run_with_adb.sh)
|
||||
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)
|
||||
54
examples/vision/detection/yolov5/rv1126/cpp/README.md
Executable file
54
examples/vision/detection/yolov5/rv1126/cpp/README.md
Executable file
@@ -0,0 +1,54 @@
|
||||
# YOLOv5 量化模型 C++ 部署示例
|
||||
|
||||
本目录下提供的 `infer.cc`,可以帮助用户快速完成 YOLOv5 量化模型在 RV1126 上的部署推理加速。
|
||||
|
||||
## 部署准备
|
||||
### FastDeploy 交叉编译环境准备
|
||||
- 1. 软硬件环境满足要求,以及交叉编译环境的准备,请参考:[FastDeploy 交叉编译环境准备](../../../../../../docs/cn/build_and_install/rv1126.md#交叉编译环境搭建)
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由 FastDeploy 提供的量化模型进行部署。
|
||||
- 2. 用户可以使用 FastDeploy 提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署。
|
||||
- 更多量化相关信息可查阅[模型量化](../../quantize/README.md)
|
||||
|
||||
## 在 RV1126 上部署量化后的 YOLOv5 检测模型
|
||||
请按照以下步骤完成在 RV1126 上部署 YOLOv5 量化模型:
|
||||
1. 交叉编译 FastDeploy 库,具体请参考:[交叉编译 FastDeploy](../../../../../../docs/cn/build_and_install/rv1126.md#基于-paddlelite-的-fastdeploy-交叉编译库编译)
|
||||
|
||||
2. 将编译后的库拷贝到当前目录,可使用如下命令:
|
||||
```bash
|
||||
cp -r FastDeploy/build/fastdeploy-tmivx/ FastDeploy/examples/vision/detection/yolov5/rv1126/cpp
|
||||
```
|
||||
|
||||
3. 在当前路径下载部署所需的模型和示例图片:
|
||||
```bash
|
||||
mkdir models && mkdir images
|
||||
wget https://bj.bcebos.com/fastdeploy/models/yolov5s_ptq_model.tar.gz
|
||||
tar -xvf yolov5s_ptq_model.tar.gz
|
||||
cp -r yolov5s_ptq_model models
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
cp -r 000000014439.jpg images
|
||||
```
|
||||
|
||||
4. 编译部署示例,可使用如下命令:
|
||||
```bash
|
||||
mkdir build && cd build
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/timvx.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx ..
|
||||
make -j8
|
||||
make install
|
||||
# 成功编译之后,会生成 install 文件夹,里面有一个运行 demo 和部署所需的库
|
||||
```
|
||||
|
||||
5. 基于 adb 工具部署 YOLOv5 检测模型到 Rockchip RV1126,可使用如下命令:
|
||||
```bash
|
||||
# 进入 install 目录
|
||||
cd FastDeploy/examples/vision/detection/yolov5/rv1126/cpp/build/install/
|
||||
# 如下命令表示:bash run_with_adb.sh 需要运行的demo 模型路径 图片路径 设备的DEVICE_ID
|
||||
bash run_with_adb.sh infer_demo yolov5s_ptq_model 000000014439.jpg $DEVICE_ID
|
||||
```
|
||||
|
||||
部署成功后,vis_result.jpg 保存的结果如下:
|
||||
|
||||
<img width="640" src="https://user-images.githubusercontent.com/30516196/203706969-dd58493c-6635-4ee7-9421-41c2e0c9524b.png">
|
||||
|
||||
需要特别注意的是,在 RV1126 上部署的模型需要是量化后的模型,模型的量化请参考:[模型量化](../../../../../../docs/cn/quantize.md)
|
||||
64
examples/vision/detection/yolov5/rv1126/cpp/infer.cc
Executable file
64
examples/vision/detection/yolov5/rv1126/cpp/infer.cc
Executable file
@@ -0,0 +1,64 @@
|
||||
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "fastdeploy/vision.h"
|
||||
#ifdef WIN32
|
||||
const char sep = '\\';
|
||||
#else
|
||||
const char sep = '/';
|
||||
#endif
|
||||
|
||||
void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
|
||||
auto model_file = model_dir + sep + "model.pdmodel";
|
||||
auto params_file = model_dir + sep + "model.pdiparams";
|
||||
auto subgraph_file = model_dir + sep + "subgraph.txt";
|
||||
|
||||
fastdeploy::RuntimeOption option;
|
||||
option.UseTimVX();
|
||||
option.SetLiteSubgraphPartitionPath(subgraph_file);
|
||||
|
||||
auto model = fastdeploy::vision::detection::YOLOv5(
|
||||
model_file, params_file, option, fastdeploy::ModelFormat::PADDLE);
|
||||
assert(model.Initialized());
|
||||
|
||||
auto im = cv::imread(image_file);
|
||||
|
||||
fastdeploy::vision::DetectionResult res;
|
||||
if (!model.Predict(im, &res)) {
|
||||
std::cerr << "Failed to predict." << std::endl;
|
||||
return;
|
||||
}
|
||||
|
||||
std::cout << res.Str() << std::endl;
|
||||
|
||||
auto vis_im = fastdeploy::vision::VisDetection(im, res);
|
||||
cv::imwrite("vis_result.jpg", vis_im);
|
||||
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
if (argc < 3) {
|
||||
std::cout << "Usage: infer_demo path/to/quant_model "
|
||||
"path/to/image "
|
||||
"run_option, "
|
||||
"e.g ./infer_demo ./yolov5s_quant ./000000014439.jpg"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
std::string model_dir = argv[1];
|
||||
std::string test_image = argv[2];
|
||||
InitAndInfer(model_dir, test_image);
|
||||
return 0;
|
||||
}
|
||||
47
examples/vision/detection/yolov5/rv1126/cpp/run_with_adb.sh
Executable file
47
examples/vision/detection/yolov5/rv1126/cpp/run_with_adb.sh
Executable file
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
HOST_SPACE=${PWD}
|
||||
echo ${HOST_SPACE}
|
||||
WORK_SPACE=/data/local/tmp/test
|
||||
|
||||
# The first parameter represents the demo name
|
||||
DEMO_NAME=image_classification_demo
|
||||
if [ -n "$1" ]; then
|
||||
DEMO_NAME=$1
|
||||
fi
|
||||
|
||||
# The second parameter represents the model name
|
||||
MODEL_NAME=mobilenet_v1_fp32_224
|
||||
if [ -n "$2" ]; then
|
||||
MODEL_NAME=$2
|
||||
fi
|
||||
|
||||
# The third parameter indicates the name of the image to be tested
|
||||
IMAGE_NAME=0001.jpg
|
||||
if [ -n "$3" ]; then
|
||||
IMAGE_NAME=$3
|
||||
fi
|
||||
|
||||
# The fourth parameter represents the ID of the device
|
||||
ADB_DEVICE_NAME=
|
||||
if [ -n "$4" ]; then
|
||||
ADB_DEVICE_NAME="-s $4"
|
||||
fi
|
||||
|
||||
# Set the environment variables required during the running process
|
||||
EXPORT_ENVIRONMENT_VARIABLES="export GLOG_v=5; export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1; export VIV_VX_SET_PER_CHANNEL_ENTROPY=100; export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=300000; export VSI_NN_LOG_LEVEL=5;"
|
||||
|
||||
EXPORT_ENVIRONMENT_VARIABLES="${EXPORT_ENVIRONMENT_VARIABLES}export LD_LIBRARY_PATH=${WORK_SPACE}/lib:\$LD_LIBRARY_PATH;"
|
||||
|
||||
# Please install adb, and DON'T run this in the docker.
|
||||
set -e
|
||||
adb $ADB_DEVICE_NAME shell "rm -rf $WORK_SPACE"
|
||||
adb $ADB_DEVICE_NAME shell "mkdir -p $WORK_SPACE"
|
||||
|
||||
# Upload the demo, librarys, model and test images to the device
|
||||
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/lib $WORK_SPACE
|
||||
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/${DEMO_NAME} $WORK_SPACE
|
||||
adb $ADB_DEVICE_NAME push models $WORK_SPACE
|
||||
adb $ADB_DEVICE_NAME push images $WORK_SPACE
|
||||
|
||||
# Execute the deployment demo
|
||||
adb $ADB_DEVICE_NAME shell "cd $WORK_SPACE; ${EXPORT_ENVIRONMENT_VARIABLES} chmod +x ./${DEMO_NAME}; ./${DEMO_NAME} ./models/${MODEL_NAME} ./images/$IMAGE_NAME"
|
||||
@@ -49,7 +49,7 @@ I0928 04:51:15.826578 206 http_server.cc:167] Started Metrics Service at 0.0.0.0
|
||||
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
|
||||
|
||||
#安装客户端依赖
|
||||
python3 -m pip install tritonclient\[all\]
|
||||
python3 -m pip install tritonclient[all]
|
||||
|
||||
# 发送请求
|
||||
python3 yolov5_grpc_client.py
|
||||
|
||||
6
examples/vision/detection/yolov6/quantize/README.md
Normal file → Executable file
6
examples/vision/detection/yolov6/quantize/README.md
Normal file → Executable file
@@ -4,12 +4,12 @@ FastDeploy已支持部署量化模型,并提供一键模型自动化压缩的工
|
||||
|
||||
## FastDeploy一键模型自动化压缩工具
|
||||
FastDeploy 提供了一键模型自动化压缩工具, 能够简单地通过输入一个配置文件, 对模型进行量化.
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/auto_compression/)
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/common_tools/auto_compression/)
|
||||
## 下载量化完成的YOLOv6s模型
|
||||
用户也可以直接下载下表中的量化模型进行部署.(点击模型名字即可下载)
|
||||
|
||||
Benchmark表格说明:
|
||||
- Rtuntime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- Runtime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- 端到端时延为模型在实际推理场景中的时延, 包含模型的前后处理.
|
||||
- 所测时延均为推理1000次后求得的平均值, 单位是毫秒.
|
||||
- INT8 + FP16 为在推理INT8量化模型的同时, 给Runtime 开启FP16推理选项
|
||||
@@ -28,7 +28,7 @@ Benchmark表格说明:
|
||||
|
||||
|
||||
#### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [YOLOv6s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_ptq_model.tar) | TensorRT | GPU | 15.66 | 11.30 | 10.25 |9.59 | 1.63 | 42.5 | 40.7|量化蒸馏训练 |
|
||||
| [YOLOv6s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_ptq_model.tar) | Paddle-TensorRT | GPU | 15.03 | None| 11.36 | 9.32 | 1.61 | 42.5 | 40.7|量化蒸馏训练 |
|
||||
|
||||
2
examples/vision/detection/yolov6/quantize/README_EN.md
Normal file → Executable file
@@ -6,7 +6,7 @@ Users can use the one-click model quantization tool to quantize and deploy the m
|
||||
## FastDeploy One-Click Model Quantization Tool
|
||||
|
||||
FastDeploy provides a one-click quantization tool that allows users to quantize a model simply with a configuration file.
|
||||
For detailed tutorial, please refer to : [One-Click Model Quantization Tool](../../../../../tools/auto_compression/)
|
||||
For detailed tutorial, please refer to : [One-Click Model Quantization Tool](../../../../../tools/common_tools/auto_compression/)
|
||||
|
||||
## Download Quantized YOLOv6s Model
|
||||
|
||||
|
||||
2
examples/vision/detection/yolov6/quantize/cpp/README.md
Normal file → Executable file
@@ -9,7 +9,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
|
||||
## 以量化后的YOLOv6s模型为例, 进行部署
|
||||
在本目录执行如下命令即可完成编译,以及量化模型部署.支持此模型需保证FastDeploy版本0.7.0以上(x.x.x>=0.7.0)
|
||||
|
||||
2
examples/vision/detection/yolov6/quantize/python/README.md
Normal file → Executable file
@@ -8,7 +8,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
|
||||
## 以量化后的YOLOv6s模型为例, 进行部署
|
||||
```bash
|
||||
|
||||
6
examples/vision/detection/yolov7/quantize/README.md
Normal file → Executable file
@@ -4,14 +4,14 @@ FastDeploy已支持部署量化模型,并提供一键模型自动化压缩的工
|
||||
|
||||
## FastDeploy一键模型自动化压缩工具
|
||||
FastDeploy 提供了一键模型自动化压缩工具, 能够简单地通过输入一个配置文件, 对模型进行量化.
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/auto_compression/)
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/common_tools/auto_compression/)
|
||||
|
||||
## 下载量化完成的YOLOv7模型
|
||||
用户也可以直接下载下表中的量化模型进行部署.(点击模型名字即可下载)
|
||||
|
||||
|
||||
Benchmark表格说明:
|
||||
- Rtuntime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- Runtime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- 端到端时延为模型在实际推理场景中的时延, 包含模型的前后处理.
|
||||
- 所测时延均为推理1000次后求得的平均值, 单位是毫秒.
|
||||
- INT8 + FP16 为在推理INT8量化模型的同时, 给Runtime 开启FP16推理选项
|
||||
@@ -29,7 +29,7 @@ Benchmark表格说明:
|
||||
| [YOLOv7](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7_quant.tar) | Paddle Inference | CPU | 995.85 | 477.93|None|None | 2.08 |51.1 | 46.2|量化蒸馏训练 |
|
||||
|
||||
#### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 mAP | INT8 mAP | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [YOLOv7](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7_quant.tar) | TensorRT | GPU | 36.47 | 18.81 | 20.33| 17.58| 2.07 | 51.1| 50.4|量化蒸馏训练 |
|
||||
| [YOLOv7](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7_quant.tar) | Paddle-TensorRT | GPU | 37.06|None|20.26|17.53 | 2.11 | 51.1| 50.4|量化蒸馏训练 |
|
||||
|
||||
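Likewise, the quantized YOLOv7 model from the tables above can be downloaded and unpacked directly:

```bash
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7_quant.tar
tar -xvf yolov7_quant.tar
```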
2
examples/vision/detection/yolov7/quantize/README_EN.md
Normal file → Executable file
@@ -6,7 +6,7 @@ Users can use the one-click model quantization tool to quantize and deploy the m
|
||||
## FastDeploy One-Click Model Quantization Tool
|
||||
|
||||
FastDeploy provides a one-click quantization tool that allows users to quantize a model simply with a configuration file.
|
||||
For detailed tutorial, please refer to : [One-Click Model Quantization Tool](../../../../../tools/auto_compression/)
|
||||
For detailed tutorial, please refer to : [One-Click Model Quantization Tool](../../../../../tools/common_tools/auto_compression/)
|
||||
|
||||
## Download Quantized YOLOv7 Model
|
||||
|
||||
|
||||
2
examples/vision/detection/yolov7/quantize/cpp/README.md
Normal file → Executable file
@@ -9,7 +9,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
|
||||
## 以量化后的YOLOv7模型为例, 进行部署
|
||||
在本目录执行如下命令即可完成编译,以及量化模型部署.支持此模型需保证FastDeploy版本0.7.0以上(x.x.x>=0.7.0)
|
||||
|
||||
2
examples/vision/detection/yolov7/quantize/python/README.md
Normal file → Executable file
@@ -8,7 +8,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.
|
||||
|
||||
## 以量化后的YOLOv7模型为例, 进行部署
|
||||
```bash
|
||||
|
||||
6
examples/vision/segmentation/paddleseg/quantize/README.md
Normal file → Executable file
@@ -4,14 +4,14 @@ FastDeploy已支持部署量化模型,并提供一键模型自动化压缩的工
|
||||
|
||||
## FastDeploy一键模型自动化压缩工具
|
||||
FastDeploy 提供了一键模型自动化压缩工具, 能够简单地通过输入一个配置文件, 对模型进行量化.
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/auto_compression/)
|
||||
详细教程请见: [一键模型自动化压缩工具](../../../../../tools/common_tools/auto_compression/)
|
||||
注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的deploy.yaml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可。
|
||||
|
||||
## 下载量化完成的PaddleSeg模型
|
||||
用户也可以直接下载下表中的量化模型进行部署.(点击模型名字即可下载)
|
||||
|
||||
Benchmark表格说明:
|
||||
- Rtuntime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- Runtime时延为模型在各种Runtime上的推理时延,包含CPU->GPU数据拷贝,GPU推理,GPU->CPU数据拷贝时间. 不包含模型各自的前后处理时间.
|
||||
- 端到端时延为模型在实际推理场景中的时延, 包含模型的前后处理.
|
||||
- 所测时延均为推理1000次后求得的平均值, 单位是毫秒.
|
||||
- INT8 + FP16 为在推理INT8量化模型的同时, 给Runtime 开启FP16推理选项
|
||||
@@ -26,7 +26,7 @@ Benchmark表格说明:
|
||||
| [PP-LiteSeg-T(STDC1)-cityscapes](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_T_STDC1_cityscapes_without_argmax_infer_QAT_new.tar)) | Paddle Inference | CPU | 1138.04| 602.62 |None|None | 1.89 |77.37 | 71.62 |量化蒸馏训练 |
|
||||
|
||||
#### 端到端 Benchmark
|
||||
| 模型 |推理后端 |部署硬件 | FP32 Runtime时延 | INT8 Runtime时延 | INT8 + FP16 Runtime时延 | INT8+FP16+PM Runtime时延 | 最大加速比 | FP32 mIoU | INT8 mIoU | 量化方式 |
|
||||
| 模型 |推理后端 |部署硬件 | FP32 End2End时延 | INT8 End2End时延 | INT8 + FP16 End2End时延 | INT8+FP16+PM End2End时延 | 最大加速比 | FP32 mIoU | INT8 mIoU | 量化方式 |
|
||||
| ------------------- | -----------------|-----------| -------- |-------- |-------- | --------- |-------- |----- |----- |----- |
|
||||
| [PP-LiteSeg-T(STDC1)-cityscapes](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_T_STDC1_cityscapes_without_argmax_infer_QAT_new.tar)) | Paddle Inference | CPU | 4726.65| 4134.91|None|None | 1.14 |77.37 | 71.62 |量化蒸馏训练 |
|
||||
|
||||
|
||||
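As the note above says, a self-quantized PaddleSeg model still needs the deploy.yaml from the original FP32 export; copying it over is a one-liner. The directory names below are placeholders for the FP32 export and the quantized output folder.

```bash
# Hypothetical folder names; substitute the actual FP32 export and quantized model directories.
cp ./pp_liteseg_fp32_model/deploy.yaml ./pp_liteseg_quant_model/
```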
2
examples/vision/segmentation/paddleseg/quantize/cpp/README.md
Normal file → Executable file
@@ -8,7 +8,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的deploy.yaml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的deploy.yaml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
|
||||
## 以量化后的PP_LiteSeg_T_STDC1_cityscapes模型为例, 进行部署
|
||||
在本目录执行如下命令即可完成编译,以及量化模型部署.支持此模型需保证FastDeploy版本0.7.0以上(x.x.x>=0.7.0)
|
||||
|
||||
2
examples/vision/segmentation/paddleseg/quantize/python/README.md
Normal file → Executable file
@@ -8,7 +8,7 @@
|
||||
|
||||
### 量化模型准备
|
||||
- 1. 用户可以直接使用由FastDeploy提供的量化模型进行部署.
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的deploy.yaml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
- 2. 用户可以使用FastDeploy提供的[一键模型自动化压缩工具](../../../../../../tools/common_tools/auto_compression/),自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的deploy.yaml文件, 自行量化的模型文件夹内不包含此yaml文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
|
||||
|
||||
## 以量化后的PP_LiteSeg_T_STDC1_cityscapes模型为例, 进行部署
|
||||
|
||||
11
examples/vision/segmentation/paddleseg/rv1126/README.md
Executable file
@@ -0,0 +1,11 @@
|
||||
# PP-LiteSeg 量化模型在 RV1126 上的部署
|
||||
目前 FastDeploy 已经支持基于 PaddleLite 部署 PP-LiteSeg 量化模型到 RV1126 上。
|
||||
|
||||
模型的量化和量化模型的下载请参考:[模型量化](../quantize/README.md)
|
||||
|
||||
|
||||
## 详细部署文档
|
||||
|
||||
在 RV1126 上只支持 C++ 的部署。
|
||||
|
||||
- [C++部署](cpp)
|
||||
38
examples/vision/segmentation/paddleseg/rv1126/cpp/CMakeLists.txt
Executable file
@@ -0,0 +1,38 @@
|
||||
PROJECT(infer_demo C CXX)
|
||||
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
|
||||
|
||||
# 指定下载解压后的fastdeploy库路径
|
||||
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
|
||||
|
||||
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
|
||||
|
||||
# 添加FastDeploy依赖头文件
|
||||
include_directories(${FASTDEPLOY_INCS})
|
||||
include_directories(${FastDeploy_INCLUDE_DIRS})
|
||||
|
||||
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
|
||||
# 添加FastDeploy库依赖
|
||||
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
|
||||
|
||||
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/build/install)
|
||||
|
||||
install(TARGETS infer_demo DESTINATION ./)
|
||||
|
||||
install(DIRECTORY models DESTINATION ./)
|
||||
install(DIRECTORY images DESTINATION ./)
|
||||
# install(DIRECTORY run_with_adb.sh DESTINATION ./)
|
||||
|
||||
file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
|
||||
install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
|
||||
install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
|
||||
install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
|
||||
install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
|
||||
|
||||
file(GLOB ADB_TOOLS run_with_adb.sh)
|
||||
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)
|
||||
54
examples/vision/segmentation/paddleseg/rv1126/cpp/README.md
Executable file
@@ -0,0 +1,54 @@
|
||||
# PP-LiteSeg 量化模型 C++ 部署示例
|
||||
|
||||
本目录下提供的 `infer.cc`,可以帮助用户快速完成 PP-LiteSeg 量化模型在 RV1126 上的部署推理加速。
|
||||
|
||||
## 部署准备
|
||||
### FastDeploy 交叉编译环境准备
|
||||
- 1. 软硬件环境满足要求,以及交叉编译环境的准备,请参考:[FastDeploy 交叉编译环境准备](../../../../../../docs/cn/build_and_install/rv1126.md#交叉编译环境搭建)
|
||||
|
||||
### 模型准备
|
||||
- 1. 用户可以直接使用由 FastDeploy 提供的量化模型进行部署。
|
||||
- 2. 用户可以使用 FastDeploy 提供的一键模型自动化压缩工具,自行进行模型量化, 并使用产出的量化模型进行部署.(注意: 推理量化后的分类模型仍然需要FP32模型文件夹下的 deploy.yaml 文件, 自行量化的模型文件夹内不包含此 yaml 文件, 用户从FP32模型文件夹下复制此yaml文件到量化后的模型文件夹内即可.)
|
||||
- 更多量化相关信息可查阅[模型量化](../../quantize/README.md)
|
||||
|
||||
## 在 RV1126 上部署量化后的 PP-LiteSeg 分割模型
|
||||
请按照以下步骤完成在 RV1126 上部署 PP-LiteSeg 量化模型:
|
||||
1. 交叉编译 FastDeploy 库,具体请参考:[交叉编译 FastDeploy](../../../../../../docs/cn/build_and_install/rv1126.md#基于-paddlelite-的-fastdeploy-交叉编译库编译)
|
||||
|
||||
2. 将编译后的库拷贝到当前目录,可使用如下命令:
|
||||
```bash
|
||||
cp -r FastDeploy/build/fastdeploy-tmivx/ FastDeploy/examples/vision/segmentation/paddleseg/rv1126/cpp
|
||||
```
|
||||
|
||||
3. 在当前路径下载部署所需的模型和示例图片:
|
||||
```bash
|
||||
mkdir models && mkdir images
|
||||
wget https://bj.bcebos.com/fastdeploy/models/rk1/ppliteseg.tar.gz
|
||||
tar -xvf ppliteseg.tar.gz
|
||||
cp -r ppliteseg models
|
||||
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
|
||||
cp -r cityscapes_demo.png images
|
||||
```
|
||||
|
||||
4. 编译部署示例,可使用如下命令:
|
||||
```bash
|
||||
mkdir build && cd build
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-tmivx/timvx.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-tmivx ..
|
||||
make -j8
|
||||
make install
|
||||
# 成功编译之后,会生成 install 文件夹,里面有一个运行 demo 和部署所需的库
|
||||
```
|
||||
|
||||
5. 基于 adb 工具部署 PP-LiteSeg 分割模型到 Rockchip RV1126,可使用如下命令:
|
||||
```bash
|
||||
# 进入 install 目录
|
||||
cd FastDeploy/examples/vision/segmentation/paddleseg/rv1126/cpp/build/install/
|
||||
# 如下命令表示:bash run_with_adb.sh 需要运行的demo 模型路径 图片路径 设备的DEVICE_ID
|
||||
bash run_with_adb.sh infer_demo ppliteseg cityscapes_demo.png $DEVICE_ID
|
||||
```
|
||||
|
||||
部署成功后运行结果如下:
|
||||
|
||||
<img width="640" src="https://user-images.githubusercontent.com/30516196/205544166-9b2719ff-ed82-4908-b90a-095de47392e1.png">
|
||||
|
||||
需要特别注意的是,在 RV1126 上部署的模型需要是量化后的模型,模型的量化请参考:[模型量化](../../../../../../docs/cn/quantize.md)
|
||||
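After step 5 finishes, the demo writes vis_result.jpg into its working directory on the device. Assuming the default WORK_SPACE used by run_with_adb.sh, the visualization can be pulled back to the host for inspection:

```bash
# /data/local/tmp/test is the WORK_SPACE set in run_with_adb.sh.
adb -s $DEVICE_ID pull /data/local/tmp/test/vis_result.jpg .
```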
66
examples/vision/segmentation/paddleseg/rv1126/cpp/infer.cc
Executable file
@@ -0,0 +1,66 @@
|
||||
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "fastdeploy/vision.h"
|
||||
#ifdef WIN32
|
||||
const char sep = '\\';
|
||||
#else
|
||||
const char sep = '/';
|
||||
#endif
|
||||
|
||||
void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
|
||||
auto model_file = model_dir + sep + "model.pdmodel";
|
||||
auto params_file = model_dir + sep + "model.pdiparams";
|
||||
auto config_file = model_dir + sep + "deploy.yaml";
|
||||
auto subgraph_file = model_dir + sep + "subgraph.txt";
|
||||
|
||||
fastdeploy::RuntimeOption option;
|
||||
option.UseTimVX();
|
||||
option.SetLiteSubgraphPartitionPath(subgraph_file);
|
||||
|
||||
auto model = fastdeploy::vision::segmentation::PaddleSegModel(
|
||||
model_file, params_file, config_file,option);
|
||||
|
||||
assert(model.Initialized());
|
||||
|
||||
auto im = cv::imread(image_file);
|
||||
|
||||
fastdeploy::vision::SegmentationResult res;
|
||||
if (!model.Predict(im, &res)) {
|
||||
std::cerr << "Failed to predict." << std::endl;
|
||||
return;
|
||||
}
|
||||
|
||||
std::cout << res.Str() << std::endl;
|
||||
|
||||
auto vis_im = fastdeploy::vision::VisSegmentation(im, res, 0.5);
|
||||
cv::imwrite("vis_result.jpg", vis_im);
|
||||
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
if (argc < 3) {
|
||||
std::cout << "Usage: infer_demo path/to/quant_model "
|
||||
"path/to/image "
|
||||
"run_option, "
|
||||
"e.g ./infer_demo ./ResNet50_vd_quant ./test.jpeg"
|
||||
<< std::endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
std::string model_dir = argv[1];
|
||||
std::string test_image = argv[2];
|
||||
InitAndInfer(model_dir, test_image);
|
||||
return 0;
|
||||
}
|
||||
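On the device itself, the binary built from this source is invoked with a model directory and an image path, matching the usage string in main(). For the PP-LiteSeg model prepared in the README above, that looks roughly like the following, run inside the pushed work space:

```bash
# The shared libraries pushed by run_with_adb.sh sit next to the binary under ./lib.
export LD_LIBRARY_PATH=./lib:$LD_LIBRARY_PATH
./infer_demo ./models/ppliteseg ./images/cityscapes_demo.png
```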
47
examples/vision/segmentation/paddleseg/rv1126/cpp/run_with_adb.sh
Executable file
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
HOST_SPACE=${PWD}
|
||||
echo ${HOST_SPACE}
|
||||
WORK_SPACE=/data/local/tmp/test
|
||||
|
||||
# The first parameter represents the demo name
|
||||
DEMO_NAME=image_classification_demo
|
||||
if [ -n "$1" ]; then
|
||||
DEMO_NAME=$1
|
||||
fi
|
||||
|
||||
# The second parameter represents the model name
|
||||
MODEL_NAME=mobilenet_v1_fp32_224
|
||||
if [ -n "$2" ]; then
|
||||
MODEL_NAME=$2
|
||||
fi
|
||||
|
||||
# The third parameter indicates the name of the image to be tested
|
||||
IMAGE_NAME=0001.jpg
|
||||
if [ -n "$3" ]; then
|
||||
IMAGE_NAME=$3
|
||||
fi
|
||||
|
||||
# The fourth parameter represents the ID of the device
|
||||
ADB_DEVICE_NAME=
|
||||
if [ -n "$4" ]; then
|
||||
ADB_DEVICE_NAME="-s $4"
|
||||
fi
|
||||
|
||||
# Set the environment variables required during the running process
|
||||
EXPORT_ENVIRONMENT_VARIABLES="export GLOG_v=5; export VIV_VX_ENABLE_GRAPH_TRANSFORM=-pcq:1; export VIV_VX_SET_PER_CHANNEL_ENTROPY=100; export TIMVX_BATCHNORM_FUSION_MAX_ALLOWED_QUANT_SCALE_DEVIATION=300000; export VSI_NN_LOG_LEVEL=5;"
|
||||
|
||||
EXPORT_ENVIRONMENT_VARIABLES="${EXPORT_ENVIRONMENT_VARIABLES}export LD_LIBRARY_PATH=${WORK_SPACE}/lib:\$LD_LIBRARY_PATH;"
|
||||
|
||||
# Please install adb, and DON'T run this in the docker.
|
||||
set -e
|
||||
adb $ADB_DEVICE_NAME shell "rm -rf $WORK_SPACE"
|
||||
adb $ADB_DEVICE_NAME shell "mkdir -p $WORK_SPACE"
|
||||
|
||||
# Upload the demo, librarys, model and test images to the device
|
||||
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/lib $WORK_SPACE
|
||||
adb $ADB_DEVICE_NAME push ${HOST_SPACE}/${DEMO_NAME} $WORK_SPACE
|
||||
adb $ADB_DEVICE_NAME push models $WORK_SPACE
|
||||
adb $ADB_DEVICE_NAME push images $WORK_SPACE
|
||||
|
||||
# Execute the deployment demo
|
||||
adb $ADB_DEVICE_NAME shell "cd $WORK_SPACE; ${EXPORT_ENVIRONMENT_VARIABLES} chmod +x ./${DEMO_NAME}; ./${DEMO_NAME} ./models/${MODEL_NAME} ./images/$IMAGE_NAME"
|
||||
@@ -72,7 +72,7 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if(option_.enable_timvx){
|
||||
if(option_.enable_timvx) {
|
||||
config_.set_nnadapter_device_names({"verisilicon_timvx"});
|
||||
valid_places.push_back(
|
||||
paddle::lite_api::Place{TARGET(kNNAdapter), PRECISION(kInt8)});
|
||||
@@ -231,6 +231,14 @@ bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
|
||||
tensor->CopyFromCpu<uint8_t, paddle::lite_api::TargetType::kARM>(
|
||||
reinterpret_cast<const uint8_t*>(const_cast<void*>(
|
||||
inputs[i].CpuData())));
|
||||
} else if (inputs[i].dtype == FDDataType::INT64) {
|
||||
#ifdef __aarch64__
|
||||
tensor->CopyFromCpu<int64_t, paddle::lite_api::TargetType::kARM>(
|
||||
reinterpret_cast<const int64_t*>(const_cast<void*>(
|
||||
inputs[i].CpuData())));
|
||||
#else
|
||||
FDASSERT(false, "FDDataType::INT64 is not support for Arm v7 now!");
|
||||
#endif
|
||||
} else {
|
||||
FDASSERT(false, "Unexpected data type of %d.", inputs[i].dtype);
|
||||
}
|
||||
|
||||
@@ -230,7 +230,7 @@ bool UIEModel::Initialize() {
|
||||
|
||||
void UIEModel::SetValidBackend() {
|
||||
// TODO(zhoushunjie): Add lite backend in future
|
||||
valid_cpu_backends = {Backend::ORT, Backend::OPENVINO, Backend::PDINFER};
|
||||
valid_cpu_backends = {Backend::ORT, Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
|
||||
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
|
||||
}
|
||||
|
||||
|
||||
@@ -27,6 +27,7 @@ YOLOv5::YOLOv5(const std::string& model_file, const std::string& params_file,
|
||||
} else {
|
||||
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
|
||||
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
|
||||
valid_timvx_backends = {Backend::LITE};
|
||||
}
|
||||
runtime_option = custom_option;
|
||||
runtime_option.model_format = model_format;
|
||||
|
||||
1
fastdeploy/vision/detection/ppdet/model.h
Normal file → Executable file
@@ -64,6 +64,7 @@ class FASTDEPLOY_DECL PPYOLOE : public PPDetBase {
|
||||
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT,
|
||||
Backend::PDINFER, Backend::LITE};
|
||||
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
|
||||
valid_timvx_backends = {Backend::LITE};
|
||||
initialized = Initialize();
|
||||
}
|
||||
|
||||
|
||||
1
fastdeploy/vision/segmentation/ppseg/model.cc
Normal file → Executable file
@@ -27,6 +27,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
|
||||
valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::ORT, Backend::LITE};
|
||||
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
|
||||
valid_rknpu_backends = {Backend::RKNPU2};
|
||||
valid_timvx_backends = {Backend::LITE};
|
||||
runtime_option = custom_option;
|
||||
runtime_option.model_format = model_format;
|
||||
runtime_option.model_file = model_file;
|
||||
|
||||
@@ -25,6 +25,8 @@ namespace fastdeploy {
|
||||
namespace vision {
|
||||
|
||||
#ifdef __ARM_NEON
|
||||
static constexpr int VIS_SEG_OMP_NUM_THREADS=2;
|
||||
|
||||
static inline void QuantizeBlendingWeight8(
|
||||
float weight, uint8_t* old_multi_factor, uint8_t* new_multi_factor) {
|
||||
// Quantize the weight to boost blending performance.
|
||||
@@ -53,7 +55,8 @@ static cv::Mat FastVisSegmentationNEON(
|
||||
const uint8_t *im_ptr = static_cast<const uint8_t*>(im.data);
|
||||
|
||||
if (!quantize_weight) {
|
||||
#pragma omp parallel for num_threads(2) schedule(static)
|
||||
#pragma omp parallel for proc_bind(close) \
|
||||
num_threads(VIS_SEG_OMP_NUM_THREADS) schedule(static)
|
||||
for (int i = 0; i < size - 15; i += 16) {
|
||||
uint8x16_t labelx16 = vld1q_u8(label_ptr + i); // 16 bytes
|
||||
// e.g 0b00000001 << 7 -> 0b10000000 128;
|
||||
@@ -87,7 +90,8 @@ static cv::Mat FastVisSegmentationNEON(
|
||||
|
||||
if (new_multi_factor == 8) {
|
||||
// Only keep mask, no need to blending with origin image.
|
||||
#pragma omp parallel for num_threads(2) schedule(static)
|
||||
#pragma omp parallel for proc_bind(close) \
|
||||
num_threads(VIS_SEG_OMP_NUM_THREADS) schedule(static)
|
||||
for (int i = 0; i < size - 15; i += 16) {
|
||||
uint8x16_t labelx16 = vld1q_u8(label_ptr + i); // 16 bytes
|
||||
// e.g 0b00000001 << 7 -> 0b10000000 128;
|
||||
@@ -112,7 +116,8 @@ static cv::Mat FastVisSegmentationNEON(
|
||||
uint8x16_t old_mulx16 = vdupq_n_u8(old_multi_factor);
|
||||
uint8x16_t new_mulx16 = vdupq_n_u8(new_multi_factor);
|
||||
// Blend the two colors together with quantize 'weight'.
|
||||
#pragma omp parallel for num_threads(2) schedule(static)
|
||||
#pragma omp parallel for proc_bind(close) \
|
||||
num_threads(VIS_SEG_OMP_NUM_THREADS) schedule(static)
|
||||
for (int i = 0; i < size - 15; i += 16) {
|
||||
uint8x16x3_t bgrx16x3 = vld3q_u8(im_ptr + i * 3); // 48 bytes
|
||||
uint8x16_t labelx16 = vld1q_u8(label_ptr + i); // 16 bytes
|
||||
|
||||
79
scripts/android/build_android_aar.sh
Executable file
@@ -0,0 +1,79 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
set +x
|
||||
|
||||
PACKAGE_VERSION=$1
|
||||
FASTDEPLOY_DIR=$(pwd)
|
||||
BUILT_PACKAGE_DIR=build/Android
|
||||
CXX_PACKAGE_PREFIX=fastdeploy-android-${PACKAGE_VERSION}-shared
|
||||
CXX_PACKAGE_NAME=${BUILT_PACKAGE_DIR}/${CXX_PACKAGE_PREFIX}
|
||||
ARMV8_CXX_PACKAGE_NAME=${BUILT_PACKAGE_DIR}/arm64-v8a-api-21/install
|
||||
ARMV7_CXX_PACKAGE_NAME=${BUILT_PACKAGE_DIR}/armeabi-v7a-api-21/install
|
||||
|
||||
# check package name
|
||||
echo "[INFO] --- FASTDEPLOY_DIR: ${FASTDEPLOY_DIR}"
|
||||
if [ "$PACKAGE_VERSION" = "dev" ]; then
|
||||
CXX_PACKAGE_PREFIX=fastdeploy-android-latest-shared-dev
|
||||
CXX_PACKAGE_NAME=${BUILT_PACKAGE_DIR}/fastdeploy-android-latest-shared-dev
|
||||
fi
|
||||
|
||||
# check arm v7 & v8 c++ sdk
|
||||
if [ ! -d "${BUILT_PACKAGE_DIR}" ]; then
|
||||
echo "[ERROR] --- ${BUILT_PACKAGE_DIR} not exist, please build c++ sdk first!"
|
||||
exit 0
|
||||
fi
|
||||
if [ ! -d "${ARMV8_CXX_PACKAGE_NAME}" ]; then
|
||||
echo "[ERROR] --- ${ARMV8_CXX_PACKAGE_NAME} not exist, please build c++ sdk first!"
|
||||
exit 0
|
||||
fi
|
||||
if [ ! -d "${ARMV7_CXX_PACKAGE_NAME}" ]; then
|
||||
echo "[ERROR] --- ${ARMV7_CXX_PACKAGE_NAME} not exist, please build c++ sdk first!"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# remove old package
|
||||
echo "[INFO] --- Packing ${CXX_PACKAGE_NAME} package ..."
|
||||
if [ -d "${CXX_PACKAGE_NAME}" ]; then
|
||||
echo "[INFO] --- Removed old package done !"
|
||||
rm ${CXX_PACKAGE_NAME}.tgz
|
||||
rm -rf ${CXX_PACKAGE_NAME}
|
||||
fi
|
||||
|
||||
# package latest c++ sdk
|
||||
mkdir ${CXX_PACKAGE_NAME}
|
||||
echo "[INFO] --- Collecting package contents ..."
|
||||
cp -r ${ARMV7_CXX_PACKAGE_NAME}/* ${CXX_PACKAGE_NAME}/
|
||||
cp -r ${ARMV8_CXX_PACKAGE_NAME}/* ${CXX_PACKAGE_NAME}/
|
||||
rm -rf ${CXX_PACKAGE_NAME}/examples
|
||||
echo "[INFO] --- Removed examples files ..."
|
||||
echo "[INFO] --- Removing static .a files: "
|
||||
find ${CXX_PACKAGE_NAME}/third_libs/install/ -name "*.a"
|
||||
rm $(find ${CXX_PACKAGE_NAME}/third_libs/install/ -name "*.a")
|
||||
echo "[INFO] --- Taring ${CXX_PACKAGE_NAME}.tgz package ..."
|
||||
tar -zcvf ${CXX_PACKAGE_NAME}.tgz ${CXX_PACKAGE_NAME}/* >> ${BUILT_PACKAGE_DIR}/pkg.log 2>&1
|
||||
echo "[INFO] --- Package ${CXX_PACKAGE_NAME}.tgz done ! Package size info: "
|
||||
du -sh ${BUILT_PACKAGE_DIR}/* | grep ${CXX_PACKAGE_PREFIX}
|
||||
|
||||
# update c++ sdk to jni lib
|
||||
echo "[INFO] --- Update c++ sdk for jni lib ..."
|
||||
JAVA_ANDROID_DIR=${FASTDEPLOY_DIR}/java/android
|
||||
JNI_LIB_DIR=${JAVA_ANDROID_DIR}/fastdeploy
|
||||
CXX_LIB_FOR_JNI_DIR=${JNI_LIB_DIR}/libs/${CXX_PACKAGE_PREFIX}
|
||||
if [ -d "${CXX_LIB_FOR_JNI_DIR}" ]; then
|
||||
rm -rf ${CXX_LIB_FOR_JNI_DIR}
|
||||
echo "[INFO] --- Remove old ${CXX_LIB_FOR_JNI_DIR} done!"
|
||||
fi
|
||||
cp -r ${CXX_PACKAGE_NAME} ${JNI_LIB_DIR}/libs
|
||||
echo "[INFO] --- Update ${CXX_LIB_FOR_JNI_DIR} done!"
|
||||
|
||||
# build java aar package
|
||||
cd ${JAVA_ANDROID_DIR}
|
||||
echo "[INFO] --- JAVA_ANDROID_DIR: ${JAVA_ANDROID_DIR}"
|
||||
echo "[INFO] --- Building java aar package ... "
|
||||
chmod +x gradlew
|
||||
./gradlew fastdeploy:assembleDebug
|
||||
echo "[INFO] --- Built java aar package!"
|
||||
ls -lh ${JNI_LIB_DIR}/build/outputs/aar/
|
||||
|
||||
# Usage:
|
||||
# ./scripts/android/build_android_aar.sh dev
|
||||
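This packaging script assumes the shared C++ SDKs for both ABIs already exist under build/Android; based on the usage notes in these scripts, a typical end-to-end sequence is:

```bash
# Build the shared C++ SDK for both ABIs, then bundle them into the AAR.
./scripts/android/build_android_cpp.sh arm64-v8a 21
./scripts/android/build_android_cpp.sh armeabi-v7a 21
./scripts/android/build_android_aar.sh dev
```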
37
scripts/android/build_android_app.sh
Executable file
@@ -0,0 +1,37 @@
|
||||
# build java aar package
|
||||
FASTDEPLOY_DIR=$(pwd)
|
||||
JAVA_ANDROID_DIR=${FASTDEPLOY_DIR}/java/android
|
||||
JNI_LIB_DIR=${JAVA_ANDROID_DIR}/fastdeploy
|
||||
AAR_DEBUG_PACKAGE=${JNI_LIB_DIR}/build/outputs/aar/fastdeploy-debug.aar
|
||||
APP_DIR=${JAVA_ANDROID_DIR}/app
|
||||
APP_LIBS_DIR=${APP_DIR}/libs
|
||||
|
||||
cd ${JAVA_ANDROID_DIR}
|
||||
# check aar package
|
||||
echo "[INFO] --- JAVA_ANDROID_DIR: ${JAVA_ANDROID_DIR}"
|
||||
if [ ! -d "${JNI_LIB_DIR}/build/outputs/aar" ]; then
|
||||
echo "-- [ERROR] ${JNI_LIB_DIR} not exists, please build aar package first!"
|
||||
exit 0
|
||||
fi
|
||||
ls -lh ${JNI_LIB_DIR}/build/outputs/aar/
|
||||
if [ ! -d "${APP_LIBS_DIR}" ]; then
|
||||
mkdir -p "${APP_LIBS_DIR}" && echo "-- [INFO] Created ${APP_LIBS_DIR} !"
|
||||
fi
|
||||
# update aar package
|
||||
echo "[INFO] --- Update aar package ..."
|
||||
if [ -f "${APP_LIBS_DIR}/fastdeploy-android-sdk-latest-dev.aar" ]; then
|
||||
rm -f "${APP_LIBS_DIR}/fastdeploy-android-sdk-latest-dev.aar"
|
||||
echo "[INFO] --- Removed old aar package: ${APP_LIBS_DIR}/fastdeploy-android-sdk-latest-dev.aar"
|
||||
fi
|
||||
cp ${AAR_DEBUG_PACKAGE} ${APP_LIBS_DIR}/fastdeploy-android-sdk-latest-dev.aar
|
||||
if [ -f "${APP_LIBS_DIR}/fastdeploy-android-sdk-latest-dev.aar" ]; then
|
||||
echo "[INFO] --- Update aar package done!"
|
||||
fi
|
||||
# build android app
|
||||
echo "[INFO] --- Building FastDeploy Android app ..."
|
||||
chmod +x gradlew
|
||||
./gradlew app:assembleDebug
|
||||
echo "[INFO] --- Built FastDeploy Android app."
|
||||
|
||||
# Usage:
|
||||
# ./scripts/android/build_android_app.sh
|
||||
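Since this script copies the fastdeploy-debug.aar produced by the Gradle build, it expects the AAR step to have run already; the two steps chain together as:

```bash
./scripts/android/build_android_aar.sh dev
./scripts/android/build_android_app.sh
```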
118
scripts/android/build_android_cpp.sh
Executable file
@@ -0,0 +1,118 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
set +x
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
# mutable global variables
|
||||
# -------------------------------------------------------------------------------
|
||||
TOOLCHAIN=clang # gcc/clang toolchain
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
# readonly global variables
|
||||
# -------------------------------------------------------------------------------
|
||||
readonly ROOT_PATH=$(pwd)
|
||||
readonly ANDROID_ABI=$1
|
||||
readonly ANDROID_PLATFORM="android-$2"
|
||||
readonly BUILD_ROOT=build/Android
|
||||
readonly BUILD_DIR=${BUILD_ROOT}/${ANDROID_ABI}-api-$2
|
||||
|
||||
# -------------------------------------------------------------------------------
|
||||
# tasks
|
||||
# -------------------------------------------------------------------------------
|
||||
__make_build_dir() {
|
||||
if [ ! -d "${BUILD_DIR}" ]; then
|
||||
echo "-- [INFO] BUILD_DIR: ${BUILD_DIR} not exists, setup manually ..."
|
||||
if [ ! -d "${BUILD_ROOT}" ]; then
|
||||
mkdir -p "${BUILD_ROOT}" && echo "-- [INFO] Created ${BUILD_ROOT} !"
|
||||
fi
|
||||
mkdir -p "${BUILD_DIR}" && echo "-- [INFO] Created ${BUILD_DIR} !"
|
||||
else
|
||||
echo "-- [INFO] Found BUILD_DIR: ${BUILD_DIR}"
|
||||
fi
|
||||
}
|
||||
|
||||
__check_cxx_envs() {
|
||||
if [ $LDFLAGS ]; then
|
||||
echo "-- [INFO] Found LDFLAGS: ${LDFLAGS}, \c"
|
||||
echo "unset it before crossing compiling ${ANDROID_ABI}"
|
||||
unset LDFLAGS
|
||||
fi
|
||||
if [ $CPPFLAGS ]; then
|
||||
echo "-- [INFO] Found CPPFLAGS: ${CPPFLAGS}, \c"
|
||||
echo "unset it before crossing compiling ${ANDROID_ABI}"
|
||||
unset CPPFLAGS
|
||||
fi
|
||||
if [ $CPLUS_INCLUDE_PATH ]; then
|
||||
echo "-- [INFO] Found CPLUS_INCLUDE_PATH: ${CPLUS_INCLUDE_PATH}, \c"
|
||||
echo "unset it before crossing compiling ${ANDROID_ABI}"
|
||||
unset CPLUS_INCLUDE_PATH
|
||||
fi
|
||||
if [ $C_INCLUDE_PATH ]; then
|
||||
echo "-- [INFO] Found C_INCLUDE_PATH: ${C_INCLUDE_PATH}, \c"
|
||||
echo "unset it before crossing compiling ${ANDROID_ABI}"
|
||||
unset C_INCLUDE_PATH
|
||||
fi
|
||||
}
|
||||
|
||||
__set_android_ndk() {
|
||||
if [ -z $ANDROID_NDK ]; then
|
||||
echo "-- [INFO] ANDROID_NDK not exists, please setup manually ..."
|
||||
exit 0
|
||||
else
|
||||
echo "-- [INFO] Found ANDROID_NDK: ${ANDROID_NDK}"
|
||||
fi
|
||||
if [ "$ANDROID_NDK" ]; then
|
||||
NDK_VERSION=$(echo $ANDROID_NDK | egrep -o "[0-9]{2}" | head -n 1)
|
||||
if [ "$NDK_VERSION" -gt 17 ]; then
|
||||
TOOLCHAIN=clang
|
||||
fi
|
||||
echo "-- [INFO] Checked ndk version: ${NDK_VERSION}"
|
||||
echo "-- [INFO] Selected toolchain: ${TOOLCHAIN}"
|
||||
fi
|
||||
}
|
||||
|
||||
__build_fastdeploy_android_shared() {
|
||||
|
||||
local ANDROID_STL=c++_shared # c++_static
|
||||
local ANDROID_TOOLCHAIN=${TOOLCHAIN}
|
||||
local TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake
|
||||
local FASDEPLOY_INSTALL_DIR="${ROOT_PATH}/${BUILD_DIR}/install"
|
||||
cd "${BUILD_DIR}" && echo "-- [INFO] Working Dir: ${PWD}"
|
||||
|
||||
cmake -DCMAKE_TOOLCHAIN_FILE=${TOOLCHAIN_FILE} \
|
||||
-DCMAKE_BUILD_TYPE=MinSizeRel \
|
||||
-DANDROID_ABI=${ANDROID_ABI} \
|
||||
-DANDROID_NDK=${ANDROID_NDK} \
|
||||
-DANDROID_PLATFORM=${ANDROID_PLATFORM} \
|
||||
-DANDROID_STL=${ANDROID_STL} \
|
||||
-DANDROID_TOOLCHAIN=${ANDROID_TOOLCHAIN} \
|
||||
-DENABLE_ORT_BACKEND=OFF \
|
||||
-DENABLE_LITE_BACKEND=ON \
|
||||
-DENABLE_PADDLE_FRONTEND=OFF \
|
||||
-DENABLE_FLYCV=ON \
|
||||
-DENABLE_TEXT=ON \
|
||||
-DENABLE_VISION=ON \
|
||||
-DENABLE_VISION_VISUALIZE=ON \
|
||||
-DBUILD_EXAMPLES=ON \
|
||||
-DWITH_OPENCV_STATIC=OFF \
|
||||
-DWITH_LITE_STATIC=OFF \
|
||||
-DWITH_OPENMP=OFF \
|
||||
-DCMAKE_INSTALL_PREFIX=${FASDEPLOY_INSTALL_DIR} \
|
||||
-Wno-dev ../../.. && make -j8 && make install
|
||||
|
||||
echo "-- [INFO][built][${ANDROID_ABI}][${BUILD_DIR}/install]"
|
||||
}
|
||||
|
||||
main() {
|
||||
__make_build_dir
|
||||
__check_cxx_envs
|
||||
__set_android_ndk
|
||||
__build_fastdeploy_android_shared
|
||||
exit 0
|
||||
}
|
||||
|
||||
main
|
||||
|
||||
# Usage:
|
||||
# ./scripts/android/build_android_cpp.sh arm64-v8a 21
|
||||
# ./scripts/android/build_android_cpp.sh armeabi-v7a 21
|
||||