diff --git a/CMakeLists.txt b/CMakeLists.txt
index f6e7c8039..549d7b708 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -66,7 +66,7 @@ option(ENABLE_TEXT "Whether to enable text models usage." OFF)
option(ENABLE_FLYCV "Whether to enable flycv to boost image preprocess." OFF)
option(WITH_ASCEND "Whether to compile for Huawei Ascend deploy." OFF)
option(WITH_TIMVX "Whether to compile for TIMVX deploy." OFF)
-option(WITH_XPU "Whether to compile for KunlunXin XPU deploy." OFF)
+option(WITH_KUNLUNXIN "Whether to compile for KunlunXin XPU deploy." OFF)
option(WITH_TESTING "Whether to compile with unittest." OFF)
############################# Options for Android cross compiling #########################
option(WITH_OPENCV_STATIC "Use OpenCV static lib for Android." OFF)
@@ -148,12 +148,12 @@ if (WITH_ASCEND)
include(${PROJECT_SOURCE_DIR}/cmake/ascend.cmake)
endif()
-if (WITH_XPU)
+if (WITH_KUNLUNXIN)
if(NOT ENABLE_LITE_BACKEND)
set(ENABLE_LITE_BACKEND ON)
endif()
if(NOT CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64")
- message(FATAL_ERROR "XPU is only supported on Linux x64 platform")
+ message(FATAL_ERROR "KunlunXin XPU is only supported on Linux x64 platform")
endif()
if(NOT PADDLELITE_URL)
set(PADDLELITE_URL "https://bj.bcebos.com/fastdeploy/third_libs/lite-linux-x64-xpu-20221215.tgz")
diff --git a/FastDeploy.cmake.in b/FastDeploy.cmake.in
index e148e5121..456a4d321 100755
--- a/FastDeploy.cmake.in
+++ b/FastDeploy.cmake.in
@@ -27,7 +27,7 @@ set(OPENCV_DIRECTORY "@OPENCV_DIRECTORY@")
set(ORT_DIRECTORY "@ORT_DIRECTORY@")
set(OPENVINO_DIRECTORY "@OPENVINO_DIRECTORY@")
set(RKNN2_TARGET_SOC "@RKNN2_TARGET_SOC@")
-set(WITH_XPU @WITH_XPU@)
+set(WITH_KUNLUNXIN @WITH_KUNLUNXIN@)
set(FASTDEPLOY_LIBS "")
set(FASTDEPLOY_INCS "")
@@ -246,7 +246,7 @@ if(ENABLE_PADDLE_FRONTEND)
list(APPEND FASTDEPLOY_LIBS ${PADDLE2ONNX_LIB})
endif()
-if(WITH_XPU)
+if(WITH_KUNLUNXIN)
list(APPEND FASTDEPLOY_LIBS -lpthread -lrt -ldl)
endif()
diff --git a/cmake/flycv.cmake b/cmake/flycv.cmake
index 0836aba54..d1e63d104 100755
--- a/cmake/flycv.cmake
+++ b/cmake/flycv.cmake
@@ -84,6 +84,8 @@ else()
else()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
set(FLYCV_FILE "flycv-linux-aarch64-${FLYCV_VERSION}.tgz")
+ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
+ set(FLYCV_FILE "flycv-linux-armhf-${FLYCV_VERSION}.tgz")
else()
set(FLYCV_FILE "flycv-linux-x64-${FLYCV_VERSION}.tgz")
endif()
diff --git a/cmake/rknpu2.cmake b/cmake/rknpu2.cmake
index e8ed424be..7f11b0bfb 100644
--- a/cmake/rknpu2.cmake
+++ b/cmake/rknpu2.cmake
@@ -10,12 +10,6 @@ download_and_decompress(${RKNPU2_URL} ${CMAKE_CURRENT_BINARY_DIR}/${RKNPU2_FILE}
# set path
set(RKNPU_RUNTIME_PATH ${THIRD_PARTY_PATH}/install/rknpu2_runtime)
-if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
-else ()
- message(FATAL_ERROR "[rknpu2.cmake] Only support build rknpu2 in Linux")
-endif ()
-
-
if (EXISTS ${RKNPU_RUNTIME_PATH})
set(RKNN_RT_LIB ${RKNPU_RUNTIME_PATH}/${RKNN2_TARGET_SOC}/lib/librknnrt.so)
include_directories(${RKNPU_RUNTIME_PATH}/${RKNN2_TARGET_SOC}/include)
diff --git a/cmake/summary.cmake b/cmake/summary.cmake
index 92127c880..85c829e04 100755
--- a/cmake/summary.cmake
+++ b/cmake/summary.cmake
@@ -39,7 +39,7 @@ function(fastdeploy_summary)
message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")
message(STATUS " WITH_ASCEND : ${WITH_ASCEND}")
message(STATUS " WITH_TIMVX : ${WITH_TIMVX}")
- message(STATUS " WITH_XPU : ${WITH_XPU}")
+ message(STATUS " WITH_KUNLUNXIN : ${WITH_KUNLUNXIN}")
if(ENABLE_ORT_BACKEND)
message(STATUS " ONNXRuntime version : ${ONNXRUNTIME_VERSION}")
endif()
diff --git a/docs/README.md b/docs/README.md
index 2e06d0762..048d74d09 100755
--- a/docs/README.md
+++ b/docs/README.md
@@ -8,7 +8,7 @@
- [Build and Install FastDeploy Library on GPU Platform](en/build_and_install/gpu.md)
- [Build and Install FastDeploy Library on CPU Platform](en/build_and_install/cpu.md)
- [Build and Install FastDeploy Library on IPU Platform](en/build_and_install/ipu.md)
-- [Build and Install FastDeploy Library on KunlunXin XPU Platform](en/build_and_install/xpu.md)
+- [Build and Install FastDeploy Library on KunlunXin XPU Platform](en/build_and_install/kunlunxin.md)
- [Build and Install on RV1126 Platform](en/build_and_install/rv1126.md)
- [Build and Install on RK3588 and RK356X Platform](en/build_and_install/rknpu2.md)
- [Build and Install on A311D Platform](en/build_and_install/a311d.md)
diff --git a/docs/README_CN.md b/docs/README_CN.md
index 7277a1457..da0c7b8a9 100755
--- a/docs/README_CN.md
+++ b/docs/README_CN.md
@@ -8,7 +8,7 @@
- [GPU部署环境编译安装](cn/build_and_install/gpu.md)
- [CPU部署环境编译安装](cn/build_and_install/cpu.md)
- [IPU部署环境编译安装](cn/build_and_install/ipu.md)
-- [昆仑芯XPU部署环境编译安装](cn/build_and_install/xpu.md)
+- [昆仑芯XPU部署环境编译安装](cn/build_and_install/kunlunxin.md)
- [瑞芯微RV1126部署环境编译安装](cn/build_and_install/rv1126.md)
- [瑞芯微RK3588部署环境编译安装](cn/build_and_install/rknpu2.md)
- [晶晨A311D部署环境编译安装](cn/build_and_install/a311d.md)
diff --git a/docs/README_EN.md b/docs/README_EN.md
index 5c959ba7f..c44c3d5f4 100755
--- a/docs/README_EN.md
+++ b/docs/README_EN.md
@@ -8,7 +8,7 @@
- [Build and Install FastDeploy Library on GPU Platform](en/build_and_install/gpu.md)
- [Build and Install FastDeploy Library on CPU Platform](en/build_and_install/cpu.md)
- [Build and Install FastDeploy Library on IPU Platform](en/build_and_install/ipu.md)
-- [Build and Install FastDeploy Library on KunlunXin XPU Platform](en/build_and_install/xpu.md)
+- [Build and Install FastDeploy Library on KunlunXin XPU Platform](en/build_and_install/kunlunxin.md)
- [Build and Install on RV1126 Platform](en/build_and_install/rv1126.md)
- [Build and Install on RK3588 Platform](en/build_and_install/rknpu2.md)
- [Build and Install on A311D Platform](en/build_and_install/a311d.md)
diff --git a/docs/cn/build_and_install/README.md b/docs/cn/build_and_install/README.md
index caddc5190..0ca92592a 100755
--- a/docs/cn/build_and_install/README.md
+++ b/docs/cn/build_and_install/README.md
@@ -14,7 +14,7 @@
- [瑞芯微RV1126部署环境](rv1126.md)
- [瑞芯微RK3588部署环境](rknpu2.md)
- [晶晨A311D部署环境](a311d.md)
-- [昆仑芯XPU部署环境](xpu.md)
+- [昆仑芯XPU部署环境](kunlunxin.md)
- [华为昇腾部署环境](huawei_ascend.md)
@@ -27,7 +27,7 @@
| ENABLE_LITE_BACKEND | 默认OFF,是否编译集成Paddle Lite后端(编译Android库时需要设置为ON) |
| ENABLE_RKNPU2_BACKEND | 默认OFF,是否编译集成RKNPU2后端(RK3588/RK3568/RK3566上推荐打开) |
| WITH_ASCEND | 默认OFF,当在华为昇腾NPU上部署时, 需要设置为ON |
-| WITH_XPU | 默认OFF,当在昆仑芯XPU上部署时,需设置为ON |
+| WITH_KUNLUNXIN | 默认OFF,当在昆仑芯XPU上部署时,需设置为ON |
| WITH_TIMVX | 默认OFF,需要在RV1126/RV1109/A311D上部署时,需设置为ON |
| ENABLE_TRT_BACKEND | 默认OFF,是否编译集成TensorRT后端(GPU上推荐打开) |
| ENABLE_OPENVINO_BACKEND | 默认OFF,是否编译集成OpenVINO后端(CPU上推荐打开) |
diff --git a/docs/cn/build_and_install/xpu.md b/docs/cn/build_and_install/kunlunxin.md
similarity index 90%
rename from docs/cn/build_and_install/xpu.md
rename to docs/cn/build_and_install/kunlunxin.md
index a53ea4e06..a5e9e9d06 100755
--- a/docs/cn/build_and_install/xpu.md
+++ b/docs/cn/build_and_install/kunlunxin.md
@@ -1,4 +1,4 @@
-[English](../../en/build_and_install/xpu.md) | 简体中文
+[English](../../en/build_and_install/kunlunxin.md) | 简体中文
# 昆仑芯 XPU 部署环境编译安装
@@ -10,7 +10,7 @@ FastDeploy 基于 Paddle Lite 后端支持在昆仑芯 XPU 上进行部署推理
相关编译选项说明如下:
|编译选项|默认值|说明|备注|
|:---|:---|:---|:---|
-| WITH_XPU| OFF | 需要在XPU上部署时需要设置为ON | - |
+| WITH_KUNLUNXIN | OFF | 在昆仑芯XPU上部署时需设置为ON | - |
| ENABLE_ORT_BACKEND | OFF | 是否编译集成ONNX Runtime后端 | - |
| ENABLE_PADDLE_BACKEND | OFF | 是否编译集成Paddle Inference后端 | - |
| ENABLE_OPENVINO_BACKEND | OFF | 是否编译集成OpenVINO后端 | - |
@@ -41,11 +41,11 @@ cd FastDeploy
mkdir build && cd build
# CMake configuration with KunlunXin xpu toolchain
-cmake -DWITH_XPU=ON \
+cmake -DWITH_KUNLUNXIN=ON \
-DWITH_GPU=OFF \ # 不编译 GPU
-DENABLE_ORT_BACKEND=ON \ # 可选择开启 ORT 后端
-DENABLE_PADDLE_BACKEND=ON \ # 可选择开启 Paddle 后端
- -DCMAKE_INSTALL_PREFIX=fastdeploy-xpu \
+ -DCMAKE_INSTALL_PREFIX=fastdeploy-kunlunxin \
-DENABLE_VISION=ON \ # 是否编译集成视觉模型的部署模块,可选择开启
-DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \
..
@@ -54,14 +54,14 @@ cmake -DWITH_XPU=ON \
make -j8
make install
```
-编译完成之后,会生成 fastdeploy-xpu 目录,表示基于 Paddle Lite 的 FastDeploy 库编译完成。
+编译完成之后,会生成 fastdeploy-kunlunxin 目录,表示基于 Paddle Lite 的 FastDeploy 库编译完成。
## Python 编译
编译命令如下:
```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
-export WITH_XPU=ON
+export WITH_KUNLUNXIN=ON
export WITH_GPU=OFF
export ENABLE_ORT_BACKEND=ON
export ENABLE_PADDLE_BACKEND=ON
diff --git a/docs/cn/build_and_install/rv1126.md b/docs/cn/build_and_install/rv1126.md
index 95627045d..159ac00e4 100755
--- a/docs/cn/build_and_install/rv1126.md
+++ b/docs/cn/build_and_install/rv1126.md
@@ -61,6 +61,7 @@ mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=./../cmake/toolchain.cmake \
-DWITH_TIMVX=ON \
-DTARGET_ABI=armhf \
+ -DENABLE_FLYCV=ON \ # 是否开启 FlyCV 优化前后处理,可以选择开启
-DCMAKE_INSTALL_PREFIX=fastdeploy-timvx \
-DENABLE_VISION=ON \ # 是否编译集成视觉模型的部署模块,可选择开启
-Wno-dev ..
diff --git a/docs/cn/faq/rknpu2/export.md b/docs/cn/faq/rknpu2/export.md
index f9fff9764..1d6bbb296 100644
--- a/docs/cn/faq/rknpu2/export.md
+++ b/docs/cn/faq/rknpu2/export.md
@@ -4,7 +4,10 @@
## 简介
-Fastdeploy已经简单的集成了onnx->rknn的转换过程。本教程使用tools/export.py文件导出模型,在导出之前需要编写yaml配置文件。
+FastDeploy已经简单地集成了onnx->rknn的转换过程。
+本教程使用tools/rknpu2/export.py文件导出模型,在导出之前需要编写yaml配置文件。
+
+## 环境要求
在进行转换前请根据[rknn_toolkit2安装文档](./install_rknn_toolkit2.md)检查环境是否已经安装成功。
@@ -14,29 +17,72 @@ Fastdeploy已经简单的集成了onnx->rknn的转换过程。本教程使用too
|-----------------|------------|--------------------|
| verbose | 是,默认值为True | 是否在屏幕上输出转换模型时的具体信息 |
| config_path | 否 | 配置文件路径 |
+| target_platform | 否 | 目标平台(SoC)型号,例如RK3588 |
## config 配置文件介绍
### config yaml文件模版
```yaml
-model_path: ./portrait_pp_humansegv2_lite_256x144_pretrained.onnx
-output_folder: ./
-target_platform: RK3588
-normalize:
- mean: [[0.5,0.5,0.5]]
- std: [[0.5,0.5,0.5]]
-outputs: None
+mean:
+ -
+ - 128.5
+ - 128.5
+ - 128.5
+std:
+ -
+ - 128.5
+ - 128.5
+ - 128.5
+model_path: "./scrfd_500m_bnkps_shape640x640.onnx"
+outputs_nodes:
+do_quantization: True
+dataset: "./datasets.txt"
+output_folder: "./"
```
### config 配置参数介绍
-* model_path: 模型储存路径
-* output_folder: 模型储存文件夹名字
-* target_platform: 模型跑在哪一个设备上,只能为RK3588或RK3568
-* normalize: 配置在NPU上的normalize操作,有std和mean两个参数
- * std: 如果在外部做normalize操作,请配置为[1/255,1/255,1/255]
- * mean: 如果在外部做normalize操作,请配置为[0,0,0]
-* outputs: 输出节点列表,如果使用默认输出节点,请配置为None
+#### model_path
+代表需要转换为RKNN的ONNX格式的模型路径
+```yaml
+model_path: "./scrfd_500m_bnkps_shape640x640.onnx"
+```
+#### output_folder
+代表最后储存RKNN模型文件的文件夹路径
+```yaml
+output_folder: "./"
+```
+
+#### std 和 mean
+如果需要在NPU上进行normalize操作,则需要配置此参数,并且请自行将参数乘以255。例如你的normalize中mean参数为[0.5,0.5,0.5]时,
+配置文件中的mean应配置为[128.5,128.5,128.5],并按如下yaml格式书写:
+```yaml
+mean:
+ -
+ - 128.5
+ - 128.5
+ - 128.5
+std:
+ -
+ - 128.5
+ - 128.5
+ - 128.5
+```
+当然如果在外部进行normalize和permute操作,则无需配置这两个参数。
+
+#### outputs_nodes
+输出节点的名字。当导出完整模型(使用默认输出节点)时,无需配置该参数。
+```yaml
+outputs_nodes:
+```
+
+#### do_quantization 和 dataset
+do_quantization代表是否进行静态量化。dataset表示进行静态量化时使用的图片数据集文件路径。
+这两个参数配套使用,只有当do_quantization为True时,dataset才会生效。
+```yaml
+do_quantization: True
+dataset: "./datasets.txt"
+```
## 如何转换模型
根目录下执行以下代码
@@ -47,4 +93,4 @@ python tools/export.py --config_path=./config.yaml
## 模型导出要注意的事项
-* 请不要导出带softmax和argmax的模型,这两个算子存在bug,请在外部进行运算
+* 不建议导出softmax以及argmax算子
diff --git a/docs/en/build_and_install/README.md b/docs/en/build_and_install/README.md
index eff85c0f4..21e954034 100755
--- a/docs/en/build_and_install/README.md
+++ b/docs/en/build_and_install/README.md
@@ -15,7 +15,7 @@ English | [中文](../../cn/build_and_install/README.md)
- [Build and Install on RV1126 Platform](rv1126.md)
- [Build and Install on RK3588 Platform](rknpu2.md)
- [Build and Install on A311D Platform](a311d.md)
-- [Build and Install on KunlunXin XPU Platform](xpu.md)
+- [Build and Install on KunlunXin XPU Platform](kunlunxin.md)
## Build options
@@ -29,7 +29,7 @@ English | [中文](../../cn/build_and_install/README.md)
| ENABLE_VISION | Default OFF,whether to enable vision models deployment module |
| ENABLE_TEXT | Default OFF,whether to enable text models deployment module |
| WITH_GPU | Default OFF, if build on GPU, this needs to be ON |
-| WITH_XPU | Default OFF,if deploy on KunlunXin XPU,this needs to be ON |
+| WITH_KUNLUNXIN | Default OFF,if deploy on KunlunXin XPU,this needs to be ON |
| WITH_TIMVX | Default OFF,if deploy on RV1126/RV1109/A311D,this needs to be ON |
| WITH_ASCEND | Default OFF,if deploy on Huawei Ascend,this needs to be ON |
| CUDA_DIRECTORY | Default /usr/local/cuda, if build on GPU, this defines the path of CUDA(>=11.2) |
diff --git a/docs/en/build_and_install/xpu.md b/docs/en/build_and_install/kunlunxin.md
similarity index 89%
rename from docs/en/build_and_install/xpu.md
rename to docs/en/build_and_install/kunlunxin.md
index 254e64155..843a0464f 100755
--- a/docs/en/build_and_install/xpu.md
+++ b/docs/en/build_and_install/kunlunxin.md
@@ -1,4 +1,4 @@
-English | [中文](../../cn/build_and_install/xpu.md)
+English | [中文](../../cn/build_and_install/kunlunxin.md)
# How to Build KunlunXin XPU Deployment Environment
@@ -10,7 +10,7 @@ The relevant compilation options are described as follows:
|Compile Options|Default Values|Description|Remarks|
|:---|:---|:---|:---|
| ENABLE_LITE_BACKEND | OFF | It needs to be set to ON when compiling the RK library| - |
-| WITH_XPU | OFF | It needs to be set to ON when compiling the KunlunXin XPU library| - |
+| WITH_KUNLUNXIN | OFF | It needs to be set to ON when compiling the KunlunXin XPU library| - |
| ENABLE_ORT_BACKEND | OFF | whether to intergrate ONNX Runtime backend | - |
| ENABLE_PADDLE_BACKEND | OFF | whether to intergrate Paddle Inference backend | - |
| ENABLE_OPENVINO_BACKEND | OFF | whether to intergrate OpenVINO backend | - |
@@ -44,11 +44,11 @@ cd FastDeploy
mkdir build && cd build
# CMake configuration with KunlunXin xpu toolchain
-cmake -DWITH_XPU=ON \
+cmake -DWITH_KUNLUNXIN=ON \
-DWITH_GPU=OFF \
-DENABLE_ORT_BACKEND=ON \
-DENABLE_PADDLE_BACKEND=ON \
- -DCMAKE_INSTALL_PREFIX=fastdeploy-xpu \
+ -DCMAKE_INSTALL_PREFIX=fastdeploy-kunlunxin \
-DENABLE_VISION=ON \
-DOPENCV_DIRECTORY=/usr/lib/x86_64-linux-gnu/cmake/opencv4 \
..
@@ -57,14 +57,14 @@ cmake -DWITH_XPU=ON \
make -j8
make install
```
-After the compilation is complete, the fastdeploy-xpu directory will be generated, indicating that the Padddle Lite based FastDeploy library has been compiled.
+After the compilation is complete, the fastdeploy-kunlunxin directory will be generated, indicating that the Paddle Lite-based FastDeploy library has been compiled.
## Python compile
The compilation command is as follows:
```bash
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/python
-export WITH_XPU=ON
+export WITH_KUNLUNXIN=ON
export WITH_GPU=OFF
export ENABLE_ORT_BACKEND=ON
export ENABLE_PADDLE_BACKEND=ON
diff --git a/docs/en/build_and_install/rv1126.md b/docs/en/build_and_install/rv1126.md
index 91f4edf79..de9c563ab 100755
--- a/docs/en/build_and_install/rv1126.md
+++ b/docs/en/build_and_install/rv1126.md
@@ -60,6 +60,7 @@ mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=./../cmake/toolchain.cmake \
-DWITH_TIMVX=ON \
-DTARGET_ABI=armhf \
+ -DENABLE_FLYCV=ON \ # Whether to enable FlyCV optimization
-DCMAKE_INSTALL_PREFIX=fastdeploy-timvx \
-DENABLE_VISION=ON \ # Whether to compile the vision module
-Wno-dev ..
diff --git a/examples/multimodal/stable_diffusion/README.md b/examples/multimodal/stable_diffusion/README.md
index 39b88a9f6..0b5bbcd09 100755
--- a/examples/multimodal/stable_diffusion/README.md
+++ b/examples/multimodal/stable_diffusion/README.md
@@ -41,7 +41,7 @@ python infer.py --model_dir stable-diffusion-v1-4/ --scheduler "pndm" --backend
python infer.py --model_dir stable-diffusion-v1-5/ --scheduler "euler_ancestral" --backend paddle
# 在昆仑芯XPU上推理
-python infer.py --model_dir stable-diffusion-v1-5/ --scheduler "euler_ancestral" --backend paddle-xpu
+python infer.py --model_dir stable-diffusion-v1-5/ --scheduler "euler_ancestral" --backend paddle-kunlunxin
```
#### 参数说明
@@ -52,7 +52,7 @@ python infer.py --model_dir stable-diffusion-v1-5/ --scheduler "euler_ancestral"
|----------|--------------|
| --model_dir | 导出后模型的目录。 |
| --model_format | 模型格式。默认为`'paddle'`,可选列表:`['paddle', 'onnx']`。 |
-| --backend | 推理引擎后端。默认为`paddle`,可选列表:`['onnx_runtime', 'paddle', 'paddle-xpu']`,当模型格式为`onnx`时,可选列表为`['onnx_runtime']`。 |
+| --backend | 推理引擎后端。默认为`paddle`,可选列表:`['onnx_runtime', 'paddle', 'paddle-kunlunxin']`,当模型格式为`onnx`时,可选列表为`['onnx_runtime']`。 |
| --scheduler | StableDiffusion 模型的scheduler。默认为`'pndm'`。可选列表:`['pndm', 'euler_ancestral']`,StableDiffusio模型对应的scheduler可参考[ppdiffuser模型列表](https://github.com/PaddlePaddle/PaddleNLP/tree/develop/ppdiffusers/examples/textual_inversion)。|
| --unet_model_prefix | UNet模型前缀。默认为`unet`。 |
| --vae_model_prefix | VAE模型前缀。默认为`vae_decoder`。 |
diff --git a/examples/multimodal/stable_diffusion/infer.py b/examples/multimodal/stable_diffusion/infer.py
index 91954df6f..a22b569c1 100755
--- a/examples/multimodal/stable_diffusion/infer.py
+++ b/examples/multimodal/stable_diffusion/infer.py
@@ -69,7 +69,7 @@ def parse_arguments():
type=str,
default='paddle',
# Note(zhoushunjie): Will support 'tensorrt', 'paddle-tensorrt' soon.
- choices=['onnx_runtime', 'paddle', 'paddle-xpu'],
+ choices=['onnx_runtime', 'paddle', 'paddle-kunlunxin'],
help="The inference runtime backend of unet model and text encoder model."
)
parser.add_argument(
@@ -175,9 +175,9 @@ def create_trt_runtime(model_dir,
return fd.Runtime(option)
-def create_xpu_runtime(model_dir, model_prefix, device_id=0):
+def create_kunlunxin_runtime(model_dir, model_prefix, device_id=0):
option = fd.RuntimeOption()
- option.use_xpu(
+ option.use_kunlunxin(
device_id,
l3_workspace_size=(64 * 1024 * 1024 - 4 * 1024),
locked=False,
@@ -306,18 +306,18 @@ if __name__ == "__main__":
dynamic_shape=unet_dynamic_shape,
device_id=args.device_id)
print(f"Spend {time.time() - start : .2f} s to load unet model.")
- elif args.backend == "paddle-xpu":
+ elif args.backend == "paddle-kunlunxin":
print("=== build text_encoder_runtime")
- text_encoder_runtime = create_xpu_runtime(
+ text_encoder_runtime = create_kunlunxin_runtime(
args.model_dir,
args.text_encoder_model_prefix,
device_id=args.device_id)
print("=== build vae_decoder_runtime")
- vae_decoder_runtime = create_xpu_runtime(
+ vae_decoder_runtime = create_kunlunxin_runtime(
args.model_dir, args.vae_model_prefix, device_id=args.device_id)
print("=== build unet_runtime")
start = time.time()
- unet_runtime = create_xpu_runtime(
+ unet_runtime = create_kunlunxin_runtime(
args.model_dir, args.unet_model_prefix, device_id=args.device_id)
print(f"Spend {time.time() - start : .2f} s to load unet model.")
pipe = StableDiffusionFastDeployPipeline(
diff --git a/examples/text/ernie-3.0/cpp/README.md b/examples/text/ernie-3.0/cpp/README.md
index 053697799..c5527907c 100755
--- a/examples/text/ernie-3.0/cpp/README.md
+++ b/examples/text/ernie-3.0/cpp/README.md
@@ -35,8 +35,8 @@ tar xvfz ernie-3.0-medium-zh-afqmc.tgz
# GPU Inference
./seq_cls_infer_demo --device gpu --model_dir ernie-3.0-medium-zh-afqmc
-# XPU 推理
-./seq_cls_infer_demo --device xpu --model_dir ernie-3.0-medium-zh-afqmc
+# KunlunXin XPU Inference
+./seq_cls_infer_demo --device kunlunxin --model_dir ernie-3.0-medium-zh-afqmc
```
The result returned after running is as follows:
```bash
diff --git a/examples/text/ernie-3.0/cpp/seq_cls_infer.cc b/examples/text/ernie-3.0/cpp/seq_cls_infer.cc
index 71ded2a1e..6ec738b07 100755
--- a/examples/text/ernie-3.0/cpp/seq_cls_infer.cc
+++ b/examples/text/ernie-3.0/cpp/seq_cls_infer.cc
@@ -32,7 +32,7 @@ const char sep = '/';
DEFINE_string(model_dir, "", "Directory of the inference model.");
DEFINE_string(vocab_path, "", "Path of the vocab file.");
DEFINE_string(device, "cpu",
- "Type of inference device, support 'cpu', 'xpu' or 'gpu'.");
+ "Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.");
DEFINE_string(backend, "onnx_runtime",
"The inference runtime backend, support: ['onnx_runtime', "
"'paddle', 'openvino', 'tensorrt', 'paddle_tensorrt']");
@@ -61,8 +61,8 @@ bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
<< ", param_path = " << param_path << std::endl;
option->SetModelPath(model_path, param_path);
- if (FLAGS_device == "xpu") {
- option->UseXpu();
+ if (FLAGS_device == "kunlunxin") {
+ option->UseKunlunXin();
return true;
} else if (FLAGS_device == "gpu") {
option->UseGpu();
diff --git a/examples/text/ernie-3.0/python/README.md b/examples/text/ernie-3.0/python/README.md
index f1424aff7..9fb641478 100755
--- a/examples/text/ernie-3.0/python/README.md
+++ b/examples/text/ernie-3.0/python/README.md
@@ -40,8 +40,8 @@ python seq_cls_infer.py --device cpu --model_dir ernie-3.0-medium-zh-afqmc
# GPU Inference
python seq_cls_infer.py --device gpu --model_dir ernie-3.0-medium-zh-afqmc
-# XPU Inference
-python seq_cls_infer.py --device xpu --model_dir ernie-3.0-medium-zh-afqmc
+# KunlunXin XPU Inference
+python seq_cls_infer.py --device kunlunxin --model_dir ernie-3.0-medium-zh-afqmc
```
The result returned after running is as follows:
diff --git a/examples/text/ernie-3.0/python/seq_cls_infer.py b/examples/text/ernie-3.0/python/seq_cls_infer.py
index 67cde26ae..87c2a6308 100755
--- a/examples/text/ernie-3.0/python/seq_cls_infer.py
+++ b/examples/text/ernie-3.0/python/seq_cls_infer.py
@@ -35,8 +35,8 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- choices=['gpu', 'cpu', 'xpu'],
- help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
+ choices=['gpu', 'cpu', 'kunlunxin'],
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
parser.add_argument(
"--backend",
type=str,
@@ -94,8 +94,8 @@ class ErnieForSequenceClassificationPredictor(object):
model_path = os.path.join(args.model_dir, "infer.pdmodel")
params_path = os.path.join(args.model_dir, "infer.pdiparams")
option.set_model_path(model_path, params_path)
- if args.device == 'xpu':
- option.use_xpu()
+ if args.device == 'kunlunxin':
+ option.use_kunlunxin()
option.use_paddle_lite_backend()
return fd.Runtime(option)
if args.device == 'cpu':
diff --git a/examples/text/ernie-3.0/serving/README.md b/examples/text/ernie-3.0/serving/README.md
index 7b6bf7805..15fa1ba64 100644
--- a/examples/text/ernie-3.0/serving/README.md
+++ b/examples/text/ernie-3.0/serving/README.md
@@ -51,16 +51,16 @@ models
```bash
# x.y.z represent image versions. Please refer to the serving document to replace them with numbers
# GPU Image
-docker pull paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
# CPU Image
-docker pull paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
# Running
-docker run -it --net=host --name fastdeploy_server --shm-size="1g" -v /path/serving/models:/models paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10 bash
+docker run -it --net=host --name fastdeploy_server --shm-size="1g" -v /path/serving/models:/models registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10 bash
```
## Deployment Models
-The serving directory contains the configuration to start the pipeline service and the code to send the prediction request, including
+The serving directory contains the configuration to start the pipeline service and the code to send the prediction request, including
```
models # 服务化启动需要的模型仓库,包含模型和服务配置文件
@@ -70,7 +70,7 @@ token_cls_rpc_client.py # 序列标注任务发送pipeline预测请求的脚
*Attention*:Attention: When starting the service, each python backend process of Server requests 64M memory by default, and the docker started by default cannot start more than one python backend node. There are two solutions:
-- 1.Set the `shm-size` parameter when starting the container, for example, `docker run -it --net=host --name fastdeploy_server --shm-size="1g" -v /path/serving/models:/models paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash`
+- 1.Set the `shm-size` parameter when starting the container, for example, `docker run -it --net=host --name fastdeploy_server --shm-size="1g" -v /path/serving/models:/models registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash`
- 2.Set the `shm-default-byte-size` parameter of python backend when starting the service. Set the default memory of python backend to 10M: `tritonserver --model-repository=/models --backend-config=python,shm-default-byte-size=10485760`
### Classification Task
diff --git a/examples/text/ernie-3.0/serving/README_CN.md b/examples/text/ernie-3.0/serving/README_CN.md
index 97bd9d905..8de633bfb 100644
--- a/examples/text/ernie-3.0/serving/README_CN.md
+++ b/examples/text/ernie-3.0/serving/README_CN.md
@@ -51,12 +51,12 @@ models
```bash
# x.y.z为镜像版本号,需参照serving文档替换为数字
# GPU镜像
-docker pull paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
# CPU镜像
-docker pull paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
# 运行
-docker run -it --net=host --name fastdeploy_server --shm-size="1g" -v /path/serving/models:/models paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10 bash
+docker run -it --net=host --name fastdeploy_server --shm-size="1g" -v /path/serving/models:/models registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10 bash
```
## 部署模型
@@ -69,7 +69,7 @@ token_cls_rpc_client.py # 序列标注任务发送pipeline预测请求的脚
```
*注意*:启动服务时,Server的每个python后端进程默认申请`64M`内存,默认启动的docker无法启动多个python后端节点。有两个解决方案:
-- 1.启动容器时设置`shm-size`参数, 比如:`docker run -it --net=host --name fastdeploy_server --shm-size="1g" -v /path/serving/models:/models paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash`
+- 1.启动容器时设置`shm-size`参数, 比如:`docker run -it --net=host --name fastdeploy_server --shm-size="1g" -v /path/serving/models:/models registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash`
- 2.启动服务时设置python后端的`shm-default-byte-size`参数, 设置python后端的默认内存为10M: `tritonserver --model-repository=/models --backend-config=python,shm-default-byte-size=10485760`
### 分类任务
diff --git a/examples/vision/classification/paddleclas/cpp/infer.cc b/examples/vision/classification/paddleclas/cpp/infer.cc
index e5326b16e..90c5557c2 100755
--- a/examples/vision/classification/paddleclas/cpp/infer.cc
+++ b/examples/vision/classification/paddleclas/cpp/infer.cc
@@ -96,13 +96,13 @@ void IpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << res.Str() << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "inference.pdmodel";
auto params_file = model_dir + sep + "inference.pdiparams";
auto config_file = model_dir + sep + "inference_cls.yaml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::classification::PaddleClasModel(
model_file, params_file, config_file, option);
if (!model.Initialized()) {
@@ -179,7 +179,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_demo ./ResNet50_vd ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with ipu; 4: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with ipu; 4: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -193,7 +193,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 3) {
IpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 4) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 5) {
AscendInfer(argv[1], argv[2]);
}
diff --git a/examples/vision/classification/paddleclas/python/README.md b/examples/vision/classification/paddleclas/python/README.md
index 9b235b79b..14de9ff38 100755
--- a/examples/vision/classification/paddleclas/python/README.md
+++ b/examples/vision/classification/paddleclas/python/README.md
@@ -25,8 +25,8 @@ python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg -
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --use_trt True --topk 1
# IPU推理(注意:IPU推理首次运行会有序列化模型的操作,有一定耗时,需要耐心等待)
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device ipu --topk 1
-# XPU推理
-python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device xpu --topk 1
+# 昆仑芯XPU推理
+python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device kunlunxin --topk 1
# 华为昇腾NPU推理
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device ascend --topk 1
```
diff --git a/examples/vision/classification/paddleclas/python/infer.py b/examples/vision/classification/paddleclas/python/infer.py
index b9ef0c82d..3cac182e9 100755
--- a/examples/vision/classification/paddleclas/python/infer.py
+++ b/examples/vision/classification/paddleclas/python/infer.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'cpu' or 'gpu' or 'ipu' or 'xpu' or 'ascend' ."
+ help="Type of inference device, support 'cpu' or 'gpu' or 'ipu' or 'kunlunxin' or 'ascend' ."
)
parser.add_argument(
"--use_trt",
@@ -36,8 +36,8 @@ def build_option(args):
if args.device.lower() == "ipu":
option.use_ipu()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
diff --git a/examples/vision/classification/paddleclas/rv1126/cpp/CMakeLists.txt b/examples/vision/classification/paddleclas/rv1126/cpp/CMakeLists.txt
index baaf8331f..64b7a6466 100755
--- a/examples/vision/classification/paddleclas/rv1126/cpp/CMakeLists.txt
+++ b/examples/vision/classification/paddleclas/rv1126/cpp/CMakeLists.txt
@@ -20,19 +20,11 @@ install(TARGETS infer_demo DESTINATION ./)
install(DIRECTORY models DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
-# install(DIRECTORY run_with_adb.sh DESTINATION ./)
-file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
-install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
-
-file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
-install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
-
-file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
-install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
-
-file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
-install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
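+# Install all FastDeploy and third-party shared libraries into the lib directory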
+file(GLOB_RECURSE FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/lib*.so*)
+file(GLOB_RECURSE ALL_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/lib*.so*)
+list(APPEND ALL_LIBS ${FASTDEPLOY_LIBS})
+install(PROGRAMS ${ALL_LIBS} DESTINATION lib)
file(GLOB ADB_TOOLS run_with_adb.sh)
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)
diff --git a/examples/vision/classification/paddleclas/rv1126/cpp/infer.cc b/examples/vision/classification/paddleclas/rv1126/cpp/infer.cc
index 140311eec..b1b6f435b 100755
--- a/examples/vision/classification/paddleclas/rv1126/cpp/infer.cc
+++ b/examples/vision/classification/paddleclas/rv1126/cpp/infer.cc
@@ -23,7 +23,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "inference.pdmodel";
auto params_file = model_dir + sep + "inference.pdiparams";
auto config_file = model_dir + sep + "inference_cls.yaml";
-
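+  // Use FlyCV for faster image pre-processing (requires FastDeploy built with ENABLE_FLYCV=ON)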
+ fastdeploy::vision::EnableFlyCV();
fastdeploy::RuntimeOption option;
option.UseTimVX();
diff --git a/examples/vision/classification/paddleclas/serving/README.md b/examples/vision/classification/paddleclas/serving/README.md
index 0b4771717..abfe6529b 100644
--- a/examples/vision/classification/paddleclas/serving/README.md
+++ b/examples/vision/classification/paddleclas/serving/README.md
@@ -26,12 +26,12 @@ mv ResNet50_vd_infer/inference.pdiparams models/runtime/1/model.pdiparams
# 拉取fastdeploy镜像(x.y.z为镜像版本号,需参照serving文档替换为数字)
# GPU镜像
-docker pull paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
# CPU镜像
-docker pull paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
# 运行容器.容器名字为 fd_serving, 并挂载当前目录为容器的 /serving 目录
-nvidia-docker run -it --net=host --name fd_serving -v `pwd`/:/serving paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
+nvidia-docker run -it --net=host --name fd_serving -v `pwd`/:/serving registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
# 启动服务(不设置CUDA_VISIBLE_DEVICES环境变量,会拥有所有GPU卡的调度权限)
CUDA_VISIBLE_DEVICES=0 fastdeployserver --model-repository=/serving/models --backend-config=python,shm-default-byte-size=10485760
diff --git a/examples/vision/detection/fastestdet/cpp/CMakeLists.txt b/examples/vision/detection/fastestdet/cpp/CMakeLists.txt
new file mode 100644
index 000000000..9ba668762
--- /dev/null
+++ b/examples/vision/detection/fastestdet/cpp/CMakeLists.txt
@@ -0,0 +1,14 @@
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+
+# Specifies the path to the fastdeploy library after you have downloaded it
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Include the FastDeploy dependency header file
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+# Add the FastDeploy library dependency
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
diff --git a/examples/vision/detection/fastestdet/cpp/README.md b/examples/vision/detection/fastestdet/cpp/README.md
new file mode 100644
index 000000000..bf2d01394
--- /dev/null
+++ b/examples/vision/detection/fastestdet/cpp/README.md
@@ -0,0 +1,87 @@
+# FastestDet C++部署示例
+
+本目录下提供`infer.cc`快速完成FastestDet在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+
+以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试
+
+```bash
+mkdir build
+cd build
+wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-1.0.3.tgz
+tar xvf fastdeploy-linux-x64-1.0.3.tgz
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-1.0.3
+make -j
+
+#下载官方转换好的FastestDet模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/FastestDet.onnx
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+
+# CPU推理
+./infer_demo FastestDet.onnx 000000014439.jpg 0
+# GPU推理
+./infer_demo FastestDet.onnx 000000014439.jpg 1
+# GPU上TensorRT推理
+./infer_demo FastestDet.onnx 000000014439.jpg 2
+```
+
+运行完成可视化结果如下图所示
+
+
+
+以上命令只适用于Linux或MacOS, Windows下SDK的使用方式请参考:
+- [如何在Windows中使用FastDeploy C++ SDK](../../../../../docs/cn/faq/use_sdk_on_windows.md)
+
+## FastestDet C++接口
+
+### FastestDet类
+
+```c++
+fastdeploy::vision::detection::FastestDet(
+ const string& model_file,
+ const string& params_file = "",
+ const RuntimeOption& runtime_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX)
+```
+
+FastestDet模型加载和初始化,其中model_file为导出的ONNX模型格式。
+
+**参数**
+
+> * **model_file**(str): 模型文件路径
+> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可
+> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
+> * **model_format**(ModelFormat): 模型格式,默认为ONNX格式
+
+#### Predict函数
+
+> ```c++
+> FastestDet::Predict(cv::Mat* im, DetectionResult* result,
+> float conf_threshold = 0.65,
+> float nms_iou_threshold = 0.45)
+> ```
+>
+> 模型预测接口,输入图像直接输出检测结果。
+>
+> **参数**
+>
+> > * **im**: 输入图像,注意需为HWC,BGR格式
+> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
+> > * **conf_threshold**: 检测框置信度过滤阈值
+> > * **nms_iou_threshold**: NMS处理过程中iou阈值
+
+### 类成员变量
+#### 预处理参数
+用户可按照自己的实际需求,修改下列预处理参数,从而影响最终的推理和部署效果
+
+> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[352, 352]
+
+- [模型介绍](../../)
+- [Python部署](../python)
+- [视觉模型预测结果](../../../../../docs/api/vision_results/)
+- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md)
diff --git a/examples/vision/detection/fastestdet/cpp/infer.cc b/examples/vision/detection/fastestdet/cpp/infer.cc
new file mode 100644
index 000000000..71dd862a2
--- /dev/null
+++ b/examples/vision/detection/fastestdet/cpp/infer.cc
@@ -0,0 +1,105 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+void CpuInfer(const std::string& model_file, const std::string& image_file) {
+ auto model = fastdeploy::vision::detection::FastestDet(model_file);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void GpuInfer(const std::string& model_file, const std::string& image_file) {
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ auto model = fastdeploy::vision::detection::FastestDet(model_file, "", option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+void TrtInfer(const std::string& model_file, const std::string& image_file) {
+ auto option = fastdeploy::RuntimeOption();
+ option.UseGpu();
+ option.UseTrtBackend();
+ option.SetTrtInputShape("images", {1, 3, 352, 352});
+ auto model = fastdeploy::vision::detection::FastestDet(model_file, "", option);
+ if (!model.Initialized()) {
+ std::cerr << "Failed to initialize." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::DetectionResult res;
+ if (!model.Predict(im, &res)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+ std::cout << res.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisDetection(im, res);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char* argv[]) {
+ if (argc < 4) {
+ std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
+ "e.g ./infer_model ./FastestDet.onnx ./test.jpeg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu; 2: run with gpu and use tensorrt backend."
+ << std::endl;
+ return -1;
+ }
+
+ if (std::atoi(argv[3]) == 0) {
+ CpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 1) {
+ GpuInfer(argv[1], argv[2]);
+ } else if (std::atoi(argv[3]) == 2) {
+ TrtInfer(argv[1], argv[2]);
+ }
+ return 0;
+}
diff --git a/examples/vision/detection/fastestdet/python/README.md b/examples/vision/detection/fastestdet/python/README.md
new file mode 100644
index 000000000..000bf05cc
--- /dev/null
+++ b/examples/vision/detection/fastestdet/python/README.md
@@ -0,0 +1,74 @@
+# FastestDet Python部署示例
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+
+本目录下提供`infer.py`快速完成FastestDet在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成
+
+```bash
+#下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/fastestdet/python/
+
+#下载fastestdet模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/FastestDet.onnx
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+# CPU推理
+python infer.py --model FastestDet.onnx --image 000000014439.jpg --device cpu
+# GPU推理
+python infer.py --model FastestDet.onnx --image 000000014439.jpg --device gpu
+# GPU上使用TensorRT推理
+python infer.py --model FastestDet.onnx --image 000000014439.jpg --device gpu --use_trt True
+```
+
+运行完成可视化结果如下图所示
+
+
+
+## FastestDet Python接口
+
+```python
+fastdeploy.vision.detection.FastestDet(model_file, params_file=None, runtime_option=None, model_format=ModelFormat.ONNX)
+```
+
+FastestDet模型加载和初始化,其中model_file为导出的ONNX模型格式
+
+**参数**
+
+> * **model_file**(str): 模型文件路径
+> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定
+> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
+> * **model_format**(ModelFormat): 模型格式,默认为ONNX
+
+### predict函数
+
+> ```python
+> FastestDet.predict(image_data)
+> ```
+>
+> 模型预测接口,输入图像直接输出检测结果。
+>
+> **参数**
+>
+> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式
+
+> **返回**
+>
+> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/)
+
+### 类成员属性
+#### 预处理参数
+用户可按照自己的实际需求,修改下列预处理参数,从而影响最终的推理和部署效果
+
+> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[352, 352]
+
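+以下给出一个修改 `size` 预处理参数的简单示意(假设如上所述 `size` 为模型对象的成员属性,具体接口以所安装的 FastDeploy 版本为准):
+
+```python
+import fastdeploy as fd
+import cv2
+
+# 加载模型(假设 FastestDet.onnx 与测试图片已下载到当前目录)
+model = fd.vision.detection.FastestDet("FastestDet.onnx")
+# 按需修改预处理 resize 大小,默认值为 [352, 352]
+model.size = [352, 352]
+
+im = cv2.imread("000000014439.jpg")
+result = model.predict(im)
+print(result)
+```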
+
+## 其它文档
+
+- [FastestDet 模型介绍](..)
+- [FastestDet C++部署](../cpp)
+- [模型预测结果说明](../../../../../docs/api/vision_results/)
+- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md)
diff --git a/examples/vision/detection/fastestdet/python/infer.py b/examples/vision/detection/fastestdet/python/infer.py
new file mode 100644
index 000000000..ad734b4d7
--- /dev/null
+++ b/examples/vision/detection/fastestdet/python/infer.py
@@ -0,0 +1,51 @@
+import fastdeploy as fd
+import cv2
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model", required=True, help="Path of FastestDet onnx model.")
+ parser.add_argument(
+ "--image", required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu' or 'gpu'.")
+ parser.add_argument(
+ "--use_trt",
+ type=ast.literal_eval,
+ default=False,
+ help="Wether to use tensorrt.")
+ return parser.parse_args()
+
+
+def build_option(args):
+ option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ option.use_gpu()
+
+ if args.use_trt:
+ option.use_trt_backend()
+ option.set_trt_input_shape("images", [1, 3, 352, 352])
+ return option
+
+
+args = parse_arguments()
+
+# Configure runtime and load model
+runtime_option = build_option(args)
+model = fd.vision.detection.FastestDet(args.model, runtime_option=runtime_option)
+
+# Predict picture detection results
+im = cv2.imread(args.image)
+result = model.predict(im)
+
+# Visualization of prediction results
+vis_im = fd.vision.vis_detection(im, result)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result save in ./visualized_result.jpg")
diff --git a/examples/vision/detection/paddledetection/cpp/infer_faster_rcnn.cc b/examples/vision/detection/paddledetection/cpp/infer_faster_rcnn.cc
index 287ae0ed3..b95180103 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_faster_rcnn.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_faster_rcnn.cc
@@ -47,12 +47,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu(0, 0, false, false);
+ option.UseKunlunXin(0, 0, false, false);
auto model = fastdeploy::vision::detection::FasterRCNN(
model_file, params_file, config_file, option);
if (!model.Initialized()) {
@@ -109,7 +109,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./faster_rcnn_r50_vd_fpn_2x_coco ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with xpu."
+ "with gpu; 2: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -119,7 +119,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_mask_rcnn.cc b/examples/vision/detection/paddledetection/cpp/infer_mask_rcnn.cc
index 99f673832..9ad63e424 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_mask_rcnn.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_mask_rcnn.cc
@@ -47,12 +47,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu(0, 0, false, false);
+ option.UseKunlunXin(0, 0, false, false);
auto model = fastdeploy::vision::detection::MaskRCNN(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -109,7 +109,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./mask_rcnn_r50_1x_coco/ ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with xpu."
+ "with gpu; 2: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -119,7 +119,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
std::cout
<< "Backend::TRT has not been supported yet, will skip this inference."
diff --git a/examples/vision/detection/paddledetection/cpp/infer_picodet.cc b/examples/vision/detection/paddledetection/cpp/infer_picodet.cc
index d12d07728..308ce8f22 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_picodet.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_picodet.cc
@@ -47,12 +47,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::PicoDet(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -138,7 +138,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./picodet_model_dir ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -150,7 +150,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_ppyolo.cc b/examples/vision/detection/paddledetection/cpp/infer_ppyolo.cc
index d0aceca49..088e98402 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_ppyolo.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_ppyolo.cc
@@ -47,12 +47,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::PPYOLO(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -109,7 +109,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with xpu."
+ "with gpu; 2: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -119,7 +119,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc b/examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc
index a232bca6b..99922e22a 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_ppyoloe.cc
@@ -47,12 +47,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -138,7 +138,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ppyoloe_model_dir ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -150,7 +150,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_rtmdet.cc b/examples/vision/detection/paddledetection/cpp/infer_rtmdet.cc
index 0d211cac2..3a0a6968d 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_rtmdet.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_rtmdet.cc
@@ -48,12 +48,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::RTMDet(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -141,7 +141,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu, 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu, 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -153,7 +153,7 @@ int main(int argc, char* argv[]) {
} else if(std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
} else if(std::atoi(argv[3]) == 3) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_ssd.cc b/examples/vision/detection/paddledetection/cpp/infer_ssd.cc
index 0b0c17dbf..b71bf266c 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_ssd.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_ssd.cc
@@ -48,12 +48,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::SSD(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -111,7 +111,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ssd_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with xpu."
+ "with gpu; 2: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -121,7 +121,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolov3.cc b/examples/vision/detection/paddledetection/cpp/infer_yolov3.cc
index 045e030ce..3ec441014 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_yolov3.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_yolov3.cc
@@ -47,12 +47,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::YOLOv3(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -109,7 +109,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with xpu."
+ "with gpu; 2: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -119,7 +119,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolov5.cc b/examples/vision/detection/paddledetection/cpp/infer_yolov5.cc
index 6f8ecf57d..4b9dba6ba 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_yolov5.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_yolov5.cc
@@ -48,12 +48,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::PaddleYOLOv5(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -142,7 +142,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -154,7 +154,7 @@ int main(int argc, char* argv[]) {
} else if(std::atoi(argv[3]) == 2){
TrtInfer(argv[1], argv[2]);
} else if(std::atoi(argv[3]) == 3){
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolov6.cc b/examples/vision/detection/paddledetection/cpp/infer_yolov6.cc
index 9ea54a43d..67129743d 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_yolov6.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_yolov6.cc
@@ -48,12 +48,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::PaddleYOLOv6(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -141,7 +141,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -153,7 +153,7 @@ int main(int argc, char* argv[]) {
} else if(std::atoi(argv[3]) == 2){
TrtInfer(argv[1], argv[2]);
} else if(std::atoi(argv[3]) == 3){
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolov7.cc b/examples/vision/detection/paddledetection/cpp/infer_yolov7.cc
index 55b531457..5ee81a9f8 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_yolov7.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_yolov7.cc
@@ -48,12 +48,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::PaddleYOLOv7(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -140,7 +140,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -152,7 +152,7 @@ int main(int argc, char* argv[]) {
} else if(std::atoi(argv[3]) == 2){
TrtInfer(argv[1], argv[2]);
} else if(std::atoi(argv[3]) == 3){
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/cpp/infer_yolox.cc b/examples/vision/detection/paddledetection/cpp/infer_yolox.cc
index a10ce673f..405a48862 100755
--- a/examples/vision/detection/paddledetection/cpp/infer_yolox.cc
+++ b/examples/vision/detection/paddledetection/cpp/infer_yolox.cc
@@ -47,12 +47,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::PaddleYOLOX(
model_file, params_file, config_file, option);
if (!model.Initialized()) {
@@ -138,7 +138,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./paddle_yolox_dirname ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu by tensorrt; 3: run with xpu."
+ "with gpu; 2: run with gpu by tensorrt; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -150,7 +150,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/paddledetection/python/README.md b/examples/vision/detection/paddledetection/python/README.md
index 5b4f73675..b926dd9ee 100755
--- a/examples/vision/detection/paddledetection/python/README.md
+++ b/examples/vision/detection/paddledetection/python/README.md
@@ -24,7 +24,7 @@ python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439
# GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待)
python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device gpu --use_trt True
# 昆仑芯XPU推理
-python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device xpu
+python infer_ppyoloe.py --model_dir ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device kunlunxin
```
运行完成可视化结果如下图所示
diff --git a/examples/vision/detection/paddledetection/python/infer_faster_rcnn.py b/examples/vision/detection/paddledetection/python/infer_faster_rcnn.py
index 19baf9db8..771816ee5 100755
--- a/examples/vision/detection/paddledetection/python/infer_faster_rcnn.py
+++ b/examples/vision/detection/paddledetection/python/infer_faster_rcnn.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu(autotune=False, l3_workspace_size=0)
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin(autotune=False, l3_workspace_size=0)
if args.device.lower() == "gpu":
option.use_gpu()
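Each of the PaddleDetection Python demos in this patch repeats the same two-line change in `build_option`: the `--device` help string gains `kunlunxin` and `option.use_xpu(...)` becomes `option.use_kunlunxin(...)`. For readers skimming the fragments, here is a minimal end-to-end sketch of the renamed option in use; it reuses the `ppyoloe_crn_l_300e_coco` model directory and the `fd.vision.detection.PPYOLOE`/`vis_detection` calls that appear elsewhere in this patch, and assumes the usual `predict()` entry point of the FastDeploy Python API.

```python
import cv2
import fastdeploy as fd

# Select the device; "kunlunxin" replaces the old "xpu" string.
device = "kunlunxin"

option = fd.RuntimeOption()
if device == "kunlunxin":
    # Formerly option.use_xpu(); keyword defaults mirror infer_faster_rcnn.py above.
    option.use_kunlunxin(autotune=False, l3_workspace_size=0)
elif device == "gpu":
    option.use_gpu()

model = fd.vision.detection.PPYOLOE(
    "ppyoloe_crn_l_300e_coco/model.pdmodel",
    "ppyoloe_crn_l_300e_coco/model.pdiparams",
    "ppyoloe_crn_l_300e_coco/infer_cfg.yml",
    runtime_option=option)

im = cv2.imread("000000014439.jpg")
result = model.predict(im)
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("vis_result.jpg", vis_im)
print("Visualized result saved in ./vis_result.jpg")
```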
diff --git a/examples/vision/detection/paddledetection/python/infer_mask_rcnn.py b/examples/vision/detection/paddledetection/python/infer_mask_rcnn.py
index 4c06bc8ba..670109867 100755
--- a/examples/vision/detection/paddledetection/python/infer_mask_rcnn.py
+++ b/examples/vision/detection/paddledetection/python/infer_mask_rcnn.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu(autotune=False, l3_workspace_size=0)
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin(autotune=False, l3_workspace_size=0)
if args.device.lower() == "gpu":
# option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_picodet.py b/examples/vision/detection/paddledetection/python/infer_picodet.py
index 29dd10c20..71cd865fc 100755
--- a/examples/vision/detection/paddledetection/python/infer_picodet.py
+++ b/examples/vision/detection/paddledetection/python/infer_picodet.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_ppyolo.py b/examples/vision/detection/paddledetection/python/infer_ppyolo.py
index 60a9b6067..279c5bb9f 100755
--- a/examples/vision/detection/paddledetection/python/infer_ppyolo.py
+++ b/examples/vision/detection/paddledetection/python/infer_ppyolo.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_ppyoloe.py b/examples/vision/detection/paddledetection/python/infer_ppyoloe.py
index 65e73fbf2..2b0971f12 100755
--- a/examples/vision/detection/paddledetection/python/infer_ppyoloe.py
+++ b/examples/vision/detection/paddledetection/python/infer_ppyoloe.py
@@ -18,7 +18,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -30,8 +30,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_rtmdet.py b/examples/vision/detection/paddledetection/python/infer_rtmdet.py
index 444023de2..a2480398f 100755
--- a/examples/vision/detection/paddledetection/python/infer_rtmdet.py
+++ b/examples/vision/detection/paddledetection/python/infer_rtmdet.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_ssd.py b/examples/vision/detection/paddledetection/python/infer_ssd.py
index e032a712e..536bf5944 100755
--- a/examples/vision/detection/paddledetection/python/infer_ssd.py
+++ b/examples/vision/detection/paddledetection/python/infer_ssd.py
@@ -17,14 +17,14 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_yolov3.py b/examples/vision/detection/paddledetection/python/infer_yolov3.py
index 4d8ce4a99..8f6b509f9 100755
--- a/examples/vision/detection/paddledetection/python/infer_yolov3.py
+++ b/examples/vision/detection/paddledetection/python/infer_yolov3.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_yolov5.py b/examples/vision/detection/paddledetection/python/infer_yolov5.py
index 50851896c..aabdcaa7e 100755
--- a/examples/vision/detection/paddledetection/python/infer_yolov5.py
+++ b/examples/vision/detection/paddledetection/python/infer_yolov5.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_yolov6.py b/examples/vision/detection/paddledetection/python/infer_yolov6.py
index dee39e027..892f2cbb0 100755
--- a/examples/vision/detection/paddledetection/python/infer_yolov6.py
+++ b/examples/vision/detection/paddledetection/python/infer_yolov6.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_yolov7.py b/examples/vision/detection/paddledetection/python/infer_yolov7.py
index bb8d116fb..dfdf55b81 100755
--- a/examples/vision/detection/paddledetection/python/infer_yolov7.py
+++ b/examples/vision/detection/paddledetection/python/infer_yolov7.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/infer_yolox.py b/examples/vision/detection/paddledetection/python/infer_yolox.py
index c075d521e..411262255 100755
--- a/examples/vision/detection/paddledetection/python/infer_yolox.py
+++ b/examples/vision/detection/paddledetection/python/infer_yolox.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -29,8 +29,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
diff --git a/examples/vision/detection/paddledetection/python/serving/README.md b/examples/vision/detection/paddledetection/python/serving/README.md
new file mode 120000
index 000000000..bacd3186b
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/serving/README.md
@@ -0,0 +1 @@
+README_CN.md
\ No newline at end of file
diff --git a/examples/vision/detection/paddledetection/python/serving/README_CN.md b/examples/vision/detection/paddledetection/python/serving/README_CN.md
new file mode 100644
index 000000000..f73206ba3
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/serving/README_CN.md
@@ -0,0 +1,36 @@
+简体中文 | [English](README_EN.md)
+
+# PaddleDetection Python轻量服务化部署示例
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+
+服务端:
+```bash
+# 下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/paddledetection/python/serving
+
+# 下载PPYOLOE模型文件(如果不下载,代码里会自动从hub下载)
+wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
+tar xvf ppyoloe_crn_l_300e_coco.tgz
+
+# 启动服务,可修改server.py中的配置项来指定硬件、后端等
+# 可通过--host、--port指定IP和端口号
+fastdeploy simple_serving --app server:app
+```
+
+客户端:
+```bash
+# 下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/paddledetection/python/serving
+
+# 下载测试图片
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+# 请求服务,获取推理结果(如有必要,请修改脚本中的IP和端口号)
+python client.py
+```
diff --git a/examples/vision/detection/paddledetection/python/serving/README_EN.md b/examples/vision/detection/paddledetection/python/serving/README_EN.md
new file mode 100644
index 000000000..56049981d
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/serving/README_EN.md
@@ -0,0 +1,36 @@
+English | [简体中文](README_CN.md)
+
+# PaddleDetection Python Simple Serving Demo
+
+
+## Environment
+
+- 1. Prepare environment and install FastDeploy Python whl, refer to [download_prebuilt_libraries](../../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
+
+Server:
+```bash
+# Download demo code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/paddledetection/python/serving
+
+# Download PPYOLOE model
+wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
+tar xvf ppyoloe_crn_l_300e_coco.tgz
+
+# Launch server, change the configurations in server.py to select hardware, backend, etc.
+# and use --host, --port to specify IP and port
+fastdeploy simple_serving --app server:app
+```
+
+Client:
+```bash
+# Download demo code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/paddledetection/python/serving
+
+# Download test image
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+# Send request and get inference result (Please adapt the IP and port if necessary)
+python client.py
+```
diff --git a/examples/vision/detection/paddledetection/python/serving/client.py b/examples/vision/detection/paddledetection/python/serving/client.py
new file mode 100644
index 000000000..50538a1a0
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/serving/client.py
@@ -0,0 +1,23 @@
+import requests
+import json
+import cv2
+import fastdeploy as fd
+from fastdeploy.serving.utils import cv2_to_base64
+
+if __name__ == '__main__':
+ url = "http://127.0.0.1:8000/fd/ppyoloe"
+ headers = {"Content-Type": "application/json"}
+
+ im = cv2.imread("000000014439.jpg")
+ data = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}
+
+ resp = requests.post(url=url, headers=headers, data=json.dumps(data))
+ if resp.status_code == 200:
+ r_json = json.loads(resp.json()["result"])
+ det_result = fd.vision.utils.json_to_detection(r_json)
+ vis_im = fd.vision.vis_detection(im, det_result, score_threshold=0.5)
+ cv2.imwrite("visualized_result.jpg", vis_im)
+ print("Visualized result save in ./visualized_result.jpg")
+ else:
+ print("Error code:", resp.status_code)
+ print(resp.text)
diff --git a/examples/vision/detection/paddledetection/python/serving/server.py b/examples/vision/detection/paddledetection/python/serving/server.py
new file mode 100644
index 000000000..9abd2ed56
--- /dev/null
+++ b/examples/vision/detection/paddledetection/python/serving/server.py
@@ -0,0 +1,38 @@
+import fastdeploy as fd
+from fastdeploy.serving.server import SimpleServer
+import os
+import logging
+
+logging.getLogger().setLevel(logging.INFO)
+
+# Configurations
+model_dir = 'ppyoloe_crn_l_300e_coco'
+device = 'cpu'
+use_trt = False
+
+# Prepare model
+model_file = os.path.join(model_dir, "model.pdmodel")
+params_file = os.path.join(model_dir, "model.pdiparams")
+config_file = os.path.join(model_dir, "infer_cfg.yml")
+
+# Setup runtime option to select hardware, backend, etc.
+option = fd.RuntimeOption()
+if device.lower() == 'gpu':
+ option.use_gpu()
+if use_trt:
+ option.use_trt_backend()
+ option.set_trt_cache_file('ppyoloe.trt')
+
+# Create model instance
+model_instance = fd.vision.detection.PPYOLOE(
+ model_file=model_file,
+ params_file=params_file,
+ config_file=config_file,
+ runtime_option=option)
+
+# Create server, setup REST API
+app = SimpleServer()
+app.register(
+ task_name="fd/ppyoloe",
+ model_handler=fd.serving.handler.VisionModelHandler,
+ predictor=model_instance)
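The new `server.py` above selects hardware purely through the `RuntimeOption` block at the top of the file, and as written it only covers CPU, GPU and TensorRT. Below is a sketch of how that block could be extended to the KunlunXin XPU path introduced elsewhere in this patch; the `use_kunlunxin()` call is the renamed Python option, while treating `kunlunxin` as an extra value of `device` here is an assumption, not part of the committed demo.

```python
import fastdeploy as fd

# Configurations (sketch): device may be 'cpu', 'gpu' or 'kunlunxin'.
device = 'kunlunxin'
use_trt = False

option = fd.RuntimeOption()
if device.lower() == 'gpu':
    option.use_gpu()
elif device.lower() == 'kunlunxin':
    option.use_kunlunxin()  # formerly option.use_xpu()
if use_trt:
    option.use_trt_backend()
    option.set_trt_cache_file('ppyoloe.trt')
```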
diff --git a/examples/vision/detection/paddledetection/rknpu2/README.md b/examples/vision/detection/paddledetection/rknpu2/README.md
index d242cf339..5fad37ceb 100644
--- a/examples/vision/detection/paddledetection/rknpu2/README.md
+++ b/examples/vision/detection/paddledetection/rknpu2/README.md
@@ -70,7 +70,8 @@ outputs: ['tmp_17','p2o.Concat.9']
# ONNX模型转RKNN模型
# 转换模型,模型将生成在picodet_s_320_coco_lcnet_non_postprocess目录下
-python tools/rknpu2/export.py --config_path tools/rknpu2/config/RK3568/picodet_s_416_coco_lcnet.yaml
+python tools/rknpu2/export.py --config_path tools/rknpu2/config/picodet_s_416_coco_lcnet.yaml \
+ --target_platform rk3588
```
### 修改模型运行时的配置文件
diff --git a/examples/vision/detection/paddledetection/rv1126/cpp/CMakeLists.txt b/examples/vision/detection/paddledetection/rv1126/cpp/CMakeLists.txt
index 7a145177e..d5627b4e7 100755
--- a/examples/vision/detection/paddledetection/rv1126/cpp/CMakeLists.txt
+++ b/examples/vision/detection/paddledetection/rv1126/cpp/CMakeLists.txt
@@ -20,19 +20,11 @@ install(TARGETS infer_demo DESTINATION ./)
install(DIRECTORY models DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
-# install(DIRECTORY run_with_adb.sh DESTINATION ./)
-file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
-install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
-
-file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
-install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
-
-file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
-install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
-
-file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
-install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
+file(GLOB_RECURSE FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/lib*.so*)
+file(GLOB_RECURSE ALL_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/lib*.so*)
+list(APPEND ALL_LIBS ${FASTDEPLOY_LIBS})
+install(PROGRAMS ${ALL_LIBS} DESTINATION lib)
file(GLOB ADB_TOOLS run_with_adb.sh)
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)
diff --git a/examples/vision/detection/paddledetection/rv1126/cpp/infer_ppyoloe.cc b/examples/vision/detection/paddledetection/rv1126/cpp/infer_ppyoloe.cc
index 609a41d4b..c7b81f9f9 100755
--- a/examples/vision/detection/paddledetection/rv1126/cpp/infer_ppyoloe.cc
+++ b/examples/vision/detection/paddledetection/rv1126/cpp/infer_ppyoloe.cc
@@ -24,7 +24,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "infer_cfg.yml";
auto subgraph_file = model_dir + sep + "subgraph.txt";
-
+ fastdeploy::vision::EnableFlyCV();
fastdeploy::RuntimeOption option;
option.UseTimVX();
option.SetLiteSubgraphPartitionPath(subgraph_file);
diff --git a/examples/vision/detection/paddledetection/serving/README.md b/examples/vision/detection/paddledetection/serving/README.md
index d6a38c8d4..adfa19093 100644
--- a/examples/vision/detection/paddledetection/serving/README.md
+++ b/examples/vision/detection/paddledetection/serving/README.md
@@ -37,13 +37,13 @@ cp models/runtime/ppyoloe_runtime_config.pbtxt models/runtime/config.pbtxt
# 拉取fastdeploy镜像(x.y.z为镜像版本号,需替换成fastdeploy版本数字)
# GPU镜像
-docker pull paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
# CPU镜像
-docker pull paddlepaddle/fastdeploy:z.y.z-cpu-only-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
# 运行容器.容器名字为 fd_serving, 并挂载当前目录为容器的 /serving 目录
-nvidia-docker run -it --net=host --name fd_serving --shm-size="1g" -v `pwd`/:/serving paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
+nvidia-docker run -it --net=host --name fd_serving --shm-size="1g" -v `pwd`/:/serving registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
# 启动服务(不设置CUDA_VISIBLE_DEVICES环境变量,会拥有所有GPU卡的调度权限)
CUDA_VISIBLE_DEVICES=0 fastdeployserver --model-repository=/serving/models
diff --git a/examples/vision/detection/yolov5/cpp/README.md b/examples/vision/detection/yolov5/cpp/README.md
index 581f1c49b..8f03a39ad 100755
--- a/examples/vision/detection/yolov5/cpp/README.md
+++ b/examples/vision/detection/yolov5/cpp/README.md
@@ -29,7 +29,7 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000
./infer_paddle_demo yolov5s_infer 000000014439.jpg 1
# GPU上TensorRT推理
./infer_paddle_demo yolov5s_infer 000000014439.jpg 2
-# XPU推理
+# 昆仑芯XPU推理
./infer_paddle_demo yolov5s_infer 000000014439.jpg 3
```
diff --git a/examples/vision/detection/yolov5/cpp/infer_paddle_model.cc b/examples/vision/detection/yolov5/cpp/infer_paddle_model.cc
index d5692ce7c..e4c02af8a 100755
--- a/examples/vision/detection/yolov5/cpp/infer_paddle_model.cc
+++ b/examples/vision/detection/yolov5/cpp/infer_paddle_model.cc
@@ -102,11 +102,11 @@ void TrtInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
fastdeploy::RuntimeOption option;
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::detection::YOLOv5(
model_file, params_file, option, fastdeploy::ModelFormat::PADDLE);
@@ -148,7 +148,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/yolov5/python/README.md b/examples/vision/detection/yolov5/python/README.md
index 83f6ed781..77f9027d5 100755
--- a/examples/vision/detection/yolov5/python/README.md
+++ b/examples/vision/detection/yolov5/python/README.md
@@ -23,8 +23,8 @@ python infer.py --model yolov5s_infer --image 000000014439.jpg --device cpu
python infer.py --model yolov5s_infer --image 000000014439.jpg --device gpu
# GPU上使用TensorRT推理
python infer.py --model yolov5s_infer --image 000000014439.jpg --device gpu --use_trt True
-# XPU推理
-python infer.py --model yolov5s_infer --image 000000014439.jpg --device xpu
+# 昆仑芯XPU推理
+python infer.py --model yolov5s_infer --image 000000014439.jpg --device kunlunxin
```
运行完成可视化结果如下图所示
diff --git a/examples/vision/detection/yolov5/python/infer.py b/examples/vision/detection/yolov5/python/infer.py
index b155af0ed..7f0823d8a 100755
--- a/examples/vision/detection/yolov5/python/infer.py
+++ b/examples/vision/detection/yolov5/python/infer.py
@@ -14,7 +14,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'cpu' or 'gpu' or 'xpu'.")
+ help="Type of inference device, support 'cpu' or 'gpu' or 'kunlunxin'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -25,8 +25,8 @@ def parse_arguments():
def build_option(args):
option = fd.RuntimeOption()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.device.lower() == "gpu":
option.use_gpu()
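Unlike the PaddleDetection demos, the YOLOv5 example loads an exported Paddle-format model directly (there is no `infer_cfg.yml`), so the `kunlunxin` branch added above plugs into a slightly different constructor. A minimal sketch, assuming the `yolov5s_infer` directory from the README and the constructor signature used by `serving/server.py` below; `predict()` is assumed to follow the usual FastDeploy Python API.

```python
import cv2
import fastdeploy as fd

# Run the exported Paddle-format YOLOv5 model on KunlunXin XPU,
# mirroring the --device kunlunxin branch of infer.py above.
option = fd.RuntimeOption()
option.use_kunlunxin()  # formerly option.use_xpu()

model = fd.vision.detection.YOLOv5(
    "yolov5s_infer/model.pdmodel",
    "yolov5s_infer/model.pdiparams",
    runtime_option=option,
    model_format=fd.ModelFormat.PADDLE)

result = model.predict(cv2.imread("000000014439.jpg"))
print(result)
```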
diff --git a/examples/vision/detection/yolov5/python/serving/README.md b/examples/vision/detection/yolov5/python/serving/README.md
new file mode 120000
index 000000000..bacd3186b
--- /dev/null
+++ b/examples/vision/detection/yolov5/python/serving/README.md
@@ -0,0 +1 @@
+README_CN.md
\ No newline at end of file
diff --git a/examples/vision/detection/yolov5/python/serving/README_CN.md b/examples/vision/detection/yolov5/python/serving/README_CN.md
new file mode 100644
index 000000000..28963fd3f
--- /dev/null
+++ b/examples/vision/detection/yolov5/python/serving/README_CN.md
@@ -0,0 +1,36 @@
+简体中文 | [English](README_EN.md)
+
+# YOLOv5 Python轻量服务化部署示例
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+
+服务端:
+```bash
+# 下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/yolov5/python/serving
+
+# 下载模型文件
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_infer.tar
+tar xvf yolov5s_infer.tar
+
+# 启动服务,可修改server.py中的配置项来指定硬件、后端等
+# 可通过--host、--port指定IP和端口号
+fastdeploy simple_serving --app server:app
+```
+
+客户端:
+```bash
+# 下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/yolov5/python/serving
+
+# 下载测试图片
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+# 请求服务,获取推理结果(如有必要,请修改脚本中的IP和端口号)
+python client.py
+```
diff --git a/examples/vision/detection/yolov5/python/serving/README_EN.md b/examples/vision/detection/yolov5/python/serving/README_EN.md
new file mode 100644
index 000000000..b0cb92244
--- /dev/null
+++ b/examples/vision/detection/yolov5/python/serving/README_EN.md
@@ -0,0 +1,36 @@
+English | [简体中文](README_CN.md)
+
+# YOLOv5 Python Simple Serving Demo
+
+
+## Environment
+
+- 1. Prepare environment and install FastDeploy Python whl, refer to [download_prebuilt_libraries](../../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
+
+Server:
+```bash
+# Download demo code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/yolov5/python/serving
+
+# Download model
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_infer.tar
+tar xvf yolov5s_infer.tar
+
+# Launch server, change the configurations in server.py to select hardware, backend, etc.
+# and use --host, --port to specify IP and port
+fastdeploy simple_serving --app server:app
+```
+
+Client:
+```bash
+# Download demo code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/yolov5/python/serving
+
+# Download test image
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+# Send request and get inference result (Please adapt the IP and port if necessary)
+python client.py
+```
diff --git a/examples/vision/detection/yolov5/python/serving/client.py b/examples/vision/detection/yolov5/python/serving/client.py
new file mode 100644
index 000000000..bb310dff6
--- /dev/null
+++ b/examples/vision/detection/yolov5/python/serving/client.py
@@ -0,0 +1,23 @@
+import requests
+import json
+import cv2
+import fastdeploy as fd
+from fastdeploy.serving.utils import cv2_to_base64
+
+if __name__ == '__main__':
+ url = "http://127.0.0.1:8000/fd/yolov5s"
+ headers = {"Content-Type": "application/json"}
+
+ im = cv2.imread("000000014439.jpg")
+ data = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}
+
+ resp = requests.post(url=url, headers=headers, data=json.dumps(data))
+ if resp.status_code == 200:
+ r_json = json.loads(resp.json()["result"])
+ det_result = fd.vision.utils.json_to_detection(r_json)
+ vis_im = fd.vision.vis_detection(im, det_result, score_threshold=0.5)
+ cv2.imwrite("visualized_result.jpg", vis_im)
+ print("Visualized result save in ./visualized_result.jpg")
+ else:
+ print("Error code:", resp.status_code)
+ print(resp.text)
diff --git a/examples/vision/detection/yolov5/python/serving/server.py b/examples/vision/detection/yolov5/python/serving/server.py
new file mode 100644
index 000000000..138260b05
--- /dev/null
+++ b/examples/vision/detection/yolov5/python/serving/server.py
@@ -0,0 +1,38 @@
+import fastdeploy as fd
+from fastdeploy.serving.server import SimpleServer
+import os
+import logging
+
+logging.getLogger().setLevel(logging.INFO)
+
+# Configurations
+model_dir = 'yolov5s_infer'
+device = 'cpu'
+use_trt = False
+
+# Prepare model
+model_file = os.path.join(model_dir, "model.pdmodel")
+params_file = os.path.join(model_dir, "model.pdiparams")
+
+# Setup runtime option to select hardware, backend, etc.
+option = fd.RuntimeOption()
+if device.lower() == 'gpu':
+ option.use_gpu()
+if use_trt:
+ option.use_trt_backend()
+ option.set_trt_input_shape("images", [1, 3, 640, 640])
+ option.set_trt_cache_file('yolov5s.trt')
+
+# Create model instance
+model_instance = fd.vision.detection.YOLOv5(
+ model_file,
+ params_file,
+ runtime_option=option,
+ model_format=fd.ModelFormat.PADDLE)
+
+# Create server, setup REST API
+app = SimpleServer()
+app.register(
+ task_name="fd/yolov5s",
+ model_handler=fd.serving.handler.VisionModelHandler,
+ predictor=model_instance)
diff --git a/examples/vision/detection/yolov5/rv1126/cpp/CMakeLists.txt b/examples/vision/detection/yolov5/rv1126/cpp/CMakeLists.txt
index 3c9eee38a..64b7a6466 100755
--- a/examples/vision/detection/yolov5/rv1126/cpp/CMakeLists.txt
+++ b/examples/vision/detection/yolov5/rv1126/cpp/CMakeLists.txt
@@ -21,17 +21,10 @@ install(TARGETS infer_demo DESTINATION ./)
install(DIRECTORY models DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
-file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
-install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
-
-file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
-install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
-
-file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
-install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
-
-file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
-install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
+file(GLOB_RECURSE FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/lib*.so*)
+file(GLOB_RECURSE ALL_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/lib*.so*)
+list(APPEND ALL_LIBS ${FASTDEPLOY_LIBS})
+install(PROGRAMS ${ALL_LIBS} DESTINATION lib)
file(GLOB ADB_TOOLS run_with_adb.sh)
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)
diff --git a/examples/vision/detection/yolov5/rv1126/cpp/infer.cc b/examples/vision/detection/yolov5/rv1126/cpp/infer.cc
index f1cf9e8dc..3e57ee4d8 100755
--- a/examples/vision/detection/yolov5/rv1126/cpp/infer.cc
+++ b/examples/vision/detection/yolov5/rv1126/cpp/infer.cc
@@ -23,7 +23,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto subgraph_file = model_dir + sep + "subgraph.txt";
-
+ fastdeploy::vision::EnableFlyCV();
fastdeploy::RuntimeOption option;
option.UseTimVX();
option.SetLiteSubgraphPartitionPath(subgraph_file);
diff --git a/examples/vision/detection/yolov5/serving/README.md b/examples/vision/detection/yolov5/serving/README.md
index 52e3b9743..8ad621849 100644
--- a/examples/vision/detection/yolov5/serving/README.md
+++ b/examples/vision/detection/yolov5/serving/README.md
@@ -20,12 +20,12 @@ mv yolov5s.onnx models/runtime/1/model.onnx
# 拉取fastdeploy镜像(x.y.z为镜像版本号,需参照serving文档替换为数字)
# GPU镜像
-docker pull paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
# CPU镜像
-docker pull paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
# 运行容器.容器名字为 fd_serving, 并挂载当前目录为容器的 /yolov5_serving 目录
-nvidia-docker run -it --net=host --name fd_serving -v `pwd`/:/yolov5_serving paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
+nvidia-docker run -it --net=host --name fd_serving -v `pwd`/:/yolov5_serving registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
# 启动服务(不设置CUDA_VISIBLE_DEVICES环境变量,会拥有所有GPU卡的调度权限)
CUDA_VISIBLE_DEVICES=0 fastdeployserver --model-repository=/yolov5_serving/models --backend-config=python,shm-default-byte-size=10485760
diff --git a/examples/vision/detection/yolov6/cpp/README.md b/examples/vision/detection/yolov6/cpp/README.md
index 5a07e6738..973b6a546 100755
--- a/examples/vision/detection/yolov6/cpp/README.md
+++ b/examples/vision/detection/yolov6/cpp/README.md
@@ -19,16 +19,16 @@ cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
#下载Paddle模型文件和测试图片
-https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_infer.tar
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_infer.tar
tar -xf yolov6s_infer.tar
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# CPU推理
-./infer_paddle_demo ./../yolov6s_infer 000000014439.jpg 0
+./infer_paddle_demo yolov6s_infer 000000014439.jpg 0
# GPU推理
-./infer_paddle_demo ./../yolov6s_infer 000000014439.jpg 1
-# XPU推理
-./infer_paddle_demo ./../yolov6s_infer 000000014439.jpg 2
+./infer_paddle_demo yolov6s_infer 000000014439.jpg 1
+# 昆仑芯XPU推理
+./infer_paddle_demo yolov6s_infer 000000014439.jpg 2
```
如果想要验证ONNX模型的推理,可以参考如下命令:
diff --git a/examples/vision/detection/yolov6/cpp/infer_paddle_model.cc b/examples/vision/detection/yolov6/cpp/infer_paddle_model.cc
index 87cdb347b..fc43ee0ae 100755
--- a/examples/vision/detection/yolov6/cpp/infer_paddle_model.cc
+++ b/examples/vision/detection/yolov6/cpp/infer_paddle_model.cc
@@ -45,9 +45,9 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
fastdeploy::RuntimeOption option;
- option.UseXpu();
+ option.UseKunlunXin();
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto model = fastdeploy::vision::detection::YOLOv6(model_file, params_file, option, fastdeploy::ModelFormat::PADDLE);
@@ -103,7 +103,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./yolov6s_infer ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with xpu."
+ "with gpu; 2: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -113,7 +113,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/detection/yolov6/python/README.md b/examples/vision/detection/yolov6/python/README.md
index 3b413f4fb..a12bb9020 100755
--- a/examples/vision/detection/yolov6/python/README.md
+++ b/examples/vision/detection/yolov6/python/README.md
@@ -12,7 +12,7 @@
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vision/detection/yolov6/python/
-https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_infer.tar
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_infer.tar
tar -xf yolov6s_infer.tar
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
@@ -20,8 +20,8 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000
python infer_paddle_model.py --model yolov6s_infer --image 000000014439.jpg --device cpu
# GPU推理
python infer_paddle_model.py --model yolov6s_infer --image 000000014439.jpg --device gpu
-# XPU推理
-python infer_paddle_model.py --model yolov6s_infer --image 000000014439.jpg --device xpu
+# 昆仑芯XPU推理
+python infer_paddle_model.py --model yolov6s_infer --image 000000014439.jpg --device kunlunxin
```
如果想要验证ONNX模型的推理,可以参考如下命令:
```bash
diff --git a/examples/vision/detection/yolov6/python/infer.py b/examples/vision/detection/yolov6/python/infer.py
old mode 100644
new mode 100755
diff --git a/examples/vision/detection/yolov6/python/infer_paddle_model.py b/examples/vision/detection/yolov6/python/infer_paddle_model.py
index 7cecbe8d2..250486201 100755
--- a/examples/vision/detection/yolov6/python/infer_paddle_model.py
+++ b/examples/vision/detection/yolov6/python/infer_paddle_model.py
@@ -16,7 +16,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
return parser.parse_args()
@@ -25,8 +25,8 @@ def build_option(args):
if args.device.lower() == "gpu":
option.use_gpu(0)
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
return option
diff --git a/examples/vision/detection/yolov7/cpp/README.md b/examples/vision/detection/yolov7/cpp/README.md
index 894965646..8fc7928a8 100755
--- a/examples/vision/detection/yolov7/cpp/README.md
+++ b/examples/vision/detection/yolov7/cpp/README.md
@@ -26,7 +26,7 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000
./infer_paddle_model_demo yolov7_infer 000000014439.jpg 0
# GPU推理
./infer_paddle_model_demo yolov7_infer 000000014439.jpg 1
-# XPU推理
+# 昆仑芯XPU推理
./infer_paddle_model_demo yolov7_infer 000000014439.jpg 2
```
如果想要验证ONNX模型的推理,可以参考如下命令:
diff --git a/examples/vision/detection/yolov7/cpp/infer_paddle_model.cc b/examples/vision/detection/yolov7/cpp/infer_paddle_model.cc
index 73a0c77ba..f88f0d3a0 100755
--- a/examples/vision/detection/yolov7/cpp/infer_paddle_model.cc
+++ b/examples/vision/detection/yolov7/cpp/infer_paddle_model.cc
@@ -52,7 +52,7 @@ int main(int argc, char* argv[]) {
<< std::endl;
std::cout << "The data type of run_option is int, 0: run on cpu with ORT "
"backend; 1: run "
- "on gpu with TensorRT backend ; 2: run with xpu. "
+ "on gpu with TensorRT backend ; 2: run with kunlunxin. "
<< std::endl;
return -1;
}
@@ -67,7 +67,7 @@ int main(int argc, char* argv[]) {
option.UseGpu();
option.UseTrtBackend();
} else if (flag == 2) {
- option.UseXpu();
+ option.UseKunlunXin();
}
std::string model_dir = argv[1];
diff --git a/examples/vision/detection/yolov7/python/README.md b/examples/vision/detection/yolov7/python/README.md
index 20c3497cd..c0aa78337 100755
--- a/examples/vision/detection/yolov7/python/README.md
+++ b/examples/vision/detection/yolov7/python/README.md
@@ -22,8 +22,8 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000
python infer_paddle_model.py --model yolov7_infer --image 000000014439.jpg --device cpu
# GPU推理
python infer_paddle_model.py --model yolov7_infer --image 000000014439.jpg --device gpu
-# XPU推理
-python infer_paddle_model.py --model yolov7_infer --image 000000014439.jpg --device xpu
+# 昆仑芯XPU推理
+python infer_paddle_model.py --model yolov7_infer --image 000000014439.jpg --device kunlunxin
```
如果想要验证ONNX模型的推理,可以参考如下命令:
```bash
diff --git a/examples/vision/detection/yolov7/python/README_EN.md b/examples/vision/detection/yolov7/python/README_EN.md
index 65f6eea21..64b6fcc98 100755
--- a/examples/vision/detection/yolov7/python/README_EN.md
+++ b/examples/vision/detection/yolov7/python/README_EN.md
@@ -22,8 +22,8 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000
python infer_paddle_model.py --model yolov7_infer --image 000000014439.jpg --device cpu
# GPU
python infer_paddle_model.py --model yolov7_infer --image 000000014439.jpg --device gpu
-# XPU
-python infer_paddle_model.py --model yolov7_infer --image 000000014439.jpg --device xpu
+# KunlunXin XPU
+python infer_paddle_model.py --model yolov7_infer --image 000000014439.jpg --device kunlunxin
```
If you want to test ONNX model:
```bash
diff --git a/examples/vision/detection/yolov7/python/infer.py b/examples/vision/detection/yolov7/python/infer.py
old mode 100644
new mode 100755
diff --git a/examples/vision/detection/yolov7/python/infer_paddle_model.py b/examples/vision/detection/yolov7/python/infer_paddle_model.py
index 1dd278b07..2a3351e59 100755
--- a/examples/vision/detection/yolov7/python/infer_paddle_model.py
+++ b/examples/vision/detection/yolov7/python/infer_paddle_model.py
@@ -16,7 +16,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
return parser.parse_args()
@@ -25,8 +25,8 @@ def build_option(args):
if args.device.lower() == "gpu":
option.use_gpu(0)
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
return option
diff --git a/examples/vision/faceid/README.md b/examples/vision/faceid/README.md
index aa5b6b6fd..b897314cb 100644
--- a/examples/vision/faceid/README.md
+++ b/examples/vision/faceid/README.md
@@ -1,6 +1,8 @@
# 人脸识别模型
+## 模型支持列表
+
FastDeploy目前支持如下人脸识别模型部署
| 模型 | 说明 | 模型格式 | 版本 |
@@ -10,3 +12,7 @@ FastDeploy目前支持如下人脸识别模型部署
| [deepinsight/PartialFC](./insightface) | PartialFC 系列模型 | ONNX | [CommitID:babb9a5](https://github.com/deepinsight/insightface/commit/babb9a5) |
| [deepinsight/VPL](./insightface) | VPL 系列模型 | ONNX | [CommitID:babb9a5](https://github.com/deepinsight/insightface/commit/babb9a5) |
| [paddleclas/AdaFace](./adaface) | AdaFace 系列模型 | PADDLE | [CommitID:babb9a5](https://github.com/PaddlePaddle/PaddleClas/tree/v2.4.0) |
+
+## 模型demo简介
+
+ArcFace、CosFace、PartialFC、VPL同属于deepinsight系列,其demo均使用ONNX格式的模型进行推理;AdaFace则采用Paddle Inference作为推理后端。
diff --git a/examples/vision/faceid/adaface/cpp/CMakeLists.txt b/examples/vision/faceid/adaface/cpp/CMakeLists.txt
index 434573219..9df990f64 100644
--- a/examples/vision/faceid/adaface/cpp/CMakeLists.txt
+++ b/examples/vision/faceid/adaface/cpp/CMakeLists.txt
@@ -1,4 +1,4 @@
-PROJECT(infer_demo C CXX)
+PROJECT(infer_adaface_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.12)
# 指定下载解压后的fastdeploy库路径
@@ -9,5 +9,5 @@ include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# 添加FastDeploy依赖头文件
include_directories(${FASTDEPLOY_INCS})
-add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
-target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
+add_executable(infer_adaface_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+target_link_libraries(infer_adaface_demo ${FASTDEPLOY_LIBS})
diff --git a/examples/vision/faceid/adaface/cpp/README.md b/examples/vision/faceid/adaface/cpp/README.md
index b4a6a5991..f284b2fd0 100755
--- a/examples/vision/faceid/adaface/cpp/README.md
+++ b/examples/vision/faceid/adaface/cpp/README.md
@@ -11,53 +11,40 @@
以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试,支持此模型需保证FastDeploy版本0.7.0以上(x.x.x>=0.7.0)
```bash
+# 如果预编译库不包含本模型,请从最新代码编译SDK
mkdir build
cd build
-# 下载FastDeploy预编译库,用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
#下载测试图片
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_0.JPG
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_1.JPG
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_2.JPG
+wget https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/face_demo.zip
+unzip face_demo.zip
# 如果为Paddle模型,运行以下代码
wget https://bj.bcebos.com/paddlehub/fastdeploy/mobilefacenet_adaface.tgz
tar zxvf mobilefacenet_adaface.tgz -C ./
# CPU推理
-./infer_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
+./infer_adaface_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
- test_lite_focal_arcface_0.JPG \
- test_lite_focal_arcface_1.JPG \
- test_lite_focal_arcface_2.JPG \
- 0
+ face_0.jpg face_1.jpg face_2.jpg 0
# GPU推理
-./infer_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
+./infer_adaface_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
- test_lite_focal_arcface_0.JPG \
- test_lite_focal_arcface_1.JPG \
- test_lite_focal_arcface_2.JPG \
- 1
+ face_0.jpg face_1.jpg face_2.jpg 1
# GPU上TensorRT推理
-./infer_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
+./infer_adaface_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
- test_lite_focal_arcface_0.JPG \
- test_lite_focal_arcface_1.JPG \
- test_lite_focal_arcface_2.JPG \
- 2
+ face_0.jpg face_1.jpg face_2.jpg 2
-# XPU推理
+# 昆仑芯XPU推理
-./infer_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
+./infer_adaface_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
- test_lite_focal_arcface_0.JPG \
- test_lite_focal_arcface_1.JPG \
- test_lite_focal_arcface_2.JPG \
- 3
+ face_0.jpg face_1.jpg face_2.jpg 3
```
运行完成可视化结果如下图所示
@@ -101,16 +88,22 @@ AdaFace模型加载和初始化,如果使用PaddleInference推理,model_file
> > * **im**: 输入图像,注意需为HWC,BGR格式
> > * **result**: 检测结果,包括检测框,各个框的置信度, FaceRecognitionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
-### 类成员变量
-#### 预处理参数
-用户可按照自己的实际需求,修改下列预处理参数,从而影响最终的推理和部署效果
+### 修改预处理以及后处理的参数
+预处理和后处理的参数需要通过修改AdaFacePostprocessor、AdaFacePreprocessor的成员变量来调整。
+#### AdaFacePreprocessor成员变量(预处理参数)
+> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[112, 112],
+ 通过AdaFacePreprocessor::SetSize(std::vector& size)来进行修改
+> > * **alpha**(vector<float>): 预处理归一化的alpha值,计算公式为`x'=x*alpha+beta`,alpha默认为[1. / 127.5, 1.f / 127.5, 1. / 127.5],
+ 通过AdaFacePreprocessor::SetAlpha(std::vector& alpha)来进行修改
+> > * **beta**(vector<float>): 预处理归一化的beta值,计算公式为`x'=x*alpha+beta`,beta默认为[-1.f, -1.f, -1.f],
+ 通过AdaFacePreprocessor::SetBeta(std::vector& beta)来进行修改
+> > * **permute**(bool): 预处理是否将BGR转换成RGB,默认true,
+ 通过AdaFacePreprocessor::SetPermute(bool permute)来进行修改
-> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[112, 112]
-> > * **alpha**(vector<float>): 预处理归一化的alpha值,计算公式为`x'=x*alpha+beta`,alpha默认为[1. / 127.5, 1.f / 127.5, 1. / 127.5]
-> > * **beta**(vector<float>): 预处理归一化的beta值,计算公式为`x'=x*alpha+beta`,beta默认为[-1.f, -1.f, -1.f]
-> > * **swap_rb**(bool): 预处理是否将BGR转换成RGB,默认true
-> > * **l2_normalize**(bool): 输出人脸向量之前是否执行l2归一化,默认false
+#### AdaFacePostprocessor成员变量(后处理参数)
+> > * **l2_normalize**(bool): 输出人脸向量之前是否执行l2归一化,默认false,
+ AdaFacePostprocessor::SetL2Normalize(bool& l2_normalize)来进行修改
- [模型介绍](../../)
- [Python部署](../python)
diff --git a/examples/vision/faceid/adaface/cpp/infer.cc b/examples/vision/faceid/adaface/cpp/infer.cc
index 8d0ca938c..08fc7b23f 100755
--- a/examples/vision/faceid/adaface/cpp/infer.cc
+++ b/examples/vision/faceid/adaface/cpp/infer.cc
@@ -1,14 +1,17 @@
-/***************************************************************************
- *
- * Copyright (c) 2021 Baidu.com, Inc. All Rights Reserved
- *
- **************************************************************************/
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
-/**
- * @author Baidu
- * @brief demo_image_inference
- *
- **/
#include "fastdeploy/vision.h"
void CpuInfer(const std::string &model_file, const std::string ¶ms_file,
@@ -28,8 +31,8 @@ void CpuInfer(const std::string &model_file, const std::string ¶ms_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -40,17 +43,19 @@ void CpuInfer(const std::string &model_file, const std::string &params_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
-void XpuInfer(const std::string &model_file, const std::string &params_file,
+void KunlunXinInfer(const std::string &model_file, const std::string &params_file,
              const std::vector<std::string> &image_file) {
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::faceid::AdaFace(model_file, params_file);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
@@ -65,8 +70,8 @@ void XpuInfer(const std::string &model_file, const std::string &params_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -77,9 +82,11 @@ void XpuInfer(const std::string &model_file, const std::string &params_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -103,8 +110,8 @@ void GpuInfer(const std::string &model_file, const std::string &params_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -115,9 +122,11 @@ void GpuInfer(const std::string &model_file, const std::string &params_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -143,8 +152,8 @@ void TrtInfer(const std::string &model_file, const std::string &params_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -155,9 +164,11 @@ void TrtInfer(const std::string &model_file, const std::string &params_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -171,7 +182,7 @@ int main(int argc, char *argv[]) {
"test_lite_focal_AdaFace_2.JPG 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -185,7 +196,7 @@ int main(int argc, char *argv[]) {
} else if (std::atoi(argv[6]) == 2) {
TrtInfer(argv[1], argv[2], image_files);
} else if (std::atoi(argv[6]) == 3) {
- CpuInfer(argv[1], argv[2], image_files);
+ KunlunXinInfer(argv[1], argv[2], image_files);
}
return 0;
}
diff --git a/examples/vision/faceid/adaface/python/README.md b/examples/vision/faceid/adaface/python/README.md
index 699da9001..6475b1a32 100755
--- a/examples/vision/faceid/adaface/python/README.md
+++ b/examples/vision/faceid/adaface/python/README.md
@@ -15,9 +15,8 @@ cd examples/vision/faceid/adaface/python/
#下载AdaFace模型文件和测试图片
#下载测试图片
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_0.JPG
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_1.JPG
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_2.JPG
+wget https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/face_demo.zip
+unzip face_demo.zip
# 如果为Paddle模型,运行以下代码
wget https://bj.bcebos.com/paddlehub/fastdeploy/mobilefacenet_adaface.tgz
@@ -26,33 +25,33 @@ tar zxvf mobilefacenet_adaface.tgz -C ./
# CPU推理
python infer.py --model mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
--params_file mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
- --face test_lite_focal_arcface_0.JPG \
- --face_positive test_lite_focal_arcface_1.JPG \
- --face_negative test_lite_focal_arcface_2.JPG \
+ --face face_0.jpg \
+ --face_positive face_1.jpg \
+ --face_negative face_2.jpg \
--device cpu
# GPU推理
python infer.py --model mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
--params_file mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
- --face test_lite_focal_arcface_0.JPG \
- --face_positive test_lite_focal_arcface_1.JPG \
- --face_negative test_lite_focal_arcface_2.JPG \
+ --face face_0.jpg \
+ --face_positive face_1.jpg \
+ --face_negative face_2.jpg \
--device gpu
# GPU上使用TensorRT推理
python infer.py --model mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
--params_file mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
- --face test_lite_focal_arcface_0.JPG \
- --face_positive test_lite_focal_arcface_1.JPG \
- --face_negative test_lite_focal_arcface_2.JPG \
- --device gpu \
- --use_trt True
+ --face face_0.jpg \
+ --face_positive face_1.jpg \
+ --face_negative face_2.jpg \
+ --device gpu \
+ --use_trt True
-# XPU推理
+# 昆仑芯XPU推理
python infer.py --model mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
--params_file mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
--face test_lite_focal_arcface_0.JPG \
--face_positive test_lite_focal_arcface_1.JPG \
--face_negative test_lite_focal_arcface_2.JPG \
- --device xpu
+ --device kunlunxin
```
运行完成可视化结果如下图所示
@@ -106,11 +105,15 @@ AdaFace模型加载和初始化,其中model_file为导出的ONNX模型格式
#### 预处理参数
用户可按照自己的实际需求,修改下列预处理参数,从而影响最终的推理和部署效果
-
+#### AdaFacePreprocessor的成员变量
+以下变量为AdaFacePreprocessor的成员变量
> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[112, 112]
> > * **alpha**(list[float]): 预处理归一化的alpha值,计算公式为`x'=x*alpha+beta`,alpha默认为[1. / 127.5, 1.f / 127.5, 1. / 127.5]
> > * **beta**(list[float]): 预处理归一化的beta值,计算公式为`x'=x*alpha+beta`,beta默认为[-1.f, -1.f, -1.f]
> > * **swap_rb**(bool): 预处理是否将BGR转换成RGB,默认True
+
+#### AdaFacePostprocessor的成员变量
+以下变量为AdaFacePostprocessor的成员变量
> > * **l2_normalize**(bool): 输出人脸向量之前是否执行l2归一化,默认False
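
For reference, a minimal Python sketch of how the relocated `l2_normalize` flag is used after this change. It is only an illustration: the `AdaFace` constructor keyword `runtime_option`, the `embedding` field on the result, and the manual numpy cosine similarity are assumptions based on the example scripts in this directory, not lines added by the patch.

```python
import cv2
import numpy as np
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_kunlunxin()  # or option.use_gpu(); leaving it unset keeps the CPU default

# Paddle model files downloaded by the commands above (assumed constructor signature)
model = fd.vision.faceid.AdaFace(
    "mobilefacenet_adaface/mobilefacenet_adaface.pdmodel",
    "mobilefacenet_adaface/mobilefacenet_adaface.pdiparams",
    runtime_option=option)

# l2_normalize now lives on the postprocessor instead of the model object
model.postprocessor.l2_normalize = True

res0 = model.predict(cv2.imread("face_0.jpg"))
res1 = model.predict(cv2.imread("face_1.jpg"))

# cosine similarity computed by hand to avoid relying on any helper API
e0, e1 = np.array(res0.embedding), np.array(res1.embedding)
print(float(e0 @ e1 / (np.linalg.norm(e0) * np.linalg.norm(e1))))
```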
diff --git a/examples/vision/faceid/adaface/python/infer.py b/examples/vision/faceid/adaface/python/infer.py
index 50a30191f..4fbd549c6 100755
--- a/examples/vision/faceid/adaface/python/infer.py
+++ b/examples/vision/faceid/adaface/python/infer.py
@@ -39,7 +39,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'cpu' , 'xpu' or 'gpu'.")
+ help="Type of inference device, support 'cpu' , 'kunlunxin' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -54,8 +54,8 @@ def build_option(args):
if args.device.lower() == "gpu":
option.use_gpu()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.use_trt:
option.use_trt_backend()
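
The same device-selection pattern recurs in every Python example touched by this PR; the only functional change is that `xpu`/`use_xpu()` becomes `kunlunxin`/`use_kunlunxin()`. A condensed sketch of that mapping:

```python
import fastdeploy as fd

def build_option(device: str, use_trt: bool = False) -> fd.RuntimeOption:
    """Map the --device flag used by these examples to a RuntimeOption."""
    option = fd.RuntimeOption()
    if device.lower() == "gpu":
        option.use_gpu()
    elif device.lower() == "kunlunxin":  # formerly "xpu" / option.use_xpu()
        option.use_kunlunxin()
    # "cpu" keeps the default backend selection
    if use_trt:
        option.use_trt_backend()
    return option
```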
diff --git a/examples/vision/faceid/insightface/cpp/README.md b/examples/vision/faceid/insightface/cpp/README.md
index 341478b7c..1722d9d5d 100644
--- a/examples/vision/faceid/insightface/cpp/README.md
+++ b/examples/vision/faceid/insightface/cpp/README.md
@@ -7,12 +7,11 @@
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
-以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试,支持此模型需保证FastDeploy版本0.7.0以上(x.x.x>=0.7.0)
+以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试
```bash
mkdir build
cd build
-# 下载FastDeploy预编译库,用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
@@ -20,17 +19,15 @@ make -j
#下载官方转换好的ArcFace模型文件和测试图片
wget https://bj.bcebos.com/paddlehub/fastdeploy/ms1mv3_arcface_r100.onnx
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_0.JPG
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_1.JPG
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_2.JPG
-
+wget https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/face_demo.zip
+unzip face_demo.zip
# CPU推理
-./infer_arcface_demo ms1mv3_arcface_r100.onnx test_lite_focal_arcface_0.JPG test_lite_focal_arcface_1.JPG test_lite_focal_arcface_2.JPG 0
+./infer_arcface_demo ms1mv3_arcface_r100.onnx face_0.jpg face_1.jpg face_2.jpg 0
# GPU推理
-./infer_arcface_demo ms1mv3_arcface_r100.onnx test_lite_focal_arcface_0.JPG test_lite_focal_arcface_1.JPG test_lite_focal_arcface_2.JPG 1
+./infer_arcface_demo ms1mv3_arcface_r100.onnx face_0.jpg face_1.jpg face_2.jpg 1
# GPU上TensorRT推理
-./infer_arcface_demo ms1mv3_arcface_r100.onnx test_lite_focal_arcface_0.JPG test_lite_focal_arcface_1.JPG test_lite_focal_arcface_2.JPG 2
+./infer_arcface_demo ms1mv3_arcface_r100.onnx face_0.jpg face_1.jpg face_2.jpg 2
```
运行完成可视化结果如下图所示
@@ -113,16 +110,22 @@ VPL模型加载和初始化,其中model_file为导出的ONNX模型格式。
> > * **im**: 输入图像,注意需为HWC,BGR格式
> > * **result**: 检测结果,包括检测框,各个框的置信度, FaceRecognitionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
-### 类成员变量
-#### 预处理参数
-用户可按照自己的实际需求,修改下列预处理参数,从而影响最终的推理和部署效果
+### 修改预处理以及后处理的参数
+预处理和后处理的参数需要通过修改InsightFaceRecognitionPreprocessor与InsightFaceRecognitionPostprocessor的成员变量来进行调整。
+#### InsightFaceRecognitionPreprocessor成员变量(预处理参数)
+> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[112, 112],
+  通过InsightFaceRecognitionPreprocessor::SetSize(std::vector<int>& size)来进行修改
+> > * **alpha**(vector<float>): 预处理归一化的alpha值,计算公式为`x'=x*alpha+beta`,alpha默认为[1. / 127.5, 1.f / 127.5, 1. / 127.5],
+  通过InsightFaceRecognitionPreprocessor::SetAlpha(std::vector<float>& alpha)来进行修改
+> > * **beta**(vector<float>): 预处理归一化的beta值,计算公式为`x'=x*alpha+beta`,beta默认为[-1.f, -1.f, -1.f],
+  通过InsightFaceRecognitionPreprocessor::SetBeta(std::vector<float>& beta)来进行修改
+> > * **permute**(bool): 预处理是否将BGR转换成RGB,默认true,
+ 通过InsightFaceRecognitionPreprocessor::SetPermute(bool permute)来进行修改
-> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[112, 112]
-> > * **alpha**(vector<float>): 预处理归一化的alpha值,计算公式为`x'=x*alpha+beta`,alpha默认为[1. / 127.5, 1.f / 127.5, 1. / 127.5]
-> > * **beta**(vector<float>): 预处理归一化的beta值,计算公式为`x'=x*alpha+beta`,beta默认为[-1.f, -1.f, -1.f]
-> > * **swap_rb**(bool): 预处理是否将BGR转换成RGB,默认true
-> > * **l2_normalize**(bool): 输出人脸向量之前是否执行l2归一化,默认false
+#### InsightFaceRecognitionPostprocessor成员变量(后处理参数)
+> > * **l2_normalize**(bool): 输出人脸向量之前是否执行l2归一化,默认false,
+  通过InsightFaceRecognitionPostprocessor::SetL2Normalize(bool& l2_normalize)来进行修改
- [模型介绍](../../)
- [Python部署](../python)
diff --git a/examples/vision/faceid/insightface/cpp/infer_arcface.cc b/examples/vision/faceid/insightface/cpp/infer_arcface.cc
index ce4c0fcd4..dbd390dc1 100644
--- a/examples/vision/faceid/insightface/cpp/infer_arcface.cc
+++ b/examples/vision/faceid/insightface/cpp/infer_arcface.cc
@@ -16,11 +16,7 @@
void CpuInfer(const std::string& model_file,
const std::vector<std::string>& image_file) {
- auto model = fastdeploy::vision::faceid::ArcFace(model_file);
- if (!model.Initialized()) {
- std::cerr << "Failed to initialize." << std::endl;
- return;
- }
+ auto model = fastdeploy::vision::faceid::ArcFace(model_file, "");
cv::Mat face0 = cv::imread(image_file[0]);
cv::Mat face1 = cv::imread(image_file[1]);
@@ -30,8 +26,8 @@ void CpuInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -42,9 +38,11 @@ void CpuInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -67,8 +65,8 @@ void GpuInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -79,9 +77,11 @@ void GpuInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -106,8 +106,8 @@ void TrtInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -118,9 +118,11 @@ void TrtInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -129,8 +131,7 @@ int main(int argc, char* argv[]) {
if (argc < 6) {
std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
"e.g ./infer_arcface_demo ms1mv3_arcface_r100.onnx "
- "test_lite_focal_arcface_0.JPG test_lite_focal_arcface_1.JPG "
- "test_lite_focal_arcface_2.JPG 0"
+ "face_0.jpg face_1.jpg face_2.jpg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend."
diff --git a/examples/vision/faceid/insightface/cpp/infer_cosface.cc b/examples/vision/faceid/insightface/cpp/infer_cosface.cc
index e3b554b04..e8925faa3 100644
--- a/examples/vision/faceid/insightface/cpp/infer_cosface.cc
+++ b/examples/vision/faceid/insightface/cpp/infer_cosface.cc
@@ -16,11 +16,7 @@
void CpuInfer(const std::string& model_file,
const std::vector<std::string>& image_file) {
- auto model = fastdeploy::vision::faceid::CosFace(model_file);
- if (!model.Initialized()) {
- std::cerr << "Failed to initialize." << std::endl;
- return;
- }
+ auto model = fastdeploy::vision::faceid::CosFace(model_file, "");
cv::Mat face0 = cv::imread(image_file[0]);
cv::Mat face1 = cv::imread(image_file[1]);
@@ -30,8 +26,8 @@ void CpuInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -42,9 +38,11 @@ void CpuInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -67,8 +65,8 @@ void GpuInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -79,9 +77,11 @@ void GpuInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -106,8 +106,8 @@ void TrtInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -118,9 +118,11 @@ void TrtInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -128,9 +130,8 @@ void TrtInfer(const std::string& model_file,
int main(int argc, char* argv[]) {
if (argc < 6) {
std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
- "e.g ./infer_arcface_demo ms1mv3_arcface_r100.onnx "
- "test_lite_focal_arcface_0.JPG test_lite_focal_arcface_1.JPG "
- "test_lite_focal_arcface_2.JPG 0"
+ "e.g ./infer_cosface_demo ms1mv3_cosface_r100.onnx "
+ "face_0.jpg face_1.jpg face_2.jpg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend."
diff --git a/examples/vision/faceid/insightface/cpp/infer_partial_fc.cc b/examples/vision/faceid/insightface/cpp/infer_partial_fc.cc
index 607e59b9f..879e1013e 100644
--- a/examples/vision/faceid/insightface/cpp/infer_partial_fc.cc
+++ b/examples/vision/faceid/insightface/cpp/infer_partial_fc.cc
@@ -16,11 +16,7 @@
void CpuInfer(const std::string& model_file,
const std::vector<std::string>& image_file) {
- auto model = fastdeploy::vision::faceid::PartialFC(model_file);
- if (!model.Initialized()) {
- std::cerr << "Failed to initialize." << std::endl;
- return;
- }
+ auto model = fastdeploy::vision::faceid::PartialFC(model_file, "");
cv::Mat face0 = cv::imread(image_file[0]);
cv::Mat face1 = cv::imread(image_file[1]);
@@ -30,8 +26,8 @@ void CpuInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -42,9 +38,11 @@ void CpuInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -67,8 +65,8 @@ void GpuInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -79,9 +77,11 @@ void GpuInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -106,8 +106,8 @@ void TrtInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -118,9 +118,11 @@ void TrtInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -128,9 +130,8 @@ void TrtInfer(const std::string& model_file,
int main(int argc, char* argv[]) {
if (argc < 6) {
std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
- "e.g ./infer_arcface_demo ms1mv3_arcface_r100.onnx "
- "test_lite_focal_arcface_0.JPG test_lite_focal_arcface_1.JPG "
- "test_lite_focal_arcface_2.JPG 0"
+ "e.g ./infer_arcface_demo ms1mv3_partial_fc_r100.onnx "
+ "face_0.jpg face_1.jpg face_2.jpg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend."
diff --git a/examples/vision/faceid/insightface/cpp/infer_vpl.cc b/examples/vision/faceid/insightface/cpp/infer_vpl.cc
index 1629994a0..10816e8db 100644
--- a/examples/vision/faceid/insightface/cpp/infer_vpl.cc
+++ b/examples/vision/faceid/insightface/cpp/infer_vpl.cc
@@ -16,11 +16,7 @@
void CpuInfer(const std::string& model_file,
const std::vector<std::string>& image_file) {
- auto model = fastdeploy::vision::faceid::VPL(model_file);
- if (!model.Initialized()) {
- std::cerr << "Failed to initialize." << std::endl;
- return;
- }
+ auto model = fastdeploy::vision::faceid::VPL(model_file, "");
cv::Mat face0 = cv::imread(image_file[0]);
cv::Mat face1 = cv::imread(image_file[1]);
@@ -30,8 +26,8 @@ void CpuInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -42,9 +38,11 @@ void CpuInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -67,8 +65,8 @@ void GpuInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -79,9 +77,11 @@ void GpuInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -106,8 +106,8 @@ void TrtInfer(const std::string& model_file,
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
- if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
- (!model.Predict(&face2, &res2))) {
+ if ((!model.Predict(face0, &res0)) || (!model.Predict(face1, &res1)) ||
+ (!model.Predict(face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
@@ -118,9 +118,11 @@ void TrtInfer(const std::string& model_file,
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res1.embedding, model.l2_normalize);
+ res0.embedding, res1.embedding,
+ model.GetPostprocessor().GetL2Normalize());
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
- res0.embedding, res2.embedding, model.l2_normalize);
+ res0.embedding, res2.embedding,
+ model.GetPostprocessor().GetL2Normalize());
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
@@ -128,9 +130,8 @@ void TrtInfer(const std::string& model_file,
int main(int argc, char* argv[]) {
if (argc < 6) {
std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
- "e.g ./infer_arcface_demo ms1mv3_arcface_r100.onnx "
- "test_lite_focal_arcface_0.JPG test_lite_focal_arcface_1.JPG "
- "test_lite_focal_arcface_2.JPG 0"
+ "e.g ./infer_arcface_demo ms1mv3_vpl_r100.onnx "
+ "face_0.jpg face_1.jpg face_2.jpg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend."
diff --git a/examples/vision/faceid/insightface/python/README.md b/examples/vision/faceid/insightface/python/README.md
index 0df0f8fbe..b3b44dc92 100644
--- a/examples/vision/faceid/insightface/python/README.md
+++ b/examples/vision/faceid/insightface/python/README.md
@@ -15,16 +15,28 @@ cd examples/vision/faceid/insightface/python/
#下载ArcFace模型文件和测试图片
wget https://bj.bcebos.com/paddlehub/fastdeploy/ms1mv3_arcface_r100.onnx
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_0.JPG
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_1.JPG
-wget https://bj.bcebos.com/paddlehub/test_samples/test_lite_focal_arcface_2.JPG
+wget https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/face_demo.zip
+unzip face_demo.zip
# CPU推理
-python infer_arcface.py --model ms1mv3_arcface_r100.onnx --face test_lite_focal_arcface_0.JPG --face_positive test_lite_focal_arcface_1.JPG --face_negative test_lite_focal_arcface_2.JPG --device cpu
+python infer_arcface.py --model ms1mv3_arcface_r100.onnx \
+ --face face_0.jpg \
+ --face_positive face_1.jpg \
+ --face_negative face_2.jpg \
+ --device cpu
# GPU推理
-python infer_arcface.py --model ms1mv3_arcface_r100.onnx --face test_lite_focal_arcface_0.JPG --face_positive test_lite_focal_arcface_1.JPG --face_negative test_lite_focal_arcface_2.JPG --device gpu
+python infer_arcface.py --model ms1mv3_arcface_r100.onnx \
+ --face face_0.jpg \
+ --face_positive face_1.jpg \
+ --face_negative face_2.jpg \
+ --device gpu
# GPU上使用TensorRT推理
-python infer_arcface.py --model ms1mv3_arcface_r100.onnx --face test_lite_focal_arcface_0.JPG --face_positive test_lite_focal_arcface_1.JPG --face_negative test_lite_focal_arcface_2.JPG --device gpu --use_trt True
+python infer_arcface.py --model ms1mv3_arcface_r100.onnx \
+ --face face_0.jpg \
+ --face_positive face_1.jpg \
+ --face_negative face_2.jpg \
+ --device gpu \
+ --use_trt True
```
运行完成可视化结果如下图所示
@@ -82,11 +94,15 @@ ArcFace模型加载和初始化,其中model_file为导出的ONNX模型格式
#### 预处理参数
用户可按照自己的实际需求,修改下列预处理参数,从而影响最终的推理和部署效果
-
+#### InsightFaceRecognitionPreprocessor的成员变量
+以下变量为InsightFaceRecognitionPreprocessor的成员变量
> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[112, 112]
> > * **alpha**(list[float]): 预处理归一化的alpha值,计算公式为`x'=x*alpha+beta`,alpha默认为[1. / 127.5, 1.f / 127.5, 1. / 127.5]
> > * **beta**(list[float]): 预处理归一化的beta值,计算公式为`x'=x*alpha+beta`,beta默认为[-1.f, -1.f, -1.f]
> > * **swap_rb**(bool): 预处理是否将BGR转换成RGB,默认True
+
+#### InsightFaceRecognitionPostprocessor的成员变量
+以下变量为InsightFaceRecognitionPostprocessor的成员变量
> > * **l2_normalize**(bool): 输出人脸向量之前是否执行l2归一化,默认False
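
A compact Python sketch of the triplet comparison described above, using ArcFace. The empty params_file argument mirrors the C++ change below; the file names follow the README, and the result's `embedding` field plus the manual cosine computation are assumptions rather than lines shown in this diff.

```python
import cv2
import numpy as np
import fastdeploy as fd

model = fd.vision.faceid.ArcFace("ms1mv3_arcface_r100.onnx", "")  # ONNX model, no params file
model.postprocessor.l2_normalize = True  # same relocation as in infer_arcface.py below

embeddings = {
    name: np.array(model.predict(cv2.imread(name)).embedding)
    for name in ("face_0.jpg", "face_1.jpg", "face_2.jpg")
}

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

print("face_0 vs face_1 (same person):", cosine(embeddings["face_0.jpg"], embeddings["face_1.jpg"]))
print("face_0 vs face_2 (different):  ", cosine(embeddings["face_0.jpg"], embeddings["face_2.jpg"]))
```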
diff --git a/examples/vision/faceid/insightface/python/infer_arcface.py b/examples/vision/faceid/insightface/python/infer_arcface.py
index a400bd32f..06e8ef001 100644
--- a/examples/vision/faceid/insightface/python/infer_arcface.py
+++ b/examples/vision/faceid/insightface/python/infer_arcface.py
@@ -66,7 +66,7 @@ face1 = cv2.imread(args.face_positive)
face2 = cv2.imread(args.face_negative) # 0,2 不同的人
# 设置 l2 normalize
-model.l2_normalize = True
+model.postprocessor.l2_normalize = True
# 预测图片检测结果
result0 = model.predict(face0)
diff --git a/examples/vision/faceid/insightface/python/infer_cosface.py b/examples/vision/faceid/insightface/python/infer_cosface.py
index 267bff37d..2bb1292f4 100644
--- a/examples/vision/faceid/insightface/python/infer_cosface.py
+++ b/examples/vision/faceid/insightface/python/infer_cosface.py
@@ -66,7 +66,7 @@ face1 = cv2.imread(args.face_positive)
face2 = cv2.imread(args.face_negative) # 0,2 不同的人
# 设置 l2 normalize
-model.l2_normalize = True
+model.postprocessor.l2_normalize = True
# 预测图片检测结果
result0 = model.predict(face0)
diff --git a/examples/vision/faceid/insightface/python/infer_partial_fc.py b/examples/vision/faceid/insightface/python/infer_partial_fc.py
index 2e503116f..e81531e6e 100644
--- a/examples/vision/faceid/insightface/python/infer_partial_fc.py
+++ b/examples/vision/faceid/insightface/python/infer_partial_fc.py
@@ -66,7 +66,7 @@ face1 = cv2.imread(args.face_positive)
face2 = cv2.imread(args.face_negative) # 0,2 不同的人
# 设置 l2 normalize
-model.l2_normalize = True
+model.postprocessor.l2_normalize = True
# 预测图片检测结果
result0 = model.predict(face0)
diff --git a/examples/vision/faceid/insightface/python/infer_vpl.py b/examples/vision/faceid/insightface/python/infer_vpl.py
index dad84d647..6113ad3df 100644
--- a/examples/vision/faceid/insightface/python/infer_vpl.py
+++ b/examples/vision/faceid/insightface/python/infer_vpl.py
@@ -66,7 +66,7 @@ face1 = cv2.imread(args.face_positive)
face2 = cv2.imread(args.face_negative) # 0,2 不同的人
# 设置 l2 normalize
-model.l2_normalize = True
+model.postprocessor.l2_normalize = True
# 预测图片检测结果
result0 = model.predict(face0)
diff --git a/examples/vision/keypointdetection/det_keypoint_unite/cpp/README.md b/examples/vision/keypointdetection/det_keypoint_unite/cpp/README.md
index 477d0872d..57c513cda 100755
--- a/examples/vision/keypointdetection/det_keypoint_unite/cpp/README.md
+++ b/examples/vision/keypointdetection/det_keypoint_unite/cpp/README.md
@@ -33,7 +33,7 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/000000018491.jpg
./infer_demo PP_PicoDet_V2_S_Pedestrian_320x320_infer PP_TinyPose_256x192_infer 000000018491.jpg 1
# GPU上TensorRT推理
./infer_demo PP_PicoDet_V2_S_Pedestrian_320x320_infer PP_TinyPose_256x192_infer 000000018491.jpg 2
-# XPU推理
+# 昆仑芯XPU推理
./infer_demo PP_PicoDet_V2_S_Pedestrian_320x320_infer PP_TinyPose_256x192_infer 000000018491.jpg 3
```
diff --git a/examples/vision/keypointdetection/det_keypoint_unite/cpp/det_keypoint_unite_infer.cc b/examples/vision/keypointdetection/det_keypoint_unite/cpp/det_keypoint_unite_infer.cc
index bd5c080a8..35c522d60 100755
--- a/examples/vision/keypointdetection/det_keypoint_unite/cpp/det_keypoint_unite_infer.cc
+++ b/examples/vision/keypointdetection/det_keypoint_unite/cpp/det_keypoint_unite_infer.cc
@@ -55,10 +55,9 @@ void CpuInfer(const std::string& det_model_dir,
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
- // 输出预测框结果
+
std::cout << res.Str() << std::endl;
- // 可视化预测结果
auto vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.2);
cv::imwrite("vis_result.jpg", vis_im);
@@ -66,11 +65,11 @@ void CpuInfer(const std::string& det_model_dir,
<< std::endl;
}
-void XpuInfer(const std::string& det_model_dir,
+void KunlunXinInfer(const std::string& det_model_dir,
const std::string& tinypose_model_dir,
const std::string& image_file) {
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto det_model_file = det_model_dir + sep + "model.pdmodel";
auto det_params_file = det_model_dir + sep + "model.pdiparams";
auto det_config_file = det_model_dir + sep + "infer_cfg.yml";
@@ -104,10 +103,9 @@ void XpuInfer(const std::string& det_model_dir,
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
- // 输出预测框结果
+
std::cout << res.Str() << std::endl;
- // 可视化预测结果
auto vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.2);
cv::imwrite("vis_result.jpg", vis_im);
@@ -153,10 +151,9 @@ void GpuInfer(const std::string& det_model_dir,
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
- // 输出预测框结果
+
std::cout << res.Str() << std::endl;
- // 可视化预测结果
auto vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.2);
cv::imwrite("vis_result.jpg", vis_im);
@@ -210,10 +207,9 @@ void TrtInfer(const std::string& det_model_dir,
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
- // 输出预测关键点结果
+
std::cout << res.Str() << std::endl;
- // 可视化预测结果
auto vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.2);
cv::imwrite("vis_result.jpg", vis_im);
@@ -229,7 +225,7 @@ int main(int argc, char* argv[]) {
"./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -241,7 +237,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[4]) == 2) {
TrtInfer(argv[1], argv[2], argv[3]);
} else if (std::atoi(argv[4]) == 3) {
- XpuInfer(argv[1], argv[2], argv[3]);
+ KunlunXinInfer(argv[1], argv[2], argv[3]);
}
return 0;
}
diff --git a/examples/vision/keypointdetection/det_keypoint_unite/python/README.md b/examples/vision/keypointdetection/det_keypoint_unite/python/README.md
index 9ea6a1b6d..a6366b800 100755
--- a/examples/vision/keypointdetection/det_keypoint_unite/python/README.md
+++ b/examples/vision/keypointdetection/det_keypoint_unite/python/README.md
@@ -25,8 +25,8 @@ python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infe
python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device gpu
# GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待)
python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device gpu --use_trt True
-# XPU推理
-python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device xpu
+# 昆仑芯XPU推理
+python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device kunlunxin
```
运行完成可视化结果如下图所示
diff --git a/examples/vision/keypointdetection/det_keypoint_unite/python/det_keypoint_unite_infer.py b/examples/vision/keypointdetection/det_keypoint_unite/python/det_keypoint_unite_infer.py
index d76c9944b..d42138da8 100755
--- a/examples/vision/keypointdetection/det_keypoint_unite/python/det_keypoint_unite_infer.py
+++ b/examples/vision/keypointdetection/det_keypoint_unite/python/det_keypoint_unite_infer.py
@@ -19,7 +19,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="type of inference device, support 'cpu', 'xpu' or 'gpu'.")
+ help="type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -47,8 +47,8 @@ def build_tinypose_option(args):
if args.device.lower() == "gpu":
option.use_gpu()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.use_trt:
option.use_trt_backend()
diff --git a/examples/vision/keypointdetection/tiny_pose/cpp/README.md b/examples/vision/keypointdetection/tiny_pose/cpp/README.md
index 9b4983ee8..867e4251c 100755
--- a/examples/vision/keypointdetection/tiny_pose/cpp/README.md
+++ b/examples/vision/keypointdetection/tiny_pose/cpp/README.md
@@ -32,7 +32,7 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg
./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg 1
# GPU上TensorRT推理
./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg 2
-# XPU推理
+# 昆仑芯XPU推理
./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg 3
```
diff --git a/examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc b/examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc
index b81761836..445af07e4 100755
--- a/examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc
+++ b/examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc
@@ -42,10 +42,9 @@ void CpuInfer(const std::string& tinypose_model_dir,
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
- // 输出预测框结果
+
std::cout << res.Str() << std::endl;
- // 可视化预测结果
auto tinypose_vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.5);
cv::imwrite("tinypose_vis_result.jpg", tinypose_vis_im);
@@ -53,13 +52,13 @@ void CpuInfer(const std::string& tinypose_model_dir,
<< std::endl;
}
-void XpuInfer(const std::string& tinypose_model_dir,
+void KunlunXinInfer(const std::string& tinypose_model_dir,
const std::string& image_file) {
auto tinypose_model_file = tinypose_model_dir + sep + "model.pdmodel";
auto tinypose_params_file = tinypose_model_dir + sep + "model.pdiparams";
auto tinypose_config_file = tinypose_model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose(
tinypose_model_file, tinypose_params_file, tinypose_config_file, option);
if (!tinypose_model.Initialized()) {
@@ -75,10 +74,9 @@ void XpuInfer(const std::string& tinypose_model_dir,
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
- // 输出预测框结果
+
std::cout << res.Str() << std::endl;
- // 可视化预测结果
auto tinypose_vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.5);
cv::imwrite("tinypose_vis_result.jpg", tinypose_vis_im);
@@ -110,10 +108,9 @@ void GpuInfer(const std::string& tinypose_model_dir,
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
- // 输出预测框结果
+
std::cout << res.Str() << std::endl;
- // 可视化预测结果
auto tinypose_vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.5);
cv::imwrite("tinypose_vis_result.jpg", tinypose_vis_im);
@@ -145,10 +142,9 @@ void TrtInfer(const std::string& tinypose_model_dir,
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
- // 输出预测框结果
+
std::cout << res.Str() << std::endl;
- // 可视化预测结果
auto tinypose_vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.5);
cv::imwrite("tinypose_vis_result.jpg", tinypose_vis_im);
@@ -163,7 +159,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./pptinypose_model_dir ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -175,7 +171,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
- TrtInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/keypointdetection/tiny_pose/python/README.md b/examples/vision/keypointdetection/tiny_pose/python/README.md
index 6467de863..4ac811bca 100755
--- a/examples/vision/keypointdetection/tiny_pose/python/README.md
+++ b/examples/vision/keypointdetection/tiny_pose/python/README.md
@@ -25,8 +25,8 @@ python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --imag
python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device gpu
# GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待)
python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device gpu --use_trt True
-# XPU推理
-python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device xpu
+# 昆仑芯XPU推理
+python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device kunlunxin
```
运行完成可视化结果如下图所示
diff --git a/examples/vision/keypointdetection/tiny_pose/python/pptinypose_infer.py b/examples/vision/keypointdetection/tiny_pose/python/pptinypose_infer.py
index e103d75ef..5bc66aa17 100755
--- a/examples/vision/keypointdetection/tiny_pose/python/pptinypose_infer.py
+++ b/examples/vision/keypointdetection/tiny_pose/python/pptinypose_infer.py
@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="type of inference device, support 'cpu', 'xpu' or 'gpu'.")
+ help="type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -32,8 +32,8 @@ def build_tinypose_option(args):
if args.device.lower() == "gpu":
option.use_gpu()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.use_trt:
option.use_trt_backend()
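
For completeness, a rough Python counterpart of the KunlunXinInfer path added to the C++ example above. The file layout matches the C++ code in this diff, while the `runtime_option` keyword and the exact Python constructor are assumptions.

```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_kunlunxin()  # replaces the former option.use_xpu()

model_dir = "PP_TinyPose_256x192_infer"
model = fd.vision.keypointdetection.PPTinyPose(
    model_dir + "/model.pdmodel",
    model_dir + "/model.pdiparams",
    model_dir + "/infer_cfg.yml",
    runtime_option=option)

im = cv2.imread("hrnet_demo.jpg")
result = model.predict(im)
print(result)
```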
diff --git a/examples/vision/matting/ppmatting/cpp/README.md b/examples/vision/matting/ppmatting/cpp/README.md
index 219869fa5..21fd779be 100755
--- a/examples/vision/matting/ppmatting/cpp/README.md
+++ b/examples/vision/matting/ppmatting/cpp/README.md
@@ -31,7 +31,7 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 1
# GPU上TensorRT推理
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 2
-# XPU推理
+# 昆仑芯XPU推理
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 3
```
diff --git a/examples/vision/matting/ppmatting/cpp/infer.cc b/examples/vision/matting/ppmatting/cpp/infer.cc
index 00df1833c..3b8309044 100755
--- a/examples/vision/matting/ppmatting/cpp/infer.cc
+++ b/examples/vision/matting/ppmatting/cpp/infer.cc
@@ -51,13 +51,13 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file,
<< std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file,
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file,
const std::string& background_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "deploy.yaml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::matting::PPMatting(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
@@ -156,7 +156,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./PP-Matting-512 ./test.jpg ./test_bg.jpg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend, 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend, 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -167,7 +167,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[4]) == 2) {
TrtInfer(argv[1], argv[2], argv[3]);
} else if (std::atoi(argv[4]) == 3) {
- XpuInfer(argv[1], argv[2], argv[3]);
+ KunlunXinInfer(argv[1], argv[2], argv[3]);
}
return 0;
}
diff --git a/examples/vision/matting/ppmatting/python/README.md b/examples/vision/matting/ppmatting/python/README.md
index f9f15e5b4..c0791d5d6 100755
--- a/examples/vision/matting/ppmatting/python/README.md
+++ b/examples/vision/matting/ppmatting/python/README.md
@@ -23,8 +23,8 @@ python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bg
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu
# GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待)
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu --use_trt True
-# XPU推理
-python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device xpu
+# 昆仑芯XPU推理
+python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device kunlunxin
```
运行完成可视化结果如下图所示
diff --git a/examples/vision/matting/ppmatting/python/infer.py b/examples/vision/matting/ppmatting/python/infer.py
index 12b4890f1..512d0ca86 100755
--- a/examples/vision/matting/ppmatting/python/infer.py
+++ b/examples/vision/matting/ppmatting/python/infer.py
@@ -21,7 +21,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -40,8 +40,8 @@ def build_option(args):
option.use_trt_backend()
option.set_trt_input_shape("img", [1, 3, 512, 512])
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
return option
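
The option built above plugs into the PP-Matting model roughly as follows; this is a sketch only, assuming the Python constructor mirrors the C++ `PPMatting(model, params, config, option)` signature shown earlier in this diff.

```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_kunlunxin()  # or use_gpu()/use_trt_backend() as in build_option above

model_dir = "PP-Matting-512"
model = fd.vision.matting.PPMatting(
    model_dir + "/model.pdmodel",
    model_dir + "/model.pdiparams",
    model_dir + "/deploy.yaml",
    runtime_option=option)

result = model.predict(cv2.imread("matting_input.jpg"))
print(result)
```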
diff --git a/examples/vision/ocr/PP-OCRv2/cpp/README.md b/examples/vision/ocr/PP-OCRv2/cpp/README.md
index a93eb097f..fbde53fff 100755
--- a/examples/vision/ocr/PP-OCRv2/cpp/README.md
+++ b/examples/vision/ocr/PP-OCRv2/cpp/README.md
@@ -41,7 +41,7 @@ wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 2
# GPU上Paddle-TRT推理
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 3
-# XPU推理
+# 昆仑芯XPU推理
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 4
```
diff --git a/examples/vision/ocr/PP-OCRv2/cpp/infer.cc b/examples/vision/ocr/PP-OCRv2/cpp/infer.cc
index 3435b7ad2..3406246aa 100755
--- a/examples/vision/ocr/PP-OCRv2/cpp/infer.cc
+++ b/examples/vision/ocr/PP-OCRv2/cpp/infer.cc
@@ -100,7 +100,7 @@ int main(int argc, char* argv[]) {
"./ppocr_keys_v1.txt ./12.jpg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with gpu and use Paddle-TRT; 4: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with gpu and use Paddle-TRT; 4: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -121,7 +121,7 @@ int main(int argc, char* argv[]) {
option.EnablePaddleTrtCollectShape();
option.EnablePaddleToTrt();
} else if (flag == 4) {
- option.UseXpu();
+ option.UseKunlunXin();
}
std::string det_model_dir = argv[1];
diff --git a/examples/vision/ocr/PP-OCRv2/python/README.md b/examples/vision/ocr/PP-OCRv2/python/README.md
index 21e824d6e..66bba9e5b 100755
--- a/examples/vision/ocr/PP-OCRv2/python/README.md
+++ b/examples/vision/ocr/PP-OCRv2/python/README.md
@@ -34,8 +34,8 @@ python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2
python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu
# GPU上使用TensorRT推理
python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt
-# XPU推理
-python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device xpu
+# 昆仑芯XPU推理
+python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device kunlunxin
```
运行完成可视化结果如下图所示
diff --git a/examples/vision/ocr/PP-OCRv2/python/infer.py b/examples/vision/ocr/PP-OCRv2/python/infer.py
index 02a443ee4..b8c731ef3 100755
--- a/examples/vision/ocr/PP-OCRv2/python/infer.py
+++ b/examples/vision/ocr/PP-OCRv2/python/infer.py
@@ -41,7 +41,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
parser.add_argument(
"--backend",
type=str,
@@ -68,8 +68,8 @@ def build_option(args):
option.set_cpu_thread_num(args.cpu_thread_num)
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
return option
if args.backend.lower() == "trt":
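
A sketch of how the `kunlunxin` option would feed into the PP-OCRv2 pipeline in Python. The `DBDetector`/`Classifier`/`Recognizer`/`PPOCRv2` class names, the `runtime_option` keyword, and the `inference.pdmodel` file layout are assumptions based on the FastDeploy OCR examples, not lines shown in this diff.

```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_kunlunxin()  # new spelling of the former use_xpu()

det = fd.vision.ocr.DBDetector(
    "ch_PP-OCRv2_det_infer/inference.pdmodel",
    "ch_PP-OCRv2_det_infer/inference.pdiparams",
    runtime_option=option)
cls = fd.vision.ocr.Classifier(
    "ch_ppocr_mobile_v2.0_cls_infer/inference.pdmodel",
    "ch_ppocr_mobile_v2.0_cls_infer/inference.pdiparams",
    runtime_option=option)
rec = fd.vision.ocr.Recognizer(
    "ch_PP-OCRv2_rec_infer/inference.pdmodel",
    "ch_PP-OCRv2_rec_infer/inference.pdiparams",
    "ppocr_keys_v1.txt",
    runtime_option=option)

ppocr = fd.vision.ocr.PPOCRv2(det_model=det, cls_model=cls, rec_model=rec)
print(ppocr.predict(cv2.imread("12.jpg")))
```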
diff --git a/examples/vision/ocr/PP-OCRv3/cpp/README.md b/examples/vision/ocr/PP-OCRv3/cpp/README.md
index 301ccef3e..9c5eff4ef 100755
--- a/examples/vision/ocr/PP-OCRv3/cpp/README.md
+++ b/examples/vision/ocr/PP-OCRv3/cpp/README.md
@@ -41,7 +41,7 @@ wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 2
# GPU上Paddle-TRT推理
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 3
-# XPU推理
+# 昆仑芯XPU推理
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 4
```
diff --git a/examples/vision/ocr/PP-OCRv3/cpp/infer.cc b/examples/vision/ocr/PP-OCRv3/cpp/infer.cc
index 6146765fa..fd25eca7e 100755
--- a/examples/vision/ocr/PP-OCRv3/cpp/infer.cc
+++ b/examples/vision/ocr/PP-OCRv3/cpp/infer.cc
@@ -101,7 +101,7 @@ int main(int argc, char* argv[]) {
"./ppocr_keys_v1.txt ./12.jpg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with gpu and use Paddle-TRT; 4: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with gpu and use Paddle-TRT; 4: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -122,7 +122,7 @@ int main(int argc, char* argv[]) {
option.EnablePaddleTrtCollectShape();
option.EnablePaddleToTrt();
} else if (flag == 4) {
- option.UseXpu();
+ option.UseKunlunXin();
}
std::string det_model_dir = argv[1];
diff --git a/examples/vision/ocr/PP-OCRv3/python/README.md b/examples/vision/ocr/PP-OCRv3/python/README.md
index 90bdf0e2c..e87729353 100755
--- a/examples/vision/ocr/PP-OCRv3/python/README.md
+++ b/examples/vision/ocr/PP-OCRv3/python/README.md
@@ -33,8 +33,8 @@ python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu
# GPU上使用TensorRT推理
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt
-# XPU推理
-python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device xpu
+# 昆仑芯XPU推理
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device kunlunxin
```
运行完成可视化结果如下图所示
diff --git a/examples/vision/ocr/PP-OCRv3/python/infer.py b/examples/vision/ocr/PP-OCRv3/python/infer.py
index 0753c8594..97ee1d070 100755
--- a/examples/vision/ocr/PP-OCRv3/python/infer.py
+++ b/examples/vision/ocr/PP-OCRv3/python/infer.py
@@ -41,7 +41,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
parser.add_argument(
"--backend",
type=str,
@@ -68,8 +68,8 @@ def build_option(args):
option.set_cpu_thread_num(args.cpu_thread_num)
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
return option
if args.backend.lower() == "trt":
diff --git a/examples/vision/ocr/PP-OCRv3/python/serving/README.md b/examples/vision/ocr/PP-OCRv3/python/serving/README.md
new file mode 120000
index 000000000..bacd3186b
--- /dev/null
+++ b/examples/vision/ocr/PP-OCRv3/python/serving/README.md
@@ -0,0 +1 @@
+README_CN.md
\ No newline at end of file
diff --git a/examples/vision/ocr/PP-OCRv3/python/serving/README_CN.md b/examples/vision/ocr/PP-OCRv3/python/serving/README_CN.md
new file mode 100644
index 000000000..8b7fc2fa3
--- /dev/null
+++ b/examples/vision/ocr/PP-OCRv3/python/serving/README_CN.md
@@ -0,0 +1,44 @@
+简体中文 | [English](README_EN.md)
+
+# PP-OCRv3 Python轻量服务化部署示例
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+- 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
+
+服务端:
+```bash
+# 下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/ocr/PP-OCRv3/python/serving
+
+# 下载模型和字典文件
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar xvf ch_PP-OCRv3_det_infer.tar
+
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar xvf ch_PP-OCRv3_rec_infer.tar
+
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# 启动服务,可修改server.py中的配置项来指定硬件、后端等
+# 可通过--host、--port指定IP和端口号
+fastdeploy simple_serving --app server:app
+```
+
+客户端:
+```bash
+# 下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/ocr/PP-OCRv3/python/serving
+
+# 下载测试图片
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+
+# 请求服务,获取推理结果(如有必要,请修改脚本中的IP和端口号)
+python client.py
+```
diff --git a/examples/vision/ocr/PP-OCRv3/python/serving/README_EN.md b/examples/vision/ocr/PP-OCRv3/python/serving/README_EN.md
new file mode 100644
index 000000000..fb3d6c9b8
--- /dev/null
+++ b/examples/vision/ocr/PP-OCRv3/python/serving/README_EN.md
@@ -0,0 +1,43 @@
+English | [简体中文](README_CN.md)
+
+# PP-OCRv3 Python Simple Serving Demo
+
+## Environment
+
+- 1. Prepare environment and install FastDeploy Python whl, refer to [download_prebuilt_libraries](../../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
+
+Server:
+```bash
+# Download demo code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/ocr/PP-OCRv3/python/serving
+
+# Download models and labels
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar xvf ch_PP-OCRv3_det_infer.tar
+
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar xvf ch_PP-OCRv3_rec_infer.tar
+
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# Launch server, change the configurations in server.py to select hardware, backend, etc.
+# and use --host, --port to specify IP and port
+fastdeploy simple_serving --app server:app
+```
+
+Client:
+```bash
+# Download demo code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/ocr/PP-OCRv3/python/serving
+
+# Download test image
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+
+# Send request and get inference result (Please adapt the IP and port if necessary)
+python client.py
+```
diff --git a/examples/vision/ocr/PP-OCRv3/python/serving/client.py b/examples/vision/ocr/PP-OCRv3/python/serving/client.py
new file mode 100644
index 000000000..6849c2204
--- /dev/null
+++ b/examples/vision/ocr/PP-OCRv3/python/serving/client.py
@@ -0,0 +1,24 @@
+import requests
+import json
+import cv2
+import fastdeploy as fd
+from fastdeploy.serving.utils import cv2_to_base64
+
+if __name__ == '__main__':
+ url = "http://127.0.0.1:8000/fd/ppocrv3"
+ headers = {"Content-Type": "application/json"}
+
+ im = cv2.imread("12.jpg")
+ data = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}
+
+ resp = requests.post(url=url, headers=headers, data=json.dumps(data))
+ if resp.status_code == 200:
+ r_json = json.loads(resp.json()["result"])
+ print(r_json)
+ ocr_result = fd.vision.utils.json_to_ocr(r_json)
+ vis_im = fd.vision.vis_ppocr(im, ocr_result)
+ cv2.imwrite("visualized_result.jpg", vis_im)
+        print("Visualized result saved in ./visualized_result.jpg")
+ else:
+ print("Error code:", resp.status_code)
+ print(resp.text)
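The client serializes the image with fastdeploy.serving.utils.cv2_to_base64. In case that helper is unavailable, a minimal stand-in, assuming the server expects a base64-encoded JPEG, could look like this:

```python
import base64
import cv2

def cv2_to_base64_sketch(image) -> str:
    # Encode a BGR ndarray as JPEG bytes, then as a base64 string.
    ok, buf = cv2.imencode(".jpg", image)
    if not ok:
        raise ValueError("JPEG encoding failed")
    return base64.b64encode(buf.tobytes()).decode("utf-8")
```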
diff --git a/examples/vision/ocr/PP-OCRv3/python/serving/server.py b/examples/vision/ocr/PP-OCRv3/python/serving/server.py
new file mode 100644
index 000000000..0078b7112
--- /dev/null
+++ b/examples/vision/ocr/PP-OCRv3/python/serving/server.py
@@ -0,0 +1,80 @@
+import fastdeploy as fd
+from fastdeploy.serving.server import SimpleServer
+import os
+import logging
+
+logging.getLogger().setLevel(logging.INFO)
+
+# Configurations
+det_model_dir = 'ch_PP-OCRv3_det_infer'
+cls_model_dir = 'ch_ppocr_mobile_v2.0_cls_infer'
+rec_model_dir = 'ch_PP-OCRv3_rec_infer'
+rec_label_file = 'ppocr_keys_v1.txt'
+device = 'cpu'
+# backend: ['paddle', 'trt']. Other backends can also be used, but you will need
+# to modify the runtime option below accordingly
+backend = 'paddle'
+
+# Prepare models
+# Detection model
+det_model_file = os.path.join(det_model_dir, "inference.pdmodel")
+det_params_file = os.path.join(det_model_dir, "inference.pdiparams")
+# Classification model
+cls_model_file = os.path.join(cls_model_dir, "inference.pdmodel")
+cls_params_file = os.path.join(cls_model_dir, "inference.pdiparams")
+# Recognition model
+rec_model_file = os.path.join(rec_model_dir, "inference.pdmodel")
+rec_params_file = os.path.join(rec_model_dir, "inference.pdiparams")
+
+# Setup runtime option to select hardware, backend, etc.
+option = fd.RuntimeOption()
+if device.lower() == 'gpu':
+ option.use_gpu()
+if backend == 'trt':
+ option.use_trt_backend()
+else:
+ option.use_paddle_infer_backend()
+
+det_option = option
+det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640],
+ [1, 3, 960, 960])
+
+# det_option.set_trt_cache_file("det_trt_cache.trt")
+print(det_model_file, det_params_file)
+det_model = fd.vision.ocr.DBDetector(
+ det_model_file, det_params_file, runtime_option=det_option)
+
+cls_batch_size = 1
+rec_batch_size = 6
+
+cls_option = option
+cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [cls_batch_size, 3, 48, 320],
+ [cls_batch_size, 3, 48, 1024])
+
+# cls_option.set_trt_cache_file("cls_trt_cache.trt")
+cls_model = fd.vision.ocr.Classifier(
+ cls_model_file, cls_params_file, runtime_option=cls_option)
+
+rec_option = option
+rec_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [rec_batch_size, 3, 48, 320],
+ [rec_batch_size, 3, 48, 2304])
+
+# rec_option.set_trt_cache_file("rec_trt_cache.trt")
+rec_model = fd.vision.ocr.Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, runtime_option=rec_option)
+
+# Create PPOCRv3 pipeline
+ppocr_v3 = fd.vision.ocr.PPOCRv3(
+ det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+
+ppocr_v3.cls_batch_size = cls_batch_size
+ppocr_v3.rec_batch_size = rec_batch_size
+
+# Create server, setup REST API
+app = SimpleServer()
+app.register(
+ task_name="fd/ppocrv3",
+ model_handler=fd.serving.handler.VisionModelHandler,
+ predictor=ppocr_v3)
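Because this change set renames the XPU entry points, pointing the same server at a KunlunXin device only needs the renamed RuntimeOption call. A hedged sketch of the configuration block (use_lite_backend() is assumed here, since Paddle Lite is the backend these changes validate for KunlunXin):

```python
import fastdeploy as fd

device = 'kunlunxin'

option = fd.RuntimeOption()
if device.lower() == 'gpu':
    option.use_gpu()
elif device.lower() == 'kunlunxin':
    option.use_kunlunxin()     # renamed from option.use_xpu()
    option.use_lite_backend()  # assumption: Lite is the valid KunlunXin backend, as elsewhere in this patch
```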
diff --git a/examples/vision/segmentation/paddleseg/cpp/README.md b/examples/vision/segmentation/paddleseg/cpp/README.md
index 620f32345..35d288ba4 100755
--- a/examples/vision/segmentation/paddleseg/cpp/README.md
+++ b/examples/vision/segmentation/paddleseg/cpp/README.md
@@ -32,7 +32,7 @@ wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 1
# GPU上TensorRT推理
./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 2
-# XPU推理
+# 昆仑芯XPU推理
./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 3
```
diff --git a/examples/vision/segmentation/paddleseg/cpp/infer.cc b/examples/vision/segmentation/paddleseg/cpp/infer.cc
index d0b9af0ed..389699a51 100755
--- a/examples/vision/segmentation/paddleseg/cpp/infer.cc
+++ b/examples/vision/segmentation/paddleseg/cpp/infer.cc
@@ -48,12 +48,12 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
-void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "deploy.yaml";
auto option = fastdeploy::RuntimeOption();
- option.UseXpu();
+ option.UseKunlunXin();
auto model = fastdeploy::vision::segmentation::PaddleSegModel(
model_file, params_file, config_file, option);
@@ -142,7 +142,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ppseg_model_dir ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
- "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with kunlunxin."
<< std::endl;
return -1;
}
@@ -154,7 +154,7 @@ int main(int argc, char* argv[]) {
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
- XpuInfer(argv[1], argv[2]);
+ KunlunXinInfer(argv[1], argv[2]);
}
return 0;
}
diff --git a/examples/vision/segmentation/paddleseg/python/README.md b/examples/vision/segmentation/paddleseg/python/README.md
index 18c8092f5..9b5163bdf 100755
--- a/examples/vision/segmentation/paddleseg/python/README.md
+++ b/examples/vision/segmentation/paddleseg/python/README.md
@@ -25,8 +25,8 @@ python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_
python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device gpu
# GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待)
python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device gpu --use_trt True
-# XPU推理
-python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device xpu
+# 昆仑芯XPU推理
+python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device kunlunxin
```
运行完成可视化结果如下图所示
diff --git a/examples/vision/segmentation/paddleseg/python/infer.py b/examples/vision/segmentation/paddleseg/python/infer.py
index a1c31ebcd..0b19fedc2 100755
--- a/examples/vision/segmentation/paddleseg/python/infer.py
+++ b/examples/vision/segmentation/paddleseg/python/infer.py
@@ -15,7 +15,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
- help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
+ help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -30,8 +30,8 @@ def build_option(args):
if args.device.lower() == "gpu":
option.use_gpu()
- if args.device.lower() == "xpu":
- option.use_xpu()
+ if args.device.lower() == "kunlunxin":
+ option.use_kunlunxin()
if args.use_trt:
option.use_trt_backend()
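Equivalently, the renamed device can be selected when building the segmentation model directly from the Python API; a minimal sketch reusing the model and image files from the example above:

```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_kunlunxin()  # renamed from option.use_xpu()

model = fd.vision.segmentation.PaddleSegModel(
    "Unet_cityscapes_without_argmax_infer/model.pdmodel",
    "Unet_cityscapes_without_argmax_infer/model.pdiparams",
    "Unet_cityscapes_without_argmax_infer/deploy.yaml",
    runtime_option=option)

im = cv2.imread("cityscapes_demo.png")
result = model.predict(im)
print(result)
```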
diff --git a/examples/vision/segmentation/paddleseg/rv1126/cpp/CMakeLists.txt b/examples/vision/segmentation/paddleseg/rv1126/cpp/CMakeLists.txt
index baaf8331f..64b7a6466 100755
--- a/examples/vision/segmentation/paddleseg/rv1126/cpp/CMakeLists.txt
+++ b/examples/vision/segmentation/paddleseg/rv1126/cpp/CMakeLists.txt
@@ -20,19 +20,11 @@ install(TARGETS infer_demo DESTINATION ./)
install(DIRECTORY models DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
-# install(DIRECTORY run_with_adb.sh DESTINATION ./)
-file(GLOB FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/*)
-install(PROGRAMS ${FASTDEPLOY_LIBS} DESTINATION lib)
-
-file(GLOB OPENCV_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/opencv/lib/lib*)
-install(PROGRAMS ${OPENCV_LIBS} DESTINATION lib)
-
-file(GLOB PADDLELITE_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/lib*)
-install(PROGRAMS ${PADDLELITE_LIBS} DESTINATION lib)
-
-file(GLOB TIMVX_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/paddlelite/lib/verisilicon_timvx/*)
-install(PROGRAMS ${TIMVX_LIBS} DESTINATION lib)
+file(GLOB_RECURSE FASTDEPLOY_LIBS ${FASTDEPLOY_INSTALL_DIR}/lib/lib*.so*)
+file(GLOB_RECURSE ALL_LIBS ${FASTDEPLOY_INSTALL_DIR}/third_libs/install/lib*.so*)
+list(APPEND ALL_LIBS ${FASTDEPLOY_LIBS})
+install(PROGRAMS ${ALL_LIBS} DESTINATION lib)
file(GLOB ADB_TOOLS run_with_adb.sh)
install(PROGRAMS ${ADB_TOOLS} DESTINATION ./)
diff --git a/examples/vision/segmentation/paddleseg/rv1126/cpp/infer.cc b/examples/vision/segmentation/paddleseg/rv1126/cpp/infer.cc
index f084e6719..d9f7c3311 100755
--- a/examples/vision/segmentation/paddleseg/rv1126/cpp/infer.cc
+++ b/examples/vision/segmentation/paddleseg/rv1126/cpp/infer.cc
@@ -24,7 +24,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "deploy.yaml";
auto subgraph_file = model_dir + sep + "subgraph.txt";
-
+ fastdeploy::vision::EnableFlyCV();
fastdeploy::RuntimeOption option;
option.UseTimVX();
option.SetLiteSubgraphPartitionPath(subgraph_file);
diff --git a/fastdeploy/backends/lite/lite_backend.cc b/fastdeploy/backends/lite/lite_backend.cc
index c53eed544..b8dcab255 100755
--- a/fastdeploy/backends/lite/lite_backend.cc
+++ b/fastdeploy/backends/lite/lite_backend.cc
@@ -43,7 +43,7 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
option_ = option;
  std::vector<paddle::lite_api::Place> valid_places;
if (option_.enable_int8) {
- if(option_.enable_xpu) {
+ if(option_.enable_kunlunxin) {
valid_places.push_back(
paddle::lite_api::Place{TARGET(kXPU), PRECISION(kInt8)});
} else {
@@ -54,7 +54,7 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
<< "inference with int8 precision!" << std::endl;
}
if (option_.enable_fp16) {
- if(option_.enable_xpu){
+ if(option_.enable_kunlunxin){
valid_places.push_back(
paddle::lite_api::Place{TARGET(kXPU), PRECISION(kFP16)});
} else {
@@ -127,17 +127,17 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
}
- if(option_.enable_xpu){
+ if(option_.enable_kunlunxin){
valid_places.push_back(
paddle::lite_api::Place{TARGET(kXPU), PRECISION(kFloat)});
valid_places.push_back(
paddle::lite_api::Place{TARGET(kX86), PRECISION(kFloat)});
config_.set_xpu_dev_per_thread(option_.device_id);
- config_.set_xpu_workspace_l3_size_per_thread(option_.xpu_l3_workspace_size);
- config_.set_xpu_l3_cache_method(option_.xpu_l3_workspace_size, option_.xpu_locked);
- config_.set_xpu_conv_autotune(option_.xpu_autotune, option_.xpu_autotune_file);
- config_.set_xpu_multi_encoder_method(option_.xpu_precision, option_.xpu_adaptive_seqlen);
- if (option_.xpu_enable_multi_stream) {
+ config_.set_xpu_workspace_l3_size_per_thread(option_.kunlunxin_l3_workspace_size);
+ config_.set_xpu_l3_cache_method(option_.kunlunxin_l3_workspace_size, option_.kunlunxin_locked);
+ config_.set_xpu_conv_autotune(option_.kunlunxin_autotune, option_.kunlunxin_autotune_file);
+ config_.set_xpu_multi_encoder_method(option_.kunlunxin_precision, option_.kunlunxin_adaptive_seqlen);
+ if (option_.kunlunxin_enable_multi_stream) {
config_.enable_xpu_multi_stream();
}
} else {
@@ -221,7 +221,7 @@ bool LiteBackend::InitFromPaddle(const std::string& model_file,
auto shape = tensor->shape();
info.shape.assign(shape.begin(), shape.end());
info.name = output_names[i];
- if(!option_.enable_xpu){
+ if(!option_.enable_kunlunxin){
info.dtype = LiteDataTypeToFD(tensor->precision());
}
outputs_desc_.emplace_back(info);
diff --git a/fastdeploy/backends/lite/lite_backend.h b/fastdeploy/backends/lite/lite_backend.h
index ed97dbd88..261a4af39 100755
--- a/fastdeploy/backends/lite/lite_backend.h
+++ b/fastdeploy/backends/lite/lite_backend.h
@@ -53,15 +53,15 @@ struct LiteBackendOption {
  std::vector<std::string> nnadapter_device_names = {};
bool enable_timvx = false;
bool enable_ascend = false;
- bool enable_xpu = false;
+ bool enable_kunlunxin = false;
int device_id = 0;
- int xpu_l3_workspace_size = 0xfffc00;
- bool xpu_locked = false;
- bool xpu_autotune = true;
- std::string xpu_autotune_file = "";
- std::string xpu_precision = "int16";
- bool xpu_adaptive_seqlen = false;
- bool xpu_enable_multi_stream = false;
+ int kunlunxin_l3_workspace_size = 0xfffc00;
+ bool kunlunxin_locked = false;
+ bool kunlunxin_autotune = true;
+ std::string kunlunxin_autotune_file = "";
+ std::string kunlunxin_precision = "int16";
+ bool kunlunxin_adaptive_seqlen = false;
+ bool kunlunxin_enable_multi_stream = false;
};
// Convert data type from paddle lite to fastdeploy
diff --git a/fastdeploy/core/fd_type.cc b/fastdeploy/core/fd_type.cc
index a573529ed..21e3d30c3 100755
--- a/fastdeploy/core/fd_type.cc
+++ b/fastdeploy/core/fd_type.cc
@@ -65,8 +65,8 @@ std::string Str(const Device& d) {
case Device::ASCEND:
out = "Device::ASCEND";
break;
- case Device::XPU:
- out = "Device::XPU";
+ case Device::KUNLUNXIN:
+ out = "Device::KUNLUNXIN";
break;
default:
out = "Device::UNKOWN";
@@ -88,8 +88,8 @@ std::ostream& operator<<(std::ostream& out,const Device& d){
case Device::TIMVX:
out << "Device::TIMVX";
break;
- case Device::XPU:
- out << "Device::XPU";
+ case Device::KUNLUNXIN:
+ out << "Device::KUNLUNXIN";
break;
case Device::ASCEND:
out << "Device::ASCEND";
diff --git a/fastdeploy/core/fd_type.h b/fastdeploy/core/fd_type.h
index 80097994e..fda26c1c8 100755
--- a/fastdeploy/core/fd_type.h
+++ b/fastdeploy/core/fd_type.h
@@ -22,7 +22,7 @@
namespace fastdeploy {
-enum FASTDEPLOY_DECL Device { CPU, GPU, RKNPU, IPU, TIMVX, XPU, ASCEND};
+enum FASTDEPLOY_DECL Device { CPU, GPU, RKNPU, IPU, TIMVX, KUNLUNXIN, ASCEND};
FASTDEPLOY_DECL std::string Str(const Device& d);
diff --git a/fastdeploy/fastdeploy_model.cc b/fastdeploy/fastdeploy_model.cc
index 390c1c475..77c1539c3 100755
--- a/fastdeploy/fastdeploy_model.cc
+++ b/fastdeploy/fastdeploy_model.cc
@@ -52,7 +52,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
bool use_rknpu = (runtime_option.device == Device::RKNPU);
bool use_timvx = (runtime_option.device == Device::TIMVX);
bool use_ascend = (runtime_option.device == Device::ASCEND);
- bool use_xpu = (runtime_option.device == Device::XPU);
+ bool use_kunlunxin = (runtime_option.device == Device::KUNLUNXIN);
if (use_gpu) {
if (!IsSupported(valid_gpu_backends, runtime_option.backend)) {
@@ -74,9 +74,9 @@ bool FastDeployModel::InitRuntimeWithSpecifiedBackend() {
FDERROR << "The valid ascend backends of model " << ModelName() << " are " << Str(valid_ascend_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false;
}
- } else if (use_xpu) {
- if (!IsSupported(valid_xpu_backends, runtime_option.backend)) {
- FDERROR << "The valid xpu backends of model " << ModelName() << " are " << Str(valid_xpu_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
+ } else if (use_kunlunxin) {
+ if (!IsSupported(valid_kunlunxin_backends, runtime_option.backend)) {
+ FDERROR << "The valid kunlunxin backends of model " << ModelName() << " are " << Str(valid_kunlunxin_backends) << ", " << runtime_option.backend << " is not supported." << std::endl;
return false;
}
} else if(use_ipu) {
@@ -116,8 +116,8 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
return CreateTimVXBackend();
} else if (runtime_option.device == Device::ASCEND) {
return CreateASCENDBackend();
- } else if (runtime_option.device == Device::XPU) {
- return CreateXPUBackend();
+ } else if (runtime_option.device == Device::KUNLUNXIN) {
+ return CreateKunlunXinBackend();
} else if (runtime_option.device == Device::IPU) {
#ifdef WITH_IPU
return CreateIpuBackend();
@@ -127,7 +127,7 @@ bool FastDeployModel::InitRuntimeWithSpecifiedDevice() {
return false;
#endif
}
- FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX/XPU/ASCEND now." << std::endl;
+ FDERROR << "Only support CPU/GPU/IPU/RKNPU/TIMVX/KunlunXin/ASCEND now." << std::endl;
return false;
}
@@ -241,18 +241,18 @@ bool FastDeployModel::CreateTimVXBackend() {
return false;
}
-bool FastDeployModel::CreateXPUBackend() {
- if (valid_xpu_backends.size() == 0) {
- FDERROR << "There's no valid xpu backends for model: " << ModelName()
+bool FastDeployModel::CreateKunlunXinBackend() {
+ if (valid_kunlunxin_backends.size() == 0) {
+    FDERROR << "There are no valid KunlunXin backends for model: " << ModelName()
<< std::endl;
return false;
}
- for (size_t i = 0; i < valid_xpu_backends.size(); ++i) {
- if (!IsBackendAvailable(valid_xpu_backends[i])) {
+ for (size_t i = 0; i < valid_kunlunxin_backends.size(); ++i) {
+ if (!IsBackendAvailable(valid_kunlunxin_backends[i])) {
continue;
}
- runtime_option.backend = valid_xpu_backends[i];
+ runtime_option.backend = valid_kunlunxin_backends[i];
    runtime_ = std::unique_ptr<Runtime>(new Runtime());
if (!runtime_->Init(runtime_option)) {
return false;
diff --git a/fastdeploy/fastdeploy_model.h b/fastdeploy/fastdeploy_model.h
index 566dd337f..6d2f7e5b7 100755
--- a/fastdeploy/fastdeploy_model.h
+++ b/fastdeploy/fastdeploy_model.h
@@ -50,7 +50,7 @@ class FASTDEPLOY_DECL FastDeployModel {
  std::vector<Backend> valid_ascend_backends = {};
/** Model's valid KunlunXin xpu backends. This member defined all the KunlunXin xpu backends have successfully tested for the model
*/
-  std::vector<Backend> valid_xpu_backends = {};
+  std::vector<Backend> valid_kunlunxin_backends = {};
/** Model's valid hardware backends. This member defined all the gpu backends have successfully tested for the model
*/
  std::vector<Backend> valid_rknpu_backends = {};
@@ -149,7 +149,7 @@ class FASTDEPLOY_DECL FastDeployModel {
bool CreateIpuBackend();
bool CreateRKNPUBackend();
bool CreateTimVXBackend();
- bool CreateXPUBackend();
+ bool CreateKunlunXinBackend();
bool CreateASCENDBackend();
  std::shared_ptr<Runtime> runtime_;
diff --git a/fastdeploy/pybind/runtime.cc b/fastdeploy/pybind/runtime.cc
index 1e322339f..5c299da0e 100755
--- a/fastdeploy/pybind/runtime.cc
+++ b/fastdeploy/pybind/runtime.cc
@@ -25,7 +25,7 @@ void BindRuntime(pybind11::module& m) {
.def("use_cpu", &RuntimeOption::UseCpu)
.def("use_rknpu2", &RuntimeOption::UseRKNPU2)
.def("use_ascend", &RuntimeOption::UseAscend)
- .def("use_xpu", &RuntimeOption::UseXpu)
+ .def("use_kunlunxin", &RuntimeOption::UseKunlunXin)
.def("set_external_stream", &RuntimeOption::SetExternalStream)
.def("set_cpu_thread_num", &RuntimeOption::SetCpuThreadNum)
.def("use_paddle_backend", &RuntimeOption::UsePaddleBackend)
@@ -114,20 +114,20 @@ void BindRuntime(pybind11::module& m) {
&RuntimeOption::ipu_available_memory_proportion)
.def_readwrite("ipu_enable_half_partial",
&RuntimeOption::ipu_enable_half_partial)
- .def_readwrite("xpu_l3_workspace_size",
- &RuntimeOption::xpu_l3_workspace_size)
- .def_readwrite("xpu_locked",
- &RuntimeOption::xpu_locked)
- .def_readwrite("xpu_autotune",
- &RuntimeOption::xpu_autotune)
- .def_readwrite("xpu_autotune_file",
- &RuntimeOption::xpu_autotune_file)
- .def_readwrite("xpu_precision",
- &RuntimeOption::xpu_precision)
- .def_readwrite("xpu_adaptive_seqlen",
- &RuntimeOption::xpu_adaptive_seqlen)
- .def_readwrite("xpu_enable_multi_stream",
- &RuntimeOption::xpu_enable_multi_stream);
+ .def_readwrite("kunlunxin_l3_workspace_size",
+ &RuntimeOption::kunlunxin_l3_workspace_size)
+ .def_readwrite("kunlunxin_locked",
+ &RuntimeOption::kunlunxin_locked)
+ .def_readwrite("kunlunxin_autotune",
+ &RuntimeOption::kunlunxin_autotune)
+ .def_readwrite("kunlunxin_autotune_file",
+ &RuntimeOption::kunlunxin_autotune_file)
+ .def_readwrite("kunlunxin_precision",
+ &RuntimeOption::kunlunxin_precision)
+ .def_readwrite("kunlunxin_adaptive_seqlen",
+ &RuntimeOption::kunlunxin_adaptive_seqlen)
+ .def_readwrite("kunlunxin_enable_multi_stream",
+ &RuntimeOption::kunlunxin_enable_multi_stream);
  pybind11::class_<TensorInfo>(m, "TensorInfo")
.def_readwrite("name", &TensorInfo::name)
diff --git a/fastdeploy/runtime.cc b/fastdeploy/runtime.cc
index 3f403f04a..0110b4313 100755
--- a/fastdeploy/runtime.cc
+++ b/fastdeploy/runtime.cc
@@ -263,7 +263,7 @@ void RuntimeOption::UseTimVX() {
device = Device::TIMVX;
}
-void RuntimeOption::UseXpu(int xpu_id,
+void RuntimeOption::UseKunlunXin(int kunlunxin_id,
int l3_workspace_size,
bool locked,
bool autotune,
@@ -271,16 +271,16 @@ void RuntimeOption::UseXpu(int xpu_id,
const std::string &precision,
bool adaptive_seqlen,
bool enable_multi_stream) {
- enable_xpu = true;
- device_id = xpu_id;
- xpu_l3_workspace_size = l3_workspace_size;
- xpu_locked=locked;
- xpu_autotune=autotune;
- xpu_autotune_file=autotune_file;
- xpu_precision = precision;
- xpu_adaptive_seqlen=adaptive_seqlen;
- xpu_enable_multi_stream=enable_multi_stream;
- device = Device::XPU;
+ enable_kunlunxin = true;
+ device_id = kunlunxin_id;
+ kunlunxin_l3_workspace_size = l3_workspace_size;
+ kunlunxin_locked=locked;
+ kunlunxin_autotune=autotune;
+ kunlunxin_autotune_file=autotune_file;
+ kunlunxin_precision = precision;
+ kunlunxin_adaptive_seqlen=adaptive_seqlen;
+ kunlunxin_enable_multi_stream=enable_multi_stream;
+ device = Device::KUNLUNXIN;
}
void RuntimeOption::UseAscend(){
@@ -611,8 +611,8 @@ bool Runtime::Init(const RuntimeOption& _option) {
FDINFO << "Runtime initialized with Backend::OPENVINO in "
<< Str(option.device) << "." << std::endl;
} else if (option.backend == Backend::LITE) {
- FDASSERT(option.device == Device::CPU || option.device == Device::TIMVX || option.device == Device::XPU || option.device == Device::ASCEND,
- "Backend::LITE only supports Device::CPU/Device::TIMVX/Device::XPU.");
+ FDASSERT(option.device == Device::CPU || option.device == Device::TIMVX || option.device == Device::KUNLUNXIN || option.device == Device::ASCEND,
+ "Backend::LITE only supports Device::CPU/Device::TIMVX/Device::KUNLUNXIN.");
CreateLiteBackend();
FDINFO << "Runtime initialized with Backend::LITE in " << Str(option.device)
<< "." << std::endl;
@@ -660,7 +660,11 @@ bool Runtime::Infer(std::vector& input_tensors,
}
bool Runtime::Infer() {
- return backend_->Infer(input_tensors_, &output_tensors_, false);
+ bool result = backend_->Infer(input_tensors_, &output_tensors_, false);
+ for (auto& tensor : output_tensors_) {
+ tensor.device_id = option.device_id;
+ }
+ return result;
}
void Runtime::BindInputTensor(const std::string& name, FDTensor& input) {
@@ -882,15 +886,15 @@ void Runtime::CreateLiteBackend() {
lite_option.nnadapter_mixed_precision_quantization_config_path = option.lite_nnadapter_mixed_precision_quantization_config_path;
lite_option.enable_timvx = option.enable_timvx;
lite_option.enable_ascend = option.enable_ascend;
- lite_option.enable_xpu = option.enable_xpu;
+ lite_option.enable_kunlunxin = option.enable_kunlunxin;
lite_option.device_id = option.device_id;
- lite_option.xpu_l3_workspace_size = option.xpu_l3_workspace_size;
- lite_option.xpu_locked = option.xpu_locked;
- lite_option.xpu_autotune = option.xpu_autotune;
- lite_option.xpu_autotune_file = option.xpu_autotune_file;
- lite_option.xpu_precision = option.xpu_precision;
- lite_option.xpu_adaptive_seqlen = option.xpu_adaptive_seqlen;
- lite_option.xpu_enable_multi_stream = option.xpu_enable_multi_stream;
+ lite_option.kunlunxin_l3_workspace_size = option.kunlunxin_l3_workspace_size;
+ lite_option.kunlunxin_locked = option.kunlunxin_locked;
+ lite_option.kunlunxin_autotune = option.kunlunxin_autotune;
+ lite_option.kunlunxin_autotune_file = option.kunlunxin_autotune_file;
+ lite_option.kunlunxin_precision = option.kunlunxin_precision;
+ lite_option.kunlunxin_adaptive_seqlen = option.kunlunxin_adaptive_seqlen;
+ lite_option.kunlunxin_enable_multi_stream = option.kunlunxin_enable_multi_stream;
FDASSERT(option.model_format == ModelFormat::PADDLE,
"LiteBackend only support model format of ModelFormat::PADDLE");
diff --git a/fastdeploy/runtime.h b/fastdeploy/runtime.h
index 54e491882..c889d4337 100755
--- a/fastdeploy/runtime.h
+++ b/fastdeploy/runtime.h
@@ -120,9 +120,9 @@ struct FASTDEPLOY_DECL RuntimeOption {
void UseAscend();
///
- /// \brief Turn on XPU.
+ /// \brief Turn on KunlunXin XPU.
///
- /// \param xpu_id the XPU card to use (default is 0).
+ /// \param kunlunxin_id the KunlunXin XPU card to use (default is 0).
/// \param l3_workspace_size The size of the video memory allocated by the l3
/// cache, the maximum is 16M.
/// \param locked Whether the allocated L3 cache can be locked. If false,
@@ -139,9 +139,10 @@ struct FASTDEPLOY_DECL RuntimeOption {
/// file will be used and autotune will not be performed again.
/// \param precision Calculation accuracy of multi_encoder
/// \param adaptive_seqlen Is the input of multi_encoder variable length
- /// \param enable_multi_stream Whether to enable the multi stream of xpu.
+ /// \param enable_multi_stream Whether to enable the multi stream of
+ /// KunlunXin XPU.
///
- void UseXpu(int xpu_id = 0,
+ void UseKunlunXin(int kunlunxin_id = 0,
int l3_workspace_size = 0xfffc00,
bool locked = false,
bool autotune = true,
@@ -449,7 +450,7 @@ struct FASTDEPLOY_DECL RuntimeOption {
bool enable_timvx = false;
bool enable_ascend = false;
- bool enable_xpu = false;
+ bool enable_kunlunxin = false;
// ======Only for Trt Backend=======
  std::map<std::string, std::vector<int32_t>> trt_max_shape;
@@ -482,14 +483,14 @@ struct FASTDEPLOY_DECL RuntimeOption {
fastdeploy::rknpu2::CoreMask rknpu2_core_mask_ =
fastdeploy::rknpu2::CoreMask::RKNN_NPU_CORE_AUTO;
- // ======Only for XPU Backend=======
- int xpu_l3_workspace_size = 0xfffc00;
- bool xpu_locked = false;
- bool xpu_autotune = true;
- std::string xpu_autotune_file = "";
- std::string xpu_precision = "int16";
- bool xpu_adaptive_seqlen = false;
- bool xpu_enable_multi_stream = false;
+ // ======Only for KunlunXin XPU Backend=======
+ int kunlunxin_l3_workspace_size = 0xfffc00;
+ bool kunlunxin_locked = false;
+ bool kunlunxin_autotune = true;
+ std::string kunlunxin_autotune_file = "";
+ std::string kunlunxin_precision = "int16";
+ bool kunlunxin_adaptive_seqlen = false;
+ bool kunlunxin_enable_multi_stream = false;
std::string model_file = ""; // Path of model file
std::string params_file = ""; // Path of parameters file, can be empty
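The default kunlunxin_l3_workspace_size of 0xfffc00 matches the "maximum is 16M" note in the UseKunlunXin() documentation; a quick arithmetic check:

```python
# 0xfffc00 bytes is 1 KiB short of 16 MiB, i.e. effectively the documented 16M cap.
default_l3 = 0xfffc00
print(default_l3)                      # 16776192
print(16 * 1024 * 1024 - default_l3)   # 1024
```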
diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h
index 9c22d45ad..ef2fc90a6 100644
--- a/fastdeploy/vision.h
+++ b/fastdeploy/vision.h
@@ -22,6 +22,7 @@
#include "fastdeploy/vision/detection/contrib/scaledyolov4.h"
#include "fastdeploy/vision/detection/contrib/yolor.h"
#include "fastdeploy/vision/detection/contrib/yolov5/yolov5.h"
+#include "fastdeploy/vision/detection/contrib/fastestdet/fastestdet.h"
#include "fastdeploy/vision/detection/contrib/yolov5lite.h"
#include "fastdeploy/vision/detection/contrib/yolov6.h"
#include "fastdeploy/vision/detection/contrib/yolov7/yolov7.h"
@@ -38,12 +39,8 @@
#include "fastdeploy/vision/facedet/contrib/ultraface.h"
#include "fastdeploy/vision/facedet/contrib/yolov5face.h"
#include "fastdeploy/vision/facedet/contrib/yolov7face/yolov7face.h"
-#include "fastdeploy/vision/faceid/contrib/adaface.h"
-#include "fastdeploy/vision/faceid/contrib/arcface.h"
-#include "fastdeploy/vision/faceid/contrib/cosface.h"
-#include "fastdeploy/vision/faceid/contrib/insightface_rec.h"
-#include "fastdeploy/vision/faceid/contrib/partial_fc.h"
-#include "fastdeploy/vision/faceid/contrib/vpl.h"
+#include "fastdeploy/vision/faceid/contrib/insightface/model.h"
+#include "fastdeploy/vision/faceid/contrib/adaface/adaface.h"
#include "fastdeploy/vision/headpose/contrib/fsanet.h"
#include "fastdeploy/vision/keypointdet/pptinypose/pptinypose.h"
#include "fastdeploy/vision/matting/contrib/modnet.h"
diff --git a/fastdeploy/vision/classification/ppcls/model.cc b/fastdeploy/vision/classification/ppcls/model.cc
index 96b949827..9d691b80b 100755
--- a/fastdeploy/vision/classification/ppcls/model.cc
+++ b/fastdeploy/vision/classification/ppcls/model.cc
@@ -30,7 +30,7 @@ PaddleClasModel::PaddleClasModel(const std::string& model_file,
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
valid_timvx_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ipu_backends = {Backend::PDINFER};
} else if (model_format == ModelFormat::ONNX) {
valid_cpu_backends = {Backend::ORT, Backend::OPENVINO};
diff --git a/fastdeploy/vision/detection/contrib/fastestdet/fastestdet.cc b/fastdeploy/vision/detection/contrib/fastestdet/fastestdet.cc
new file mode 100644
index 000000000..2bef9f38b
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/fastestdet/fastestdet.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/detection/contrib/fastestdet/fastestdet.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+
+FastestDet::FastestDet(const std::string& model_file, const std::string& params_file,
+ const RuntimeOption& custom_option,
+ const ModelFormat& model_format) {
+ if (model_format == ModelFormat::ONNX) {
+ valid_cpu_backends = {Backend::OPENVINO, Backend::ORT};
+ valid_gpu_backends = {Backend::ORT, Backend::TRT};
+ } else {
+ valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
+ valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+ }
+ runtime_option = custom_option;
+ runtime_option.model_format = model_format;
+ runtime_option.model_file = model_file;
+ runtime_option.params_file = params_file;
+ initialized = Initialize();
+}
+
+bool FastestDet::Initialize() {
+ if (!InitRuntime()) {
+ FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
+ return false;
+ }
+ return true;
+}
+
+bool FastestDet::Predict(const cv::Mat& im, DetectionResult* result) {
+  std::vector<DetectionResult> results;
+ if (!BatchPredict({im}, &results)) {
+ return false;
+ }
+ *result = std::move(results[0]);
+ return true;
+}
+
+bool FastestDet::BatchPredict(const std::vector<cv::Mat>& images, std::vector<DetectionResult>* results) {
+  std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
+  std::vector<FDMat> fd_images = WrapMat(images);
+
+ if (!preprocessor_.Run(&fd_images, &reused_input_tensors_, &ims_info)) {
+ FDERROR << "Failed to preprocess the input image." << std::endl;
+ return false;
+ }
+
+ reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
+ if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
+ FDERROR << "Failed to inference by runtime." << std::endl;
+ return false;
+ }
+
+ if (!postprocessor_.Run(reused_output_tensors_, results, ims_info)) {
+ FDERROR << "Failed to postprocess the inference results by runtime." << std::endl;
+ return false;
+ }
+ return true;
+}
+
+} // namespace detection
+} // namespace vision
+} // namespace fastdeploy
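Once the bindings further below are registered, the new model should be usable from Python like the other contrib detectors. A hedged usage sketch (the model and image file names are placeholders, and the wrapper is assumed to expose predict() the same way YOLOv5/YOLOv6 do):

```python
import cv2
import fastdeploy as fd

# Assumption: a local ONNX export of FastestDet.
model = fd.vision.detection.FastestDet("FastestDet.onnx")

im = cv2.imread("test.jpg")
result = model.predict(im)
print(result)

vis = fd.vision.vis_detection(im, result, score_threshold=0.65)
cv2.imwrite("visualized_result.jpg", vis)
```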
diff --git a/fastdeploy/vision/detection/contrib/fastestdet/fastestdet.h b/fastdeploy/vision/detection/contrib/fastestdet/fastestdet.h
new file mode 100644
index 000000000..9bd6e07df
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/fastestdet/fastestdet.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/detection/contrib/fastestdet/preprocessor.h"
+#include "fastdeploy/vision/detection/contrib/fastestdet/postprocessor.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+/*! @brief FastestDet model object, used to load a FastestDet model exported by the FastestDet project.
+ */
+class FASTDEPLOY_DECL FastestDet : public FastDeployModel {
+ public:
+ /** \brief Set path of model file and the configuration of runtime.
+ *
+   * \param[in] model_file Path of model file, e.g. ./fastestdet.onnx
+   * \param[in] params_file Path of parameter file, e.g. ./fastestdet/model.pdiparams; if the model format is ONNX, this parameter will be ignored
+ * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
+ * \param[in] model_format Model format of the loaded model, default is ONNX format
+ */
+ FastestDet(const std::string& model_file, const std::string& params_file = "",
+ const RuntimeOption& custom_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX);
+
+ std::string ModelName() const { return "fastestdet"; }
+
+ /** \brief Predict the detection result for an input image
+ *
+ * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+   * \param[in] result The output detection result will be written to this structure
+   * \return true if the prediction succeeded, otherwise false
+ */
+ virtual bool Predict(const cv::Mat& img, DetectionResult* result);
+
+ /** \brief Predict the detection results for a batch of input images
+ *
+   * \param[in] imgs The input image list, each element comes from cv::imread()
+   * \param[in] results The output detection result list
+   * \return true if the prediction succeeded, otherwise false
+ */
+  virtual bool BatchPredict(const std::vector<cv::Mat>& imgs,
+                            std::vector<DetectionResult>* results);
+
+ /// Get preprocessor reference of FastestDet
+ virtual FastestDetPreprocessor& GetPreprocessor() {
+ return preprocessor_;
+ }
+
+ /// Get postprocessor reference of FastestDet
+ virtual FastestDetPostprocessor& GetPostprocessor() {
+ return postprocessor_;
+ }
+
+ protected:
+ bool Initialize();
+ FastestDetPreprocessor preprocessor_;
+ FastestDetPostprocessor postprocessor_;
+};
+
+} // namespace detection
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/fastestdet/fastestdet_pybind.cc b/fastdeploy/vision/detection/contrib/fastestdet/fastestdet_pybind.cc
new file mode 100644
index 000000000..4ed494134
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/fastestdet/fastestdet_pybind.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindFastestDet(pybind11::module& m) {
+  pybind11::class_<vision::detection::FastestDetPreprocessor>(
+      m, "FastestDetPreprocessor")
+      .def(pybind11::init<>())
+      .def("run", [](vision::detection::FastestDetPreprocessor& self, std::vector<pybind11::array>& im_list) {
+        std::vector<vision::FDMat> images;
+        for (size_t i = 0; i < im_list.size(); ++i) {
+          images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
+        }
+        std::vector<FDTensor> outputs;
+        std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
+ if (!self.Run(&images, &outputs, &ims_info)) {
+ throw std::runtime_error("raise Exception('Failed to preprocess the input data in FastestDetPreprocessor.')");
+ }
+ for (size_t i = 0; i < outputs.size(); ++i) {
+ outputs[i].StopSharing();
+ }
+ return make_pair(outputs, ims_info);
+ })
+ .def_property("size", &vision::detection::FastestDetPreprocessor::GetSize, &vision::detection::FastestDetPreprocessor::SetSize);
+
+  pybind11::class_<vision::detection::FastestDetPostprocessor>(
+      m, "FastestDetPostprocessor")
+      .def(pybind11::init<>())
+      .def("run", [](vision::detection::FastestDetPostprocessor& self, std::vector<FDTensor>& inputs,
+                     const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info) {
+        std::vector<vision::DetectionResult> results;
+        if (!self.Run(inputs, &results, ims_info)) {
+          throw std::runtime_error("raise Exception('Failed to postprocess the runtime result in FastestDetPostprocessor.')");
+        }
+        return results;
+      })
+      .def("run", [](vision::detection::FastestDetPostprocessor& self, std::vector<pybind11::array>& input_array,
+                     const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info) {
+        std::vector<vision::DetectionResult> results;
+        std::vector<FDTensor> inputs;
+ PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
+ if (!self.Run(inputs, &results, ims_info)) {
+ throw std::runtime_error("raise Exception('Failed to postprocess the runtime result in FastestDetPostprocessor.')");
+ }
+ return results;
+ })
+ .def_property("conf_threshold", &vision::detection::FastestDetPostprocessor::GetConfThreshold, &vision::detection::FastestDetPostprocessor::SetConfThreshold)
+ .def_property("nms_threshold", &vision::detection::FastestDetPostprocessor::GetNMSThreshold, &vision::detection::FastestDetPostprocessor::SetNMSThreshold);
+
+  pybind11::class_<vision::detection::FastestDet, FastDeployModel>(m, "FastestDet")
+      .def(pybind11::init<std::string, std::string, RuntimeOption, ModelFormat>())
+ .def("predict",
+ [](vision::detection::FastestDet& self, pybind11::array& data) {
+ auto mat = PyArrayToCvMat(data);
+ vision::DetectionResult res;
+ self.Predict(mat, &res);
+ return res;
+ })
+      .def("batch_predict", [](vision::detection::FastestDet& self, std::vector<pybind11::array>& data) {
+        std::vector<cv::Mat> images;
+        for (size_t i = 0; i < data.size(); ++i) {
+          images.push_back(PyArrayToCvMat(data[i]));
+        }
+        std::vector<vision::DetectionResult> results;
+ self.BatchPredict(images, &results);
+ return results;
+ })
+ .def_property_readonly("preprocessor", &vision::detection::FastestDet::GetPreprocessor)
+ .def_property_readonly("postprocessor", &vision::detection::FastestDet::GetPostprocessor);
+}
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/fastestdet/postprocessor.cc b/fastdeploy/vision/detection/contrib/fastestdet/postprocessor.cc
new file mode 100644
index 000000000..447a16c8a
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/fastestdet/postprocessor.cc
@@ -0,0 +1,132 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/detection/contrib/fastestdet/postprocessor.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+
+FastestDetPostprocessor::FastestDetPostprocessor() {
+ conf_threshold_ = 0.65;
+ nms_threshold_ = 0.45;
+}
+float FastestDetPostprocessor::Sigmoid(float x) {
+ return 1.0f / (1.0f + exp(-x));
+}
+
+float FastestDetPostprocessor::Tanh(float x) {
+ return 2.0f / (1.0f + exp(-2 * x)) - 1;
+}
+
+bool FastestDetPostprocessor::Run(
+    const std::vector<FDTensor> &tensors, std::vector<DetectionResult> *results,
+    const std::vector<std::map<std::string, std::array<float, 2>>> &ims_info) {
+ int batch = 1;
+
+ results->resize(batch);
+
+ for (size_t bs = 0; bs < batch; ++bs) {
+
+ (*results)[bs].Clear();
+ // output (1,85,22,22) CHW
+    const float* output = reinterpret_cast<const float*>(tensors[0].Data()) + bs * tensors[0].shape[1] * tensors[0].shape[2] * tensors[0].shape[3];
+    int output_h = tensors[0].shape[2];  // output map height
+    int output_w = tensors[0].shape[3];  // output map width
+ auto iter_out = ims_info[bs].find("output_shape");
+ auto iter_ipt = ims_info[bs].find("input_shape");
+ FDASSERT(iter_out != ims_info[bs].end() && iter_ipt != ims_info[bs].end(),
+ "Cannot find input_shape or output_shape from im_info.");
+ float ipt_h = iter_ipt->second[0];
+ float ipt_w = iter_ipt->second[1];
+
+ // handle output boxes from out map
+ for (int h = 0; h < output_h; h++) {
+ for (int w = 0; w < output_w; w++) {
+ // object score
+ int obj_score_index = (h * output_w) + w;
+ float obj_score = output[obj_score_index];
+
+ // find max class
+ int category = 0;
+ float max_score = 0.0f;
+ int class_num = tensors[0].shape[1]-5;
+ for (size_t i = 0; i < class_num; i++) {
+ obj_score_index =((5 + i) * output_h * output_w) + (h * output_w) + w;
+ float cls_score = output[obj_score_index];
+ if (cls_score > max_score) {
+ max_score = cls_score;
+ category = i;
+ }
+ }
+ float score = pow(max_score, 0.4) * pow(obj_score, 0.6);
+
+ // score threshold
+ if (score <= conf_threshold_) {
+ continue;
+ }
+ if (score > conf_threshold_) {
+ // handle box x y w h
+ int x_offset_index = (1 * output_h * output_w) + (h * output_w) + w;
+ int y_offset_index = (2 * output_h * output_w) + (h * output_w) + w;
+ int box_width_index = (3 * output_h * output_w) + (h * output_w) + w;
+ int box_height_index = (4 * output_h * output_w) + (h * output_w) + w;
+
+ float x_offset = Tanh(output[x_offset_index]);
+ float y_offset = Tanh(output[y_offset_index]);
+ float box_width = Sigmoid(output[box_width_index]);
+ float box_height = Sigmoid(output[box_height_index]);
+
+ float cx = (w + x_offset) / output_w;
+ float cy = (h + y_offset) / output_h;
+
+ // convert from [x, y, w, h] to [x1, y1, x2, y2]
+        (*results)[bs].boxes.emplace_back(std::array<float, 4>{
+ cx - box_width / 2.0f,
+ cy - box_height / 2.0f,
+ cx + box_width / 2.0f,
+ cy + box_height / 2.0f});
+ (*results)[bs].label_ids.push_back(category);
+ (*results)[bs].scores.push_back(score);
+ }
+ }
+ }
+ if ((*results)[bs].boxes.size() == 0) {
+ return true;
+ }
+
+ // scale boxes to origin shape
+ for (size_t i = 0; i < (*results)[bs].boxes.size(); ++i) {
+ (*results)[bs].boxes[i][0] = ((*results)[bs].boxes[i][0]) * ipt_w;
+ (*results)[bs].boxes[i][1] = ((*results)[bs].boxes[i][1]) * ipt_h;
+ (*results)[bs].boxes[i][2] = ((*results)[bs].boxes[i][2]) * ipt_w;
+ (*results)[bs].boxes[i][3] = ((*results)[bs].boxes[i][3]) * ipt_h;
+ }
+ //NMS
+ utils::NMS(&((*results)[bs]), nms_threshold_);
+ //clip box
+ for (size_t i = 0; i < (*results)[bs].boxes.size(); ++i) {
+ (*results)[bs].boxes[i][0] = std::max((*results)[bs].boxes[i][0], 0.0f);
+ (*results)[bs].boxes[i][1] = std::max((*results)[bs].boxes[i][1], 0.0f);
+ (*results)[bs].boxes[i][2] = std::min((*results)[bs].boxes[i][2], ipt_w);
+ (*results)[bs].boxes[i][3] = std::min((*results)[bs].boxes[i][3], ipt_h);
+ }
+ }
+ return true;
+}
+
+} // namespace detection
+} // namespace vision
+} // namespace fastdeploy
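The per-cell decoding above fuses the objectness and class maps as score = cls^0.4 * obj^0.6, applies tanh to the center offsets and sigmoid to the box size, and normalizes by the output grid. A NumPy rendering of that logic for a single grid cell, for illustration only:

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def decode_cell(output, h, w, conf_threshold=0.65):
    """Decode one (h, w) cell of a (C, H, W) FastestDet output map.

    Returns (box, label, score) with the box in normalized [x1, y1, x2, y2]
    coordinates, or None if the fused score is below the threshold.
    """
    _, out_h, out_w = output.shape
    obj_score = output[0, h, w]
    cls_scores = output[5:, h, w]
    label = int(np.argmax(cls_scores))
    score = cls_scores[label] ** 0.4 * obj_score ** 0.6
    if score <= conf_threshold:
        return None
    cx = (w + np.tanh(output[1, h, w])) / out_w
    cy = (h + np.tanh(output[2, h, w])) / out_h
    bw = sigmoid(output[3, h, w])
    bh = sigmoid(output[4, h, w])
    box = [cx - bw / 2, cy - bh / 2, cx + bw / 2, cy + bh / 2]
    return box, label, float(score)
```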
diff --git a/fastdeploy/vision/detection/contrib/fastestdet/postprocessor.h b/fastdeploy/vision/detection/contrib/fastestdet/postprocessor.h
new file mode 100644
index 000000000..c576aee20
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/fastestdet/postprocessor.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+
+namespace detection {
+/*! @brief Postprocessor object for the FastestDet series of models.
+ */
+class FASTDEPLOY_DECL FastestDetPostprocessor {
+ public:
+  /** \brief Create a postprocessor instance for the FastestDet series of models
+ */
+ FastestDetPostprocessor();
+
+ /** \brief Process the result of runtime and fill to DetectionResult structure
+ *
+ * \param[in] tensors The inference result from runtime
+ * \param[in] result The output result of detection
+   * \param[in] ims_info The shape info list, recording input_shape and output_shape
+   * \return true if the postprocess succeeded, otherwise false
+ */
+  bool Run(const std::vector<FDTensor>& tensors,
+           std::vector<DetectionResult>* results,
+           const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info);
+
+ /// Set conf_threshold, default 0.65
+ void SetConfThreshold(const float& conf_threshold) {
+ conf_threshold_ = conf_threshold;
+ }
+
+ /// Get conf_threshold, default 0.65
+ float GetConfThreshold() const { return conf_threshold_; }
+
+ /// Set nms_threshold, default 0.45
+ void SetNMSThreshold(const float& nms_threshold) {
+ nms_threshold_ = nms_threshold;
+ }
+
+ /// Get nms_threshold, default 0.45
+ float GetNMSThreshold() const { return nms_threshold_; }
+
+ protected:
+ float conf_threshold_;
+ float nms_threshold_;
+ float Sigmoid(float x);
+ float Tanh(float x);
+};
+
+} // namespace detection
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/fastestdet/preprocessor.cc b/fastdeploy/vision/detection/contrib/fastestdet/preprocessor.cc
new file mode 100644
index 000000000..f4ff11e8f
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/fastestdet/preprocessor.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/detection/contrib/fastestdet/preprocessor.h"
+#include "fastdeploy/function/concat.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace detection {
+
+FastestDetPreprocessor::FastestDetPreprocessor() {
+ size_ = {352, 352}; //{h,w}
+}
+
+bool FastestDetPreprocessor::Preprocess(FDMat* mat, FDTensor* output,
+                                        std::map<std::string, std::array<float, 2>>* im_info) {
+ // Record the shape of image and the shape of preprocessed image
+  (*im_info)["input_shape"] = {static_cast<float>(mat->Height()),
+                               static_cast<float>(mat->Width())};
+
+ // process after image load
+  double ratio = (size_[0] * 1.0) / std::max(static_cast<double>(mat->Height()),
+                                             static_cast<double>(mat->Width()));
+
+ // fastestdet's preprocess steps
+ // 1. resize
+ // 2. convert_and_permute(swap_rb=false)
+ Resize::Run(mat, size_[0], size_[1]); //resize
+  std::vector<float> alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f};
+  std::vector<float> beta = {0.0f, 0.0f, 0.0f};
+ //convert to float and HWC2CHW
+ ConvertAndPermute::Run(mat, alpha, beta, false);
+
+ // Record output shape of preprocessed image
+  (*im_info)["output_shape"] = {static_cast<float>(mat->Height()),
+                                static_cast<float>(mat->Width())};
+
+ mat->ShareWithTensor(output);
+  output->ExpandDim(0);  // reshape to n, c, h, w
+ return true;
+}
+
+bool FastestDetPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs,
+                                 std::vector<std::map<std::string, std::array<float, 2>>>* ims_info) {
+ if (images->size() == 0) {
+ FDERROR << "The size of input images should be greater than 0." << std::endl;
+ return false;
+ }
+ ims_info->resize(images->size());
+ outputs->resize(1);
+ // Concat all the preprocessed data to a batch tensor
+  std::vector<FDTensor> tensors(images->size());
+ for (size_t i = 0; i < images->size(); ++i) {
+ if (!Preprocess(&(*images)[i], &tensors[i], &(*ims_info)[i])) {
+ FDERROR << "Failed to preprocess input image." << std::endl;
+ return false;
+ }
+ }
+
+ if (tensors.size() == 1) {
+ (*outputs)[0] = std::move(tensors[0]);
+ } else {
+ function::Concat(tensors, &((*outputs)[0]), 0);
+ }
+ return true;
+}
+
+} // namespace detection
+} // namespace vision
+} // namespace fastdeploy
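For reference, the preprocessing is a plain resize to 352x352 followed by a 1/255 scale with no channel swap and an HWC-to-CHW transpose. An equivalent NumPy/OpenCV sketch:

```python
import cv2
import numpy as np

def fastestdet_preprocess(im_bgr, size=(352, 352)):
    """NumPy equivalent of FastestDetPreprocessor::Preprocess above."""
    resized = cv2.resize(im_bgr, size)                # keep BGR, no aspect-ratio padding
    x = resized.astype(np.float32) / 255.0            # alpha = 1/255, beta = 0
    x = np.transpose(x, (2, 0, 1))[np.newaxis, ...]   # HWC -> CHW, add batch dim -> (1, 3, 352, 352)
    return x
```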
diff --git a/fastdeploy/vision/detection/contrib/fastestdet/preprocessor.h b/fastdeploy/vision/detection/contrib/fastestdet/preprocessor.h
new file mode 100644
index 000000000..8166f6198
--- /dev/null
+++ b/fastdeploy/vision/detection/contrib/fastestdet/preprocessor.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+
+namespace detection {
+/*! @brief Preprocessor object for the FastestDet series of models.
+ */
+class FASTDEPLOY_DECL FastestDetPreprocessor {
+ public:
+  /** \brief Create a preprocessor instance for the FastestDet series of models
+ */
+ FastestDetPreprocessor();
+
+ /** \brief Process the input image and prepare input tensors for runtime
+ *
+ * \param[in] images The input image data list, all the elements are returned by cv::imread()
+ * \param[in] outputs The output tensors which will feed in runtime
+   * \param[in] ims_info The shape info list, recording input_shape and output_shape
+   * \return true if the preprocess succeeded, otherwise false
+ */
+  bool Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs,
+           std::vector<std::map<std::string, std::array<float, 2>>>* ims_info);
+
+ /// Set target size, tuple of (width, height), default size = {352, 352}
+  void SetSize(const std::vector<int>& size) { size_ = size; }
+
+ /// Get target size, tuple of (width, height), default size = {352, 352}
+  std::vector<int> GetSize() const { return size_; }
+
+ protected:
+ bool Preprocess(FDMat* mat, FDTensor* output,
+                  std::map<std::string, std::array<float, 2>>* im_info);
+
+ // target size, tuple of (width, height), default size = {352, 352}
+  std::vector<int> size_;
+};
+
+} // namespace detection
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/detection/contrib/yolov5/yolov5.cc b/fastdeploy/vision/detection/contrib/yolov5/yolov5.cc
index 9af186e13..44e020ffd 100755
--- a/fastdeploy/vision/detection/contrib/yolov5/yolov5.cc
+++ b/fastdeploy/vision/detection/contrib/yolov5/yolov5.cc
@@ -27,7 +27,7 @@ YOLOv5::YOLOv5(const std::string& model_file, const std::string& params_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_timvx_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
diff --git a/fastdeploy/vision/detection/contrib/yolov6.cc b/fastdeploy/vision/detection/contrib/yolov6.cc
index 95f2034ca..9f4d43544 100755
--- a/fastdeploy/vision/detection/contrib/yolov6.cc
+++ b/fastdeploy/vision/detection/contrib/yolov6.cc
@@ -72,7 +72,7 @@ YOLOv6::YOLOv6(const std::string& model_file, const std::string& params_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;
diff --git a/fastdeploy/vision/detection/contrib/yolov7/yolov7.cc b/fastdeploy/vision/detection/contrib/yolov7/yolov7.cc
index 84951c981..e84e4e0be 100755
--- a/fastdeploy/vision/detection/contrib/yolov7/yolov7.cc
+++ b/fastdeploy/vision/detection/contrib/yolov7/yolov7.cc
@@ -27,7 +27,7 @@ YOLOv7::YOLOv7(const std::string& model_file, const std::string& params_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;
diff --git a/fastdeploy/vision/detection/detection_pybind.cc b/fastdeploy/vision/detection/detection_pybind.cc
index 9d585e18c..80bdff859 100644
--- a/fastdeploy/vision/detection/detection_pybind.cc
+++ b/fastdeploy/vision/detection/detection_pybind.cc
@@ -22,6 +22,7 @@ void BindYOLOR(pybind11::module& m);
void BindYOLOv6(pybind11::module& m);
void BindYOLOv5Lite(pybind11::module& m);
void BindYOLOv5(pybind11::module& m);
+void BindFastestDet(pybind11::module& m);
void BindYOLOX(pybind11::module& m);
void BindNanoDetPlus(pybind11::module& m);
void BindPPDet(pybind11::module& m);
@@ -39,6 +40,7 @@ void BindDetection(pybind11::module& m) {
BindYOLOv6(detection_module);
BindYOLOv5Lite(detection_module);
BindYOLOv5(detection_module);
+ BindFastestDet(detection_module);
BindYOLOX(detection_module);
BindNanoDetPlus(detection_module);
BindYOLOv7End2EndTRT(detection_module);
diff --git a/fastdeploy/vision/detection/ppdet/model.h b/fastdeploy/vision/detection/ppdet/model.h
index 4d6b1a390..be13b0b4d 100755
--- a/fastdeploy/vision/detection/ppdet/model.h
+++ b/fastdeploy/vision/detection/ppdet/model.h
@@ -39,7 +39,7 @@ class FASTDEPLOY_DECL PicoDet : public PPDetBase {
Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
valid_rknpu_backends = {Backend::RKNPU2};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -67,7 +67,7 @@ class FASTDEPLOY_DECL PPYOLOE : public PPDetBase {
Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
valid_timvx_backends = {Backend::LITE};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -93,7 +93,7 @@ class FASTDEPLOY_DECL PPYOLO : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -112,7 +112,7 @@ class FASTDEPLOY_DECL YOLOv3 : public PPDetBase {
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER,
Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -131,7 +131,7 @@ class FASTDEPLOY_DECL PaddleYOLOX : public PPDetBase {
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER,
Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -149,7 +149,7 @@ class FASTDEPLOY_DECL FasterRCNN : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -166,7 +166,7 @@ class FASTDEPLOY_DECL MaskRCNN : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -183,7 +183,7 @@ class FASTDEPLOY_DECL SSD : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -201,7 +201,7 @@ class FASTDEPLOY_DECL PaddleYOLOv5 : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -218,7 +218,7 @@ class FASTDEPLOY_DECL PaddleYOLOv6 : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -235,7 +235,7 @@ class FASTDEPLOY_DECL PaddleYOLOv7 : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
initialized = Initialize();
}
@@ -252,7 +252,7 @@ class FASTDEPLOY_DECL RTMDet : public PPDetBase {
model_format) {
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
initialized = Initialize();
}
diff --git a/fastdeploy/vision/faceid/contrib/adaface.cc b/fastdeploy/vision/faceid/contrib/adaface.cc
deleted file mode 100644
index 916a2ef76..000000000
--- a/fastdeploy/vision/faceid/contrib/adaface.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/vision/faceid/contrib/adaface.h"
-
-#include "fastdeploy/utils/perf.h"
-#include "fastdeploy/vision/utils/utils.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-
-AdaFace::AdaFace(const std::string& model_file, const std::string& params_file,
- const RuntimeOption& custom_option,
- const ModelFormat& model_format)
- : InsightFaceRecognitionModel(model_file, params_file, custom_option,
- model_format) {
- initialized = Initialize();
-}
-
-bool AdaFace::Initialize() {
- // (1) if parent class initialed backend
- if (initialized) {
- // (1.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
- }
- // (2) if parent class not initialed backend
- if (!InsightFaceRecognitionModel::Initialize()) {
- FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
- return false;
- }
- // (2.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
-}
-
-bool AdaFace::Preprocess(Mat* mat, FDTensor* output) {
- return InsightFaceRecognitionModel::Preprocess(mat, output);
-}
-
-bool AdaFace::Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Postprocess(infer_result, result);
-}
-
-bool AdaFace::Predict(cv::Mat* im, FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Predict(im, result);
-}
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/faceid/contrib/adaface.h b/fastdeploy/vision/faceid/contrib/adaface.h
deleted file mode 100644
index a11d61298..000000000
--- a/fastdeploy/vision/faceid/contrib/adaface.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-#include "fastdeploy/fastdeploy_model.h"
-#include "fastdeploy/vision/common/processors/transform.h"
-#include "fastdeploy/vision/common/result.h"
-#include "fastdeploy/vision/faceid/contrib/insightface_rec.h"
-
-namespace fastdeploy {
-
-namespace vision {
-/** \brief All object face recognition model APIs are defined inside this namespace
- *
- */
-namespace faceid {
-/*! @brief AdaFace model object used when to load a AdaFace model exported by AdaFacePaddleCLas.
- */
-class FASTDEPLOY_DECL AdaFace : public InsightFaceRecognitionModel {
- public:
- /** \brief Set path of model file and the configuration of runtime.
- *
- * \param[in] model_file Path of model file, e.g ./adaface.onnx
- * \param[in] params_file Path of parameter file, e.g ppyoloe/model.pdiparams, if the model format is ONNX, this parameter will be ignored
- * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
- * \param[in] model_format Model format of the loaded model, default is PADDLE format
- */
- AdaFace(const std::string& model_file, const std::string& params_file = "",
- const RuntimeOption& custom_option = RuntimeOption(),
- const ModelFormat& model_format = ModelFormat::PADDLE);
-
- std::string ModelName() const override {
- return "Zheng-Bicheng/AdaFacePaddleCLas";
- }
- /** \brief Predict the face recognition result for an input image
- *
- * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
- * \param[in] result The output face recognition result will be writen to this structure
- * \return true if the prediction successed, otherwise false
- */
- bool Predict(cv::Mat* im, FaceRecognitionResult* result) override;
-
- private:
- bool Initialize() override;
-
- bool Preprocess(Mat* mat, FDTensor* output) override;
-
-  bool Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) override;
-};
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/adaface/adaface.cc b/fastdeploy/vision/faceid/contrib/adaface/adaface.cc
new file mode 100755
index 000000000..99403d464
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/adaface/adaface.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/faceid/contrib/adaface/adaface.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace faceid {
+
+AdaFace::AdaFace(
+ const std::string& model_file, const std::string& params_file,
+ const fastdeploy::RuntimeOption& custom_option,
+ const fastdeploy::ModelFormat& model_format) {
+
+ if (model_format == ModelFormat::ONNX) {
+ valid_cpu_backends = {Backend::ORT};
+ valid_gpu_backends = {Backend::ORT, Backend::TRT};
+ } else {
+ valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
+ valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+ }
+ runtime_option = custom_option;
+ runtime_option.model_format = model_format;
+ runtime_option.model_file = model_file;
+ runtime_option.params_file = params_file;
+ initialized = Initialize();
+}
+
+bool AdaFace::Initialize() {
+ if (!InitRuntime()) {
+ FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
+ return false;
+ }
+ return true;
+}
+
+bool AdaFace::Predict(const cv::Mat& im,
+ FaceRecognitionResult* result) {
+  std::vector<FaceRecognitionResult> results;
+ if (!BatchPredict({im}, &results)) {
+ return false;
+ }
+ if(!results.empty()){
+ *result = std::move(results[0]);
+ }
+ return true;
+}
+
+bool AdaFace::BatchPredict(const std::vector<cv::Mat>& images,
+                           std::vector<FaceRecognitionResult>* results) {
+  std::vector<FDMat> fd_images = WrapMat(images);
+ FDASSERT(images.size() == 1, "Only support batch = 1 now.");
+ if (!preprocessor_.Run(&fd_images, &reused_input_tensors_)) {
+ FDERROR << "Failed to preprocess the input image." << std::endl;
+ return false;
+ }
+
+ reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
+ if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
+ FDERROR << "Failed to inference by runtime." << std::endl;
+ return false;
+ }
+
+ if (!postprocessor_.Run(reused_output_tensors_, results)){
+ FDERROR << "Failed to postprocess the inference results by runtime." << std::endl;
+ return false;
+ }
+ return true;
+}
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/adaface/adaface.h b/fastdeploy/vision/faceid/contrib/adaface/adaface.h
new file mode 100755
index 000000000..e6198f320
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/adaface/adaface.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/faceid/contrib/adaface/postprocessor.h"
+#include "fastdeploy/vision/faceid/contrib/adaface/preprocessor.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace faceid {
+/*! @brief AdaFace model object used to load an AdaFace face recognition model.
+ */
+class FASTDEPLOY_DECL AdaFace : public FastDeployModel {
+ public:
+ /** \brief Set path of model file and the configuration of runtime.
+ *
+ * \param[in] model_file Path of model file, e.g ./adaface.onnx
+ * \param[in] params_file Path of parameter file, e.g adaface/model.pdiparams, if the model format is ONNX, this parameter will be ignored
+ * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
+ * \param[in] model_format Model format of the loaded model, default is ONNX format
+ */
+ AdaFace(
+ const std::string& model_file, const std::string& params_file = "",
+ const RuntimeOption& custom_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX);
+
+ std::string ModelName() const { return "insightface_rec"; }
+
+ /** \brief Predict the detection result for an input image
+ *
+ * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+ * \param[in] result The output FaceRecognitionResult will be writen to this structure
+ * \return true if the prediction successed, otherwise false
+ */
+ virtual bool Predict(const cv::Mat& im, FaceRecognitionResult* result);
+
+ /** \brief Predict the detection results for a batch of input images
+ *
+ * \param[in] imgs, The input image list, each element comes from cv::imread()
+ * \param[in] results The output FaceRecognitionResult list
+ * \return true if the prediction successed, otherwise false
+ */
+  virtual bool BatchPredict(const std::vector<cv::Mat>& images,
+                            std::vector<FaceRecognitionResult>* results);
+
+ /// Get preprocessor reference of AdaFace
+ virtual AdaFacePreprocessor& GetPreprocessor() {
+ return preprocessor_;
+ }
+
+ /// Get postprocessor reference of AdaFace
+ virtual AdaFacePostprocessor& GetPostprocessor() {
+ return postprocessor_;
+ }
+
+ protected:
+ bool Initialize();
+ AdaFacePreprocessor preprocessor_;
+ AdaFacePostprocessor postprocessor_;
+};
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
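
A minimal usage sketch of the refactored AdaFace C++ API added above. The model and image paths are placeholders, and `Initialized()` is assumed to be the standard FastDeployModel readiness check; treat this as an illustration rather than the canonical example.

```cpp
#include <iostream>
#include <opencv2/opencv.hpp>
#include "fastdeploy/vision.h"

int main() {
  // Hypothetical paths; any AdaFace ONNX export should work the same way.
  fastdeploy::vision::faceid::AdaFace model("adaface.onnx");
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize the AdaFace model." << std::endl;
    return -1;
  }
  cv::Mat im = cv::imread("face_0.jpg");
  fastdeploy::vision::FaceRecognitionResult result;
  if (!model.Predict(im, &result)) {
    std::cerr << "Prediction failed." << std::endl;
    return -1;
  }
  // result.embedding holds the raw (un-normalized) feature vector.
  std::cout << "Embedding dim: " << result.embedding.size() << std::endl;
  return 0;
}
```
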
diff --git a/fastdeploy/vision/faceid/contrib/adaface/adaface_pybind.cc b/fastdeploy/vision/faceid/contrib/adaface/adaface_pybind.cc
new file mode 100644
index 000000000..f04ebdad5
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/adaface/adaface_pybind.cc
@@ -0,0 +1,89 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindAdaFace(pybind11::module& m) {
+  pybind11::class_<vision::faceid::AdaFacePreprocessor>(
+      m, "AdaFacePreprocessor")
+ .def(pybind11::init())
+ .def("run", [](vision::faceid::AdaFacePreprocessor& self,
+ std::vector& im_list) {
+ std::vector images;
+ for (size_t i = 0; i < im_list.size(); ++i) {
+ images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
+ }
+        std::vector<FDTensor> outputs;
+ if (!self.Run(&images, &outputs)) {
+ throw std::runtime_error("Failed to preprocess the input data in AdaFacePreprocessor.");
+ }
+ for (size_t i = 0; i < outputs.size(); ++i) {
+ outputs[i].StopSharing();
+ }
+ return outputs;
+ })
+ .def_property("permute", &vision::faceid::AdaFacePreprocessor::GetPermute,
+ &vision::faceid::AdaFacePreprocessor::SetPermute)
+ .def_property("alpha", &vision::faceid::AdaFacePreprocessor::GetAlpha,
+ &vision::faceid::AdaFacePreprocessor::SetAlpha)
+ .def_property("beta", &vision::faceid::AdaFacePreprocessor::GetBeta,
+ &vision::faceid::AdaFacePreprocessor::SetBeta)
+ .def_property("size", &vision::faceid::AdaFacePreprocessor::GetSize,
+ &vision::faceid::AdaFacePreprocessor::SetSize);
+
+  pybind11::class_<vision::faceid::AdaFacePostprocessor>(
+      m, "AdaFacePostprocessor")
+ .def(pybind11::init())
+ .def("run", [](vision::faceid::AdaFacePostprocessor& self, std::vector& inputs) {
+ std::vector results;
+ if (!self.Run(inputs, &results)) {
+ throw std::runtime_error("Failed to postprocess the runtime result in AdaFacePostprocessor.");
+ }
+ return results;
+ })
+ .def("run", [](vision::faceid::AdaFacePostprocessor& self, std::vector& input_array) {
+ std::vector results;
+ std::vector inputs;
+ PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
+ if (!self.Run(inputs, &results)) {
+ throw std::runtime_error("Failed to postprocess the runtime result in AdaFacePostprocessor.");
+ }
+ return results;
+ })
+ .def_property("l2_normalize", &vision::faceid::AdaFacePostprocessor::GetL2Normalize,
+ &vision::faceid::AdaFacePostprocessor::SetL2Normalize);
+
+  pybind11::class_<vision::faceid::AdaFace, FastDeployModel>(
+      m, "AdaFace")
+      .def(pybind11::init<std::string, std::string, RuntimeOption, ModelFormat>())
+ .def("predict", [](vision::faceid::AdaFace& self, pybind11::array& data) {
+ cv::Mat im = PyArrayToCvMat(data);
+ vision::FaceRecognitionResult result;
+ self.Predict(im, &result);
+ return result;
+ })
+ .def("batch_predict", [](vision::faceid::AdaFace& self, std::vector& data) {
+ std::vector images;
+ for (size_t i = 0; i < data.size(); ++i) {
+ images.push_back(PyArrayToCvMat(data[i]));
+ }
+        std::vector<vision::FaceRecognitionResult> results;
+ self.BatchPredict(images, &results);
+ return results;
+ })
+ .def_property_readonly("preprocessor", &vision::faceid::AdaFace::GetPreprocessor)
+ .def_property_readonly("postprocessor", &vision::faceid::AdaFace::GetPostprocessor);
+}
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/adaface/postprocessor.cc b/fastdeploy/vision/faceid/contrib/adaface/postprocessor.cc
new file mode 100755
index 000000000..0f2d81ce9
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/adaface/postprocessor.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/faceid/contrib/adaface/postprocessor.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace faceid {
+
+AdaFacePostprocessor::AdaFacePostprocessor() {
+ l2_normalize_ = false;
+}
+
+bool AdaFacePostprocessor::Run(std::vector<FDTensor>& infer_result,
+                               std::vector<FaceRecognitionResult>* results) {
+ if (infer_result[0].dtype != FDDataType::FP32) {
+ FDERROR << "Only support post process with float32 data." << std::endl;
+ return false;
+ }
+ if(infer_result.size() != 1){
+ FDERROR << "The default number of output tensor "
+ "must be 1 according to insightface." << std::endl;
+ }
+ int batch = infer_result[0].shape[0];
+ results->resize(batch);
+ for (size_t bs = 0; bs < batch; ++bs) {
+ FDTensor& embedding_tensor = infer_result.at(bs);
+ FDASSERT((embedding_tensor.shape[0] == 1), "Only support batch = 1 now.");
+ if (embedding_tensor.dtype != FDDataType::FP32) {
+ FDERROR << "Only support post process with float32 data." << std::endl;
+ return false;
+ }
+ (*results)[bs].Clear();
+ (*results)[bs].Resize(embedding_tensor.Numel());
+
+ // Copy the raw embedding vector directly without L2 normalize
+ // post process. Let the user decide whether to normalize or not.
+    // Will call utils::L2Normalize() to perform L2
+    // normalization if l2_normalize was set to 'true'.
+ std::memcpy((*results)[bs].embedding.data(),
+ embedding_tensor.Data(),
+ embedding_tensor.Nbytes());
+ if (l2_normalize_) {
+ auto norm_embedding = utils::L2Normalize((*results)[bs].embedding);
+ std::memcpy((*results)[bs].embedding.data(),
+ norm_embedding.data(),
+ embedding_tensor.Nbytes());
+ }
+ }
+ return true;
+}
+
+}  // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/adaface/postprocessor.h b/fastdeploy/vision/faceid/contrib/adaface/postprocessor.h
new file mode 100755
index 000000000..8ad7708dc
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/adaface/postprocessor.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+
+namespace faceid {
+/*! @brief Postprocessor object for the AdaFace series of models.
+ */
+class FASTDEPLOY_DECL AdaFacePostprocessor {
+ public:
+  /** \brief Create a postprocessor instance for the AdaFace series of models
+ */
+ AdaFacePostprocessor();
+
+ /** \brief Process the result of runtime and fill to FaceRecognitionResult structure
+ *
+ * \param[in] tensors The inference result from runtime
+ * \param[in] result The output result of FaceRecognitionResult
+ * \return true if the postprocess successed, otherwise false
+ */
+  bool Run(std::vector<FDTensor>& infer_result,
+           std::vector<FaceRecognitionResult>* results);
+
+ void SetL2Normalize(bool& l2_normalize) { l2_normalize_ = l2_normalize; }
+
+ bool GetL2Normalize() { return l2_normalize_; }
+
+ private:
+ bool l2_normalize_;
+};
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
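
If downstream matching expects unit-length features, the new postprocessor can be asked to normalize the embedding itself. A small sketch based on the getters/setters above; the surrounding model object is assumed to be an already constructed AdaFace instance.

```cpp
#include "fastdeploy/vision.h"

void EnableL2Normalize(fastdeploy::vision::faceid::AdaFace& model) {
  // Embeddings are copied out raw by default; opt in to L2 normalization.
  bool l2_normalize = true;  // SetL2Normalize takes a non-const reference
  model.GetPostprocessor().SetL2Normalize(l2_normalize);
}
```
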
diff --git a/fastdeploy/vision/faceid/contrib/adaface/preprocessor.cc b/fastdeploy/vision/faceid/contrib/adaface/preprocessor.cc
new file mode 100755
index 000000000..8e8f95950
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/adaface/preprocessor.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/faceid/contrib/adaface/preprocessor.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace faceid {
+
+AdaFacePreprocessor::AdaFacePreprocessor() {
+ // parameters for preprocess
+ size_ = {112, 112};
+ alpha_ = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
+ beta_ = {-1.f, -1.f, -1.f}; // RGB
+ permute_ = true;
+}
+
+bool AdaFacePreprocessor::Preprocess(FDMat* mat, FDTensor* output) {
+
+ // face recognition model's preprocess steps in insightface
+ // reference: insightface/recognition/arcface_torch/inference.py
+ // 1. Resize
+ // 2. BGR2RGB
+ // 3. Convert(opencv style) or Normalize
+ // 4. HWC2CHW
+ int resize_w = size_[0];
+ int resize_h = size_[1];
+ if (resize_h != mat->Height() || resize_w != mat->Width()) {
+ Resize::Run(mat, resize_w, resize_h);
+ }
+ if (permute_) {
+ BGR2RGB::Run(mat);
+ }
+
+ Convert::Run(mat, alpha_, beta_);
+ HWC2CHW::Run(mat);
+ Cast::Run(mat, "float");
+
+ mat->ShareWithTensor(output);
+  output->ExpandDim(0);  // reshape to n, c, h, w
+ return true;
+}
+
+bool AdaFacePreprocessor::Run(std::vector<FDMat>* images,
+                              std::vector<FDTensor>* outputs) {
+ if (images->empty()) {
+ FDERROR << "The size of input images should be greater than 0." << std::endl;
+ return false;
+ }
+ FDASSERT(images->size() == 1, "Only support batch = 1 now.");
+ outputs->resize(1);
+ // Concat all the preprocessed data to a batch tensor
+  std::vector<FDTensor> tensors(images->size());
+ for (size_t i = 0; i < images->size(); ++i) {
+ if (!Preprocess(&(*images)[i], &tensors[i])) {
+ FDERROR << "Failed to preprocess input image." << std::endl;
+ return false;
+ }
+ }
+ (*outputs)[0] = std::move(tensors[0]);
+ return true;
+}
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/adaface/preprocessor.h b/fastdeploy/vision/faceid/contrib/adaface/preprocessor.h
new file mode 100755
index 000000000..008df3d10
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/adaface/preprocessor.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+
+namespace faceid {
+/*! @brief Preprocessor object for the AdaFace series of models.
+ */
+class FASTDEPLOY_DECL AdaFacePreprocessor {
+ public:
+  /** \brief Create a preprocessor instance for the AdaFace series of models
+ */
+ AdaFacePreprocessor();
+
+ /** \brief Process the input image and prepare input tensors for runtime
+ *
+ * \param[in] images The input image data list, all the elements are returned by cv::imread()
+ * \param[in] outputs The output tensors which will feed in runtime
+ * \return true if the preprocess successed, otherwise false
+ */
+  bool Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs);
+
+ /// Get Size
+  std::vector<int> GetSize() { return size_; }
+
+  /// Set size.
+  void SetSize(std::vector<int>& size) { size_ = size; }
+
+  /// Get alpha
+  std::vector<float> GetAlpha() { return alpha_; }
+
+  /// Set alpha.
+  void SetAlpha(std::vector<float>& alpha) { alpha_ = alpha; }
+
+  /// Get beta
+  std::vector<float> GetBeta() { return beta_; }
+
+  /// Set beta.
+  void SetBeta(std::vector<float>& beta) { beta_ = beta; }
+
+ bool GetPermute() { return permute_; }
+
+ /// Set permute.
+ void SetPermute(bool permute) { permute_ = permute; }
+
+ protected:
+ bool Preprocess(FDMat* mat, FDTensor* output);
+ // Argument for image preprocessing step, tuple of (width, height),
+ // decide the target size after resize, default (112, 112)
+  std::vector<int> size_;
+  // Argument for image preprocessing step, alpha values for normalization,
+  // default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
+  std::vector<float> alpha_;
+  // Argument for image preprocessing step, beta values for normalization,
+  // default beta = {-1.f, -1.f, -1.f}
+  std::vector<float> beta_;
+ // Argument for image preprocessing step, whether to swap the B and R channel,
+ // such as BGR->RGB, default true.
+ bool permute_;
+};
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
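
Because the preprocessing parameters are now plain members exposed through getters/setters, they can be tuned per deployment without subclassing. A sketch under the assumption of a hypothetical local model file:

```cpp
#include "fastdeploy/vision.h"

void ConfigurePreprocessor() {
  fastdeploy::vision::faceid::AdaFace model("adaface.onnx");
  auto& preprocessor = model.GetPreprocessor();
  // Keep BGR channel order instead of the default BGR->RGB swap.
  preprocessor.SetPermute(false);
  // The setters take non-const references, so pass named vectors.
  std::vector<int> size = {112, 112};
  preprocessor.SetSize(size);
  std::vector<float> alpha = {1.f / 128.f, 1.f / 128.f, 1.f / 128.f};
  preprocessor.SetAlpha(alpha);
}
```
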
diff --git a/fastdeploy/vision/faceid/contrib/adaface_pybind.cc b/fastdeploy/vision/faceid/contrib/adaface_pybind.cc
deleted file mode 100644
index 1333bb6e3..000000000
--- a/fastdeploy/vision/faceid/contrib/adaface_pybind.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/pybind/main.h"
-
-namespace fastdeploy {
-void BindAdaFace(pybind11::module& m) {
- // Bind AdaFace
- pybind11::class_(m, "AdaFace")
- .def(pybind11::init())
- .def("predict",
- [](vision::faceid::AdaFace& self, pybind11::array& data) {
- auto mat = PyArrayToCvMat(data);
- vision::FaceRecognitionResult res;
- self.Predict(&mat, &res);
- return res;
- })
- .def_readwrite("size", &vision::faceid::AdaFace::size)
- .def_readwrite("alpha", &vision::faceid::AdaFace::alpha)
- .def_readwrite("beta", &vision::faceid::AdaFace::beta)
- .def_readwrite("swap_rb", &vision::faceid::AdaFace::swap_rb)
- .def_readwrite("l2_normalize", &vision::faceid::AdaFace::l2_normalize);
-}
-
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/arcface.cc b/fastdeploy/vision/faceid/contrib/arcface.cc
deleted file mode 100644
index 5f6cb834d..000000000
--- a/fastdeploy/vision/faceid/contrib/arcface.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/vision/faceid/contrib/arcface.h"
-#include "fastdeploy/utils/perf.h"
-#include "fastdeploy/vision/utils/utils.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-
-ArcFace::ArcFace(const std::string& model_file, const std::string& params_file,
- const RuntimeOption& custom_option,
- const ModelFormat& model_format)
- : InsightFaceRecognitionModel(model_file, params_file, custom_option,
- model_format) {
- initialized = Initialize();
-}
-
-bool ArcFace::Initialize() {
-
- // (1) if parent class initialed backend
- if (initialized) {
- // (1.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
- }
- // (2) if parent class not initialed backend
- if (!InsightFaceRecognitionModel::Initialize()) {
- FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
- return false;
- }
- // (2.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
-}
-
-bool ArcFace::Preprocess(Mat* mat, FDTensor* output) {
- return InsightFaceRecognitionModel::Preprocess(mat, output);
-}
-
-bool ArcFace::Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Postprocess(infer_result, result);
-}
-
-bool ArcFace::Predict(cv::Mat* im, FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Predict(im, result);
-}
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/faceid/contrib/arcface.h b/fastdeploy/vision/faceid/contrib/arcface.h
deleted file mode 100644
index 05478fc66..000000000
--- a/fastdeploy/vision/faceid/contrib/arcface.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-#include "fastdeploy/fastdeploy_model.h"
-#include "fastdeploy/vision/common/processors/transform.h"
-#include "fastdeploy/vision/common/result.h"
-#include "fastdeploy/vision/faceid/contrib/insightface_rec.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-/*! @brief ArcFace model object used when to load a ArcFace model exported by IngsightFace.
- */
-class FASTDEPLOY_DECL ArcFace : public InsightFaceRecognitionModel {
- public:
- /** \brief Set path of model file and the configuration of runtime.
- *
- * \param[in] model_file Path of model file, e.g ./arcface.onnx
- * \param[in] params_file Path of parameter file, e.g ppyoloe/model.pdiparams, if the model format is ONNX, this parameter will be ignored
- * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
- * \param[in] model_format Model format of the loaded model, default is ONNX format
- */
- ArcFace(const std::string& model_file, const std::string& params_file = "",
- const RuntimeOption& custom_option = RuntimeOption(),
- const ModelFormat& model_format = ModelFormat::ONNX);
-
- std::string ModelName() const override {
- return "deepinsight/insightface/recognition/arcface_pytorch";
- }
- /** \brief Predict the face recognition result for an input image
- *
- * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
- * \param[in] result The output face recognition result will be writen to this structure
- * \return true if the prediction successed, otherwise false
- */
- bool Predict(cv::Mat* im, FaceRecognitionResult* result) override;
-
- private:
- bool Initialize() override;
-
- bool Preprocess(Mat* mat, FDTensor* output) override;
-
-  bool Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) override;
-};
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/arcface_pybind.cc b/fastdeploy/vision/faceid/contrib/arcface_pybind.cc
deleted file mode 100644
index 0d508f444..000000000
--- a/fastdeploy/vision/faceid/contrib/arcface_pybind.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/pybind/main.h"
-
-namespace fastdeploy {
-void BindArcFace(pybind11::module& m) {
- // Bind ArcFace
- pybind11::class_(m, "ArcFace")
- .def(pybind11::init())
- .def("predict",
- [](vision::faceid::ArcFace& self, pybind11::array& data) {
- auto mat = PyArrayToCvMat(data);
- vision::FaceRecognitionResult res;
- self.Predict(&mat, &res);
- return res;
- })
- .def_readwrite("size", &vision::faceid::ArcFace::size)
- .def_readwrite("alpha", &vision::faceid::ArcFace::alpha)
- .def_readwrite("beta", &vision::faceid::ArcFace::beta)
- .def_readwrite("swap_rb", &vision::faceid::ArcFace::swap_rb)
- .def_readwrite("l2_normalize", &vision::faceid::ArcFace::l2_normalize);
-}
-
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/cosface.cc b/fastdeploy/vision/faceid/contrib/cosface.cc
deleted file mode 100644
index 5dffcc02a..000000000
--- a/fastdeploy/vision/faceid/contrib/cosface.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/vision/faceid/contrib/cosface.h"
-#include "fastdeploy/utils/perf.h"
-#include "fastdeploy/vision/utils/utils.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-
-CosFace::CosFace(const std::string& model_file, const std::string& params_file,
- const RuntimeOption& custom_option,
- const ModelFormat& model_format)
- : InsightFaceRecognitionModel(model_file, params_file, custom_option,
- model_format) {
- initialized = Initialize();
-}
-
-bool CosFace::Initialize() {
-
- if (initialized) {
- // (1.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
- }
- if (!InsightFaceRecognitionModel::Initialize()) {
- FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
- return false;
- }
- // (2.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
-}
-
-bool CosFace::Preprocess(Mat* mat, FDTensor* output) {
- return InsightFaceRecognitionModel::Preprocess(mat, output);
-}
-
-bool CosFace::Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Postprocess(infer_result, result);
-}
-
-bool CosFace::Predict(cv::Mat* im, FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Predict(im, result);
-}
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/faceid/contrib/cosface.h b/fastdeploy/vision/faceid/contrib/cosface.h
deleted file mode 100644
index dbf2e4ac6..000000000
--- a/fastdeploy/vision/faceid/contrib/cosface.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-#include "fastdeploy/fastdeploy_model.h"
-#include "fastdeploy/vision/common/processors/transform.h"
-#include "fastdeploy/vision/common/result.h"
-#include "fastdeploy/vision/faceid/contrib/insightface_rec.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-/*! @brief CosFace model object used when to load a CosFace model exported by IngsightFace.
- */
-class FASTDEPLOY_DECL CosFace : public InsightFaceRecognitionModel {
- public:
- /** \brief Set path of model file and the configuration of runtime.
- *
- * \param[in] model_file Path of model file, e.g ./cosface.onnx
- * \param[in] params_file Path of parameter file, e.g ppyoloe/model.pdiparams, if the model format is ONNX, this parameter will be ignored
- * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
- * \param[in] model_format Model format of the loaded model, default is ONNX format
- */
- CosFace(const std::string& model_file, const std::string& params_file = "",
- const RuntimeOption& custom_option = RuntimeOption(),
- const ModelFormat& model_format = ModelFormat::ONNX);
-
- std::string ModelName() const override {
- return "deepinsight/insightface/recognition/arcface_pytorch";
- }
- /** \brief Predict the face recognition result for an input image
- *
- * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
- * \param[in] result The output face recognition result will be writen to this structure
- * \return true if the prediction successed, otherwise false
- */
- bool Predict(cv::Mat* im, FaceRecognitionResult* result) override;
-
- private:
- bool Initialize() override;
-
- bool Preprocess(Mat* mat, FDTensor* output) override;
-
-  bool Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) override;
-};
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/cosface_pybind.cc b/fastdeploy/vision/faceid/contrib/cosface_pybind.cc
deleted file mode 100644
index fe27a95b7..000000000
--- a/fastdeploy/vision/faceid/contrib/cosface_pybind.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/pybind/main.h"
-
-namespace fastdeploy {
-void BindCosFace(pybind11::module& m) {
- // Bind CosFace
- pybind11::class_(m, "CosFace")
- .def(pybind11::init())
- .def("predict",
- [](vision::faceid::CosFace& self, pybind11::array& data) {
- auto mat = PyArrayToCvMat(data);
- vision::FaceRecognitionResult res;
- self.Predict(&mat, &res);
- return res;
- })
- .def_readwrite("size", &vision::faceid::CosFace::size)
- .def_readwrite("alpha", &vision::faceid::CosFace::alpha)
- .def_readwrite("beta", &vision::faceid::CosFace::beta)
- .def_readwrite("swap_rb", &vision::faceid::CosFace::swap_rb)
- .def_readwrite("l2_normalize", &vision::faceid::CosFace::l2_normalize);
-}
-
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/insightface/base.cc b/fastdeploy/vision/faceid/contrib/insightface/base.cc
new file mode 100755
index 000000000..35d8b8086
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/insightface/base.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/faceid/contrib/insightface/base.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace faceid {
+
+InsightFaceRecognitionBase::InsightFaceRecognitionBase(
+ const std::string& model_file, const std::string& params_file,
+ const fastdeploy::RuntimeOption& custom_option,
+ const fastdeploy::ModelFormat& model_format) {
+
+ if (model_format == ModelFormat::ONNX) {
+ valid_cpu_backends = {Backend::ORT};
+ valid_gpu_backends = {Backend::ORT, Backend::TRT};
+ } else {
+ valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
+ valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+ valid_kunlunxin_backends = {Backend::LITE};
+ }
+ runtime_option = custom_option;
+ runtime_option.model_format = model_format;
+ runtime_option.model_file = model_file;
+ runtime_option.params_file = params_file;
+}
+
+bool InsightFaceRecognitionBase::Initialize() {
+ if (!InitRuntime()) {
+ FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
+ return false;
+ }
+ return true;
+}
+
+bool InsightFaceRecognitionBase::Predict(const cv::Mat& im,
+ FaceRecognitionResult* result) {
+  std::vector<FaceRecognitionResult> results;
+ if (!BatchPredict({im}, &results)) {
+ return false;
+ }
+ *result = std::move(results[0]);
+ return true;
+}
+
+bool InsightFaceRecognitionBase::BatchPredict(const std::vector<cv::Mat>& images,
+                                              std::vector<FaceRecognitionResult>* results) {
+  std::vector<FDMat> fd_images = WrapMat(images);
+ FDASSERT(images.size() == 1, "Only support batch = 1 now.");
+ if (!preprocessor_.Run(&fd_images, &reused_input_tensors_)) {
+ FDERROR << "Failed to preprocess the input image." << std::endl;
+ return false;
+ }
+
+ reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
+ if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
+ FDERROR << "Failed to inference by runtime." << std::endl;
+ return false;
+ }
+
+ if (!postprocessor_.Run(reused_output_tensors_, results)){
+ FDERROR << "Failed to postprocess the inference results by runtime." << std::endl;
+ return false;
+ }
+ return true;
+}
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/insightface/base.h b/fastdeploy/vision/faceid/contrib/insightface/base.h
new file mode 100755
index 000000000..571a5bd4c
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/insightface/base.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/faceid/contrib/insightface/postprocessor.h"
+#include "fastdeploy/vision/faceid/contrib/insightface/preprocessor.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace faceid {
+/*! @brief Base model object used to load an InsightFace recognition model exported by InsightFace.
+ */
+class FASTDEPLOY_DECL InsightFaceRecognitionBase : public FastDeployModel {
+ public:
+ /** \brief Set path of model file and the configuration of runtime.
+ *
+ * \param[in] model_file Path of model file, e.g ./arcface.onnx
+ * \param[in] params_file Path of parameter file, e.g arcface/model.pdiparams, if the model format is ONNX, this parameter will be ignored
+ * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
+ * \param[in] model_format Model format of the loaded model, default is ONNX format
+ */
+ InsightFaceRecognitionBase(
+ const std::string& model_file, const std::string& params_file = "",
+ const RuntimeOption& custom_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX);
+
+ std::string ModelName() const { return "insightface_rec"; }
+
+ /** \brief Predict the detection result for an input image
+ *
+ * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+ * \param[in] result The output FaceRecognitionResult will be writen to this structure
+ * \return true if the prediction successed, otherwise false
+ */
+ virtual bool Predict(const cv::Mat& im, FaceRecognitionResult* result);
+
+ /** \brief Predict the detection results for a batch of input images
+ *
+ * \param[in] imgs, The input image list, each element comes from cv::imread()
+ * \param[in] results The output FaceRecognitionResult list
+ * \return true if the prediction successed, otherwise false
+ */
+  virtual bool BatchPredict(const std::vector<cv::Mat>& images,
+                            std::vector<FaceRecognitionResult>* results);
+
+ /// Get preprocessor reference of InsightFaceRecognition
+ virtual InsightFaceRecognitionPreprocessor& GetPreprocessor() {
+ return preprocessor_;
+ }
+
+ /// Get postprocessor reference of InsightFaceRecognition
+ virtual InsightFaceRecognitionPostprocessor& GetPostprocessor() {
+ return postprocessor_;
+ }
+
+ protected:
+ bool Initialize();
+ InsightFaceRecognitionPreprocessor preprocessor_;
+ InsightFaceRecognitionPostprocessor postprocessor_;
+};
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
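
All the derived recognition models (ArcFace, CosFace, PartialFC, VPL, declared later in this patch in insightface/model.h) share this Predict/BatchPredict interface, so comparing two faces reduces to extracting two embeddings and scoring them. A sketch with hypothetical file names; cosine similarity is computed by hand rather than through any utility helper, so only the APIs shown in this patch are assumed.

```cpp
#include <cmath>
#include <iostream>
#include <opencv2/opencv.hpp>
#include "fastdeploy/vision.h"

// Plain cosine similarity over two embedding vectors.
float CosineSimilarity(const std::vector<float>& a, const std::vector<float>& b) {
  float dot = 0.f, na = 0.f, nb = 0.f;
  for (size_t i = 0; i < a.size() && i < b.size(); ++i) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (std::sqrt(na) * std::sqrt(nb) + 1e-12f);
}

int main() {
  fastdeploy::vision::faceid::ArcFace model("arcface.onnx");
  fastdeploy::vision::FaceRecognitionResult r0, r1;
  model.Predict(cv::imread("face_0.jpg"), &r0);
  model.Predict(cv::imread("face_1.jpg"), &r1);
  std::cout << "cosine similarity: "
            << CosineSimilarity(r0.embedding, r1.embedding) << std::endl;
  return 0;
}
```
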
diff --git a/fastdeploy/vision/faceid/contrib/insightface/insightface_pybind.cc b/fastdeploy/vision/faceid/contrib/insightface/insightface_pybind.cc
new file mode 100644
index 000000000..b193d9fb7
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/insightface/insightface_pybind.cc
@@ -0,0 +1,101 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindInsightFace(pybind11::module& m) {
+  pybind11::class_<vision::faceid::InsightFaceRecognitionPreprocessor>(
+      m, "InsightFaceRecognitionPreprocessor")
+ .def(pybind11::init())
+ .def("run", [](vision::faceid::InsightFaceRecognitionPreprocessor& self,
+ std::vector& im_list) {
+ std::vector images;
+ for (size_t i = 0; i < im_list.size(); ++i) {
+ images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
+ }
+        std::vector<FDTensor> outputs;
+ if (!self.Run(&images, &outputs)) {
+ throw std::runtime_error("Failed to preprocess the input data in InsightFaceRecognitionPreprocessor.");
+ }
+ for (size_t i = 0; i < outputs.size(); ++i) {
+ outputs[i].StopSharing();
+ }
+ return outputs;
+ })
+ .def_property("permute", &vision::faceid::InsightFaceRecognitionPreprocessor::GetPermute,
+ &vision::faceid::InsightFaceRecognitionPreprocessor::SetPermute)
+ .def_property("alpha", &vision::faceid::InsightFaceRecognitionPreprocessor::GetAlpha,
+ &vision::faceid::InsightFaceRecognitionPreprocessor::SetAlpha)
+ .def_property("beta", &vision::faceid::InsightFaceRecognitionPreprocessor::GetBeta,
+ &vision::faceid::InsightFaceRecognitionPreprocessor::SetBeta)
+ .def_property("size", &vision::faceid::InsightFaceRecognitionPreprocessor::GetSize,
+ &vision::faceid::InsightFaceRecognitionPreprocessor::SetSize);
+
+  pybind11::class_<vision::faceid::InsightFaceRecognitionPostprocessor>(
+      m, "InsightFaceRecognitionPostprocessor")
+ .def(pybind11::init())
+ .def("run", [](vision::faceid::InsightFaceRecognitionPostprocessor& self, std::vector& inputs) {
+ std::vector results;
+ if (!self.Run(inputs, &results)) {
+ throw std::runtime_error("Failed to postprocess the runtime result in InsightFaceRecognitionPostprocessor.");
+ }
+ return results;
+ })
+ .def("run", [](vision::faceid::InsightFaceRecognitionPostprocessor& self, std::vector& input_array) {
+ std::vector results;
+ std::vector inputs;
+ PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
+ if (!self.Run(inputs, &results)) {
+ throw std::runtime_error("Failed to postprocess the runtime result in InsightFaceRecognitionPostprocessor.");
+ }
+ return results;
+ })
+ .def_property("l2_normalize", &vision::faceid::InsightFaceRecognitionPostprocessor::GetL2Normalize,
+ &vision::faceid::InsightFaceRecognitionPostprocessor::SetL2Normalize);
+
+  pybind11::class_<vision::faceid::InsightFaceRecognitionBase, FastDeployModel>(
+      m, "InsightFaceRecognitionBase")
+      .def(pybind11::init<std::string, std::string, RuntimeOption, ModelFormat>())
+ .def("predict", [](vision::faceid::InsightFaceRecognitionBase& self, pybind11::array& data) {
+ cv::Mat im = PyArrayToCvMat(data);
+ vision::FaceRecognitionResult result;
+ self.Predict(im, &result);
+ return result;
+ })
+ .def("batch_predict", [](vision::faceid::InsightFaceRecognitionBase& self, std::vector& data) {
+ std::vector images;
+ for (size_t i = 0; i < data.size(); ++i) {
+ images.push_back(PyArrayToCvMat(data[i]));
+ }
+        std::vector<vision::FaceRecognitionResult> results;
+ self.BatchPredict(images, &results);
+ return results;
+ })
+ .def_property_readonly("preprocessor", &vision::faceid::InsightFaceRecognitionBase::GetPreprocessor)
+ .def_property_readonly("postprocessor", &vision::faceid::InsightFaceRecognitionBase::GetPostprocessor);
+
+ pybind11::class_(m, "ArcFace")
+ .def(pybind11::init());
+
+ pybind11::class_(m, "CosFace")
+ .def(pybind11::init());
+
+ pybind11::class_(m, "PartialFC")
+ .def(pybind11::init());
+
+ pybind11::class_(m, "VPL")
+ .def(pybind11::init());
+}
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/insightface/model.h b/fastdeploy/vision/faceid/contrib/insightface/model.h
new file mode 100755
index 000000000..a1a8f128b
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/insightface/model.h
@@ -0,0 +1,133 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. //NOLINT
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/faceid/contrib/insightface/base.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace faceid {
+class FASTDEPLOY_DECL ArcFace : public InsightFaceRecognitionBase {
+ public:
+ /** \brief Set the paths of the model file and the parameter file, and the runtime configuration
+ *
+ * \param[in] model_file Path of the model file, e.g ArcFace/model.pdmodel
+ * \param[in] params_file Path of the parameter file, e.g ArcFace/model.pdiparams; if the model format is ONNX, this parameter will be ignored
+ * \param[in] custom_option RuntimeOption for inference; by default the CPU is used with a backend chosen from `valid_cpu_backends`
+ * \param[in] model_format Model format of the loaded model, default is ONNX format
+ */
+ ArcFace(const std::string& model_file, const std::string& params_file = "",
+ const RuntimeOption& custom_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX)
+ : InsightFaceRecognitionBase(model_file, params_file, custom_option,
+ model_format) {
+ if (model_format == ModelFormat::ONNX) {
+ valid_cpu_backends = {Backend::ORT};
+ valid_gpu_backends = {Backend::ORT, Backend::TRT};
+ } else {
+ valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
+ valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+ valid_kunlunxin_backends = {Backend::LITE};
+ }
+ initialized = Initialize();
+ }
+
+ virtual std::string ModelName() const { return "ArcFace"; }
+};
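+
+// A minimal usage sketch (illustrative only; the model and image paths below are
+// placeholders, not files shipped with this patch):
+//
+//   auto model = fastdeploy::vision::faceid::ArcFace("arcface.onnx");
+//   cv::Mat im = cv::imread("face.jpg");
+//   fastdeploy::vision::FaceRecognitionResult result;
+//   if (model.Initialized()) {
+//     model.Predict(im, &result);  // result.embedding holds the face feature
+//   }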
+
+class FASTDEPLOY_DECL CosFace : public InsightFaceRecognitionBase {
+ public:
+ /** \brief Set the paths of the model file and the parameter file, and the runtime configuration
+ *
+ * \param[in] model_file Path of the model file, e.g CosFace/model.pdmodel
+ * \param[in] params_file Path of the parameter file, e.g CosFace/model.pdiparams; if the model format is ONNX, this parameter will be ignored
+ * \param[in] custom_option RuntimeOption for inference; by default the CPU is used with a backend chosen from `valid_cpu_backends`
+ * \param[in] model_format Model format of the loaded model, default is ONNX format
+ */
+ CosFace(const std::string& model_file, const std::string& params_file = "",
+ const RuntimeOption& custom_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX)
+ : InsightFaceRecognitionBase(model_file, params_file, custom_option,
+ model_format) {
+ if (model_format == ModelFormat::ONNX) {
+ valid_cpu_backends = {Backend::ORT};
+ valid_gpu_backends = {Backend::ORT, Backend::TRT};
+ } else {
+ valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
+ valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+ valid_kunlunxin_backends = {Backend::LITE};
+ }
+ initialized = Initialize();
+ }
+
+ virtual std::string ModelName() const { return "CosFace"; }
+};
+class FASTDEPLOY_DECL PartialFC : public InsightFaceRecognitionBase {
+ public:
+ /** \brief Set the paths of the model file and the parameter file, and the runtime configuration
+ *
+ * \param[in] model_file Path of the model file, e.g PartialFC/model.pdmodel
+ * \param[in] params_file Path of the parameter file, e.g PartialFC/model.pdiparams; if the model format is ONNX, this parameter will be ignored
+ * \param[in] custom_option RuntimeOption for inference; by default the CPU is used with a backend chosen from `valid_cpu_backends`
+ * \param[in] model_format Model format of the loaded model, default is ONNX format
+ */
+ PartialFC(const std::string& model_file, const std::string& params_file = "",
+ const RuntimeOption& custom_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX)
+ : InsightFaceRecognitionBase(model_file, params_file, custom_option,
+ model_format) {
+ if (model_format == ModelFormat::ONNX) {
+ valid_cpu_backends = {Backend::ORT};
+ valid_gpu_backends = {Backend::ORT, Backend::TRT};
+ } else {
+ valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
+ valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+ valid_kunlunxin_backends = {Backend::LITE};
+ }
+ initialized = Initialize();
+ }
+
+ virtual std::string ModelName() const { return "PartialFC"; }
+};
+class FASTDEPLOY_DECL VPL : public InsightFaceRecognitionBase {
+ public:
+ /** \brief Set the paths of the model file and the parameter file, and the runtime configuration
+ *
+ * \param[in] model_file Path of the model file, e.g VPL/model.pdmodel
+ * \param[in] params_file Path of the parameter file, e.g VPL/model.pdiparams; if the model format is ONNX, this parameter will be ignored
+ * \param[in] custom_option RuntimeOption for inference; by default the CPU is used with a backend chosen from `valid_cpu_backends`
+ * \param[in] model_format Model format of the loaded model, default is ONNX format
+ */
+ VPL(const std::string& model_file, const std::string& params_file = "",
+ const RuntimeOption& custom_option = RuntimeOption(),
+ const ModelFormat& model_format = ModelFormat::ONNX)
+ : InsightFaceRecognitionBase(model_file, params_file, custom_option,
+ model_format) {
+ if (model_format == ModelFormat::ONNX) {
+ valid_cpu_backends = {Backend::ORT};
+ valid_gpu_backends = {Backend::ORT, Backend::TRT};
+ } else {
+ valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
+ valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+ valid_kunlunxin_backends = {Backend::LITE};
+ }
+ initialized = Initialize();
+ }
+
+ virtual std::string ModelName() const { return "VPL"; }
+};
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/insightface/postprocessor.cc b/fastdeploy/vision/faceid/contrib/insightface/postprocessor.cc
new file mode 100755
index 000000000..ca30719f6
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/insightface/postprocessor.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/faceid/contrib/insightface/postprocessor.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace faceid {
+
+InsightFaceRecognitionPostprocessor::InsightFaceRecognitionPostprocessor() {
+ l2_normalize_ = false;
+}
+
+bool InsightFaceRecognitionPostprocessor::Run(std::vector<FDTensor>& infer_result,
+ std::vector<FaceRecognitionResult>* results) {
+ if (infer_result.size() != 1) {
+ FDERROR << "The number of output tensors "
+ "must be 1 according to insightface." << std::endl;
+ return false;
+ }
+ if (infer_result[0].dtype != FDDataType::FP32) {
+ FDERROR << "Only support post process with float32 data." << std::endl;
+ return false;
+ }
+ int batch = infer_result[0].shape[0];
+ results->resize(batch);
+ for (size_t bs = 0; bs < batch; ++bs) {
+ FDTensor& embedding_tensor = infer_result.at(bs);
+ FDASSERT((embedding_tensor.shape[0] == 1), "Only support batch = 1 now.");
+ if (embedding_tensor.dtype != FDDataType::FP32) {
+ FDERROR << "Only support post process with float32 data." << std::endl;
+ return false;
+ }
+ (*results)[bs].Clear();
+ (*results)[bs].Resize(embedding_tensor.Numel());
+
+ // Copy the raw embedding vector directly without L2 normalize
+ // post process. Let the user decide whether to normalize or not.
+ // Will call utils::L2Normlize() method to perform L2
+ // normalize if l2_normalize was set as 'true'.
+ std::memcpy((*results)[bs].embedding.data(),
+ embedding_tensor.Data(),
+ embedding_tensor.Nbytes());
+ if (l2_normalize_) {
+ auto norm_embedding = utils::L2Normalize((*results)[bs].embedding);
+ std::memcpy((*results)[bs].embedding.data(),
+ norm_embedding.data(),
+ embedding_tensor.Nbytes());
+ }
+ }
+ return true;
+}
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/insightface/postprocessor.h b/fastdeploy/vision/faceid/contrib/insightface/postprocessor.h
new file mode 100755
index 000000000..90c90980d
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/insightface/postprocessor.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+
+namespace faceid {
+/*! @brief Postprocessor object for the InsightFaceRecognition series of models.
+ */
+class FASTDEPLOY_DECL InsightFaceRecognitionPostprocessor {
+ public:
+ /** \brief Create a postprocessor instance for the InsightFaceRecognition series of models
+ */
+ InsightFaceRecognitionPostprocessor();
+
+ /** \brief Process the inference results from the runtime and fill them into FaceRecognitionResult structures
+ *
+ * \param[in] infer_result The inference results from the runtime
+ * \param[out] results The output list of FaceRecognitionResult
+ * \return true if the postprocess succeeded, otherwise false
+ */
+ bool Run(std::vector<FDTensor>& infer_result,
+ std::vector<FaceRecognitionResult>* results);
+
+ void SetL2Normalize(bool& l2_normalize) { l2_normalize_ = l2_normalize; }
+
+ bool GetL2Normalize() { return l2_normalize_; }
+
+ private:
+ bool l2_normalize_;
+};
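+
+// A minimal usage sketch (illustrative only; `outputs` stands for the tensors
+// produced by the runtime after inference):
+//
+//   fastdeploy::vision::faceid::InsightFaceRecognitionPostprocessor postprocessor;
+//   bool l2_normalize = true;  // off by default; SetL2Normalize takes an lvalue reference
+//   postprocessor.SetL2Normalize(l2_normalize);
+//   std::vector<fastdeploy::vision::FaceRecognitionResult> results;
+//   postprocessor.Run(outputs, &results);  // results[0].embedding holds the feature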
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/insightface/preprocessor.cc b/fastdeploy/vision/faceid/contrib/insightface/preprocessor.cc
new file mode 100755
index 000000000..c846522cc
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/insightface/preprocessor.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/faceid/contrib/insightface/preprocessor.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace faceid {
+
+InsightFaceRecognitionPreprocessor::InsightFaceRecognitionPreprocessor() {
+ // parameters for preprocess
+ size_ = {112, 112};
+ alpha_ = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
+ beta_ = {-1.f, -1.f, -1.f}; // RGB
+ permute_ = true;
+}
+
+bool InsightFaceRecognitionPreprocessor::Preprocess(FDMat* mat, FDTensor* output) {
+
+ // face recognition model's preprocess steps in insightface
+ // reference: insightface/recognition/arcface_torch/inference.py
+ // 1. Resize
+ // 2. BGR2RGB
+ // 3. Convert(opencv style) or Normalize
+ // 4. HWC2CHW
+ int resize_w = size_[0];
+ int resize_h = size_[1];
+ if (resize_h != mat->Height() || resize_w != mat->Width()) {
+ Resize::Run(mat, resize_w, resize_h);
+ }
+ if (permute_) {
+ BGR2RGB::Run(mat);
+ }
+
+ Convert::Run(mat, alpha_, beta_);
+ HWC2CHW::Run(mat);
+ Cast::Run(mat, "float");
+
+ mat->ShareWithTensor(output);
+ output->ExpandDim(0); // reshape to n, c, h, w
+ return true;
+}
+
+bool InsightFaceRecognitionPreprocessor::Run(std::vector<FDMat>* images,
+ std::vector<FDTensor>* outputs) {
+ if (images->empty()) {
+ FDERROR << "The size of input images should be greater than 0." << std::endl;
+ return false;
+ }
+ FDASSERT(images->size() == 1, "Only support batch = 1 now.");
+ outputs->resize(1);
+ // Concat all the preprocessed data to a batch tensor
+ std::vector<FDTensor> tensors(images->size());
+ for (size_t i = 0; i < images->size(); ++i) {
+ if (!Preprocess(&(*images)[i], &tensors[i])) {
+ FDERROR << "Failed to preprocess input image." << std::endl;
+ return false;
+ }
+ }
+ (*outputs)[0] = std::move(tensors[0]);
+ return true;
+}
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/insightface/preprocessor.h b/fastdeploy/vision/faceid/contrib/insightface/preprocessor.h
new file mode 100755
index 000000000..88d0dce8d
--- /dev/null
+++ b/fastdeploy/vision/faceid/contrib/insightface/preprocessor.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+
+namespace faceid {
+/*! @brief Preprocessor object for the InsightFaceRecognition series of models.
+ */
+class FASTDEPLOY_DECL InsightFaceRecognitionPreprocessor {
+ public:
+ /** \brief Create a preprocessor instance for the InsightFaceRecognition series of models
+ */
+ InsightFaceRecognitionPreprocessor();
+
+ /** \brief Process the input images and prepare the input tensors for the runtime
+ *
+ * \param[in] images The input image data list, all the elements are returned by cv::imread()
+ * \param[out] outputs The output tensors which will be fed into the runtime
+ * \return true if the preprocess succeeded, otherwise false
+ */
+ bool Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs);
+
+ /// Get size
+ std::vector<int> GetSize() { return size_; }
+
+ /// Set size.
+ void SetSize(std::vector<int>& size) { size_ = size; }
+
+ /// Get alpha
+ std::vector<float> GetAlpha() { return alpha_; }
+
+ /// Set alpha.
+ void SetAlpha(std::vector<float>& alpha) { alpha_ = alpha; }
+
+ /// Get beta
+ std::vector<float> GetBeta() { return beta_; }
+
+ /// Set beta.
+ void SetBeta(std::vector<float>& beta) { beta_ = beta; }
+
+ /// Get permute
+ bool GetPermute() { return permute_; }
+
+ /// Set permute.
+ void SetPermute(bool permute) { permute_ = permute; }
+
+ protected:
+ bool Preprocess(FDMat* mat, FDTensor* output);
+ // Argument for image preprocessing step, tuple of (width, height),
+ // decide the target size after resize, default (112, 112)
+ std::vector<int> size_;
+ // Argument for image preprocessing step, alpha values for normalization,
+ // default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
+ std::vector<float> alpha_;
+ // Argument for image preprocessing step, beta values for normalization,
+ // default beta = {-1.f, -1.f, -1.f}
+ std::vector<float> beta_;
+ // Argument for image preprocessing step, whether to swap the B and R channel,
+ // such as BGR->RGB, default true.
+ bool permute_;
+};
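+
+// A minimal usage sketch (illustrative only; building the FDMat list `mats` from
+// cv::Mat images is elided, and the values shown just restate the defaults):
+//
+//   fastdeploy::vision::faceid::InsightFaceRecognitionPreprocessor preprocessor;
+//   std::vector<int> size = {112, 112};
+//   preprocessor.SetSize(size);      // setters take lvalue references
+//   preprocessor.SetPermute(true);   // swap BGR -> RGB before normalization
+//   std::vector<FDTensor> inputs;
+//   preprocessor.Run(&mats, &inputs);  // mats: std::vector<FDMat>, batch = 1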
+
+} // namespace faceid
+} // namespace vision
+} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/insightface_rec.cc b/fastdeploy/vision/faceid/contrib/insightface_rec.cc
deleted file mode 100755
index f8810e82a..000000000
--- a/fastdeploy/vision/faceid/contrib/insightface_rec.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/vision/faceid/contrib/insightface_rec.h"
-
-#include "fastdeploy/utils/perf.h"
-#include "fastdeploy/vision/utils/utils.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-
-InsightFaceRecognitionModel::InsightFaceRecognitionModel(
- const std::string& model_file, const std::string& params_file,
- const RuntimeOption& custom_option, const ModelFormat& model_format) {
- if (model_format == ModelFormat::ONNX) {
- valid_cpu_backends = {Backend::ORT};
- valid_gpu_backends = {Backend::ORT, Backend::TRT};
- } else {
- valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
- valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
- }
- runtime_option = custom_option;
- runtime_option.model_format = model_format;
- runtime_option.model_file = model_file;
- runtime_option.params_file = params_file;
- initialized = Initialize();
-}
-
-bool InsightFaceRecognitionModel::Initialize() {
- // parameters for preprocess
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
-
- if (!InitRuntime()) {
- FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
- return false;
- }
- return true;
-}
-
-bool InsightFaceRecognitionModel::Preprocess(Mat* mat, FDTensor* output) {
- // face recognition model's preprocess steps in insightface
- // reference: insightface/recognition/arcface_torch/inference.py
- // 1. Resize
- // 2. BGR2RGB
- // 3. Convert(opencv style) or Normalize
- // 4. HWC2CHW
- int resize_w = size[0];
- int resize_h = size[1];
- if (resize_h != mat->Height() || resize_w != mat->Width()) {
- Resize::Run(mat, resize_w, resize_h);
- }
- if (swap_rb) {
- BGR2RGB::Run(mat);
- }
-
- Convert::Run(mat, alpha, beta);
- HWC2CHW::Run(mat);
- Cast::Run(mat, "float");
-
- mat->ShareWithTensor(output);
- output->shape.insert(output->shape.begin(), 1); // reshape to n, h, w, c
- return true;
-}
-
-bool InsightFaceRecognitionModel::Postprocess(
- std::vector<FDTensor>& infer_result, FaceRecognitionResult* result) {
- FDASSERT((infer_result.size() == 1),
- "The default number of output tensor must be 1 according to "
- "insightface.");
- FDTensor& embedding_tensor = infer_result.at(0);
- FDASSERT((embedding_tensor.shape[0] == 1), "Only support batch =1 now.");
- if (embedding_tensor.dtype != FDDataType::FP32) {
- FDERROR << "Only support post process with float32 data." << std::endl;
- return false;
- }
-
- result->Clear();
- result->Resize(embedding_tensor.Numel());
- // Copy the raw embedding vector directly without L2 normalize
- // post process. Let the user decide whether to normalize or not.
- // Will call utils::L2Normlize() method to perform L2
- // normalize if l2_normalize was set as 'true'.
- std::memcpy(result->embedding.data(), embedding_tensor.Data(),
- embedding_tensor.Nbytes());
- if (l2_normalize) {
- auto norm_embedding = utils::L2Normalize(result->embedding);
- std::memcpy(result->embedding.data(), norm_embedding.data(),
- embedding_tensor.Nbytes());
- }
- return true;
-}
-
-bool InsightFaceRecognitionModel::Predict(cv::Mat* im,
- FaceRecognitionResult* result) {
- Mat mat(*im);
- std::vector<FDTensor> input_tensors(1);
-
- if (!Preprocess(&mat, &input_tensors[0])) {
- FDERROR << "Failed to preprocess input image." << std::endl;
- return false;
- }
-
- input_tensors[0].name = InputInfoOfRuntime(0).name;
- std::vector<FDTensor> output_tensors;
- if (!Infer(input_tensors, &output_tensors)) {
- FDERROR << "Failed to inference." << std::endl;
- return false;
- }
-
- if (!Postprocess(output_tensors, result)) {
- FDERROR << "Failed to post process." << std::endl;
- return false;
- }
- return true;
-}
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/faceid/contrib/insightface_rec.h b/fastdeploy/vision/faceid/contrib/insightface_rec.h
deleted file mode 100644
index 2e66d3d71..000000000
--- a/fastdeploy/vision/faceid/contrib/insightface_rec.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-#include "fastdeploy/fastdeploy_model.h"
-#include "fastdeploy/vision/common/processors/transform.h"
-#include "fastdeploy/vision/common/result.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-/*! @brief CosFace model object used when to load a CosFace model exported by IngsightFace.
- */
-class FASTDEPLOY_DECL InsightFaceRecognitionModel : public FastDeployModel {
- public:
- /** \brief Set path of model file and the configuration of runtime.
- *
- * \param[in] model_file Path of model file, e.g ./arcface.onnx
- * \param[in] params_file Path of parameter file, e.g ppyoloe/model.pdiparams, if the model format is ONNX, this parameter will be ignored
- * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
- * \param[in] model_format Model format of the loaded model, default is ONNX format
- */
- InsightFaceRecognitionModel(
- const std::string& model_file, const std::string& params_file = "",
- const RuntimeOption& custom_option = RuntimeOption(),
- const ModelFormat& model_format = ModelFormat::ONNX);
-
- virtual std::string ModelName() const { return "deepinsight/insightface"; }
-
- /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default (112, 112)
- */
- std::vector<int> size;
- /*! @brief
- Argument for image preprocessing step, alpha values for normalization, default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- */
- std::vector<float> alpha;
- /*! @brief
- Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
- */
- std::vector<float> beta;
- /*! @brief
- Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
- */
- bool swap_rb;
- /*! @brief
- Argument for image postprocessing step, whether to apply l2 normalize to embedding values, default false;
- */
- bool l2_normalize;
- /** \brief Predict the face recognition result for an input image
- *
- * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
- * \param[in] result The output face recognition result will be writen to this structure
- * \return true if the prediction successed, otherwise false
- */
- virtual bool Predict(cv::Mat* im, FaceRecognitionResult* result);
-
- virtual bool Initialize();
-
- virtual bool Preprocess(Mat* mat, FDTensor* output);
-
- virtual bool Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result);
-};
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/insightface_rec_pybind.cc b/fastdeploy/vision/faceid/contrib/insightface_rec_pybind.cc
deleted file mode 100644
index bbbe60d3a..000000000
--- a/fastdeploy/vision/faceid/contrib/insightface_rec_pybind.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/pybind/main.h"
-
-namespace fastdeploy {
-void BindInsightFaceRecognitionModel(pybind11::module& m) {
- // Bind InsightFaceRecognitionModel
- pybind11::class_<vision::faceid::InsightFaceRecognitionModel, FastDeployModel>(m, "InsightFaceRecognitionModel")
- .def(pybind11::init<std::string, std::string, RuntimeOption, ModelFormat>())
- .def("predict",
- [](vision::faceid::InsightFaceRecognitionModel& self,
- pybind11::array& data) {
- auto mat = PyArrayToCvMat(data);
- vision::FaceRecognitionResult res;
- self.Predict(&mat, &res);
- return res;
- })
- .def_readwrite("size", &vision::faceid::InsightFaceRecognitionModel::size)
- .def_readwrite("alpha",
- &vision::faceid::InsightFaceRecognitionModel::alpha)
- .def_readwrite("beta", &vision::faceid::InsightFaceRecognitionModel::beta)
- .def_readwrite("swap_rb",
- &vision::faceid::InsightFaceRecognitionModel::swap_rb)
- .def_readwrite(
- "l2_normalize",
- &vision::faceid::InsightFaceRecognitionModel::l2_normalize);
-}
-
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/partial_fc.cc b/fastdeploy/vision/faceid/contrib/partial_fc.cc
deleted file mode 100644
index 1ef1f218b..000000000
--- a/fastdeploy/vision/faceid/contrib/partial_fc.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/vision/faceid/contrib/partial_fc.h"
-#include "fastdeploy/utils/perf.h"
-#include "fastdeploy/vision/utils/utils.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-
-PartialFC::PartialFC(const std::string& model_file,
- const std::string& params_file,
- const RuntimeOption& custom_option,
- const ModelFormat& model_format)
- : InsightFaceRecognitionModel(model_file, params_file, custom_option,
- model_format) {
- initialized = Initialize();
-}
-
-bool PartialFC::Initialize() {
-
- if (initialized) {
- // (1.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
- }
- if (!InsightFaceRecognitionModel::Initialize()) {
- FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
- return false;
- }
- // (2.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
-}
-
-bool PartialFC::Preprocess(Mat* mat, FDTensor* output) {
- return InsightFaceRecognitionModel::Preprocess(mat, output);
-}
-
-bool PartialFC::Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Postprocess(infer_result, result);
-}
-
-bool PartialFC::Predict(cv::Mat* im, FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Predict(im, result);
-}
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/faceid/contrib/partial_fc.h b/fastdeploy/vision/faceid/contrib/partial_fc.h
deleted file mode 100644
index fac525be7..000000000
--- a/fastdeploy/vision/faceid/contrib/partial_fc.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-#include "fastdeploy/fastdeploy_model.h"
-#include "fastdeploy/vision/common/processors/transform.h"
-#include "fastdeploy/vision/common/result.h"
-#include "fastdeploy/vision/faceid/contrib/insightface_rec.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-/*! @brief PartialFC model object used when to load a PartialFC model exported by IngsightFace.
- */
-class FASTDEPLOY_DECL PartialFC : public InsightFaceRecognitionModel {
- public:
- /** \brief Set path of model file and the configuration of runtime.
- *
- * \param[in] model_file Path of model file, e.g ./partial_fc.onnx
- * \param[in] params_file Path of parameter file, e.g ppyoloe/model.pdiparams, if the model format is ONNX, this parameter will be ignored
- * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
- * \param[in] model_format Model format of the loaded model, default is ONNX format
- */
- PartialFC(const std::string& model_file, const std::string& params_file = "",
- const RuntimeOption& custom_option = RuntimeOption(),
- const ModelFormat& model_format = ModelFormat::ONNX);
-
- std::string ModelName() const override {
- return "deepinsight/insightface/recognition/partial_fc";
- }
- /** \brief Predict the face recognition result for an input image
- *
- * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
- * \param[in] result The output face recognition result will be writen to this structure
- * \return true if the prediction successed, otherwise false
- */
- bool Predict(cv::Mat* im, FaceRecognitionResult* result) override;
-
- private:
- bool Initialize() override;
-
- bool Preprocess(Mat* mat, FDTensor* output) override;
-
- bool Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) override;
-};
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/partial_fc_pybind.cc b/fastdeploy/vision/faceid/contrib/partial_fc_pybind.cc
deleted file mode 100644
index 4b1761ca1..000000000
--- a/fastdeploy/vision/faceid/contrib/partial_fc_pybind.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/pybind/main.h"
-
-namespace fastdeploy {
-void BindPartialFC(pybind11::module& m) {
- // Bind Partial FC
- pybind11::class_<vision::faceid::PartialFC, vision::faceid::InsightFaceRecognitionModel>(m, "PartialFC")
- .def(pybind11::init<std::string, std::string, RuntimeOption, ModelFormat>())
- .def("predict",
- [](vision::faceid::PartialFC& self, pybind11::array& data) {
- auto mat = PyArrayToCvMat(data);
- vision::FaceRecognitionResult res;
- self.Predict(&mat, &res);
- return res;
- })
- .def_readwrite("size", &vision::faceid::PartialFC::size)
- .def_readwrite("alpha", &vision::faceid::PartialFC::alpha)
- .def_readwrite("beta", &vision::faceid::PartialFC::beta)
- .def_readwrite("swap_rb", &vision::faceid::PartialFC::swap_rb)
- .def_readwrite("l2_normalize", &vision::faceid::PartialFC::l2_normalize);
-}
-
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/vpl.cc b/fastdeploy/vision/faceid/contrib/vpl.cc
deleted file mode 100644
index 841c889a9..000000000
--- a/fastdeploy/vision/faceid/contrib/vpl.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/vision/faceid/contrib/vpl.h"
-#include "fastdeploy/utils/perf.h"
-#include "fastdeploy/vision/utils/utils.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-
-VPL::VPL(const std::string& model_file, const std::string& params_file,
- const RuntimeOption& custom_option, const ModelFormat& model_format)
- : InsightFaceRecognitionModel(model_file, params_file, custom_option,
- model_format) {
- initialized = Initialize();
-}
-
-bool VPL::Initialize() {
-
- if (initialized) {
- // (1.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
- }
- if (!InsightFaceRecognitionModel::Initialize()) {
- FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
- return false;
- }
- // (2.1) re-init parameters for specific sub-classes
- size = {112, 112};
- alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
- beta = {-1.f, -1.f, -1.f}; // RGB
- swap_rb = true;
- l2_normalize = false;
- return true;
-}
-
-bool VPL::Preprocess(Mat* mat, FDTensor* output) {
- return InsightFaceRecognitionModel::Preprocess(mat, output);
-}
-
-bool VPL::Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Postprocess(infer_result, result);
-}
-
-bool VPL::Predict(cv::Mat* im, FaceRecognitionResult* result) {
- return InsightFaceRecognitionModel::Predict(im, result);
-}
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/faceid/contrib/vpl.h b/fastdeploy/vision/faceid/contrib/vpl.h
deleted file mode 100644
index c4a265072..000000000
--- a/fastdeploy/vision/faceid/contrib/vpl.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-#include "fastdeploy/fastdeploy_model.h"
-#include "fastdeploy/vision/common/processors/transform.h"
-#include "fastdeploy/vision/common/result.h"
-#include "fastdeploy/vision/faceid/contrib/insightface_rec.h"
-
-namespace fastdeploy {
-
-namespace vision {
-
-namespace faceid {
-/*! @brief VPL model object used when to load a VPL model exported by IngsightFace.
- */
-class FASTDEPLOY_DECL VPL : public InsightFaceRecognitionModel {
- public:
- /** \brief Set path of model file and the configuration of runtime.
- *
- * \param[in] model_file Path of model file, e.g ./vpl.onnx
- * \param[in] params_file Path of parameter file, e.g ppyoloe/model.pdiparams, if the model format is ONNX, this parameter will be ignored
- * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
- * \param[in] model_format Model format of the loaded model, default is ONNX format
- */
- VPL(const std::string& model_file, const std::string& params_file = "",
- const RuntimeOption& custom_option = RuntimeOption(),
- const ModelFormat& model_format = ModelFormat::ONNX);
-
- std::string ModelName() const override {
- return "deepinsight/insightface/recognition/vpl";
- }
- /** \brief Predict the face recognition result for an input image
- *
- * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
- * \param[in] result The output face recognition result will be writen to this structure
- * \return true if the prediction successed, otherwise false
- */
- bool Predict(cv::Mat* im, FaceRecognitionResult* result) override;
-
- private:
- bool Initialize() override;
-
- bool Preprocess(Mat* mat, FDTensor* output) override;
-
- bool Postprocess(std::vector<FDTensor>& infer_result,
- FaceRecognitionResult* result) override;
-};
-
-} // namespace faceid
-} // namespace vision
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/contrib/vpl_pybind.cc b/fastdeploy/vision/faceid/contrib/vpl_pybind.cc
deleted file mode 100644
index 8d3369b95..000000000
--- a/fastdeploy/vision/faceid/contrib/vpl_pybind.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "fastdeploy/pybind/main.h"
-
-namespace fastdeploy {
-void BindVPL(pybind11::module& m) {
- // Bind VPL
- pybind11::class_<vision::faceid::VPL, vision::faceid::InsightFaceRecognitionModel>(m, "VPL")
- .def(pybind11::init<std::string, std::string, RuntimeOption, ModelFormat>())
- .def("predict",
- [](vision::faceid::VPL& self, pybind11::array& data) {
- auto mat = PyArrayToCvMat(data);
- vision::FaceRecognitionResult res;
- self.Predict(&mat, &res);
- return res;
- })
- .def_readwrite("size", &vision::faceid::VPL::size)
- .def_readwrite("alpha", &vision::faceid::VPL::alpha)
- .def_readwrite("beta", &vision::faceid::VPL::beta)
- .def_readwrite("swap_rb", &vision::faceid::VPL::swap_rb)
- .def_readwrite("l2_normalize", &vision::faceid::VPL::l2_normalize);
-}
-
-} // namespace fastdeploy
diff --git a/fastdeploy/vision/faceid/faceid_pybind.cc b/fastdeploy/vision/faceid/faceid_pybind.cc
index 22e9d5140..1b3b2b6cf 100644
--- a/fastdeploy/vision/faceid/faceid_pybind.cc
+++ b/fastdeploy/vision/faceid/faceid_pybind.cc
@@ -15,20 +15,11 @@
#include "fastdeploy/pybind/main.h"
namespace fastdeploy {
+void BindInsightFace(pybind11::module& m);
void BindAdaFace(pybind11::module& m);
-void BindArcFace(pybind11::module& m);
-void BindInsightFaceRecognitionModel(pybind11::module& m);
-void BindCosFace(pybind11::module& m);
-void BindPartialFC(pybind11::module& m);
-void BindVPL(pybind11::module& m);
-
void BindFaceId(pybind11::module& m) {
auto faceid_module = m.def_submodule("faceid", "Face recognition models.");
- BindInsightFaceRecognitionModel(faceid_module);
+ BindInsightFace(faceid_module);
BindAdaFace(faceid_module);
- BindArcFace(faceid_module);
- BindCosFace(faceid_module);
- BindPartialFC(faceid_module);
- BindVPL(faceid_module);
}
} // namespace fastdeploy
diff --git a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
index 155ef218f..781a8973c 100755
--- a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
+++ b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
@@ -18,7 +18,7 @@ PPTinyPose::PPTinyPose(const std::string& model_file,
config_file_ = config_file;
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
runtime_option = custom_option;
runtime_option.model_format = model_format;
runtime_option.model_file = model_file;
diff --git a/fastdeploy/vision/matting/ppmatting/ppmatting.cc b/fastdeploy/vision/matting/ppmatting/ppmatting.cc
index 1a7de4451..ebfa5f2a5 100755
--- a/fastdeploy/vision/matting/ppmatting/ppmatting.cc
+++ b/fastdeploy/vision/matting/ppmatting/ppmatting.cc
@@ -28,7 +28,7 @@ PPMatting::PPMatting(const std::string& model_file,
config_file_ = config_file;
valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
runtime_option = custom_option;
runtime_option.model_format = model_format;
runtime_option.model_file = model_file;
diff --git a/fastdeploy/vision/ocr/ppocr/classifier.cc b/fastdeploy/vision/ocr/ppocr/classifier.cc
index 7783ed828..c76750646 100755
--- a/fastdeploy/vision/ocr/ppocr/classifier.cc
+++ b/fastdeploy/vision/ocr/ppocr/classifier.cc
@@ -32,7 +32,7 @@ Classifier::Classifier(const std::string& model_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;
diff --git a/fastdeploy/vision/ocr/ppocr/dbdetector.cc b/fastdeploy/vision/ocr/ppocr/dbdetector.cc
index 3245d2dcc..306869d19 100755
--- a/fastdeploy/vision/ocr/ppocr/dbdetector.cc
+++ b/fastdeploy/vision/ocr/ppocr/dbdetector.cc
@@ -32,7 +32,7 @@ DBDetector::DBDetector(const std::string& model_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
diff --git a/fastdeploy/vision/ocr/ppocr/recognizer.cc b/fastdeploy/vision/ocr/ppocr/recognizer.cc
index 3cbe0c00c..d967df0e2 100755
--- a/fastdeploy/vision/ocr/ppocr/recognizer.cc
+++ b/fastdeploy/vision/ocr/ppocr/recognizer.cc
@@ -34,7 +34,7 @@ Recognizer::Recognizer(const std::string& model_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
diff --git a/fastdeploy/vision/segmentation/ppseg/model.cc b/fastdeploy/vision/segmentation/ppseg/model.cc
index 9c158ef10..7baa7ac7f 100755
--- a/fastdeploy/vision/segmentation/ppseg/model.cc
+++ b/fastdeploy/vision/segmentation/ppseg/model.cc
@@ -29,7 +29,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_rknpu_backends = {Backend::RKNPU2};
valid_timvx_backends = {Backend::LITE};
- valid_xpu_backends = {Backend::LITE};
+ valid_kunlunxin_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
runtime_option = custom_option;
runtime_option.model_format = model_format;
diff --git a/java/android/app/src/main/AndroidManifest.xml b/java/android/app/src/main/AndroidManifest.xml
index 9a809c9a4..51852eca7 100644
--- a/java/android/app/src/main/AndroidManifest.xml
+++ b/java/android/app/src/main/AndroidManifest.xml
@@ -22,8 +22,7 @@
+ android:name=".segmentation.SegmentationMainActivity">
results = new ArrayList<>();
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+
+ // Fullscreen
+ requestWindowFeature(Window.FEATURE_NO_TITLE);
+ getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN);
+
+ setContentView(R.layout.face_align_activity_main);
+
+ // Clear all setting items to avoid the app crashing due to incorrect settings
+ initSettings();
+
+ // Check and request CAMERA and WRITE_EXTERNAL_STORAGE permissions
+ if (!checkAllPermissions()) {
+ requestAllPermissions();
+ }
+
+ // Init the camera preview and UI components
+ initView();
+ }
+
+ @SuppressLint("NonConstantResourceId")
+ @Override
+ public void onClick(View v) {
+ switch (v.getId()) {
+ case R.id.btn_switch:
+ svPreview.switchCamera();
+ break;
+ case R.id.btn_shutter:
+ TYPE = BTN_SHUTTER;
+ shutterAndPauseCamera();
+ resultView.setAdapter(null);
+ break;
+ case R.id.btn_settings:
+ startActivity(new Intent(FaceAlignMainActivity.this, FaceAlignSettingsActivity.class));
+ break;
+ case R.id.realtime_toggle_btn:
+ toggleRealtimeStyle();
+ break;
+ case R.id.back_in_preview:
+ finish();
+ break;
+ case R.id.album_select:
+ TYPE = ALBUM_SELECT;
+ // Check whether the storage permission has been granted.
+ if (ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
+ // Permission has not been granted yet, so request it from the user.
+ ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.WRITE_EXTERNAL_STORAGE}, REQUEST_PERMISSION_CODE_STORAGE);
+ } else {
+ Intent intent = new Intent(Intent.ACTION_PICK);
+ intent.setType("image/*");
+ startActivityForResult(intent, INTENT_CODE_PICK_IMAGE);
+ }
+ resultView.setAdapter(null);
+ break;
+ case R.id.back_in_result:
+ back();
+ break;
+ }
+ }
+
+ @Override
+ public void onBackPressed() {
+ super.onBackPressed();
+ back();
+ }
+
+ private void back() {
+ resultPageView.setVisibility(View.GONE);
+ cameraPageView.setVisibility(View.VISIBLE);
+ TYPE = REALTIME_DETECT;
+ isShutterBitmapCopied = false;
+ svPreview.onResume();
+ results.clear();
+ }
+
+ private void shutterAndPauseCamera() {
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ // Sleep for a while to make sure the picture has been captured.
+ Thread.sleep(TIME_SLEEP_INTERVAL * 10); // 500ms
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ runOnUiThread(new Runnable() {
+ @SuppressLint("SetTextI18n")
+ public void run() {
+ // This code runs on the main (UI) thread.
+ svPreview.onPause();
+ cameraPageView.setVisibility(View.GONE);
+ resultPageView.setVisibility(View.VISIBLE);
+ if (shutterBitmap != null && !shutterBitmap.isRecycled()) {
+ detail(shutterBitmap);
+ } else {
+ new AlertDialog.Builder(FaceAlignMainActivity.this)
+ .setTitle("Empty Result!")
+ .setMessage("Current picture is empty, please shutting it again!")
+ .setCancelable(true)
+ .show();
+ }
+ }
+ });
+ }
+ }).start();
+ }
+
+ private void copyBitmapFromCamera(Bitmap ARGB8888ImageBitmap) {
+ if (isShutterBitmapCopied || ARGB8888ImageBitmap == null) {
+ return;
+ }
+ if (!ARGB8888ImageBitmap.isRecycled()) {
+ synchronized (this) {
+ shutterBitmap = ARGB8888ImageBitmap.copy(Bitmap.Config.ARGB_8888, true);
+ }
+ SystemClock.sleep(TIME_SLEEP_INTERVAL);
+ isShutterBitmapCopied = true;
+ }
+ }
+
+ @Override
+ protected void onActivityResult(int requestCode, int resultCode, Intent data) {
+ super.onActivityResult(requestCode, resultCode, data);
+ if (requestCode == INTENT_CODE_PICK_IMAGE) {
+ if (resultCode == Activity.RESULT_OK) {
+ cameraPageView.setVisibility(View.GONE);
+ resultPageView.setVisibility(View.VISIBLE);
+ Uri uri = data.getData();
+ String path = getRealPathFromURI(this, uri);
+ Bitmap bitmap = decodeBitmap(path, 720, 1280);
+ picBitmap = bitmap.copy(Bitmap.Config.ARGB_8888, true);
+ SystemClock.sleep(TIME_SLEEP_INTERVAL * 10); // 500ms
+ detail(picBitmap);
+ }
+ }
+ }
+
+ private void toggleRealtimeStyle() {
+ if (isRealtimeStatusRunning) {
+ isRealtimeStatusRunning = false;
+ realtimeToggleButton.setImageResource(R.drawable.realtime_stop_btn);
+ svPreview.setOnTextureChangedListener(this);
+ tvStatus.setVisibility(View.VISIBLE);
+ } else {
+ isRealtimeStatusRunning = true;
+ realtimeToggleButton.setImageResource(R.drawable.realtime_start_btn);
+ tvStatus.setVisibility(View.GONE);
+ isShutterBitmapCopied = false;
+ // The camera keeps running but the detection loop is paused.
+ svPreview.setOnTextureChangedListener(new CameraSurfaceView.OnTextureChangedListener() {
+ @Override
+ public boolean onTextureChanged(Bitmap ARGB8888ImageBitmap) {
+ if (TYPE == BTN_SHUTTER) {
+ copyBitmapFromCamera(ARGB8888ImageBitmap);
+ }
+ return false;
+ }
+ });
+ }
+ }
+
+ @Override
+ public boolean onTextureChanged(Bitmap ARGB8888ImageBitmap) {
+ if (TYPE == BTN_SHUTTER) {
+ copyBitmapFromCamera(ARGB8888ImageBitmap);
+ return false;
+ }
+
+ boolean modified = false;
+ // TODO: 2022/12/20
+// SegmentationResult result = new SegmentationResult();
+// result.setCxxBufferFlag(true);
+//
+ //long tc = System.currentTimeMillis();
+// predictor.predict(ARGB8888ImageBitmap, result);
+ //timeElapsed += (System.currentTimeMillis() - tc);
+//
+// Visualize.visSegmentation(ARGB8888ImageBitmap, result);
+// modified = result.initialized();
+//
+// result.releaseCxxBuffer();
+
+// frameCounter++;
+// if (frameCounter >= 30) {
+// final int fps = (int) (1000 / (timeElapsed / 30));
+// runOnUiThread(new Runnable() {
+// @SuppressLint("SetTextI18n")
+// public void run() {
+// tvStatus.setText(Integer.toString(fps) + "fps");
+// }
+// });
+// frameCounter = 0;
+// timeElapsed = 0;
+// }
+ return modified;
+ }
+
+ @Override
+ protected void onResume() {
+ super.onResume();
+ // Reload settings and re-initialize the predictor
+ checkAndUpdateSettings();
+ // Do not open the camera until the permissions have been granted
+ if (!checkAllPermissions()) {
+ svPreview.disableCamera();
+ } else {
+ svPreview.enableCamera();
+ }
+ svPreview.onResume();
+ }
+
+ @Override
+ protected void onPause() {
+ super.onPause();
+ svPreview.onPause();
+ }
+
+ @Override
+ protected void onDestroy() {
+ // TODO: 2022/12/20
+// if (predictor != null) {
+// predictor.release();
+// }
+ super.onDestroy();
+ }
+
+ public void initView() {
+ TYPE = REALTIME_DETECT;
+ CameraSurfaceView.EXPECTED_PREVIEW_WIDTH = 480;
+ CameraSurfaceView.EXPECTED_PREVIEW_HEIGHT = 480;
+ svPreview = (CameraSurfaceView) findViewById(R.id.sv_preview);
+ svPreview.setOnTextureChangedListener(this);
+ svPreview.switchCamera();
+
+ tvStatus = (TextView) findViewById(R.id.tv_status);
+ btnSwitch = (ImageButton) findViewById(R.id.btn_switch);
+ btnSwitch.setOnClickListener(this);
+ btnShutter = (ImageButton) findViewById(R.id.btn_shutter);
+ btnShutter.setOnClickListener(this);
+ btnSettings = (ImageButton) findViewById(R.id.btn_settings);
+ btnSettings.setOnClickListener(this);
+ realtimeToggleButton = findViewById(R.id.realtime_toggle_btn);
+ realtimeToggleButton.setOnClickListener(this);
+ backInPreview = findViewById(R.id.back_in_preview);
+ backInPreview.setOnClickListener(this);
+ albumSelectButton = findViewById(R.id.album_select);
+ albumSelectButton.setOnClickListener(this);
+ cameraPageView = findViewById(R.id.camera_page);
+ resultPageView = findViewById(R.id.result_page);
+ resultImage = findViewById(R.id.result_image);
+ backInResult = findViewById(R.id.back_in_result);
+ backInResult.setOnClickListener(this);
+ resultView = findViewById(R.id.result_list_view);
+ }
+
+ private void detail(Bitmap bitmap) {
+ // TODO: 2022/12/20
+ //predictor.predict(bitmap, true, 0.7f);
+ resultImage.setImageBitmap(bitmap);
+ }
+
+ @SuppressLint("ApplySharedPref")
+ public void initSettings() {
+ SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(this);
+ SharedPreferences.Editor editor = sharedPreferences.edit();
+ editor.clear();
+ editor.commit();
+ FaceAlignSettingsActivity.resetSettings();
+ }
+
+ public void checkAndUpdateSettings() {
+ if (FaceAlignSettingsActivity.checkAndUpdateSettings(this)) {
+ String realModelDir = getCacheDir() + "/" + FaceAlignSettingsActivity.modelDir;
+ Utils.copyDirectoryFromAssets(this, FaceAlignSettingsActivity.modelDir, realModelDir);
+
+ String modelFile = realModelDir + "/" + "model.pdmodel";
+ String paramsFile = realModelDir + "/" + "model.pdiparams";
+ String configFile = realModelDir + "/" + "deploy.yaml";
+ RuntimeOption option = new RuntimeOption();
+ option.setCpuThreadNum(FaceAlignSettingsActivity.cpuThreadNum);
+ option.setLitePowerMode(FaceAlignSettingsActivity.cpuPowerMode);
+ if (Boolean.parseBoolean(FaceAlignSettingsActivity.enableLiteFp16)) {
+ option.enableLiteFp16();
+ }
+ // TODO: 2022/12/20
+ //predictor.setIsVerticalScreen(true);
+ //predictor.init(modelFile, paramsFile, configFile, option);
+ }
+ }
+
+ @Override
+ public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions,
+ @NonNull int[] grantResults) {
+ super.onRequestPermissionsResult(requestCode, permissions, grantResults);
+ if (grantResults[0] != PackageManager.PERMISSION_GRANTED || grantResults[1] != PackageManager.PERMISSION_GRANTED) {
+ new AlertDialog.Builder(FaceAlignMainActivity.this)
+ .setTitle("Permission denied")
+ .setMessage("Click to force quit the app, then open Settings->Apps & notifications->Target " +
+ "App->Permissions to grant all of the permissions.")
+ .setCancelable(false)
+ .setPositiveButton("Exit", new DialogInterface.OnClickListener() {
+ @Override
+ public void onClick(DialogInterface dialog, int which) {
+ FaceAlignMainActivity.this.finish();
+ }
+ }).show();
+ }
+ }
+
+ private void requestAllPermissions() {
+ ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.WRITE_EXTERNAL_STORAGE,
+ Manifest.permission.CAMERA}, 0);
+ }
+
+ private boolean checkAllPermissions() {
+ return ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) == PackageManager.PERMISSION_GRANTED
+ && ContextCompat.checkSelfPermission(this, Manifest.permission.CAMERA) == PackageManager.PERMISSION_GRANTED;
+ }
+
+}
diff --git a/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/facealign/FaceAlignSettingsActivity.java b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/facealign/FaceAlignSettingsActivity.java
new file mode 100644
index 000000000..8af92a288
--- /dev/null
+++ b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/facealign/FaceAlignSettingsActivity.java
@@ -0,0 +1,164 @@
+package com.baidu.paddle.fastdeploy.app.examples.facealign;
+
+import android.annotation.SuppressLint;
+import android.content.Context;
+import android.content.SharedPreferences;
+import android.os.Bundle;
+import android.preference.EditTextPreference;
+import android.preference.ListPreference;
+import android.preference.PreferenceManager;
+import android.support.v7.app.ActionBar;
+
+import com.baidu.paddle.fastdeploy.app.examples.R;
+import com.baidu.paddle.fastdeploy.ui.Utils;
+import com.baidu.paddle.fastdeploy.ui.view.AppCompatPreferenceActivity;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class FaceAlignSettingsActivity extends AppCompatPreferenceActivity implements
+ SharedPreferences.OnSharedPreferenceChangeListener {
+ private static final String TAG = FaceAlignSettingsActivity.class.getSimpleName();
+
+ static public int selectedModelIdx = -1;
+ static public String modelDir = "";
+ static public int cpuThreadNum = 2;
+ static public String cpuPowerMode = "";
+ static public String enableLiteFp16 = "true";
+
+ ListPreference lpChoosePreInstalledModel = null;
+ EditTextPreference etModelDir = null;
+ ListPreference lpCPUThreadNum = null;
+ ListPreference lpCPUPowerMode = null;
+ ListPreference lpEnableLiteFp16 = null;
+
+    List<String> preInstalledModelDirs = null;
+    List<String> preInstalledCPUThreadNums = null;
+    List<String> preInstalledCPUPowerModes = null;
+    List<String> preInstalledEnableLiteFp16s = null;
+
+ @Override
+ public void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ addPreferencesFromResource(R.xml.face_align_settings);
+ ActionBar supportActionBar = getSupportActionBar();
+ if (supportActionBar != null) {
+ supportActionBar.setDisplayHomeAsUpEnabled(true);
+ }
+
+ // Initialize pre-installed models
+        preInstalledModelDirs = new ArrayList<String>();
+        preInstalledCPUThreadNums = new ArrayList<String>();
+        preInstalledCPUPowerModes = new ArrayList<String>();
+        preInstalledEnableLiteFp16s = new ArrayList<String>();
+ preInstalledModelDirs.add(getString(R.string.FACE_ALIGN_MODEL_DIR_DEFAULT));
+ preInstalledCPUThreadNums.add(getString(R.string.CPU_THREAD_NUM_DEFAULT));
+ preInstalledCPUPowerModes.add(getString(R.string.CPU_POWER_MODE_DEFAULT));
+ preInstalledEnableLiteFp16s.add(getString(R.string.ENABLE_LITE_FP16_MODE_DEFAULT));
+
+ // Setup UI components
+ lpChoosePreInstalledModel =
+ (ListPreference) findPreference(getString(R.string.CHOOSE_PRE_INSTALLED_MODEL_KEY));
+ String[] preInstalledModelNames = new String[preInstalledModelDirs.size()];
+ for (int i = 0; i < preInstalledModelDirs.size(); i++) {
+ preInstalledModelNames[i] = preInstalledModelDirs.get(i).substring(preInstalledModelDirs.get(i).lastIndexOf("/") + 1);
+ }
+ lpChoosePreInstalledModel.setEntries(preInstalledModelNames);
+ lpChoosePreInstalledModel.setEntryValues(preInstalledModelDirs.toArray(new String[preInstalledModelDirs.size()]));
+ lpCPUThreadNum = (ListPreference) findPreference(getString(R.string.CPU_THREAD_NUM_KEY));
+ lpCPUPowerMode = (ListPreference) findPreference(getString(R.string.CPU_POWER_MODE_KEY));
+ etModelDir = (EditTextPreference) findPreference(getString(R.string.MODEL_DIR_KEY));
+ etModelDir.setTitle("Model dir (SDCard: " + Utils.getSDCardDirectory() + ")");
+ lpEnableLiteFp16 = (ListPreference) findPreference(getString(R.string.ENABLE_LITE_FP16_MODE_KEY));
+ }
+
+ @SuppressLint("ApplySharedPref")
+ private void reloadSettingsAndUpdateUI() {
+ SharedPreferences sharedPreferences = getPreferenceScreen().getSharedPreferences();
+
+ String selected_model_dir = sharedPreferences.getString(getString(R.string.CHOOSE_PRE_INSTALLED_MODEL_KEY),
+ getString(R.string.FACE_ALIGN_MODEL_DIR_DEFAULT));
+ int selected_model_idx = lpChoosePreInstalledModel.findIndexOfValue(selected_model_dir);
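+        // When a different pre-installed model is selected, write its default thread num,
+        // power mode and FP16 flag into the shared preferences before reloading the UI.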
+ if (selected_model_idx >= 0 && selected_model_idx < preInstalledModelDirs.size() && selected_model_idx != selectedModelIdx) {
+ SharedPreferences.Editor editor = sharedPreferences.edit();
+ editor.putString(getString(R.string.MODEL_DIR_KEY), preInstalledModelDirs.get(selected_model_idx));
+ editor.putString(getString(R.string.CPU_THREAD_NUM_KEY), preInstalledCPUThreadNums.get(selected_model_idx));
+ editor.putString(getString(R.string.CPU_POWER_MODE_KEY), preInstalledCPUPowerModes.get(selected_model_idx));
+            editor.putString(getString(R.string.ENABLE_LITE_FP16_MODE_KEY), preInstalledEnableLiteFp16s.get(selected_model_idx));
+ editor.commit();
+ lpChoosePreInstalledModel.setSummary(selected_model_dir);
+ selectedModelIdx = selected_model_idx;
+ }
+
+ String model_dir = sharedPreferences.getString(getString(R.string.MODEL_DIR_KEY),
+ getString(R.string.FACE_ALIGN_MODEL_DIR_DEFAULT));
+ String cpu_thread_num = sharedPreferences.getString(getString(R.string.CPU_THREAD_NUM_KEY),
+ getString(R.string.CPU_THREAD_NUM_DEFAULT));
+ String cpu_power_mode = sharedPreferences.getString(getString(R.string.CPU_POWER_MODE_KEY),
+ getString(R.string.CPU_POWER_MODE_DEFAULT));
+ String enable_lite_fp16 = sharedPreferences.getString(getString(R.string.ENABLE_LITE_FP16_MODE_KEY),
+ getString(R.string.ENABLE_LITE_FP16_MODE_DEFAULT));
+
+ etModelDir.setSummary(model_dir);
+ lpCPUThreadNum.setValue(cpu_thread_num);
+ lpCPUThreadNum.setSummary(cpu_thread_num);
+ lpCPUPowerMode.setValue(cpu_power_mode);
+ lpCPUPowerMode.setSummary(cpu_power_mode);
+ lpEnableLiteFp16.setValue(enable_lite_fp16);
+ lpEnableLiteFp16.setSummary(enable_lite_fp16);
+
+ }
+
+ static boolean checkAndUpdateSettings(Context ctx) {
+ boolean settingsChanged = false;
+ SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(ctx);
+
+ String model_dir = sharedPreferences.getString(ctx.getString(R.string.MODEL_DIR_KEY),
+ ctx.getString(R.string.FACE_ALIGN_MODEL_DIR_DEFAULT));
+ settingsChanged |= !modelDir.equalsIgnoreCase(model_dir);
+ modelDir = model_dir;
+
+ String cpu_thread_num = sharedPreferences.getString(ctx.getString(R.string.CPU_THREAD_NUM_KEY),
+ ctx.getString(R.string.CPU_THREAD_NUM_DEFAULT));
+ settingsChanged |= cpuThreadNum != Integer.parseInt(cpu_thread_num);
+ cpuThreadNum = Integer.parseInt(cpu_thread_num);
+
+ String cpu_power_mode = sharedPreferences.getString(ctx.getString(R.string.CPU_POWER_MODE_KEY),
+ ctx.getString(R.string.CPU_POWER_MODE_DEFAULT));
+ settingsChanged |= !cpuPowerMode.equalsIgnoreCase(cpu_power_mode);
+ cpuPowerMode = cpu_power_mode;
+
+ String enable_lite_fp16 = sharedPreferences.getString(ctx.getString(R.string.ENABLE_LITE_FP16_MODE_KEY),
+ ctx.getString(R.string.ENABLE_LITE_FP16_MODE_DEFAULT));
+ settingsChanged |= !enableLiteFp16.equalsIgnoreCase(enable_lite_fp16);
+ enableLiteFp16 = enable_lite_fp16;
+
+ return settingsChanged;
+ }
+
+ static void resetSettings() {
+ selectedModelIdx = -1;
+ modelDir = "";
+ cpuThreadNum = 2;
+ cpuPowerMode = "";
+ enableLiteFp16 = "true";
+ }
+
+ @Override
+ protected void onResume() {
+ super.onResume();
+ getPreferenceScreen().getSharedPreferences().registerOnSharedPreferenceChangeListener(this);
+ reloadSettingsAndUpdateUI();
+ }
+
+ @Override
+ protected void onPause() {
+ super.onPause();
+ getPreferenceScreen().getSharedPreferences().unregisterOnSharedPreferenceChangeListener(this);
+ }
+
+ @Override
+ public void onSharedPreferenceChanged(SharedPreferences sharedPreferences, String key) {
+ reloadSettingsAndUpdateUI();
+ }
+}
diff --git a/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/facealign/FaceAlignWelcomeActivity.java b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/facealign/FaceAlignWelcomeActivity.java
new file mode 100644
index 000000000..92acc24b4
--- /dev/null
+++ b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/facealign/FaceAlignWelcomeActivity.java
@@ -0,0 +1,31 @@
+package com.baidu.paddle.fastdeploy.app.examples.facealign;
+
+import android.app.Activity;
+import android.content.Intent;
+import android.graphics.Color;
+import android.os.Build;
+import android.os.Bundle;
+import android.support.annotation.Nullable;
+import android.view.View;
+
+import com.baidu.paddle.fastdeploy.app.examples.R;
+
+public class FaceAlignWelcomeActivity extends Activity {
+ @Override
+ protected void onCreate(@Nullable Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ if (Build.VERSION.SDK_INT > Build.VERSION_CODES.LOLLIPOP) {
+ getWindow().getDecorView().setSystemUiVisibility(View.SYSTEM_UI_FLAG_LAYOUT_FULLSCREEN
+ | View.SYSTEM_UI_FLAG_LAYOUT_STABLE
+ );
+ getWindow().setStatusBarColor(Color.TRANSPARENT);
+ }
+ setContentView(R.layout.face_align_welcome);
+ }
+
+ public void startActivity(View view) {
+ Intent intent = new Intent(FaceAlignWelcomeActivity.this, FaceAlignMainActivity.class);
+ startActivity(intent);
+ }
+}
diff --git a/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/matting/MattingMainActivity.java b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/matting/MattingMainActivity.java
new file mode 100644
index 000000000..afba8bc92
--- /dev/null
+++ b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/matting/MattingMainActivity.java
@@ -0,0 +1,400 @@
+package com.baidu.paddle.fastdeploy.app.examples.matting;
+
+import static com.baidu.paddle.fastdeploy.ui.Utils.decodeBitmap;
+import static com.baidu.paddle.fastdeploy.ui.Utils.getRealPathFromURI;
+
+import android.Manifest;
+import android.annotation.SuppressLint;
+import android.app.Activity;
+import android.app.AlertDialog;
+import android.content.DialogInterface;
+import android.content.Intent;
+import android.content.SharedPreferences;
+import android.content.pm.PackageManager;
+import android.graphics.Bitmap;
+import android.net.Uri;
+import android.os.Bundle;
+import android.os.SystemClock;
+import android.preference.PreferenceManager;
+import android.support.annotation.NonNull;
+import android.support.v4.app.ActivityCompat;
+import android.support.v4.content.ContextCompat;
+import android.view.View;
+import android.view.ViewGroup;
+import android.view.Window;
+import android.view.WindowManager;
+import android.widget.ImageButton;
+import android.widget.ImageView;
+import android.widget.TextView;
+
+import com.baidu.paddle.fastdeploy.RuntimeOption;
+import com.baidu.paddle.fastdeploy.app.examples.R;
+import com.baidu.paddle.fastdeploy.ui.Utils;
+import com.baidu.paddle.fastdeploy.ui.view.CameraSurfaceView;
+import com.baidu.paddle.fastdeploy.ui.view.ResultListView;
+import com.baidu.paddle.fastdeploy.ui.view.model.BaseResultModel;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class MattingMainActivity extends Activity implements View.OnClickListener, CameraSurfaceView.OnTextureChangedListener {
+ private static final String TAG = MattingMainActivity.class.getSimpleName();
+
+
+ CameraSurfaceView svPreview;
+ TextView tvStatus;
+ ImageButton btnSwitch;
+ ImageButton btnShutter;
+ ImageButton btnSettings;
+ ImageView realtimeToggleButton;
+ boolean isRealtimeStatusRunning = false;
+ ImageView backInPreview;
+ private ImageView albumSelectButton;
+ private View cameraPageView;
+ private ViewGroup resultPageView;
+ private ImageView resultImage;
+ private ImageView backInResult;
+ private ResultListView resultView;
+ private Bitmap shutterBitmap;
+ private Bitmap picBitmap;
+ private boolean isShutterBitmapCopied = false;
+
+ public static final int TYPE_UNKNOWN = -1;
+ public static final int BTN_SHUTTER = 0;
+ public static final int ALBUM_SELECT = 1;
+ public static final int REALTIME_DETECT = 2;
+ private static int TYPE = REALTIME_DETECT;
+
+ private static final int REQUEST_PERMISSION_CODE_STORAGE = 101;
+ private static final int INTENT_CODE_PICK_IMAGE = 100;
+ private static final int TIME_SLEEP_INTERVAL = 50; // ms
+
+ long timeElapsed = 0;
+ long frameCounter = 0;
+
+ // todo Call 'init' and 'release' manually later
+ //PaddleSegModel predictor = new PaddleSegModel();
+    private List<BaseResultModel> results = new ArrayList<>();
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+
+ // Fullscreen
+ requestWindowFeature(Window.FEATURE_NO_TITLE);
+ getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN);
+
+ setContentView(R.layout.matting_activity_main);
+
+ // Clear all setting items to avoid app crashing due to the incorrect settings
+ initSettings();
+
+ // Check and request CAMERA and WRITE_EXTERNAL_STORAGE permissions
+ if (!checkAllPermissions()) {
+ requestAllPermissions();
+ }
+
+ // Init the camera preview and UI components
+ initView();
+ }
+
+ @SuppressLint("NonConstantResourceId")
+ @Override
+ public void onClick(View v) {
+ switch (v.getId()) {
+ case R.id.btn_switch:
+ svPreview.switchCamera();
+ break;
+ case R.id.btn_shutter:
+ TYPE = BTN_SHUTTER;
+ shutterAndPauseCamera();
+ resultView.setAdapter(null);
+ break;
+ case R.id.btn_settings:
+ startActivity(new Intent(MattingMainActivity.this, MattingSettingsActivity.class));
+ break;
+ case R.id.realtime_toggle_btn:
+ toggleRealtimeStyle();
+ break;
+ case R.id.back_in_preview:
+ finish();
+ break;
+ case R.id.album_select:
+ TYPE = ALBUM_SELECT;
+                // Check whether the storage permission has been granted.
+ if (ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
+                    // Not granted yet, request it before opening the gallery.
+ ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.WRITE_EXTERNAL_STORAGE}, REQUEST_PERMISSION_CODE_STORAGE);
+ } else {
+ Intent intent = new Intent(Intent.ACTION_PICK);
+ intent.setType("image/*");
+ startActivityForResult(intent, INTENT_CODE_PICK_IMAGE);
+ }
+ resultView.setAdapter(null);
+ break;
+ case R.id.back_in_result:
+ back();
+ break;
+ }
+ }
+
+ @Override
+ public void onBackPressed() {
+ super.onBackPressed();
+ back();
+ }
+
+ private void back() {
+ resultPageView.setVisibility(View.GONE);
+ cameraPageView.setVisibility(View.VISIBLE);
+ TYPE = REALTIME_DETECT;
+ isShutterBitmapCopied = false;
+ svPreview.onResume();
+ results.clear();
+ }
+
+ private void shutterAndPauseCamera() {
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+                    // Sleep briefly to make sure the shutter frame has been captured.
+ Thread.sleep(TIME_SLEEP_INTERVAL * 10); // 500ms
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ runOnUiThread(new Runnable() {
+ @SuppressLint("SetTextI18n")
+ public void run() {
+                        // This code runs on the UI thread.
+ svPreview.onPause();
+ cameraPageView.setVisibility(View.GONE);
+ resultPageView.setVisibility(View.VISIBLE);
+ if (shutterBitmap != null && !shutterBitmap.isRecycled()) {
+ detail(shutterBitmap);
+ } else {
+ new AlertDialog.Builder(MattingMainActivity.this)
+ .setTitle("Empty Result!")
+                                    .setMessage("Current picture is empty, please take it again!")
+ .setCancelable(true)
+ .show();
+ }
+ }
+ });
+ }
+ }).start();
+ }
+
+ private void copyBitmapFromCamera(Bitmap ARGB8888ImageBitmap) {
+ if (isShutterBitmapCopied || ARGB8888ImageBitmap == null) {
+ return;
+ }
+ if (!ARGB8888ImageBitmap.isRecycled()) {
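+            // Copy the preview frame under a lock so the camera thread cannot recycle
+            // or overwrite it while the shutter copy is in progress.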
+ synchronized (this) {
+ shutterBitmap = ARGB8888ImageBitmap.copy(Bitmap.Config.ARGB_8888, true);
+ }
+ SystemClock.sleep(TIME_SLEEP_INTERVAL);
+ isShutterBitmapCopied = true;
+ }
+ }
+
+ @Override
+ protected void onActivityResult(int requestCode, int resultCode, Intent data) {
+ super.onActivityResult(requestCode, resultCode, data);
+ if (requestCode == INTENT_CODE_PICK_IMAGE) {
+ if (resultCode == Activity.RESULT_OK) {
+ cameraPageView.setVisibility(View.GONE);
+ resultPageView.setVisibility(View.VISIBLE);
+ Uri uri = data.getData();
+ String path = getRealPathFromURI(this, uri);
+ Bitmap bitmap = decodeBitmap(path, 720, 1280);
+ picBitmap = bitmap.copy(Bitmap.Config.ARGB_8888, true);
+ SystemClock.sleep(TIME_SLEEP_INTERVAL * 10); // 500ms
+ detail(picBitmap);
+ }
+ }
+ }
+
+ private void toggleRealtimeStyle() {
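+        // Toggle between realtime detection (frames handled by this activity's listener)
+        // and a paused mode that only copies frames when the shutter button is pressed.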
+ if (isRealtimeStatusRunning) {
+ isRealtimeStatusRunning = false;
+ realtimeToggleButton.setImageResource(R.drawable.realtime_stop_btn);
+ svPreview.setOnTextureChangedListener(this);
+ tvStatus.setVisibility(View.VISIBLE);
+ } else {
+ isRealtimeStatusRunning = true;
+ realtimeToggleButton.setImageResource(R.drawable.realtime_start_btn);
+ tvStatus.setVisibility(View.GONE);
+ isShutterBitmapCopied = false;
+            // The camera keeps running, but the detection loop is paused.
+ svPreview.setOnTextureChangedListener(new CameraSurfaceView.OnTextureChangedListener() {
+ @Override
+ public boolean onTextureChanged(Bitmap ARGB8888ImageBitmap) {
+ if (TYPE == BTN_SHUTTER) {
+ copyBitmapFromCamera(ARGB8888ImageBitmap);
+ }
+ return false;
+ }
+ });
+ }
+ }
+
+ @Override
+ public boolean onTextureChanged(Bitmap ARGB8888ImageBitmap) {
+ if (TYPE == BTN_SHUTTER) {
+ copyBitmapFromCamera(ARGB8888ImageBitmap);
+ return false;
+ }
+
+ boolean modified = false;
+ // TODO: 2022/12/20
+// SegmentationResult result = new SegmentationResult();
+// result.setCxxBufferFlag(true);
+//
+ //long tc = System.currentTimeMillis();
+// predictor.predict(ARGB8888ImageBitmap, result);
+ //timeElapsed += (System.currentTimeMillis() - tc);
+//
+// Visualize.visSegmentation(ARGB8888ImageBitmap, result);
+// modified = result.initialized();
+//
+// result.releaseCxxBuffer();
+
+// frameCounter++;
+// if (frameCounter >= 30) {
+// final int fps = (int) (1000 / (timeElapsed / 30));
+// runOnUiThread(new Runnable() {
+// @SuppressLint("SetTextI18n")
+// public void run() {
+// tvStatus.setText(Integer.toString(fps) + "fps");
+// }
+// });
+// frameCounter = 0;
+// timeElapsed = 0;
+// }
+ return modified;
+ }
+
+ @Override
+ protected void onResume() {
+ super.onResume();
+ // Reload settings and re-initialize the predictor
+ checkAndUpdateSettings();
+        // Only enable the camera once the permissions have been granted
+ if (!checkAllPermissions()) {
+ svPreview.disableCamera();
+ } else {
+ svPreview.enableCamera();
+ }
+ svPreview.onResume();
+ }
+
+ @Override
+ protected void onPause() {
+ super.onPause();
+ svPreview.onPause();
+ }
+
+ @Override
+ protected void onDestroy() {
+ // TODO: 2022/12/20
+// if (predictor != null) {
+// predictor.release();
+// }
+ super.onDestroy();
+ }
+
+ public void initView() {
+ TYPE = REALTIME_DETECT;
+ CameraSurfaceView.EXPECTED_PREVIEW_WIDTH = 480;
+ CameraSurfaceView.EXPECTED_PREVIEW_HEIGHT = 480;
+ svPreview = (CameraSurfaceView) findViewById(R.id.sv_preview);
+ svPreview.setOnTextureChangedListener(this);
+ svPreview.switchCamera();
+
+ tvStatus = (TextView) findViewById(R.id.tv_status);
+ btnSwitch = (ImageButton) findViewById(R.id.btn_switch);
+ btnSwitch.setOnClickListener(this);
+ btnShutter = (ImageButton) findViewById(R.id.btn_shutter);
+ btnShutter.setOnClickListener(this);
+ btnSettings = (ImageButton) findViewById(R.id.btn_settings);
+ btnSettings.setOnClickListener(this);
+ realtimeToggleButton = findViewById(R.id.realtime_toggle_btn);
+ realtimeToggleButton.setOnClickListener(this);
+ backInPreview = findViewById(R.id.back_in_preview);
+ backInPreview.setOnClickListener(this);
+ albumSelectButton = findViewById(R.id.album_select);
+ albumSelectButton.setOnClickListener(this);
+ cameraPageView = findViewById(R.id.camera_page);
+ resultPageView = findViewById(R.id.result_page);
+ resultImage = findViewById(R.id.result_image);
+ backInResult = findViewById(R.id.back_in_result);
+ backInResult.setOnClickListener(this);
+ resultView = findViewById(R.id.result_list_view);
+ }
+
+ private void detail(Bitmap bitmap) {
+ // TODO: 2022/12/20
+ //predictor.predict(bitmap, true, 0.7f);
+ resultImage.setImageBitmap(bitmap);
+ }
+
+ @SuppressLint("ApplySharedPref")
+ public void initSettings() {
+ SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(this);
+ SharedPreferences.Editor editor = sharedPreferences.edit();
+ editor.clear();
+ editor.commit();
+ MattingSettingsActivity.resetSettings();
+ }
+
+ public void checkAndUpdateSettings() {
+ if (MattingSettingsActivity.checkAndUpdateSettings(this)) {
+ String realModelDir = getCacheDir() + "/" + MattingSettingsActivity.modelDir;
+ Utils.copyDirectoryFromAssets(this, MattingSettingsActivity.modelDir, realModelDir);
+
+ String modelFile = realModelDir + "/" + "model.pdmodel";
+ String paramsFile = realModelDir + "/" + "model.pdiparams";
+ String configFile = realModelDir + "/" + "deploy.yaml";
+ RuntimeOption option = new RuntimeOption();
+ option.setCpuThreadNum(MattingSettingsActivity.cpuThreadNum);
+ option.setLitePowerMode(MattingSettingsActivity.cpuPowerMode);
+ if (Boolean.parseBoolean(MattingSettingsActivity.enableLiteFp16)) {
+ option.enableLiteFp16();
+ }
+ // TODO: 2022/12/20
+ //predictor.setIsVerticalScreen(true);
+ //predictor.init(modelFile, paramsFile, configFile, option);
+ }
+ }
+
+ @Override
+ public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions,
+ @NonNull int[] grantResults) {
+ super.onRequestPermissionsResult(requestCode, permissions, grantResults);
+ if (grantResults[0] != PackageManager.PERMISSION_GRANTED || grantResults[1] != PackageManager.PERMISSION_GRANTED) {
+ new AlertDialog.Builder(MattingMainActivity.this)
+ .setTitle("Permission denied")
+ .setMessage("Click to force quit the app, then open Settings->Apps & notifications->Target " +
+ "App->Permissions to grant all of the permissions.")
+ .setCancelable(false)
+ .setPositiveButton("Exit", new DialogInterface.OnClickListener() {
+ @Override
+ public void onClick(DialogInterface dialog, int which) {
+ MattingMainActivity.this.finish();
+ }
+ }).show();
+ }
+ }
+
+ private void requestAllPermissions() {
+ ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.WRITE_EXTERNAL_STORAGE,
+ Manifest.permission.CAMERA}, 0);
+ }
+
+ private boolean checkAllPermissions() {
+ return ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) == PackageManager.PERMISSION_GRANTED
+ && ContextCompat.checkSelfPermission(this, Manifest.permission.CAMERA) == PackageManager.PERMISSION_GRANTED;
+ }
+
+}
\ No newline at end of file
diff --git a/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/matting/MattingSettingsActivity.java b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/matting/MattingSettingsActivity.java
new file mode 100644
index 000000000..91be07da1
--- /dev/null
+++ b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/matting/MattingSettingsActivity.java
@@ -0,0 +1,164 @@
+package com.baidu.paddle.fastdeploy.app.examples.matting;
+
+import android.annotation.SuppressLint;
+import android.content.Context;
+import android.content.SharedPreferences;
+import android.os.Bundle;
+import android.preference.EditTextPreference;
+import android.preference.ListPreference;
+import android.preference.PreferenceManager;
+import android.support.v7.app.ActionBar;
+
+import com.baidu.paddle.fastdeploy.app.examples.R;
+import com.baidu.paddle.fastdeploy.ui.Utils;
+import com.baidu.paddle.fastdeploy.ui.view.AppCompatPreferenceActivity;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class MattingSettingsActivity extends AppCompatPreferenceActivity implements
+ SharedPreferences.OnSharedPreferenceChangeListener {
+ private static final String TAG = MattingSettingsActivity.class.getSimpleName();
+
+ static public int selectedModelIdx = -1;
+ static public String modelDir = "";
+ static public int cpuThreadNum = 2;
+ static public String cpuPowerMode = "";
+ static public String enableLiteFp16 = "true";
+
+ ListPreference lpChoosePreInstalledModel = null;
+ EditTextPreference etModelDir = null;
+ ListPreference lpCPUThreadNum = null;
+ ListPreference lpCPUPowerMode = null;
+ ListPreference lpEnableLiteFp16 = null;
+
+    List<String> preInstalledModelDirs = null;
+    List<String> preInstalledCPUThreadNums = null;
+    List<String> preInstalledCPUPowerModes = null;
+    List<String> preInstalledEnableLiteFp16s = null;
+
+ @Override
+ public void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ addPreferencesFromResource(R.xml.matting_settings);
+ ActionBar supportActionBar = getSupportActionBar();
+ if (supportActionBar != null) {
+ supportActionBar.setDisplayHomeAsUpEnabled(true);
+ }
+
+ // Initialize pre-installed models
+        preInstalledModelDirs = new ArrayList<String>();
+        preInstalledCPUThreadNums = new ArrayList<String>();
+        preInstalledCPUPowerModes = new ArrayList<String>();
+        preInstalledEnableLiteFp16s = new ArrayList<String>();
+ preInstalledModelDirs.add(getString(R.string.MATTING_MODEL_DIR_DEFAULT));
+ preInstalledCPUThreadNums.add(getString(R.string.CPU_THREAD_NUM_DEFAULT));
+ preInstalledCPUPowerModes.add(getString(R.string.CPU_POWER_MODE_DEFAULT));
+ preInstalledEnableLiteFp16s.add(getString(R.string.ENABLE_LITE_FP16_MODE_DEFAULT));
+
+ // Setup UI components
+ lpChoosePreInstalledModel =
+ (ListPreference) findPreference(getString(R.string.CHOOSE_PRE_INSTALLED_MODEL_KEY));
+ String[] preInstalledModelNames = new String[preInstalledModelDirs.size()];
+ for (int i = 0; i < preInstalledModelDirs.size(); i++) {
+ preInstalledModelNames[i] = preInstalledModelDirs.get(i).substring(preInstalledModelDirs.get(i).lastIndexOf("/") + 1);
+ }
+ lpChoosePreInstalledModel.setEntries(preInstalledModelNames);
+ lpChoosePreInstalledModel.setEntryValues(preInstalledModelDirs.toArray(new String[preInstalledModelDirs.size()]));
+ lpCPUThreadNum = (ListPreference) findPreference(getString(R.string.CPU_THREAD_NUM_KEY));
+ lpCPUPowerMode = (ListPreference) findPreference(getString(R.string.CPU_POWER_MODE_KEY));
+ etModelDir = (EditTextPreference) findPreference(getString(R.string.MODEL_DIR_KEY));
+ etModelDir.setTitle("Model dir (SDCard: " + Utils.getSDCardDirectory() + ")");
+ lpEnableLiteFp16 = (ListPreference) findPreference(getString(R.string.ENABLE_LITE_FP16_MODE_KEY));
+ }
+
+ @SuppressLint("ApplySharedPref")
+ private void reloadSettingsAndUpdateUI() {
+ SharedPreferences sharedPreferences = getPreferenceScreen().getSharedPreferences();
+
+ String selected_model_dir = sharedPreferences.getString(getString(R.string.CHOOSE_PRE_INSTALLED_MODEL_KEY),
+ getString(R.string.MATTING_MODEL_DIR_DEFAULT));
+ int selected_model_idx = lpChoosePreInstalledModel.findIndexOfValue(selected_model_dir);
+ if (selected_model_idx >= 0 && selected_model_idx < preInstalledModelDirs.size() && selected_model_idx != selectedModelIdx) {
+ SharedPreferences.Editor editor = sharedPreferences.edit();
+ editor.putString(getString(R.string.MODEL_DIR_KEY), preInstalledModelDirs.get(selected_model_idx));
+ editor.putString(getString(R.string.CPU_THREAD_NUM_KEY), preInstalledCPUThreadNums.get(selected_model_idx));
+ editor.putString(getString(R.string.CPU_POWER_MODE_KEY), preInstalledCPUPowerModes.get(selected_model_idx));
+            editor.putString(getString(R.string.ENABLE_LITE_FP16_MODE_KEY), preInstalledEnableLiteFp16s.get(selected_model_idx));
+ editor.commit();
+ lpChoosePreInstalledModel.setSummary(selected_model_dir);
+ selectedModelIdx = selected_model_idx;
+ }
+
+ String model_dir = sharedPreferences.getString(getString(R.string.MODEL_DIR_KEY),
+ getString(R.string.MATTING_MODEL_DIR_DEFAULT));
+ String cpu_thread_num = sharedPreferences.getString(getString(R.string.CPU_THREAD_NUM_KEY),
+ getString(R.string.CPU_THREAD_NUM_DEFAULT));
+ String cpu_power_mode = sharedPreferences.getString(getString(R.string.CPU_POWER_MODE_KEY),
+ getString(R.string.CPU_POWER_MODE_DEFAULT));
+ String enable_lite_fp16 = sharedPreferences.getString(getString(R.string.ENABLE_LITE_FP16_MODE_KEY),
+ getString(R.string.ENABLE_LITE_FP16_MODE_DEFAULT));
+
+ etModelDir.setSummary(model_dir);
+ lpCPUThreadNum.setValue(cpu_thread_num);
+ lpCPUThreadNum.setSummary(cpu_thread_num);
+ lpCPUPowerMode.setValue(cpu_power_mode);
+ lpCPUPowerMode.setSummary(cpu_power_mode);
+ lpEnableLiteFp16.setValue(enable_lite_fp16);
+ lpEnableLiteFp16.setSummary(enable_lite_fp16);
+
+ }
+
+ static boolean checkAndUpdateSettings(Context ctx) {
+ boolean settingsChanged = false;
+ SharedPreferences sharedPreferences = PreferenceManager.getDefaultSharedPreferences(ctx);
+
+ String model_dir = sharedPreferences.getString(ctx.getString(R.string.MODEL_DIR_KEY),
+ ctx.getString(R.string.MATTING_MODEL_DIR_DEFAULT));
+ settingsChanged |= !modelDir.equalsIgnoreCase(model_dir);
+ modelDir = model_dir;
+
+ String cpu_thread_num = sharedPreferences.getString(ctx.getString(R.string.CPU_THREAD_NUM_KEY),
+ ctx.getString(R.string.CPU_THREAD_NUM_DEFAULT));
+ settingsChanged |= cpuThreadNum != Integer.parseInt(cpu_thread_num);
+ cpuThreadNum = Integer.parseInt(cpu_thread_num);
+
+ String cpu_power_mode = sharedPreferences.getString(ctx.getString(R.string.CPU_POWER_MODE_KEY),
+ ctx.getString(R.string.CPU_POWER_MODE_DEFAULT));
+ settingsChanged |= !cpuPowerMode.equalsIgnoreCase(cpu_power_mode);
+ cpuPowerMode = cpu_power_mode;
+
+ String enable_lite_fp16 = sharedPreferences.getString(ctx.getString(R.string.ENABLE_LITE_FP16_MODE_KEY),
+ ctx.getString(R.string.ENABLE_LITE_FP16_MODE_DEFAULT));
+ settingsChanged |= !enableLiteFp16.equalsIgnoreCase(enable_lite_fp16);
+ enableLiteFp16 = enable_lite_fp16;
+
+ return settingsChanged;
+ }
+
+ static void resetSettings() {
+ selectedModelIdx = -1;
+ modelDir = "";
+ cpuThreadNum = 2;
+ cpuPowerMode = "";
+ enableLiteFp16 = "true";
+ }
+
+ @Override
+ protected void onResume() {
+ super.onResume();
+ getPreferenceScreen().getSharedPreferences().registerOnSharedPreferenceChangeListener(this);
+ reloadSettingsAndUpdateUI();
+ }
+
+ @Override
+ protected void onPause() {
+ super.onPause();
+ getPreferenceScreen().getSharedPreferences().unregisterOnSharedPreferenceChangeListener(this);
+ }
+
+ @Override
+ public void onSharedPreferenceChanged(SharedPreferences sharedPreferences, String key) {
+ reloadSettingsAndUpdateUI();
+ }
+}
\ No newline at end of file
diff --git a/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/matting/MattingWelcomeActivity.java b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/matting/MattingWelcomeActivity.java
new file mode 100644
index 000000000..c47ed9b3e
--- /dev/null
+++ b/java/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/matting/MattingWelcomeActivity.java
@@ -0,0 +1,30 @@
+package com.baidu.paddle.fastdeploy.app.examples.matting;
+
+import android.app.Activity;
+import android.content.Intent;
+import android.graphics.Color;
+import android.os.Build;
+import android.os.Bundle;
+import android.support.annotation.Nullable;
+import android.view.View;
+
+import com.baidu.paddle.fastdeploy.app.examples.R;
+
+public class MattingWelcomeActivity extends Activity {
+ @Override
+ protected void onCreate(@Nullable Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ if (Build.VERSION.SDK_INT > Build.VERSION_CODES.LOLLIPOP) {
+ getWindow().getDecorView().setSystemUiVisibility(View.SYSTEM_UI_FLAG_LAYOUT_FULLSCREEN
+ | View.SYSTEM_UI_FLAG_LAYOUT_STABLE
+ );
+ getWindow().setStatusBarColor(Color.TRANSPARENT);
+ }
+ setContentView(R.layout.matting_welcome);
+ }
+
+ public void startActivity(View view) {
+ Intent intent = new Intent(MattingWelcomeActivity.this, MattingMainActivity.class);
+ startActivity(intent);
+ }
+}
\ No newline at end of file
diff --git a/java/android/app/src/main/res/layout/face_align_activity_main.xml b/java/android/app/src/main/res/layout/face_align_activity_main.xml
new file mode 100644
index 000000000..008899edb
--- /dev/null
+++ b/java/android/app/src/main/res/layout/face_align_activity_main.xml
@@ -0,0 +1,14 @@
+<!-- XML content not preserved in this diff -->
diff --git a/java/android/app/src/main/res/layout/face_align_camera_page.xml b/java/android/app/src/main/res/layout/face_align_camera_page.xml
new file mode 100644
index 000000000..de2788179
--- /dev/null
+++ b/java/android/app/src/main/res/layout/face_align_camera_page.xml
@@ -0,0 +1,159 @@
+<!-- XML content not preserved in this diff -->
\ No newline at end of file
diff --git a/java/android/app/src/main/res/layout/face_align_result_page.xml b/java/android/app/src/main/res/layout/face_align_result_page.xml
new file mode 100644
index 000000000..f1e4cbd92
--- /dev/null
+++ b/java/android/app/src/main/res/layout/face_align_result_page.xml
@@ -0,0 +1,160 @@
+<!-- XML content not preserved in this diff -->
\ No newline at end of file
diff --git a/java/android/app/src/main/res/layout/face_align_welcome.xml b/java/android/app/src/main/res/layout/face_align_welcome.xml
new file mode 100644
index 000000000..d881b04ec
--- /dev/null
+++ b/java/android/app/src/main/res/layout/face_align_welcome.xml
@@ -0,0 +1,79 @@
+<!-- XML content not preserved in this diff -->
diff --git a/java/android/app/src/main/res/layout/matting_activity_main.xml b/java/android/app/src/main/res/layout/matting_activity_main.xml
new file mode 100644
index 000000000..f07f0faea
--- /dev/null
+++ b/java/android/app/src/main/res/layout/matting_activity_main.xml
@@ -0,0 +1,14 @@
+<!-- XML content not preserved in this diff -->
\ No newline at end of file
diff --git a/java/android/app/src/main/res/layout/matting_camera_page.xml b/java/android/app/src/main/res/layout/matting_camera_page.xml
new file mode 100644
index 000000000..de2788179
--- /dev/null
+++ b/java/android/app/src/main/res/layout/matting_camera_page.xml
@@ -0,0 +1,159 @@
+<!-- XML content not preserved in this diff -->
\ No newline at end of file
diff --git a/java/android/app/src/main/res/layout/matting_result_page.xml b/java/android/app/src/main/res/layout/matting_result_page.xml
new file mode 100644
index 000000000..f1e4cbd92
--- /dev/null
+++ b/java/android/app/src/main/res/layout/matting_result_page.xml
@@ -0,0 +1,160 @@
+<!-- XML content not preserved in this diff -->
\ No newline at end of file
diff --git a/java/android/app/src/main/res/layout/matting_welcome.xml b/java/android/app/src/main/res/layout/matting_welcome.xml
new file mode 100644
index 000000000..cd85572fb
--- /dev/null
+++ b/java/android/app/src/main/res/layout/matting_welcome.xml
@@ -0,0 +1,76 @@
+<!-- XML content not preserved in this diff -->
\ No newline at end of file
diff --git a/java/android/app/src/main/res/values/strings.xml b/java/android/app/src/main/res/values/strings.xml
index 28d034e99..10eecd3c4 100644
--- a/java/android/app/src/main/res/values/strings.xml
+++ b/java/android/app/src/main/res/values/strings.xml
@@ -41,11 +41,15 @@
models/scrfd_500m_bnkps_shape320x320_pd
models/portrait_pp_humansegv2_lite_256x144_inference_model
+
+    <string name="FACE_ALIGN_MODEL_DIR_DEFAULT">models</string>
models/PP_TinyPose_128x96_infer
models
models
models/uie-nano
+
+    <string name="MATTING_MODEL_DIR_DEFAULT">models</string>
拍照识别
实时识别
diff --git a/java/android/app/src/main/res/xml/face_align_settings.xml b/java/android/app/src/main/res/xml/face_align_settings.xml
new file mode 100644
index 000000000..c4e27605e
--- /dev/null
+++ b/java/android/app/src/main/res/xml/face_align_settings.xml
@@ -0,0 +1,37 @@
+<!-- XML content not preserved in this diff -->
\ No newline at end of file
diff --git a/java/android/app/src/main/res/xml/matting_settings.xml b/java/android/app/src/main/res/xml/matting_settings.xml
new file mode 100644
index 000000000..a4feba11d
--- /dev/null
+++ b/java/android/app/src/main/res/xml/matting_settings.xml
@@ -0,0 +1,37 @@
+<!-- XML content not preserved in this diff -->
\ No newline at end of file
diff --git a/python/__init__.py b/python/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/fastdeploy/__init__.py b/python/fastdeploy/__init__.py
index b767393f1..42db5c281 100644
--- a/python/fastdeploy/__init__.py
+++ b/python/fastdeploy/__init__.py
@@ -37,3 +37,4 @@ from . import vision
from . import pipeline
from . import text
from .download import download, download_and_decompress, download_model
+from . import serving
diff --git a/python/fastdeploy/runtime.py b/python/fastdeploy/runtime.py
index a593cc0df..23cf697bf 100755
--- a/python/fastdeploy/runtime.py
+++ b/python/fastdeploy/runtime.py
@@ -263,18 +263,18 @@ class RuntimeOption:
return
return self._option.use_gpu(device_id)
- def use_xpu(self,
- device_id=0,
- l3_workspace_size=16 * 1024 * 1024,
- locked=False,
- autotune=True,
- autotune_file="",
- precision="int16",
- adaptive_seqlen=False,
- enable_multi_stream=False):
- """Inference with XPU
+ def use_kunlunxin(self,
+ device_id=0,
+ l3_workspace_size=16 * 1024 * 1024,
+ locked=False,
+ autotune=True,
+ autotune_file="",
+ precision="int16",
+ adaptive_seqlen=False,
+ enable_multi_stream=False):
+ """Inference with KunlunXin XPU
- :param device_id: (int)The index of XPU will be used for inference, default 0
+ :param device_id: (int)The index of KunlunXin XPU will be used for inference, default 0
:param l3_workspace_size: (int)The size of the video memory allocated by the l3 cache, the maximum is 16M, default 16M
:param locked: (bool)Whether the allocated L3 cache can be locked. If false, it means that the L3 cache is not locked,
and the allocated L3 cache can be shared by multiple models, and multiple models
@@ -285,11 +285,11 @@ class RuntimeOption:
the algorithm specified in the file will be used and autotune will not be performed again.
:param precision: (str)Calculation accuracy of multi_encoder
:param adaptive_seqlen: (bool)adaptive_seqlen Is the input of multi_encoder variable length
- :param enable_multi_stream: (bool)Whether to enable the multi stream of xpu.
+ :param enable_multi_stream: (bool)Whether to enable the multi stream of KunlunXin XPU.
"""
- return self._option.use_xpu(device_id, l3_workspace_size, locked,
- autotune, autotune_file, precision,
- adaptive_seqlen, enable_multi_stream)
+ return self._option.use_kunlunxin(device_id, l3_workspace_size, locked,
+ autotune, autotune_file, precision,
+ adaptive_seqlen, enable_multi_stream)
def use_cpu(self):
"""Inference with CPU
diff --git a/python/fastdeploy/serving/__init__.py b/python/fastdeploy/serving/__init__.py
new file mode 100644
index 000000000..7d175762c
--- /dev/null
+++ b/python/fastdeploy/serving/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
diff --git a/python/fastdeploy/serving/handler/__init__.py b/python/fastdeploy/serving/handler/__init__.py
new file mode 100644
index 000000000..a1e40793c
--- /dev/null
+++ b/python/fastdeploy/serving/handler/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+from .base_handler import BaseModelHandler
+from .vision_model_handler import VisionModelHandler
diff --git a/python/fastdeploy/serving/handler/base_handler.py b/python/fastdeploy/serving/handler/base_handler.py
new file mode 100644
index 000000000..ab6a34427
--- /dev/null
+++ b/python/fastdeploy/serving/handler/base_handler.py
@@ -0,0 +1,28 @@
+# coding:utf-8
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+from abc import ABCMeta, abstractmethod
+
+
+class BaseModelHandler(metaclass=ABCMeta):
+ def __init__(self):
+ super().__init__()
+
+ @classmethod
+ @abstractmethod
+ def process(cls, predictor, data, parameters):
+ pass
+
diff --git a/python/fastdeploy/serving/handler/vision_model_handler.py b/python/fastdeploy/serving/handler/vision_model_handler.py
new file mode 100644
index 000000000..dc14c0c3f
--- /dev/null
+++ b/python/fastdeploy/serving/handler/vision_model_handler.py
@@ -0,0 +1,30 @@
+# coding:utf-8
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .base_handler import BaseModelHandler
+from ..utils import base64_to_cv2
+from ...vision.utils import fd_result_to_json
+
+
+class VisionModelHandler(BaseModelHandler):
+ def __init__(self):
+ super().__init__()
+
+ @classmethod
+ def process(cls, predictor, data, parameters):
+ # TODO: support batch predict
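+        # Decode the base64-encoded input image, run the predictor on it, and convert
+        # the FastDeploy result structure into JSON for the HTTP response.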
+ im = base64_to_cv2(data['image'])
+ result = predictor.predict(im)
+ r_str = fd_result_to_json(result)
+ return r_str
diff --git a/python/fastdeploy/serving/model_manager.py b/python/fastdeploy/serving/model_manager.py
new file mode 100644
index 000000000..ed252d133
--- /dev/null
+++ b/python/fastdeploy/serving/model_manager.py
@@ -0,0 +1,57 @@
+# coding:utf-8
+# copyright (c) 2022 paddlepaddle authors. all rights reserved.
+#
+# licensed under the apache license, version 2.0 (the "license"
+# you may not use this file except in compliance with the license.
+# you may obtain a copy of the license at
+#
+# http://www.apache.org/licenses/license-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the license is distributed on an "as is" basis,
+# without warranties or conditions of any kind, either express or implied.
+# see the license for the specific language governing permissions and
+# limitations under the license.
+
+import os
+import time
+import json
+import logging
+import threading
+# from .predictor import Predictor
+from .handler import BaseModelHandler
+from .utils import lock_predictor
+
+
+class ModelManager:
+ def __init__(self, model_handler, predictor):
+ self._model_handler = model_handler
+ self._predictors = []
+ self._predictor_locks = []
+ self._register(predictor)
+
+ def _register(self, predictor):
+ # Get the model handler
+ if not issubclass(self._model_handler, BaseModelHandler):
+ raise TypeError(
+ "The model_handler must be subclass of BaseModelHandler, please check the type."
+ )
+
+ # TODO: Create multiple predictors to run on different GPUs or different CPU threads
+ self._predictors.append(predictor)
+ self._predictor_locks.append(threading.Lock())
+
+ def _get_predict_id(self):
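+        # Pick a predictor index from the current millisecond timestamp modulo the
+        # pool size, spreading requests across predictors without a shared counter.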
+ t = time.time()
+ t = int(round(t * 1000))
+ predictor_id = t % len(self._predictors)
+        logging.info("The predictor id: {} is selected to run the model.".
+                     format(predictor_id))
+ return predictor_id
+
+ def predict(self, data, parameters):
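+        # Run the request on the chosen predictor while holding its lock, so a single
+        # predictor instance never serves two requests at the same time.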
+ predictor_id = self._get_predict_id()
+ with lock_predictor(self._predictor_locks[predictor_id]):
+ model_output = self._model_handler.process(
+ self._predictors[predictor_id], data, parameters)
+ return model_output
diff --git a/python/fastdeploy/serving/router/__init__.py b/python/fastdeploy/serving/router/__init__.py
new file mode 100644
index 000000000..c3ee45631
--- /dev/null
+++ b/python/fastdeploy/serving/router/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+from .base_router import BaseRouterManager
+from .http_router import HttpRouterManager
diff --git a/python/fastdeploy/serving/router/base_router.py b/python/fastdeploy/serving/router/base_router.py
new file mode 100644
index 000000000..986d31b5f
--- /dev/null
+++ b/python/fastdeploy/serving/router/base_router.py
@@ -0,0 +1,28 @@
+# coding:utf-8
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+
+
+class BaseRouterManager(abc.ABC):
+ _app = None
+
+ def __init__(self, app):
+ super().__init__()
+ self._app = app
+
+ @abc.abstractmethod
+ def register_models_router(self):
+ return NotImplemented
diff --git a/python/fastdeploy/serving/router/http_router.py b/python/fastdeploy/serving/router/http_router.py
new file mode 100644
index 000000000..b35640f89
--- /dev/null
+++ b/python/fastdeploy/serving/router/http_router.py
@@ -0,0 +1,80 @@
+# coding:utf-8
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import hashlib
+import typing
+import logging
+from typing import Optional
+
+from fastapi import APIRouter, Request, HTTPException
+from pydantic import BaseModel, Extra, create_model
+
+from .base_router import BaseRouterManager
+
+
+class ResponseBase(BaseModel):
+ text: Optional[str] = None
+
+
+class RequestBase(BaseModel, extra=Extra.forbid):
+ parameters: Optional[dict] = {}
+
+
+class HttpRouterManager(BaseRouterManager):
+ def register_models_router(self, task_name):
+
+ # Url path to register the model
+ paths = [f"/{task_name}"]
+ for path in paths:
+            logging.info("FastDeploy Model request [path]={} is generated.".
+ format(path))
+
+ # Unique name to create the pydantic model
+ unique_name = hashlib.md5(task_name.encode()).hexdigest()
+
+ # Create request model
+ req_model = create_model(
+ "RequestModel" + unique_name,
+ data=(typing.Any, ...),
+ __base__=RequestBase, )
+
+ # Create response model
+ resp_model = create_model(
+ "ResponseModel" + unique_name,
+ result=(typing.Any, ...),
+ __base__=ResponseBase, )
+
+ # Template predict endpoint function to dynamically serve different models
+ def predict(request: Request, inference_request: req_model):
+ try:
+ result = self._app._model_manager.predict(
+ inference_request.data, inference_request.parameters)
+ except Exception as e:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Error occurred while running predict: {str(e)}")
+ return {"result": result}
+
+ # Register the route and add to the app
+ router = APIRouter()
+ for path in paths:
+ router.add_api_route(
+ path,
+ predict,
+ methods=["post"],
+ summary=f"{task_name.title()}",
+ response_model=resp_model,
+ response_model_exclude_unset=True,
+ response_model_exclude_none=True, )
+ self._app.include_router(router)
diff --git a/python/fastdeploy/serving/server.py b/python/fastdeploy/serving/server.py
new file mode 100644
index 000000000..9f43d8592
--- /dev/null
+++ b/python/fastdeploy/serving/server.py
@@ -0,0 +1,46 @@
+# coding:utf-8
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from fastapi import FastAPI
+from .router import HttpRouterManager
+from .model_manager import ModelManager
+
+
+class SimpleServer(FastAPI):
+ def __init__(self, **kwargs):
+ """
+ Initial function for the FastDeploy SimpleServer.
+ """
+ super().__init__(**kwargs)
+ self._router_manager = HttpRouterManager(self)
+ self._model_manager = None
+ self._service_name = "FastDeploy SimpleServer"
+ self._service_type = None
+
+ def register(self, task_name, model_handler, predictor):
+ """
+        The register function for the SimpleServer; the main register arguments are as follows:
+
+ Args:
+ task_name(str): API URL path.
+ model_handler: To process request data, run predictor,
+ and can also add your custom post processing on top of the predictor result
+ predictor: To run model predict
+ """
+        self._service_type = "models"
+ model_manager = ModelManager(model_handler, predictor)
+ self._model_manager = model_manager
+ # Register model server router
+ self._router_manager.register_models_router(task_name)
diff --git a/python/fastdeploy/serving/utils.py b/python/fastdeploy/serving/utils.py
new file mode 100644
index 000000000..405ad5a20
--- /dev/null
+++ b/python/fastdeploy/serving/utils.py
@@ -0,0 +1,40 @@
+# coding:utf-8
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import contextlib
+import base64
+import numpy as np
+import cv2
+
+
+@contextlib.contextmanager
+def lock_predictor(lock):
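+    """Hold the given predictor lock while the wrapped block runs, releasing it afterwards."""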
+ lock.acquire()
+ try:
+ yield
+ finally:
+ lock.release()
+
+
+def cv2_to_base64(image):
+ data = cv2.imencode('.jpg', image)[1]
+ return base64.b64encode(data.tobytes()).decode('utf8')
+
+
+def base64_to_cv2(b64str):
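+    # Decode the base64 string into raw bytes, then let OpenCV parse them into a BGR image.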
+ data = base64.b64decode(b64str.encode('utf8'))
+    data = np.frombuffer(data, np.uint8)
+ data = cv2.imdecode(data, cv2.IMREAD_COLOR)
+ return data
diff --git a/python/fastdeploy/vision/detection/__init__.py b/python/fastdeploy/vision/detection/__init__.py
index afd1cd8ce..70d00bcdb 100755
--- a/python/fastdeploy/vision/detection/__init__.py
+++ b/python/fastdeploy/vision/detection/__init__.py
@@ -19,6 +19,7 @@ from .contrib.scaled_yolov4 import ScaledYOLOv4
from .contrib.nanodet_plus import NanoDetPlus
from .contrib.yolox import YOLOX
from .contrib.yolov5 import *
+from .contrib.fastestdet import *
from .contrib.yolov5lite import YOLOv5Lite
from .contrib.yolov6 import YOLOv6
from .contrib.yolov7end2end_trt import YOLOv7End2EndTRT
diff --git a/python/fastdeploy/vision/detection/contrib/fastestdet.py b/python/fastdeploy/vision/detection/contrib/fastestdet.py
new file mode 100644
index 000000000..2f11ed43d
--- /dev/null
+++ b/python/fastdeploy/vision/detection/contrib/fastestdet.py
@@ -0,0 +1,149 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import FastDeployModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class FastestDetPreprocessor:
+ def __init__(self):
+ """Create a preprocessor for FastestDet
+ """
+ self._preprocessor = C.vision.detection.FastestDetPreprocessor()
+
+ def run(self, input_ims):
+ """Preprocess input images for FastestDet
+
+ :param: input_ims: (list of numpy.ndarray)The input image
+ :return: list of FDTensor
+ """
+ return self._preprocessor.run(input_ims)
+
+ @property
+ def size(self):
+ """
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [352, 352]
+ """
+ return self._preprocessor.size
+
+ @size.setter
+ def size(self, wh):
+ assert isinstance(wh, (list, tuple)),\
+ "The value to set `size` must be type of tuple or list."
+ assert len(wh) == 2,\
+            "The value to set `size` must contain 2 elements, i.e. [width, height], but now it contains {} elements.".format(
+ len(wh))
+ self._preprocessor.size = wh
+
+
+class FastestDetPostprocessor:
+ def __init__(self):
+ """Create a postprocessor for FastestDet
+ """
+ self._postprocessor = C.vision.detection.FastestDetPostprocessor()
+
+ def run(self, runtime_results, ims_info):
+ """Postprocess the runtime results for FastestDet
+
+ :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
+ :param: ims_info: (list of dict)Record input_shape and output_shape
+ :return: list of DetectionResult(If the runtime_results is predict by batched samples, the length of this list equals to the batch size)
+ """
+ return self._postprocessor.run(runtime_results, ims_info)
+
+ @property
+ def conf_threshold(self):
+ """
+ confidence threshold for postprocessing, default is 0.65
+ """
+ return self._postprocessor.conf_threshold
+
+ @property
+ def nms_threshold(self):
+ """
+ nms threshold for postprocessing, default is 0.45
+ """
+ return self._postprocessor.nms_threshold
+
+ @conf_threshold.setter
+ def conf_threshold(self, conf_threshold):
+ assert isinstance(conf_threshold, float),\
+ "The value to set `conf_threshold` must be type of float."
+ self._postprocessor.conf_threshold = conf_threshold
+
+ @nms_threshold.setter
+ def nms_threshold(self, nms_threshold):
+ assert isinstance(nms_threshold, float),\
+ "The value to set `nms_threshold` must be type of float."
+ self._postprocessor.nms_threshold = nms_threshold
+
+
+class FastestDet(FastDeployModel):
+ def __init__(self,
+ model_file,
+ params_file="",
+ runtime_option=None,
+ model_format=ModelFormat.ONNX):
+ """Load a FastestDet model exported by FastestDet.
+
+ :param model_file: (str)Path of model file, e.g ./FastestDet.onnx
+        :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_format is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+ :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model
+ """
+
+ super(FastestDet, self).__init__(runtime_option)
+
+        assert model_format == ModelFormat.ONNX, "FastestDet only supports the ONNX model format (ModelFormat.ONNX) for now."
+ self._model = C.vision.detection.FastestDet(
+ model_file, params_file, self._runtime_option, model_format)
+
+ assert self.initialized, "FastestDet initialize failed."
+
+ def predict(self, input_image):
+ """Detect an input image
+
+ :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+ :return: DetectionResult
+ """
+ assert input_image is not None, "Input image is None."
+ return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        """Detect a batch of input images
+
+        :param images: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of DetectionResult
+        """
+        assert len(images) == 1, "FastestDet only supports a batch size of 1 in batch_predict."
+
+ return self._model.batch_predict(images)
+
+ @property
+ def preprocessor(self):
+ """Get FastestDetPreprocessor object of the loaded model
+
+ :return FastestDetPreprocessor
+ """
+ return self._model.preprocessor
+
+ @property
+ def postprocessor(self):
+ """Get FastestDetPostprocessor object of the loaded model
+
+ :return FastestDetPostprocessor
+ """
+ return self._model.postprocessor
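+
+
+# A minimal usage sketch (comments only, not executed), assuming the FastestDet
+# ONNX model has been downloaded locally as ./FastestDet.onnx:
+#
+#   import cv2
+#   import fastdeploy as fd
+#
+#   model = fd.vision.detection.FastestDet("./FastestDet.onnx")
+#   im = cv2.imread("test.jpg")
+#   result = model.predict(im)   # returns a DetectionResult
+#   print(result)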
diff --git a/python/fastdeploy/vision/faceid/__init__.py b/python/fastdeploy/vision/faceid/__init__.py
index cc95414f4..90958f0c8 100644
--- a/python/fastdeploy/vision/faceid/__init__.py
+++ b/python/fastdeploy/vision/faceid/__init__.py
@@ -13,9 +13,4 @@
# limitations under the License.
from __future__ import absolute_import
-from .contrib.adaface import AdaFace
-from .contrib.arcface import ArcFace
-from .contrib.cosface import CosFace
-from .contrib.insightface_rec import InsightFaceRecognitionModel
-from .contrib.partial_fc import PartialFC
-from .contrib.vpl import VPL
+from .contrib import *
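+
+# The wildcard re-export keeps the public import path unchanged: for example,
+# `from fastdeploy.vision.faceid import ArcFace` still resolves to the class
+# now defined under contrib/insightface.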
diff --git a/python/fastdeploy/vision/faceid/contrib/__init__.py b/python/fastdeploy/vision/faceid/contrib/__init__.py
index 8034e10bf..a23ab41f7 100644
--- a/python/fastdeploy/vision/faceid/contrib/__init__.py
+++ b/python/fastdeploy/vision/faceid/contrib/__init__.py
@@ -13,3 +13,5 @@
# limitations under the License.
from __future__ import absolute_import
+from .insightface import *
+from .adaface import *
\ No newline at end of file
diff --git a/python/fastdeploy/vision/faceid/contrib/adaface.py b/python/fastdeploy/vision/faceid/contrib/adaface.py
deleted file mode 100644
index 140cdb504..000000000
--- a/python/fastdeploy/vision/faceid/contrib/adaface.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from .... import FastDeployModel, ModelFormat
-from .... import c_lib_wrap as C
-
-
-class AdaFace(FastDeployModel):
- def __init__(self,
- model_file,
- params_file="",
- runtime_option=None,
- model_format=ModelFormat.PADDLE):
- """Load a AdaFace model exported by InsigtFace.
-
- :param model_file: (str)Path of model file, e.g ./adaface.onnx
- :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
- :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
- :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
- """
- # 调用基函数进行backend_option的初始化
- # 初始化后的option保存在self._runtime_option
- super(AdaFace, self).__init__(runtime_option)
-
- self._model = C.vision.faceid.AdaFace(
- model_file, params_file, self._runtime_option, model_format)
- # 通过self.initialized判断整个模型的初始化是否成功
- assert self.initialized, "AdaFace initialize failed."
-
- def predict(self, input_image):
- """ Predict the face recognition result for an input image
-
- :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
- :return: FaceRecognitionResult
- """
- return self._model.predict(input_image)
-
- # 一些跟模型有关的属性封装
- # 多数是预处理相关,可通过修改如model.size = [112, 112]改变预处理时resize的大小(前提是模型支持)
- @property
- def size(self):
- """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
- """
- return self._model.size
-
- @property
- def alpha(self):
- """
- Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
- """
- return self._model.alpha
-
- @property
- def beta(self):
- """
- Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
-
- """
- return self._model.beta
-
- @property
- def swap_rb(self):
- """
- Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
- """
- return self._model.swap_rb
-
- @property
- def l2_normalize(self):
- """
- Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
- """
- return self._model.l2_normalize
-
- @size.setter
- def size(self, wh):
- assert isinstance(wh, (list, tuple)), \
- "The value to set `size` must be type of tuple or list."
- assert len(wh) == 2, \
- "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
- len(wh))
- self._model.size = wh
-
- @alpha.setter
- def alpha(self, value):
- assert isinstance(value, (list, tuple)), \
- "The value to set `alpha` must be type of tuple or list."
- assert len(value) == 3, \
- "The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.alpha = value
-
- @beta.setter
- def beta(self, value):
- assert isinstance(value, (list, tuple)), \
- "The value to set `beta` must be type of tuple or list."
- assert len(value) == 3, \
- "The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.beta = value
-
- @swap_rb.setter
- def swap_rb(self, value):
- assert isinstance(
- value, bool), "The value to set `swap_rb` must be type of bool."
- self._model.swap_rb = value
-
- @l2_normalize.setter
- def l2_normalize(self, value):
- assert isinstance(
- value,
- bool), "The value to set `l2_normalize` must be type of bool."
- self._model.l2_normalize = value
diff --git a/python/fastdeploy/vision/faceid/contrib/adaface/__init__.py b/python/fastdeploy/vision/faceid/contrib/adaface/__init__.py
new file mode 100644
index 000000000..0c4a5f973
--- /dev/null
+++ b/python/fastdeploy/vision/faceid/contrib/adaface/__init__.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from ..... import FastDeployModel, ModelFormat
+from ..... import c_lib_wrap as C
+
+
+class AdaFacePreprocessor:
+ def __init__(self):
+ """Create a preprocessor for AdaFace Model
+ """
+ self._preprocessor = C.vision.faceid.AdaFacePreprocessor()
+
+ def run(self, input_ims):
+ """Preprocess input images for AdaFace Model
+
+        :param: input_ims: (list of numpy.ndarray)The input images
+        :return: list of FDTensor
+ """
+ return self._preprocessor.run(input_ims)
+
+
+class AdaFacePostprocessor:
+ def __init__(self):
+ """Create a postprocessor for AdaFace Model
+
+ """
+ self._postprocessor = C.vision.faceid.AdaFacePostprocessor()
+
+ def run(self, runtime_results):
+ """Postprocess the runtime results for PaddleClas Model
+
+ :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
+        :return: list of FaceRecognitionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
+ """
+ return self._postprocessor.run(runtime_results)
+
+ @property
+ def l2_normalize(self):
+ """
+        Whether to apply l2 normalize to the embedding values in postprocessing, default is False
+ """
+ return self._postprocessor.l2_normalize
+
+
+class AdaFace(FastDeployModel):
+ def __init__(self,
+ model_file,
+ params_file="",
+ runtime_option=None,
+ model_format=ModelFormat.ONNX):
+ """Load a AdaFace model exported by PaddleClas.
+
+ :param model_file: (str)Path of model file, e.g adaface/model.pdmodel
+ :param params_file: (str)Path of parameters file, e.g adaface/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+ :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+ :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
+ """
+ super(AdaFace, self).__init__(runtime_option)
+ self._model = C.vision.faceid.AdaFace(
+ model_file, params_file, self._runtime_option, model_format)
+ assert self.initialized, "AdaFace model initialize failed."
+
+ def predict(self, im):
+ """Detect an input image
+
+ :param im: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+ :return: DetectionResult
+ """
+
+ assert im is not None, "The input image data is None."
+ return self._model.predict(im)
+
+ def batch_predict(self, images):
+ """Detect a batch of input image list
+
+ :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+ :return list of DetectionResult
+ """
+
+ return self._model.batch_predict(images)
+
+ @property
+ def preprocessor(self):
+ """Get AdaFacePreprocessor object of the loaded model
+
+ :return AdaFacePreprocessor
+ """
+ return self._model.preprocessor
+
+ @property
+ def postprocessor(self):
+ """Get AdaFacePostprocessor object of the loaded model
+
+ :return AdaFacePostprocessor
+ """
+ return self._model.postprocessor
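+
+
+# A minimal usage sketch (comments only, not executed), assuming an AdaFace
+# model exported to ONNX as ./adaface.onnx and an aligned face crop on disk:
+#
+#   import cv2
+#   import fastdeploy as fd
+#
+#   model = fd.vision.faceid.AdaFace("./adaface.onnx")
+#   face = cv2.imread("face.jpg")
+#   result = model.predict(face)   # FaceRecognitionResult with an embedding
+#   print(result.embedding[:8])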
diff --git a/python/fastdeploy/vision/faceid/contrib/arcface.py b/python/fastdeploy/vision/faceid/contrib/arcface.py
deleted file mode 100644
index f4341b170..000000000
--- a/python/fastdeploy/vision/faceid/contrib/arcface.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-import logging
-from .... import FastDeployModel, ModelFormat
-from .... import c_lib_wrap as C
-from ..contrib.insightface_rec import InsightFaceRecognitionModel
-
-
-class ArcFace(FastDeployModel):
- def __init__(self,
- model_file,
- params_file="",
- runtime_option=None,
- model_format=ModelFormat.ONNX):
- """Load a ArcFace model exported by InsigtFace.
-
- :param model_file: (str)Path of model file, e.g ./arcface.onnx
- :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
- :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
- :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
- """
- # 调用基函数进行backend_option的初始化
- # 初始化后的option保存在self._runtime_option
- super(ArcFace, self).__init__(runtime_option)
-
- self._model = C.vision.faceid.ArcFace(
- model_file, params_file, self._runtime_option, model_format)
- # 通过self.initialized判断整个模型的初始化是否成功
- assert self.initialized, "ArcFace initialize failed."
-
- def predict(self, input_image):
- """ Predict the face recognition result for an input image
-
- :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
- :return: FaceRecognitionResult
- """
- return self._model.predict(input_image)
-
- # 一些跟模型有关的属性封装
- # 多数是预处理相关,可通过修改如model.size = [112, 112]改变预处理时resize的大小(前提是模型支持)
- @property
- def size(self):
- """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
- """
- return self._model.size
-
- @property
- def alpha(self):
- """
- Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
- """
- return self._model.alpha
-
- @property
- def beta(self):
- """
- Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
- """
- return self._model.beta
-
- @property
- def swap_rb(self):
- """
- Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
- """
- return self._model.swap_rb
-
- @property
- def l2_normalize(self):
- """
- Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
- """
- return self._model.l2_normalize
-
- @size.setter
- def size(self, wh):
- assert isinstance(wh, (list, tuple)),\
- "The value to set `size` must be type of tuple or list."
- assert len(wh) == 2,\
- "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
- len(wh))
- self._model.size = wh
-
- @alpha.setter
- def alpha(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `alpha` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.alpha = value
-
- @beta.setter
- def beta(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `beta` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.beta = value
-
- @swap_rb.setter
- def swap_rb(self, value):
- assert isinstance(
- value, bool), "The value to set `swap_rb` must be type of bool."
- self._model.swap_rb = value
-
- @l2_normalize.setter
- def l2_normalize(self, value):
- assert isinstance(
- value,
- bool), "The value to set `l2_normalize` must be type of bool."
- self._model.l2_normalize = value
diff --git a/python/fastdeploy/vision/faceid/contrib/cosface.py b/python/fastdeploy/vision/faceid/contrib/cosface.py
deleted file mode 100644
index 61d1f2cb9..000000000
--- a/python/fastdeploy/vision/faceid/contrib/cosface.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-import logging
-from .... import FastDeployModel, ModelFormat
-from .... import c_lib_wrap as C
-
-
-class CosFace(FastDeployModel):
- def __init__(self,
- model_file,
- params_file="",
- runtime_option=None,
- model_format=ModelFormat.ONNX):
- """Load a CosFace model exported by InsigtFace.
-
- :param model_file: (str)Path of model file, e.g ./cosface.onnx
- :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
- :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
- :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
- """
- # 调用基函数进行backend_option的初始化
- # 初始化后的option保存在self._runtime_option
- super(CosFace, self).__init__(runtime_option)
-
- self._model = C.vision.faceid.CosFace(
- model_file, params_file, self._runtime_option, model_format)
- # 通过self.initialized判断整个模型的初始化是否成功
- assert self.initialized, "CosFace initialize failed."
-
- def predict(self, input_image):
- """ Predict the face recognition result for an input image
-
- :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
- :return: FaceRecognitionResult
- """
- return self._model.predict(input_image)
-
- # 一些跟模型有关的属性封装
- # 多数是预处理相关,可通过修改如model.size = [112, 112]改变预处理时resize的大小(前提是模型支持)
- @property
- def size(self):
- """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
- """
- return self._model.size
-
- @property
- def alpha(self):
- """
- Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
- """
- return self._model.alpha
-
- @property
- def beta(self):
- """
- Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
- """
- return self._model.beta
-
- @property
- def swap_rb(self):
- """
- Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
- """
- return self._model.swap_rb
-
- @property
- def l2_normalize(self):
- """
- Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
- """
- return self._model.l2_normalize
-
- @size.setter
- def size(self, wh):
- assert isinstance(wh, (list, tuple)),\
- "The value to set `size` must be type of tuple or list."
- assert len(wh) == 2,\
- "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
- len(wh))
- self._model.size = wh
-
- @alpha.setter
- def alpha(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `alpha` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.alpha = value
-
- @beta.setter
- def beta(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `beta` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.beta = value
-
- @swap_rb.setter
- def swap_rb(self, value):
- assert isinstance(
- value, bool), "The value to set `swap_rb` must be type of bool."
- self._model.swap_rb = value
-
- @l2_normalize.setter
- def l2_normalize(self, value):
- assert isinstance(
- value,
- bool), "The value to set `l2_normalize` must be type of bool."
- self._model.l2_normalize = value
diff --git a/python/fastdeploy/vision/faceid/contrib/insightface/__init__.py b/python/fastdeploy/vision/faceid/contrib/insightface/__init__.py
new file mode 100644
index 000000000..3353c8e44
--- /dev/null
+++ b/python/fastdeploy/vision/faceid/contrib/insightface/__init__.py
@@ -0,0 +1,222 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from ..... import FastDeployModel, ModelFormat
+from ..... import c_lib_wrap as C
+
+
+class InsightFaceRecognitionPreprocessor:
+ def __init__(self):
+ """Create a preprocessor for InsightFaceRecognition Model
+ """
+ self._preprocessor = C.vision.faceid.InsightFaceRecognitionPreprocessor(
+ )
+
+ def run(self, input_ims):
+ """Preprocess input images for InsightFaceRecognition Model
+
+        :param: input_ims: (list of numpy.ndarray)The input images
+        :return: list of FDTensor
+ """
+ return self._preprocessor.run(input_ims)
+
+ @property
+ def size(self):
+ """
+ Argument for image preprocessing step, tuple of (width, height),
+ decide the target size after resize, default (112, 112)
+ """
+ return self._preprocessor.size
+
+ @property
+ def alpha(self):
+ """
+ Argument for image preprocessing step, alpha values for normalization,
+ default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f};
+ """
+ return self._preprocessor.alpha
+
+ @property
+ def beta(self):
+ """
+ Argument for image preprocessing step, beta values for normalization,
+ default beta = {-1.f, -1.f, -1.f}
+ """
+ return self._preprocessor.beta
+
+ @property
+ def permute(self):
+ """
+ Argument for image preprocessing step, whether to swap the B and R channel,
+ such as BGR->RGB, default true.
+ """
+ return self._preprocessor.permute
+
+
+class InsightFaceRecognitionPostprocessor:
+ def __init__(self):
+ """Create a postprocessor for InsightFaceRecognition Model
+ """
+ self._postprocessor = C.vision.faceid.InsightFaceRecognitionPostprocessor(
+ )
+
+ def run(self, runtime_results):
+ """Postprocess the runtime results for PaddleClas Model
+
+ :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
+        :return: list of FaceRecognitionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
+ """
+ return self._postprocessor.run(runtime_results)
+
+ @property
+ def l2_normalize(self):
+ """
+        Whether to apply l2 normalize to the embedding values in postprocessing, default is False
+ """
+ return self._postprocessor.l2_normalize
+
+
+class InsightFaceRecognitionBase(FastDeployModel):
+ def __init__(self,
+ model_file,
+ params_file="",
+ runtime_option=None,
+ model_format=ModelFormat.ONNX):
+ """Load a InsightFaceRecognitionBase model exported by PaddleClas.
+
+ :param model_file: (str)Path of model file, e.g InsightFaceRecognitionBase/model.pdmodel
+ :param params_file: (str)Path of parameters file, e.g InsightFaceRecognitionBase/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+ :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+ :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
+ """
+ super(InsightFaceRecognitionBase, self).__init__(runtime_option)
+ self._model = C.vision.faceid.InsightFaceRecognitionBase(
+ model_file, params_file, self._runtime_option, model_format)
+ assert self.initialized, "InsightFaceRecognitionBase model initialize failed."
+
+ def predict(self, im):
+ """Detect an input image
+
+ :param im: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+ :return: DetectionResult
+ """
+
+ assert im is not None, "The input image data is None."
+ return self._model.predict(im)
+
+ def batch_predict(self, images):
+ """Detect a batch of input image list
+
+ :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+ :return list of DetectionResult
+ """
+
+ return self._model.batch_predict(images)
+
+ @property
+ def preprocessor(self):
+ """Get InsightFaceRecognitionPreprocessor object of the loaded model
+
+ :return InsightFaceRecognitionPreprocessor
+ """
+ return self._model.preprocessor
+
+ @property
+ def postprocessor(self):
+ """Get InsightFaceRecognitionPostprocessor object of the loaded model
+
+ :return InsightFaceRecognitionPostprocessor
+ """
+ return self._model.postprocessor
+
+
+class ArcFace(InsightFaceRecognitionBase):
+ def __init__(self,
+ model_file,
+ params_file="",
+ runtime_option=None,
+ model_format=ModelFormat.ONNX):
+ """Load a ArcFace model exported by PaddleClas.
+ :param model_file: (str)Path of model file, e.g ArcFace/model.pdmodel
+ :param params_file: (str)Path of parameters file, e.g ArcFace/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+ :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+ :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
+ """
+
+ super(InsightFaceRecognitionBase, self).__init__(runtime_option)
+
+ self._model = C.vision.faceid.ArcFace(
+ model_file, params_file, self._runtime_option, model_format)
+ assert self.initialized, "ArcFace model initialize failed."
+
+
+class CosFace(InsightFaceRecognitionBase):
+ def __init__(self,
+ model_file,
+ params_file="",
+ runtime_option=None,
+ model_format=ModelFormat.ONNX):
+ """Load a CosFace model exported by PaddleClas.
+ :param model_file: (str)Path of model file, e.g CosFace/model.pdmodel
+ :param params_file: (str)Path of parameters file, e.g CosFace/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+ :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+ :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
+ """
+
+ super(InsightFaceRecognitionBase, self).__init__(runtime_option)
+
+ self._model = C.vision.faceid.CosFace(
+ model_file, params_file, self._runtime_option, model_format)
+ assert self.initialized, "CosFace model initialize failed."
+
+
+class PartialFC(InsightFaceRecognitionBase):
+ def __init__(self,
+ model_file,
+ params_file="",
+ runtime_option=None,
+ model_format=ModelFormat.ONNX):
+ """Load a PartialFC model exported by PaddleClas.
+ :param model_file: (str)Path of model file, e.g PartialFC/model.pdmodel
+ :param params_file: (str)Path of parameters file, e.g PartialFC/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+ :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+ :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
+ """
+
+ super(InsightFaceRecognitionBase, self).__init__(runtime_option)
+
+ self._model = C.vision.faceid.PartialFC(
+ model_file, params_file, self._runtime_option, model_format)
+ assert self.initialized, "PartialFC model initialize failed."
+
+
+class VPL(InsightFaceRecognitionBase):
+ def __init__(self,
+ model_file,
+ params_file="",
+ runtime_option=None,
+ model_format=ModelFormat.ONNX):
+ """Load a VPL model exported by PaddleClas.
+ :param model_file: (str)Path of model file, e.g VPL/model.pdmodel
+ :param params_file: (str)Path of parameters file, e.g VPL/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+ :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+ :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
+ """
+
+ super(InsightFaceRecognitionBase, self).__init__(runtime_option)
+
+ self._model = C.vision.faceid.VPL(model_file, params_file,
+ self._runtime_option, model_format)
+ assert self.initialized, "VPL model initialize failed."
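+
+
+# A minimal usage sketch (comments only, not executed), assuming an ArcFace
+# model exported to ONNX as ./arcface.onnx and two aligned face crops; cosine
+# similarity between the two embeddings is a common way to compare faces:
+#
+#   import cv2
+#   import numpy as np
+#   import fastdeploy as fd
+#
+#   model = fd.vision.faceid.ArcFace("./arcface.onnx")
+#   emb_a = np.array(model.predict(cv2.imread("face_a.jpg")).embedding)
+#   emb_b = np.array(model.predict(cv2.imread("face_b.jpg")).embedding)
+#   cos_sim = emb_a.dot(emb_b) / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b))
+#   print(cos_sim)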
diff --git a/python/fastdeploy/vision/faceid/contrib/insightface_rec.py b/python/fastdeploy/vision/faceid/contrib/insightface_rec.py
deleted file mode 100644
index ea4aed81a..000000000
--- a/python/fastdeploy/vision/faceid/contrib/insightface_rec.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-import logging
-from .... import FastDeployModel, ModelFormat
-from .... import c_lib_wrap as C
-
-
-class InsightFaceRecognitionModel(FastDeployModel):
- def __init__(self,
- model_file,
- params_file="",
- runtime_option=None,
- model_format=ModelFormat.ONNX):
- """Load a InsightFace model exported by InsigtFace.
-
- :param model_file: (str)Path of model file, e.g ./arcface.onnx
- :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
- :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
- :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
- """
- # 调用基函数进行backend_option的初始化
- # 初始化后的option保存在self._runtime_option
- super(InsightFaceRecognitionModel, self).__init__(runtime_option)
-
- self._model = C.vision.faceid.InsightFaceRecognitionModel(
- model_file, params_file, self._runtime_option, model_format)
- # 通过self.initialized判断整个模型的初始化是否成功
- assert self.initialized, "InsightFaceRecognitionModel initialize failed."
-
- def predict(self, input_image):
- """ Predict the face recognition result for an input image
-
- :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
- :return: FaceRecognitionResult
- """
- return self._model.predict(input_image)
-
- # 一些跟InsightFaceRecognitionModel模型有关的属性封装
- # 多数是预处理相关,可通过修改如model.size = [112, 112]改变预处理时resize的大小(前提是模型支持)
- @property
- def size(self):
- """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
- """
- return self._model.size
-
- @property
- def alpha(self):
- """
- Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
- """
- return self._model.alpha
-
- @property
- def beta(self):
- """
- Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
- """
- return self._model.beta
-
- @property
- def swap_rb(self):
- """
- Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
- """
- return self._model.swap_rb
-
- @property
- def l2_normalize(self):
- """
- Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
- """
- return self._model.l2_normalize
-
- @size.setter
- def size(self, wh):
- assert isinstance(wh, (list, tuple)),\
- "The value to set `size` must be type of tuple or list."
- assert len(wh) == 2,\
- "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
- len(wh))
- self._model.size = wh
-
- @alpha.setter
- def alpha(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `alpha` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.alpha = value
-
- @beta.setter
- def beta(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `beta` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.beta = value
-
- @swap_rb.setter
- def swap_rb(self, value):
- assert isinstance(
- value, bool), "The value to set `swap_rb` must be type of bool."
- self._model.swap_rb = value
-
- @l2_normalize.setter
- def l2_normalize(self, value):
- assert isinstance(
- value,
- bool), "The value to set `l2_normalize` must be type of bool."
- self._model.l2_normalize = value
diff --git a/python/fastdeploy/vision/faceid/contrib/partial_fc.py b/python/fastdeploy/vision/faceid/contrib/partial_fc.py
deleted file mode 100644
index 0798af56e..000000000
--- a/python/fastdeploy/vision/faceid/contrib/partial_fc.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-import logging
-from .... import FastDeployModel, ModelFormat
-from .... import c_lib_wrap as C
-
-
-class PartialFC(FastDeployModel):
- def __init__(self,
- model_file,
- params_file="",
- runtime_option=None,
- model_format=ModelFormat.ONNX):
- """Load a PartialFC model exported by InsigtFace.
-
- :param model_file: (str)Path of model file, e.g ./partial_fc.onnx
- :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
- :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
- :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
- """
- # 调用基函数进行backend_option的初始化
- # 初始化后的option保存在self._runtime_option
- super(PartialFC, self).__init__(runtime_option)
-
- self._model = C.vision.faceid.PartialFC(
- model_file, params_file, self._runtime_option, model_format)
- # 通过self.initialized判断整个模型的初始化是否成功
- assert self.initialized, "PartialFC initialize failed."
-
- def predict(self, input_image):
- """ Predict the face recognition result for an input image
-
- :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
- :return: FaceRecognitionResult
- """
- return self._model.predict(input_image)
-
- # 一些跟模型有关的属性封装
- # 多数是预处理相关,可通过修改如model.size = [112, 112]改变预处理时resize的大小(前提是模型支持)
- @property
- def size(self):
- """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
- """
- return self._model.size
-
- @property
- def alpha(self):
- """
- Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
- """
- return self._model.alpha
-
- @property
- def beta(self):
- """
- Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
- """
- return self._model.beta
-
- @property
- def swap_rb(self):
- """
- Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
- """
- return self._model.swap_rb
-
- @property
- def l2_normalize(self):
- """
- Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
- """
- return self._model.l2_normalize
-
- @size.setter
- def size(self, wh):
- assert isinstance(wh, (list, tuple)),\
- "The value to set `size` must be type of tuple or list."
- assert len(wh) == 2,\
- "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
- len(wh))
- self._model.size = wh
-
- @alpha.setter
- def alpha(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `alpha` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.alpha = value
-
- @beta.setter
- def beta(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `beta` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.beta = value
-
- @swap_rb.setter
- def swap_rb(self, value):
- assert isinstance(
- value, bool), "The value to set `swap_rb` must be type of bool."
- self._model.swap_rb = value
-
- @l2_normalize.setter
- def l2_normalize(self, value):
- assert isinstance(
- value,
- bool), "The value to set `l2_normalize` must be type of bool."
- self._model.l2_normalize = value
diff --git a/python/fastdeploy/vision/faceid/contrib/vpl.py b/python/fastdeploy/vision/faceid/contrib/vpl.py
deleted file mode 100644
index 5db5b4e67..000000000
--- a/python/fastdeploy/vision/faceid/contrib/vpl.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-import logging
-from .... import FastDeployModel, ModelFormat
-from .... import c_lib_wrap as C
-
-
-class VPL(FastDeployModel):
- def __init__(self,
- model_file,
- params_file="",
- runtime_option=None,
- model_format=ModelFormat.ONNX):
- """Load a VPL model exported by InsigtFace.
-
- :param model_file: (str)Path of model file, e.g ./vpl.onnx
- :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
- :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
- :param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
- """
- # 调用基函数进行backend_option的初始化
- # 初始化后的option保存在self._runtime_option
- super(VPL, self).__init__(runtime_option)
-
- self._model = C.vision.faceid.VPL(model_file, params_file,
- self._runtime_option, model_format)
- # 通过self.initialized判断整个模型的初始化是否成功
- assert self.initialized, "VPL initialize failed."
-
- def predict(self, input_image):
- """ Predict the face recognition result for an input image
-
- :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
- :return: FaceRecognitionResult
- """
- return self._model.predict(input_image)
-
- # 一些跟模型有关的属性封装
- # 多数是预处理相关,可通过修改如model.size = [112, 112]改变预处理时resize的大小(前提是模型支持)
- @property
- def size(self):
- """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
- """
- return self._model.size
-
- @property
- def alpha(self):
- """
- Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f]
- """
- return self._model.alpha
-
- @property
- def beta(self):
- """
- Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
- """
- return self._model.beta
-
- @property
- def swap_rb(self):
- """
- Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
- """
- return self._model.swap_rb
-
- @property
- def l2_normalize(self):
- """
- Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False;
- """
- return self._model.l2_normalize
-
- @size.setter
- def size(self, wh):
- assert isinstance(wh, (list, tuple)),\
- "The value to set `size` must be type of tuple or list."
- assert len(wh) == 2,\
- "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
- len(wh))
- self._model.size = wh
-
- @alpha.setter
- def alpha(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `alpha` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.alpha = value
-
- @beta.setter
- def beta(self, value):
- assert isinstance(value, (list, tuple)),\
- "The value to set `beta` must be type of tuple or list."
- assert len(value) == 3,\
- "The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
- len(value))
- self._model.beta = value
-
- @swap_rb.setter
- def swap_rb(self, value):
- assert isinstance(
- value, bool), "The value to set `swap_rb` must be type of bool."
- self._model.swap_rb = value
-
- @l2_normalize.setter
- def l2_normalize(self, value):
- assert isinstance(
- value,
- bool), "The value to set `l2_normalize` must be type of bool."
- self._model.l2_normalize = value
diff --git a/python/fastdeploy/vision/utils.py b/python/fastdeploy/vision/utils.py
index d4cc30357..f4e06cf27 100644
--- a/python/fastdeploy/vision/utils.py
+++ b/python/fastdeploy/vision/utils.py
@@ -46,6 +46,81 @@ def classify_to_json(result):
return json.dumps(r_json)
+def keypoint_to_json(result):
+ r_json = {
+ "keypoints": result.keypoints,
+ "scores": result.scores,
+ "num_joints": result.num_joints,
+ }
+ return json.dumps(r_json)
+
+
+def ocr_to_json(result):
+ r_json = {
+ "boxes": result.boxes,
+ "text": result.text,
+ "rec_scores": result.rec_scores,
+ "cls_scores": result.cls_scores,
+ "cls_labels": result.cls_labels,
+ }
+ return json.dumps(r_json)
+
+
+def mot_to_json(result):
+ r_json = {
+ "boxes": result.boxes,
+ "ids": result.ids,
+ "scores": result.scores,
+ "class_ids": result.class_ids,
+ }
+ return json.dumps(r_json)
+
+
+def face_detection_to_json(result):
+ r_json = {
+ "boxes": result.boxes,
+ "landmarks": result.landmarks,
+ "scores": result.scores,
+ "landmarks_per_face": result.landmarks_per_face,
+ }
+ return json.dumps(r_json)
+
+
+def face_alignment_to_json(result):
+ r_json = {"landmarks": result.landmarks, }
+ return json.dumps(r_json)
+
+
+def face_recognition_to_json(result):
+ r_json = {"embedding": result.embedding, }
+ return json.dumps(r_json)
+
+
+def segmentation_to_json(result):
+ r_json = {
+ "label_map": result.label_map,
+ "score_map": result.score_map,
+ "shape": result.shape,
+ "contain_score_map": result.contain_score_map,
+ }
+ return json.dumps(r_json)
+
+
+def matting_to_json(result):
+ r_json = {
+ "alpha": result.alpha,
+ "foreground": result.foreground,
+ "shape": result.shape,
+ "contain_foreground": result.contain_foreground,
+ }
+ return json.dumps(r_json)
+
+
+def head_pose_to_json(result):
+ r_json = {"euler_angles": result.euler_angles, }
+ return json.dumps(r_json)
+
+
def fd_result_to_json(result):
if isinstance(result, list):
r_list = []
@@ -58,7 +133,124 @@ def fd_result_to_json(result):
return mask_to_json(result)
elif isinstance(result, C.vision.ClassifyResult):
return classify_to_json(result)
+ elif isinstance(result, C.vision.KeyPointDetectionResult):
+ return keypoint_to_json(result)
+ elif isinstance(result, C.vision.OCRResult):
+ return ocr_to_json(result)
+ elif isinstance(result, C.vision.MOTResult):
+ return mot_to_json(result)
+ elif isinstance(result, C.vision.FaceDetectionResult):
+ return face_detection_to_json(result)
+ elif isinstance(result, C.vision.FaceAlignmentResult):
+ return face_alignment_to_json(result)
+ elif isinstance(result, C.vision.FaceRecognitionResult):
+ return face_recognition_to_json(result)
+ elif isinstance(result, C.vision.SegmentationResult):
+ return segmentation_to_json(result)
+ elif isinstance(result, C.vision.MattingResult):
+ return matting_to_json(result)
+ elif isinstance(result, C.vision.HeadPoseResult):
+ return head_pose_to_json(result)
else:
assert False, "{} Conversion to JSON format is not supported".format(
type(result))
return {}
+
+
+def json_to_mask(result):
+ mask = C.vision.Mask()
+ mask.data = result['data']
+ mask.shape = result['shape']
+ return mask
+
+
+def json_to_detection(result):
+ masks = []
+ for mask in result['masks']:
+ masks.append(json_to_mask(json.loads(mask)))
+ det_result = C.vision.DetectionResult()
+ det_result.boxes = result['boxes']
+ det_result.scores = result['scores']
+ det_result.label_ids = result['label_ids']
+ det_result.masks = masks
+ det_result.contain_masks = result['contain_masks']
+ return det_result
+
+
+def json_to_classify(result):
+ cls_result = C.vision.ClassifyResult()
+ cls_result.label_ids = result['label_ids']
+ cls_result.scores = result['scores']
+ return cls_result
+
+
+def json_to_keypoint(result):
+ kp_result = C.vision.KeyPointDetectionResult()
+ kp_result.keypoints = result['keypoints']
+ kp_result.scores = result['scores']
+ kp_result.num_joints = result['num_joints']
+ return kp_result
+
+
+def json_to_ocr(result):
+ ocr_result = C.vision.OCRResult()
+ ocr_result.boxes = result['boxes']
+ ocr_result.text = result['text']
+ ocr_result.rec_scores = result['rec_scores']
+ ocr_result.cls_scores = result['cls_scores']
+ ocr_result.cls_labels = result['cls_labels']
+ return ocr_result
+
+
+def json_to_mot(result):
+ mot_result = C.vision.MOTResult()
+ mot_result.boxes = result['boxes']
+ mot_result.ids = result['ids']
+ mot_result.scores = result['scores']
+ mot_result.class_ids = result['class_ids']
+ return mot_result
+
+
+def json_to_face_detection(result):
+ face_result = C.vision.FaceDetectionResult()
+ face_result.boxes = result['boxes']
+ face_result.landmarks = result['landmarks']
+ face_result.scores = result['scores']
+ face_result.landmarks_per_face = result['landmarks_per_face']
+ return face_result
+
+
+def json_to_face_alignment(result):
+ face_result = C.vision.FaceAlignmentResult()
+ face_result.landmarks = result['landmarks']
+ return face_result
+
+
+def json_to_face_recognition(result):
+ face_result = C.vision.FaceRecognitionResult()
+ face_result.embedding = result['embedding']
+ return face_result
+
+
+def json_to_segmentation(result):
+ seg_result = C.vision.SegmentationResult()
+ seg_result.label_map = result['label_map']
+ seg_result.score_map = result['score_map']
+ seg_result.shape = result['shape']
+ seg_result.contain_score_map = result['contain_score_map']
+ return seg_result
+
+
+def json_to_matting(result):
+ matting_result = C.vision.MattingResult()
+ matting_result.alpha = result['alpha']
+ matting_result.foreground = result['foreground']
+ matting_result.shape = result['shape']
+ matting_result.contain_foreground = result['contain_foreground']
+ return matting_result
+
+
+def json_to_head_pose(result):
+ hp_result = C.vision.HeadPoseResult()
+ hp_result.euler_angles = result['euler_angles']
+ return hp_result
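+
+
+# A minimal round-trip sketch (comments only, not executed): `det` stands for a
+# DetectionResult obtained from some model.predict() call.
+#
+#   import json
+#   payload = fd_result_to_json(det)                   # JSON string
+#   restored = json_to_detection(json.loads(payload))  # back to DetectionResult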
diff --git a/python/requirements.txt b/python/requirements.txt
index 2e5fa136c..3c463f4bb 100644
--- a/python/requirements.txt
+++ b/python/requirements.txt
@@ -3,5 +3,6 @@ requests
tqdm
numpy
opencv-python
-fastdeploy-tools==0.0.1
+fastdeploy-tools>=0.0.1
pyyaml
+fastapi
diff --git a/python/setup.py b/python/setup.py
index e4db98211..108c3db94 100755
--- a/python/setup.py
+++ b/python/setup.py
@@ -72,7 +72,7 @@ setup_configs["ENABLE_FLYCV"] = os.getenv("ENABLE_FLYCV", "OFF")
setup_configs["ENABLE_TEXT"] = os.getenv("ENABLE_TEXT", "OFF")
setup_configs["WITH_GPU"] = os.getenv("WITH_GPU", "OFF")
setup_configs["WITH_IPU"] = os.getenv("WITH_IPU", "OFF")
-setup_configs["WITH_XPU"] = os.getenv("WITH_XPU", "OFF")
+setup_configs["WITH_KUNLUNXIN"] = os.getenv("WITH_KUNLUNXIN", "OFF")
setup_configs["BUILD_ON_JETSON"] = os.getenv("BUILD_ON_JETSON", "OFF")
setup_configs["TRT_DIRECTORY"] = os.getenv("TRT_DIRECTORY", "UNDEFINED")
setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY",
diff --git a/serving/Dockerfile b/serving/Dockerfile
index 139d2cd0e..d03611f72 100644
--- a/serving/Dockerfile
+++ b/serving/Dockerfile
@@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+ARG http_proxy
+ARG https_proxy
+
FROM nvcr.io/nvidia/tritonserver:21.10-py3 as full
FROM nvcr.io/nvidia/tritonserver:21.10-py3-min
@@ -24,7 +27,10 @@ COPY serving/TensorRT-8.4.1.5 /opt/TensorRT-8.4.1.5
ENV TZ=Asia/Shanghai \
DEBIAN_FRONTEND=noninteractive \
- DCGM_VERSION=2.2.9
+ DCGM_VERSION=2.2.9 \
+ http_proxy=$http_proxy \
+    https_proxy=$https_proxy
+
RUN apt-get update \
&& apt-key del 7fa2af80 \
&& wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.0-1_all.deb \
@@ -46,3 +52,6 @@ COPY build/fastdeploy_install /opt/fastdeploy/
ENV LD_LIBRARY_PATH="/opt/TensorRT-8.4.1.5/lib/:/opt/fastdeploy/lib:/opt/fastdeploy/third_libs/install/onnxruntime/lib:/opt/fastdeploy/third_libs/install/paddle2onnx/lib:/opt/fastdeploy/third_libs/install/tensorrt/lib:/opt/fastdeploy/third_libs/install/paddle_inference/paddle/lib:/opt/fastdeploy/third_libs/install/paddle_inference/third_party/install/mkldnn/lib:/opt/fastdeploy/third_libs/install/paddle_inference/third_party/install/mklml/lib:/opt/fastdeploy/third_libs/install/openvino/runtime/lib:$LD_LIBRARY_PATH"
ENV PATH="/opt/tritonserver/bin:$PATH"
+# unset proxy
+ENV http_proxy=
+ENV https_proxy=
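+
+# Example build behind a proxy (proxy URL and image tag are placeholders),
+# run from the repository root:
+#   docker build --build-arg http_proxy=http://proxy.example.com:8080 \
+#     --build-arg https_proxy=http://proxy.example.com:8080 \
+#     -f serving/Dockerfile -t fastdeploy-serving:gpu .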
diff --git a/serving/Dockerfile_cpu b/serving/Dockerfile_cpu
index 7b07efe40..c16530838 100644
--- a/serving/Dockerfile_cpu
+++ b/serving/Dockerfile_cpu
@@ -12,10 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+ARG http_proxy
+ARG https_proxy
+
FROM paddlepaddle/fastdeploy:21.10-cpu-only-min
ENV TZ=Asia/Shanghai \
- DEBIAN_FRONTEND=noninteractive
+ DEBIAN_FRONTEND=noninteractive \
+ http_proxy=$http_proxy \
+    https_proxy=$https_proxy
RUN apt-get update && apt-get install -y --no-install-recommends apt-utils libgomp1 ffmpeg libsm6 libxext6 \
&& python3 -m pip install -U pip \
@@ -30,3 +35,6 @@ COPY build/fastdeploy_install /opt/fastdeploy/
RUN mv /opt/tritonserver/bin/tritonserver /opt/tritonserver/bin/fastdeployserver
ENV LD_LIBRARY_PATH="/opt/fastdeploy/lib:/opt/fastdeploy/third_libs/install/onnxruntime/lib:/opt/fastdeploy/third_libs/install/paddle2onnx/lib:/opt/fastdeploy/third_libs/install/paddle_inference/paddle/lib:/opt/fastdeploy/third_libs/install/paddle_inference/third_party/install/mkldnn/lib:/opt/fastdeploy/third_libs/install/paddle_inference/third_party/install/mklml/lib:/opt/fastdeploy/third_libs/install/openvino/runtime/lib:$LD_LIBRARY_PATH"
+# unset proxy
+ENV http_proxy=
+ENV https_proxy=
diff --git a/tests/models/test_fastestdet.py b/tests/models/test_fastestdet.py
new file mode 100644
index 000000000..0934b173a
--- /dev/null
+++ b/tests/models/test_fastestdet.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from fastdeploy import ModelFormat
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+import runtime_config as rc
+
+
+def test_detection_fastestdet():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/FastestDet.onnx"
+ input_url1 = "https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg"
+ input_url2 = "https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000570688.jpg"
+ result_url1 = "https://bj.bcebos.com/paddlehub/fastdeploy/fastestdet_result1.pkl"
+ fd.download(model_url, "resources")
+ fd.download(input_url1, "resources")
+ fd.download(input_url2, "resources")
+ fd.download(result_url1, "resources")
+
+ model_file = "resources/FastestDet.onnx"
+ model = fd.vision.detection.FastestDet(
+ model_file, runtime_option=rc.test_option)
+
+ with open("resources/fastestdet_result1.pkl", "rb") as f:
+ expect1 = pickle.load(f)
+
+ # compare diff
+ im1 = cv2.imread("./resources/000000014439.jpg")
+ print(expect1)
+ for i in range(3):
+ # test single predict
+ result1 = model.predict(im1)
+
+ diff_boxes_1 = np.fabs(
+ np.array(result1.boxes) - np.array(expect1["boxes"]))
+
+ diff_label_1 = np.fabs(
+ np.array(result1.label_ids) - np.array(expect1["label_ids"]))
+ diff_scores_1 = np.fabs(
+ np.array(result1.scores) - np.array(expect1["scores"]))
+
+ print(diff_boxes_1.max(), diff_boxes_1.mean())
+ assert diff_boxes_1.max(
+ ) < 1e-04, "There's difference in detection boxes 1."
+ assert diff_label_1.max(
+ ) < 1e-04, "There's difference in detection label 1."
+ assert diff_scores_1.max(
+ ) < 1e-05, "There's difference in detection score 1."
+
+def test_detection_fastestdet_runtime():
+ model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/FastestDet.onnx"
+ input_url1 = "https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg"
+ result_url1 = "https://bj.bcebos.com/paddlehub/fastdeploy/fastestdet_result1.pkl"
+ fd.download(model_url, "resources")
+ fd.download(input_url1, "resources")
+ fd.download(result_url1, "resources")
+
+ model_file = "resources/FastestDet.onnx"
+
+ preprocessor = fd.vision.detection.FastestDetPreprocessor()
+ postprocessor = fd.vision.detection.FastestDetPostprocessor()
+
+ rc.test_option.set_model_path(model_file, model_format=ModelFormat.ONNX)
+ rc.test_option.use_openvino_backend()
+ runtime = fd.Runtime(rc.test_option)
+
+ with open("resources/fastestdet_result1.pkl", "rb") as f:
+ expect1 = pickle.load(f)
+
+ # compare diff
+ im1 = cv2.imread("./resources/000000014439.jpg")
+
+ for i in range(3):
+ # test runtime
+ input_tensors, ims_info = preprocessor.run([im1.copy()])
+ output_tensors = runtime.infer({"input.1": input_tensors[0]})
+ results = postprocessor.run(output_tensors, ims_info)
+ result1 = results[0]
+
+ diff_boxes_1 = np.fabs(
+ np.array(result1.boxes) - np.array(expect1["boxes"]))
+ diff_label_1 = np.fabs(
+ np.array(result1.label_ids) - np.array(expect1["label_ids"]))
+ diff_scores_1 = np.fabs(
+ np.array(result1.scores) - np.array(expect1["scores"]))
+
+ assert diff_boxes_1.max(
+ ) < 1e-04, "There's difference in detection boxes 1."
+ assert diff_label_1.max(
+ ) < 1e-04, "There's difference in detection label 1."
+ assert diff_scores_1.max(
+ ) < 1e-05, "There's difference in detection score 1."
+
+
+if __name__ == "__main__":
+ test_detection_fastestdet()
+ test_detection_fastestdet_runtime()
\ No newline at end of file
diff --git a/tools/common_tools/common_tools.py b/tools/common_tools/common_tools.py
index d945f905a..3a0f1f6c6 100755
--- a/tools/common_tools/common_tools.py
+++ b/tools/common_tools/common_tools.py
@@ -1,10 +1,12 @@
import argparse
import ast
+import uvicorn
def argsparser():
parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument('tools', choices=['compress', 'convert'])
+ parser.add_argument(
+ 'tools', choices=['compress', 'convert', 'simple_serving'])
## argumentments for auto compression
parser.add_argument(
'--config_path',
@@ -69,6 +71,19 @@ def argsparser():
type=ast.literal_eval,
default=False,
help="Turn on code optimization")
+ ## arguments for simple serving
+ parser.add_argument(
+ "--app",
+ type=str,
+ default="server:app",
+ help="Simple serving app string")
+ parser.add_argument(
+ "--host",
+ type=str,
+ default="127.0.0.1",
+ help="Simple serving host IP address")
+ parser.add_argument(
+ "--port", type=int, default=8000, help="Simple serving host port")
## arguments for other tools
return parser
@@ -116,6 +131,8 @@ def main():
except ImportError:
print(
"Model convert failed! Please check if you have installed it!")
+ if args.tools == "simple_serving":
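+        # Example invocation from the command line (the app module "server:app"
+        # is only the default placeholder; point it at your own FastAPI app):
+        #   fastdeploy simple_serving --app server:app --host 0.0.0.0 --port 8000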
+ uvicorn.run(args.app, host=args.host, port=args.port, app_dir='.')
if __name__ == '__main__':
diff --git a/tools/rknpu2/config/picodet_s_416_coco_lcnet.yaml b/tools/rknpu2/config/picodet_s_416_coco_lcnet.yaml
new file mode 100644
index 000000000..743bb0483
--- /dev/null
+++ b/tools/rknpu2/config/picodet_s_416_coco_lcnet.yaml
@@ -0,0 +1,15 @@
+mean:
+ -
+ - 128.5
+ - 128.5
+ - 128.5
+std:
+ -
+ - 128.5
+ - 128.5
+ - 128.5
+model_path: ./picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet.onnx
+outputs_nodes:
+do_quantization: False
+dataset:
+output_folder: "./Portrait_PP_HumanSegV2_Lite_256x144_infer"
diff --git a/tools/setup.py b/tools/setup.py
index de312e02e..20ec6489a 100644
--- a/tools/setup.py
+++ b/tools/setup.py
@@ -3,13 +3,16 @@ import setuptools
long_description = "fastdeploy-tools is a toolkit for FastDeploy, including auto compression .etc.\n\n"
long_description += "Usage of auto compression: fastdeploy compress --config_path=./yolov7_tiny_qat_dis.yaml --method='QAT' --save_dir='./v7_qat_outmodel/' \n"
+install_requires = ['uvicorn==0.16.0']
+
setuptools.setup(
name="fastdeploy-tools", # name of package
- version="0.0.1", #version of package
+ version="0.0.2", #version of package
description="A toolkit for FastDeploy.",
long_description=long_description,
long_description_content_type="text/plain",
packages=setuptools.find_packages(),
+ install_requires=install_requires,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",