Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-04 16:22:57 +08:00)
[Model] add PFLD model (#433)
* support face alignment PFLD
* add PFLD demo
* fixed FaceAlignmentResult
* fixed bugs
* fixed img size
* fixed readme
* deal with comments
* fixed readme
* add pfld testcase
* update infer.py
* add gflags for example
* update c++ readme
* add gflags in example
* fixed for ci
* fixed gflags.cmake
* deal with comments
* update infer demo

Co-authored-by: Jason <jiangjiajun@baidu.com>
CMakeLists.txt · 10 lines changed · Normal file → Executable file

@@ -487,13 +487,16 @@ if(BUILD_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples)
   if(NOT EXECUTABLE_OUTPUT_PATH STREQUAL ${CMAKE_CURRENT_BINARY_DIR}/bin)
     set(EXECUTABLE_OUTPUT_PATH ${CMAKE_CURRENT_BINARY_DIR}/bin)
   endif()
+  include(${PROJECT_SOURCE_DIR}/cmake/gflags.cmake)
   add_subdirectory(examples)
 endif()

 if (WITH_TESTING AND EXISTS ${PROJECT_SOURCE_DIR}/tests)
   add_definitions(-DWITH_TESTING)
   include(${PROJECT_SOURCE_DIR}/cmake/gtest.cmake)
-  include(${PROJECT_SOURCE_DIR}/cmake/gflags.cmake)
+  if(NOT BUILD_EXAMPLES)
+    include(${PROJECT_SOURCE_DIR}/cmake/gflags.cmake)
+  endif()
   include(${PROJECT_SOURCE_DIR}/cmake/glog.cmake)
   add_subdirectory(tests)
 endif()

@@ -561,6 +564,11 @@ install(
   DESTINATION ${CMAKE_INSTALL_PREFIX}
 )

+install(
+  FILES ${PROJECT_SOURCE_DIR}/cmake/gflags.cmake
+  DESTINATION ${CMAKE_INSTALL_PREFIX}/utils
+)
+
 if(NOT WIN32)
   install(
     FILES ${PROJECT_SOURCE_DIR}/scripts/fastdeploy_init.sh
cmake/gflags.cmake

@@ -14,8 +14,17 @@

 INCLUDE(ExternalProject)

-SET(GFLAGS_PREFIX_DIR ${THIRD_PARTY_PATH}/gflags)
-SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
+if(NOT GIT_URL)
+  SET(GIT_URL "https://github.com")
+endif()
+if(THIRD_PARTY_PATH)
+  SET(GFLAGS_PREFIX_DIR ${THIRD_PARTY_PATH}/gflags)
+  SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
+else()
+  # For example cmake
+  SET(GFLAGS_PREFIX_DIR ${FASTDEPLOY_INSTALL_DIR}/installed_fastdeploy/cmake)
+  SET(GFLAGS_INSTALL_DIR ${FASTDEPLOY_INSTALL_DIR}/installed_fastdeploy/cmake/gflags)
+endif()
 SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
 set(GFLAGS_REPOSITORY ${GIT_URL}/gflags/gflags.git)
 set(GFLAGS_TAG "v2.2.2")
docs/api/vision_results/README.md · 9 lines changed · Normal file → Executable file

@@ -7,9 +7,10 @@
 | ClassifyResult | [C++/Python docs](./classification_result.md) | Image classification result | ResNet50, MobileNetV3, etc. |
 | SegmentationResult | [C++/Python docs](./segmentation_result.md) | Image segmentation result | PP-HumanSeg, PP-LiteSeg, etc. |
 | DetectionResult | [C++/Python docs](./detection_result.md) | Object detection result | PP-YOLOE, YOLOv7 series, etc. |
-| FaceDetectionResult | [C++/Python docs](./face_detection_result.md) | Object detection result | SCRFD, RetinaFace series, etc. |
+| FaceDetectionResult | [C++/Python docs](./face_detection_result.md) | Face detection result | SCRFD, RetinaFace series, etc. |
+| FaceAlignmentResult | [C++/Python docs](./face_alignment_result.md) | Face alignment (facial landmark detection) result | PFLD series, etc. |
 | KeyPointDetectionResult | [C++/Python docs](./keypointdetection_result.md) | Keypoint detection result | PP-Tinypose series, etc. |
-| FaceRecognitionResult | [C++/Python docs](./face_recognition_result.md) | Object detection result | ArcFace, CosFace series, etc. |
+| FaceRecognitionResult | [C++/Python docs](./face_recognition_result.md) | Face recognition result | ArcFace, CosFace series, etc. |
-| MattingResult | [C++/Python docs](./matting_result.md) | Object detection result | MODNet series, etc. |
+| MattingResult | [C++/Python docs](./matting_result.md) | Image/video matting result | MODNet, RVM series, etc. |
 | OCRResult | [C++/Python docs](./ocr_result.md) | Text box detection, classification and text recognition result | OCR series models, etc. |
 | MOTResult | [C++/Python docs](./mot_result.md) | Multi-object tracking result | pptracking series models, etc. |
docs/api/vision_results/face_alignment_result.md · 25 lines · new file

@@ -0,0 +1,25 @@
# FaceAlignmentResult: Face Alignment (Facial Landmark Detection) Result

The FaceAlignmentResult code is defined in `fastdeploy/vision/common/result.h` and describes the detected face landmarks.

## C++ Definition

`fastdeploy::vision::FaceAlignmentResult`

```c++
struct FaceAlignmentResult {
  std::vector<std::array<float, 2>> landmarks;
  void Clear();
  std::string Str();
};
```

- **landmarks**: member variable, all the landmarks detected in a single face image
- **Clear()**: member function, clears the results stored in the structure
- **Str()**: member function, outputs the information in the structure as a string (for debugging)

## Python Definition

`fastdeploy.vision.FaceAlignmentResult`

- **landmarks**(list of list(float)): member variable, all the landmarks detected in a single face image
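A minimal Python sketch of consuming this structure, assuming `result` is a `FaceAlignmentResult` returned by a face alignment model such as `fd.vision.facealign.PFLD` and `im` is the corresponding BGR image (both are placeholder names, not part of the API):

```python
import cv2

# Each entry of result.landmarks is an [x, y] pair of float pixel coordinates
# in the input image (e.g. 106 points for the PFLD models).
print(result)  # uses Str()/__str__ to dump the landmarks for debugging

for x, y in result.landmarks:
    cv2.circle(im, (int(x), int(y)), 2, (0, 255, 0), -1)  # draw each landmark

cv2.imwrite("landmarks_vis.jpg", im)
```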
docs/cn/build_and_install/download_prebuilt_libraries.md · 34 lines changed · Normal file → Executable file

@@ -20,6 +20,11 @@ Install the Release version (latest 0.4.0)
 pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
 ```
+
+Install the Develop version (Nightly build)
+```bash
+pip install fastdeploy-gpu-python==0.0.0 -f https://www.paddlepaddle.org.cn/whl/fastdeploy_nightly_build.html
+```
+
 We recommend using Conda to configure the development environment
 ```bash
 conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2

@@ -34,6 +39,13 @@ Release version (latest 0.4.0)
 | Linux x64 | [fastdeploy-linux-x64-gpu-0.4.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-0.4.0.tgz) | Built with g++ 8.2, CUDA 11.2, cuDNN 8.2 |
 | Windows x64 | [fastdeploy-win-x64-gpu-0.4.0.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.4.0.zip) | Built with Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
+
+Develop version (Nightly build)
+
+| Platform | File | Description |
+| :--- | :--- | :---- |
+| Linux x64 | [fastdeploy-linux-x64-gpu-0.0.0.tgz](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-gpu-0.0.0.tgz) | Built with g++ 8.2, CUDA 11.2, cuDNN 8.2 |
+| Windows x64 | [fastdeploy-win-x64-gpu-0.0.0.zip](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-win-x64-gpu-0.0.0.zip) | Built with Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |

 ## CPU Deployment Environment

 ### Environment Requirements

@@ -49,6 +61,11 @@ Install the Release version (latest 0.4.0)
 pip install fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
 ```
+
+Install the Develop version (Nightly build)
+```bash
+pip install fastdeploy-python==0.0.0 -f https://www.paddlepaddle.org.cn/whl/fastdeploy_nightly_build.html
+```
+
 ## C++ SDK Installation

 Release version (latest 0.4.0; 0.4.0 pre-release for Android)

@@ -57,7 +74,18 @@ Release version (latest 0.4.0; 0.4.0 pre-release for Android)
 | :--- | :--- | :---- |
 | Linux x64 | [fastdeploy-linux-x64-0.4.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-0.4.0.tgz) | Built with g++ 8.2 |
 | Windows x64 | [fastdeploy-win-x64-0.4.0.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-0.4.0.zip) | Built with Visual Studio 16 2019 |
-| Mac OSX x64 | [fastdeploy-osx-x86_64-0.3.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-0.4.0.tgz) | Built with clang++ 10.0.0 |
+| Mac OSX x64 | [fastdeploy-osx-x86_64-0.4.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-0.4.0.tgz) | Built with clang++ 10.0.0 |
-| Mac OSX arm64 | [fastdeploy-osx-arm64-0.3.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-0.4.0.tgz) | Built with clang++ 13.0.0 |
+| Mac OSX arm64 | [fastdeploy-osx-arm64-0.4.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-0.4.0.tgz) | Built with clang++ 13.0.0 |
-| Linux aarch64 | [fastdeploy-linux-aarch64-0.3.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-0.4.0.tgz) | Built with g++ 6.3.0 |
+| Linux aarch64 | [fastdeploy-linux-aarch64-0.4.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-0.4.0.tgz) | Built with g++ 6.3.0 |
 | Android armv7&v8 | [fastdeploy-android-0.4.0-shared.tgz](https://bj.bcebos.com/fastdeploy/release/android/fastdeploy-android-0.4.0-shared.tgz) | Built with NDK 25 and clang++, supports arm64-v8a and armeabi-v7a |
+
+Develop version (Nightly build)
+
+| Platform | File | Description |
+| :--- | :--- | :---- |
+| Linux x64 | [fastdeploy-linux-x64-0.0.0.tgz](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-0.0.0.tgz) | Built with g++ 8.2 |
+| Windows x64 | [fastdeploy-win-x64-0.0.0.zip](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-win-x64-0.0.0.zip) | Built with Visual Studio 16 2019 |
+| Mac OSX x64 | - | - |
+| Mac OSX arm64 | [fastdeploy-osx-arm64-0.0.0.tgz](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-osx-arm64-0.0.0.tgz) | Built with clang++ 13.0.0 |
+| Linux aarch64 | - | - |
+| Android armv7&v8 | - | - |
docs/en/build_and_install/download_prebuilt_libraries.md · Normal file → Executable file

@@ -16,12 +16,18 @@
 
 ### Python SDK
 
-Install the released version(the newest 0.2.1 for now)
+Install the released version(the newest 0.4.0 for now)
 
 ```
 pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
 ```
+
+Install the Develop version(Nightly build)
+
+```bash
+pip install fastdeploy-gpu-python==0.0.0 -f https://www.paddlepaddle.org.cn/whl/fastdeploy_nightly_build.html
+```
+
 We recommend users to use Conda to configure the development environment.
 
 ```

@@ -30,12 +36,19 @@ conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2
 
 ### C++ SDK
 
-Install the released version(Latest 0.2.1)
+Install the released version(Latest 0.4.0)
 
 | Platform | File | Description |
 |:----------- |:---- |:----- |
-| Linux x64 | [fastdeploy-linux-x64-gpu-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-0.2.1.tgz) | Compiled from g++ 8.2, CUDA 11.2, cuDNN 8.2 |
+| Linux x64 | [fastdeploy-linux-x64-gpu-0.4.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-0.4.0.tgz) | Compiled from g++ 8.2, CUDA 11.2, cuDNN 8.2 |
-| Windows x64 | [fastdeploy-win-x64-gpu-0.2.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.2.1.zip) | Compiled from Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
+| Windows x64 | [fastdeploy-win-x64-gpu-0.4.0.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-gpu-0.4.0.zip) | Compiled from Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
+
+Install the Develop version(Nightly build)
+
+| Platform | File | Description |
+|:----------- |:---- |:----- |
+| Linux x64 | [fastdeploy-linux-x64-gpu-0.0.0.tgz](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-gpu-0.0.0.tgz) | Compiled from g++ 8.2, CUDA 11.2, cuDNN 8.2 |
+| Windows x64 | [fastdeploy-win-x64-gpu-0.0.0.zip](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-win-x64-gpu-0.0.0.zip) | Compiled from Visual Studio 16 2019, CUDA 11.2, cuDNN 8.2 |
+
 ## CPU Deployment Environment
 

@@ -48,20 +61,38 @@ FastDeploy supports computer vision, text and NLP model deployment on CPU with Paddle Lite
 
 ### Python SDK
 
-Install the released version(Latest 0.2.1 for now)
+Install the released version(Latest 0.4.0 for now)
 
 ```
 pip install fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
 ```
+
+Install the Develop version(Nightly build)
+
+```bash
+pip install fastdeploy-python==0.0.0 -f https://www.paddlepaddle.org.cn/whl/fastdeploy_nightly_build.html
+```
+
 ### C++ SDK
 
-Install the released version(Latest 0.2.1 for now)
+Install the released version(Latest 0.4.0 for now)
 
 | Platform | File | Description |
 |:------------- |:---- |:----- |
-| Linux x64 | [fastdeploy-linux-x64-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-0.2.1.tgz) | Compiled from g++ 8.2 |
+| Linux x64 | [fastdeploy-linux-x64-0.4.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-0.4.0.tgz) | Compiled from g++ 8.2 |
-| Windows x64 | [fastdeploy-win-x64-0.2.1.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-0.2.1.zip) | Compiled from Visual Studio 16 |
+| Windows x64 | [fastdeploy-win-x64-0.4.0.zip](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-win-x64-0.4.0.zip) | Compiled from Visual Studio 16 |
-| Mac OSX x64 | [fastdeploy-osx-x86_64-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-0.2.1.tgz) | - |
+| Mac OSX x64 | [fastdeploy-osx-x86_64-0.4.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-x86_64-0.4.0.tgz) | - |
-| Mac OSX arm64 | [fastdeploy-osx-arm64-0.2.1.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-0.2.1.tgz) | - |
+| Mac OSX arm64 | [fastdeploy-osx-arm64-0.4.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-osx-arm64-0.4.0.tgz) | - |
 | Linux aarch64 | [fastdeploy-linux-aarch64-0.2.0.tgz](https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-aarch64-0.2.0.tgz) | Compiled from g++ 6.3.0 |
+| Android armv7&v8 | [fastdeploy-android-0.4.0-shared.tgz](https://bj.bcebos.com/fastdeploy/release/android/fastdeploy-android-0.4.0-shared.tgz) | Compiled from NDK 25 and clang++, support arm64-v8a and armeabi-v7a |
+
+Install the Develop version(Nightly build)
+
+| Platform | File | Description |
+|:------------- |:---- |:----- |
+| Linux x64 | [fastdeploy-linux-x64-0.0.0.tgz](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-linux-x64-0.0.0.tgz) | Compiled from g++ 8.2 |
+| Windows x64 | [fastdeploy-win-x64-0.0.0.zip](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-win-x64-0.0.0.zip) | Compiled from Visual Studio 16 |
+| Mac OSX x64 | - | - |
+| Mac OSX arm64 | [fastdeploy-osx-arm64-0.0.0.tgz](https://fastdeploy.bj.bcebos.com/dev/cpp/fastdeploy-osx-arm64-0.0.0.tgz) | - |
+| Linux aarch64 | - | - |
+| Android armv7&v8 | - | - |
@@ -54,6 +54,9 @@ function(add_fastdeploy_executable FIELD CC_FILE)
   if(EXISTS ${TEMP_TARGET_FILE} AND TARGET fastdeploy)
     add_executable(${TEMP_TARGET_NAME} ${TEMP_TARGET_FILE})
     target_link_libraries(${TEMP_TARGET_NAME} PUBLIC fastdeploy)
+    if(TARGET gflags)
+      target_link_libraries(${TEMP_TARGET_NAME} PRIVATE gflags pthread)
+    endif()
     config_fastdeploy_executable_link_flags(${TEMP_TARGET_NAME})
     math(EXPR _EXAMPLES_NUM "${EXAMPLES_NUM} + 1")
     set(EXAMPLES_NUM ${_EXAMPLES_NUM} PARENT_SCOPE)
examples/vision/facealign/README.md · 7 lines · new file

@@ -0,0 +1,7 @@
# Face Alignment (Facial Landmark Detection) Models

FastDeploy currently supports the deployment of the following face alignment (landmark detection) models

| Model | Description | Model Format | Version |
| :--- | :--- | :------- | :--- |
| [Hsintao/pfld_106_face_landmarks](./pfld) | PFLD series models | ONNX | [CommitID:e150195](https://github.com/Hsintao/pfld_106_face_landmarks/commit/e150195) |
examples/vision/facealign/pfld/README.md · 26 lines · new file

@@ -0,0 +1,26 @@
# PFLD Model Deployment

## Model Version

- [PFLD](https://github.com/Hsintao/pfld_106_face_landmarks/commit/e150195)

## List of Supported Models

FastDeploy currently supports the deployment of the following models

- [PFLD models](https://github.com/Hsintao/pfld_106_face_landmarks)

## Download Pre-trained Models

For developers' convenience, the exported PFLD models are provided below and can be downloaded and used directly.

| Model | Size | Accuracy | Note |
|:---------------------------------------------------------------- |:----- |:----- | :------ |
| [pfld-106-v2.onnx](https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-v2.onnx) | 4.9MB | - | - |
| [pfld-106-v3.onnx](https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-v3.onnx) | 5.6MB | - | - |
| [pfld-106-lite.onnx](https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx) | 1.1MB | - | - |

## Detailed Deployment Documents

- [Python deployment](python)
- [C++ deployment](cpp)
examples/vision/facealign/pfld/cpp/CMakeLists.txt · 14 lines · new file

@@ -0,0 +1,14 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)

# Path of the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/utils/gflags.cmake)
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

# Add FastDeploy dependency headers
include_directories(${FASTDEPLOY_INCS})

add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Link the FastDeploy library dependencies
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags pthread)
examples/vision/facealign/pfld/cpp/README.md · 84 lines · new file

@@ -0,0 +1,84 @@
# PFLD C++ Deployment Example

This directory provides `infer.cc`, which quickly finishes the deployment of PFLD on CPU/GPU, as well as on GPU with TensorRT acceleration.

Two steps before deployment

- 1. The software and hardware environment meets the requirements, refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the prebuilt deployment library and samples code according to your development environment, refer to [FastDeploy Prebuilt Libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

Taking CPU inference on Linux as an example, run the following commands in this directory to compile and test. Make sure the FastDeploy version is 0.6.0 or above (x.x.x >= 0.6.0), which supports the PFLD model.

```bash
mkdir build
cd build
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j

# Download the officially converted PFLD model file and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx
wget https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png

# CPU inference
./infer_demo --model pfld-106-lite.onnx --image facealign_input.png --device cpu
# GPU inference
./infer_demo --model pfld-106-lite.onnx --image facealign_input.png --device gpu
# TensorRT inference on GPU
./infer_demo --model pfld-106-lite.onnx --image facealign_input.png --device gpu --backend trt
```

The visualized result after running is shown below

<div width="500">
<img width="470" height="384" float="left" src="https://user-images.githubusercontent.com/19977378/197931737-c2d8e760-a76d-478a-a6c9-4574fb5c70eb.png">
</div>

The above commands only apply to Linux or MacOS. For the usage of the SDK on Windows, refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)

## PFLD C++ Interface

### PFLD Class

```c++
fastdeploy::vision::facealign::PFLD(
        const string& model_file,
        const string& params_file = "",
        const RuntimeOption& runtime_option = RuntimeOption(),
        const ModelFormat& model_format = ModelFormat::ONNX)
```

Loads and initializes a PFLD model, where model_file is the exported ONNX model.

**Parameters**

> * **model_file**(str): model file path
> * **params_file**(str): parameter file path; pass an empty string when the model format is ONNX
> * **runtime_option**(RuntimeOption): backend inference configuration; the default configuration is used if not set
> * **model_format**(ModelFormat): model format, ONNX by default

#### Predict Function

> ```c++
> PFLD::Predict(cv::Mat* im, FaceAlignmentResult* result)
> ```
>
> Model prediction interface; takes an input image and directly outputs the landmarks result.
>
> **Parameters**
>
> > * **im**: input image; note that it should be in HWC, BGR format
> > * **result**: landmarks result; see [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the description of FaceAlignmentResult

### Class Member Variables

Users can modify the following preprocessing parameters according to their actual needs, which affects the final inference and deployment results

> > * **size**(vector<int>): the resize size used in preprocessing; contains two integers representing [width, height]; default is [112, 112]

- [Model Description](../../)
- [Python Deployment](../python)
- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)
examples/vision/facealign/pfld/cpp/infer.cc · 110 lines · new file

@@ -0,0 +1,110 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision.h"
#include "gflags/gflags.h"

DEFINE_string(model, "", "Directory of the inference model.");
DEFINE_string(image, "", "Path of the image file.");
DEFINE_string(device, "cpu",
              "Type of inference device, support 'cpu' or 'gpu'.");
DEFINE_string(backend, "default",
              "The inference runtime backend, support: ['default', 'ort', "
              "'paddle', 'ov', 'trt', 'paddle_trt']");
DEFINE_bool(use_fp16, false,
            "Whether to use FP16 mode, only support 'trt' and 'paddle_trt' backend");

void PrintUsage() {
  std::cout << "Usage: infer_demo --model model_path --image img_path --device [cpu|gpu] --backend "
               "[default|ort|paddle|ov|trt|paddle_trt] "
               "--use_fp16 false"
            << std::endl;
  std::cout << "Default value of device: cpu" << std::endl;
  std::cout << "Default value of backend: default" << std::endl;
  std::cout << "Default value of use_fp16: false" << std::endl;
}

bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
  if (FLAGS_device == "gpu") {
    option->UseGpu();
    if (FLAGS_backend == "ort") {
      option->UseOrtBackend();
    } else if (FLAGS_backend == "paddle") {
      option->UsePaddleBackend();
    } else if (FLAGS_backend == "trt" || FLAGS_backend == "paddle_trt") {
      option->UseTrtBackend();
      option->SetTrtInputShape("input", {1, 3, 112, 112});
      if (FLAGS_backend == "paddle_trt") {
        option->EnablePaddleToTrt();
      }
      if (FLAGS_use_fp16) {
        option->EnableTrtFP16();
      }
    } else if (FLAGS_backend == "default") {
      return true;
    } else {
      std::cout << "While inference with GPU, only support default/ort/paddle/trt/paddle_trt now, "
                << FLAGS_backend << " is not supported." << std::endl;
      return false;
    }
  } else if (FLAGS_device == "cpu") {
    if (FLAGS_backend == "ort") {
      option->UseOrtBackend();
    } else if (FLAGS_backend == "ov") {
      option->UseOpenVINOBackend();
    } else if (FLAGS_backend == "paddle") {
      option->UsePaddleBackend();
    } else if (FLAGS_backend == "default") {
      return true;
    } else {
      std::cout << "While inference with CPU, only support default/ort/ov/paddle now, "
                << FLAGS_backend << " is not supported." << std::endl;
      return false;
    }
  } else {
    std::cerr << "Only support device CPU/GPU now, " << FLAGS_device
              << " is not supported." << std::endl;
    return false;
  }

  return true;
}

int main(int argc, char* argv[]) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  auto option = fastdeploy::RuntimeOption();
  if (!CreateRuntimeOption(&option)) {
    PrintUsage();
    return -1;
  }

  auto model = fastdeploy::vision::facealign::PFLD(FLAGS_model, "", option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return -1;
  }

  auto im = cv::imread(FLAGS_image);
  auto im_bak = im.clone();

  fastdeploy::vision::FaceAlignmentResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return -1;
  }
  std::cout << res.Str() << std::endl;

  auto vis_im = fastdeploy::vision::VisFaceAlignment(im_bak, res);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;

  return 0;
}
examples/vision/facealign/pfld/python/README.md · 71 lines · new file

@@ -0,0 +1,71 @@
# PFLD Python Deployment Example

Two steps before deployment

- 1. The software and hardware environment meets the requirements, refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Install the FastDeploy Python whl package, refer to [FastDeploy Python Installation](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

This directory provides `infer.py`, which quickly finishes the deployment of PFLD on CPU/GPU, as well as on GPU with TensorRT acceleration. Make sure the FastDeploy version is >= 0.6.0, which supports the PFLD model. Run the following script to finish the deployment

```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/facealign/pfld/python

# Download the PFLD model file and test image
## Original ONNX model
wget https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx
wget https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png

# CPU inference
python infer.py --model pfld-106-lite.onnx --image facealign_input.png --device cpu
# GPU inference
python infer.py --model pfld-106-lite.onnx --image facealign_input.png --device gpu
# TensorRT inference
python infer.py --model pfld-106-lite.onnx --image facealign_input.png --device gpu --backend trt
```

The visualized result after running is shown below

<div width="500">
<img width="470" height="384" float="left" src="https://user-images.githubusercontent.com/19977378/197931737-c2d8e760-a76d-478a-a6c9-4574fb5c70eb.png">
</div>

## PFLD Python Interface

```python
fd.vision.facealign.PFLD(model_file, params_file=None, runtime_option=None, model_format=ModelFormat.ONNX)
```

Loads and initializes a PFLD model, where model_file is the exported ONNX model

**Parameters**

> * **model_file**(str): model file path
> * **params_file**(str): parameter file path; no need to set when the model is in ONNX format
> * **runtime_option**(RuntimeOption): backend inference configuration; None means the default configuration is used
> * **model_format**(ModelFormat): model format, ONNX by default

### predict Function

> ```python
> PFLD.predict(input_image)
> ```
>
> Model prediction interface; takes an input image and directly outputs the landmark coordinates.
>
> **Parameters**
>
> > * **input_image**(np.ndarray): input data; note that it should be in HWC, BGR format

> **Return**
>
> > Returns a `fastdeploy.vision.FaceAlignmentResult` structure; see [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for its description (a minimal usage sketch is given right after this document)

## Other Documents

- [PFLD Model Description](..)
- [PFLD C++ Deployment](../cpp)
- [Description of model prediction results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)
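As referenced in the README above, a minimal sketch of calling the documented Python interface directly; it assumes the `pfld-106-lite.onnx` model and `facealign_input.png` image downloaded in the example commands (the full, configurable version is `infer.py` below):

```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
# option.use_gpu()  # optional: run on GPU instead of the default CPU

model = fd.vision.facealign.PFLD("pfld-106-lite.onnx", runtime_option=option)

im = cv2.imread("facealign_input.png")     # HWC, BGR input as required
result = model.predict(im.copy())          # fastdeploy.vision.FaceAlignmentResult
print(result)                              # dump landmarks for debugging

vis = fd.vision.vis_face_alignment(im, result)
cv2.imwrite("vis_result.jpg", vis)
```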
examples/vision/facealign/pfld/python/infer.py · 88 lines · new file

@@ -0,0 +1,88 @@
import fastdeploy as fd
import cv2
import os


def parse_arguments():
    import argparse
    import ast
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True, help="Path of PFLD model.")
    parser.add_argument("--image", type=str, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu' or 'gpu'.")
    parser.add_argument(
        "--backend",
        type=str,
        default="ort",
        help="inference backend, ort, ov, trt, paddle, paddle_trt.")
    parser.add_argument(
        "--enable_trt_fp16",
        type=bool,
        default=False,
        help="whether enable fp16 in trt/paddle_trt backend")
    return parser.parse_args()


def build_option(args):
    option = fd.RuntimeOption()
    device = args.device
    backend = args.backend
    enable_trt_fp16 = args.enable_trt_fp16
    if device == "gpu":
        option.use_gpu()
        if backend == "ort":
            option.use_ort_backend()
        elif backend == "paddle":
            option.use_paddle_backend()
        elif backend in ["trt", "paddle_trt"]:
            option.use_trt_backend()
            option.set_trt_input_shape("input", [1, 3, 112, 112])
            if backend == "paddle_trt":
                option.enable_paddle_to_trt()
            if enable_trt_fp16:
                option.enable_trt_fp16()
        elif backend == "default":
            return option
        else:
            raise Exception(
                "While inference with GPU, only support default/ort/paddle/trt/paddle_trt now, {} is not supported.".
                format(backend))
    elif device == "cpu":
        if backend == "ort":
            option.use_ort_backend()
        elif backend == "ov":
            option.use_openvino_backend()
        elif backend == "paddle":
            option.use_paddle_backend()
        elif backend == "default":
            return option
        else:
            raise Exception(
                "While inference with CPU, only support default/ort/ov/paddle now, {} is not supported.".
                format(backend))
    else:
        raise Exception(
            "Only support device CPU/GPU now, {} is not supported.".format(
                device))

    return option


args = parse_arguments()

# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.facealign.PFLD(args.model, runtime_option=runtime_option)

# Predict on the input image
im = cv2.imread(args.image)
result = model.predict(im.copy())
print(result)

# Visualize the result
vis_im = fd.vision.vis_face_alignment(im, result)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")
fastdeploy/vision.h

@@ -33,6 +33,7 @@
 #include "fastdeploy/vision/facedet/contrib/scrfd.h"
 #include "fastdeploy/vision/facedet/contrib/ultraface.h"
 #include "fastdeploy/vision/facedet/contrib/yolov5face.h"
+#include "fastdeploy/vision/facealign/contrib/pfld.h"
 #include "fastdeploy/vision/faceid/contrib/adaface.h"
 #include "fastdeploy/vision/faceid/contrib/arcface.h"
 #include "fastdeploy/vision/faceid/contrib/cosface.h"
fastdeploy/vision/common/processors/letter_box.h · 10 lines changed · Normal file → Executable file

@@ -21,7 +21,8 @@ namespace vision {
 
 class LetterBoxResize : public Processor {
  public:
-  LetterBoxResize(const std::vector<int>& target_size, const std::vector<float>& color) {
+  LetterBoxResize(const std::vector<int>& target_size,
+                  const std::vector<float>& color) {
    target_size_ = target_size;
    color_ = color;
  }

@@ -30,12 +31,13 @@ class LetterBoxResize : public Processor {
 
  std::string Name() { return "LetterBoxResize"; }
 
-  static bool Run(Mat* mat,const std::vector<int>& target_size, const std::vector<float>& color,
+  static bool Run(Mat* mat, const std::vector<int>& target_size,
+                  const std::vector<float>& color,
                   ProcLib lib = ProcLib::OPENCV);
 
  private:
  std::vector<int> target_size_;
  std::vector<float> color_;
 };
 } // namespace vision
 } // namespace fastdeploy
fastdeploy/vision/common/result.cc

@@ -235,6 +235,30 @@ std::string FaceDetectionResult::Str() {
   return out;
 }
 
+void FaceAlignmentResult::Clear() {
+  std::vector<std::array<float, 2>>().swap(landmarks);
+}
+
+void FaceAlignmentResult::Reserve(int size) {
+  landmarks.resize(size);
+}
+
+void FaceAlignmentResult::Resize(int size) {
+  landmarks.resize(size);
+}
+
+std::string FaceAlignmentResult::Str() {
+  std::string out;
+
+  out = "FaceAlignmentResult: [x, y]\n";
+  for (size_t i = 0; i < landmarks.size(); ++i) {
+    out = out + std::to_string(landmarks[i][0]) + "," +
+          std::to_string(landmarks[i][1]) + "\n";
+  }
+  out += "num_landmarks:" + std::to_string(landmarks.size()) + "\n";
+  return out;
+}
+
 void SegmentationResult::Clear() {
   std::vector<uint8_t>().swap(label_map);
   std::vector<float>().swap(score_map);
fastdeploy/vision/common/result.h · 21 lines changed · Normal file → Executable file

@@ -28,6 +28,7 @@ enum FASTDEPLOY_DECL ResultType {
   OCR,
   MOT,
   FACE_DETECTION,
+  FACE_ALIGNMENT,
   FACE_RECOGNITION,
   MATTING,
   MASK,

@@ -212,6 +213,25 @@ struct FASTDEPLOY_DECL FaceDetectionResult : public BaseResult {
   std::string Str();
 };
 
+/*! @brief Face alignment result structure for all the face alignment models
+ */
+struct FASTDEPLOY_DECL FaceAlignmentResult : public BaseResult {
+  /** \brief All the coordinates of the detected landmarks for an input image; each element of `landmarks` is an array of 2 float values, i.e. [x, y]
+   */
+  std::vector<std::array<float, 2>> landmarks;
+
+  ResultType type = ResultType::FACE_ALIGNMENT;
+  /// Clear face alignment result
+  void Clear();
+
+  void Reserve(int size);
+
+  void Resize(int size);
+
+  /// Debug function, convert the result to string to print
+  std::string Str();
+};
+
 /*! @brief Segmentation result structure for all the segmentation models
  */
 struct FASTDEPLOY_DECL SegmentationResult : public BaseResult {

@@ -234,6 +254,7 @@ struct FASTDEPLOY_DECL SegmentationResult : public BaseResult {
   void Reserve(int size);
 
   void Resize(int size);
 
   /// Debug function, convert the result to string to print
   std::string Str();
 };
fastdeploy/vision/facealign/contrib/pfld.cc · 137 lines · new file

@@ -0,0 +1,137 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision/facealign/contrib/pfld.h"

#include "fastdeploy/utils/perf.h"
#include "fastdeploy/vision/utils/utils.h"

namespace fastdeploy {

namespace vision {

namespace facealign {

PFLD::PFLD(const std::string& model_file, const std::string& params_file,
           const RuntimeOption& custom_option,
           const ModelFormat& model_format) {
  if (model_format == ModelFormat::ONNX) {
    valid_cpu_backends = {Backend::OPENVINO, Backend::ORT};
    valid_gpu_backends = {Backend::ORT, Backend::TRT};
  } else {
    valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
    valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
  }
  runtime_option = custom_option;
  runtime_option.model_format = model_format;
  runtime_option.model_file = model_file;
  runtime_option.params_file = params_file;
  initialized = Initialize();
}

bool PFLD::Initialize() {
  // parameters for preprocess
  size = {112, 112};

  if (!InitRuntime()) {
    FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
    return false;
  }
  return true;
}

bool PFLD::Preprocess(Mat* mat, FDTensor* output,
                      std::map<std::string, std::array<int, 2>>* im_info) {
  // Resize
  int resize_w = size[0];
  int resize_h = size[1];
  if (resize_h != mat->Height() || resize_w != mat->Width()) {
    Resize::Run(mat, resize_w, resize_h);
  }

  // Normalize
  std::vector<float> alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f};
  std::vector<float> beta = {0.0f, 0.0f, 0.0f};
  Convert::Run(mat, alpha, beta);

  // Record output shape of preprocessed image
  (*im_info)["output_shape"] = {mat->Height(), mat->Width()};

  HWC2CHW::Run(mat);
  Cast::Run(mat, "float");
  mat->ShareWithTensor(output);
  output->shape.insert(output->shape.begin(), 1);  // reshape to n, c, h, w
  return true;
}

bool PFLD::Postprocess(
    FDTensor& infer_result, FaceAlignmentResult* result,
    const std::map<std::string, std::array<int, 2>>& im_info) {
  FDASSERT(infer_result.shape[0] == 1, "Only support batch = 1 now.");
  if (infer_result.dtype != FDDataType::FP32) {
    FDERROR << "Only support post process with float32 data." << std::endl;
    return false;
  }

  auto iter_in = im_info.find("input_shape");
  FDASSERT(iter_in != im_info.end(), "Cannot find input_shape from im_info.");
  int in_h = iter_in->second[0];
  int in_w = iter_in->second[1];

  result->Clear();
  float* data = static_cast<float*>(infer_result.Data());
  for (size_t i = 0; i < infer_result.shape[1]; i += 2) {
    float x = data[i];
    float y = data[i + 1];
    x = std::min(std::max(0.f, x), 1.0f);
    y = std::min(std::max(0.f, y), 1.0f);
    // decode landmarks (default 106 landmarks)
    result->landmarks.emplace_back(std::array<float, 2>{x * in_w, y * in_h});
  }

  return true;
}

bool PFLD::Predict(cv::Mat* im, FaceAlignmentResult* result) {
  Mat mat(*im);
  std::vector<FDTensor> input_tensors(1);

  std::map<std::string, std::array<int, 2>> im_info;

  // Record the shape of image and the shape of preprocessed image
  im_info["input_shape"] = {mat.Height(), mat.Width()};
  im_info["output_shape"] = {mat.Height(), mat.Width()};

  if (!Preprocess(&mat, &input_tensors[0], &im_info)) {
    FDERROR << "Failed to preprocess input image." << std::endl;
    return false;
  }
  input_tensors[0].name = InputInfoOfRuntime(0).name;
  std::vector<FDTensor> output_tensors;
  if (!Infer(input_tensors, &output_tensors)) {
    FDERROR << "Failed to inference." << std::endl;
    return false;
  }

  if (!Postprocess(output_tensors[1], result, im_info)) {
    FDERROR << "Failed to post process." << std::endl;
    return false;
  }
  return true;
}

}  // namespace facealign
}  // namespace vision
}  // namespace fastdeploy
fastdeploy/vision/facealign/contrib/pfld.h · 64 lines · new file

@@ -0,0 +1,64 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"

namespace fastdeploy {

namespace vision {

namespace facealign {
/*! @brief PFLD model object, used to load a face alignment model exported from the PFLD repository.
 */
class FASTDEPLOY_DECL PFLD : public FastDeployModel {
 public:
  /** \brief Set path of model file and the configuration of runtime.
   *
   * \param[in] model_file Path of model file, e.g ./pfld.onnx
   * \param[in] params_file Path of parameter file, e.g ppyoloe/model.pdiparams, if the model format is ONNX, this parameter will be ignored
   * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in "valid_cpu_backends"
   * \param[in] model_format Model format of the loaded model, default is ONNX format
   */
  PFLD(const std::string& model_file, const std::string& params_file = "",
       const RuntimeOption& custom_option = RuntimeOption(),
       const ModelFormat& model_format = ModelFormat::ONNX);

  std::string ModelName() const { return "PFLD"; }
  /** \brief Predict the face alignment result for an input image
   *
   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
   * \param[in] result The output face alignment result will be written to this structure
   * \return true if the prediction succeeded, otherwise false
   */
  virtual bool Predict(cv::Mat* im, FaceAlignmentResult* result);

  /// tuple of (width, height), default (112, 112)
  std::vector<int> size;

 private:
  bool Initialize();

  bool Preprocess(Mat* mat, FDTensor* outputs,
                  std::map<std::string, std::array<int, 2>>* im_info);

  bool Postprocess(FDTensor& infer_result, FaceAlignmentResult* result,
                   const std::map<std::string, std::array<int, 2>>& im_info);
};

}  // namespace facealign
}  // namespace vision
}  // namespace fastdeploy
fastdeploy/vision/facealign/contrib/pfld_pybind.cc · 31 lines · new file

@@ -0,0 +1,31 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/pybind/main.h"

namespace fastdeploy {
void BindPFLD(pybind11::module& m) {
  pybind11::class_<vision::facealign::PFLD, FastDeployModel>(m, "PFLD")
      .def(pybind11::init<std::string, std::string, RuntimeOption,
                          ModelFormat>())
      .def("predict",
           [](vision::facealign::PFLD& self, pybind11::array& data) {
             auto mat = PyArrayToCvMat(data);
             vision::FaceAlignmentResult res;
             self.Predict(&mat, &res);
             return res;
           })
      .def_readwrite("size", &vision::facealign::PFLD::size);
}
}  // namespace fastdeploy
fastdeploy/vision/facealign/facealign_pybind.cc · 25 lines · new file

@@ -0,0 +1,25 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/pybind/main.h"

namespace fastdeploy {

void BindPFLD(pybind11::module& m);

void BindFaceAlign(pybind11::module& m) {
  auto facealign_module = m.def_submodule("facealign", "Face alignment models.");
  BindPFLD(facealign_module);
}
}  // namespace fastdeploy
17
fastdeploy/vision/tracking/pptracking/model.h
Normal file → Executable file
17
fastdeploy/vision/tracking/pptracking/model.h
Normal file → Executable file
@@ -18,15 +18,13 @@
|
|||||||
#include "fastdeploy/fastdeploy_model.h"
|
#include "fastdeploy/fastdeploy_model.h"
|
||||||
#include "fastdeploy/vision/common/result.h"
|
#include "fastdeploy/vision/common/result.h"
|
||||||
#include "fastdeploy/vision/tracking/pptracking/tracker.h"
|
#include "fastdeploy/vision/tracking/pptracking/tracker.h"
|
||||||
//#include "fastdeploy/vision/tracking/pptracking/letter_box.h"
|
|
||||||
|
|
||||||
namespace fastdeploy {
|
namespace fastdeploy {
|
||||||
namespace vision {
|
namespace vision {
|
||||||
namespace tracking {
|
namespace tracking {
|
||||||
|
|
||||||
class FASTDEPLOY_DECL PPTracking: public FastDeployModel {
|
class FASTDEPLOY_DECL PPTracking: public FastDeployModel {
|
||||||
|
public:
|
||||||
public:
|
|
||||||
/** \brief Set path of model file and configuration file, and the configuration of runtime
|
/** \brief Set path of model file and configuration file, and the configuration of runtime
|
||||||
*
|
*
|
||||||
* \param[in] model_file Path of model file, e.g pptracking/model.pdmodel
|
* \param[in] model_file Path of model file, e.g pptracking/model.pdmodel
|
||||||
@@ -52,9 +50,7 @@ public:
|
|||||||
*/
|
*/
|
||||||
virtual bool Predict(cv::Mat* img, MOTResult* result);
|
virtual bool Predict(cv::Mat* img, MOTResult* result);
|
||||||
|
|
||||||
|
private:
|
||||||
private:
|
|
||||||
|
|
||||||
bool BuildPreprocessPipelineFromConfig();
|
bool BuildPreprocessPipelineFromConfig();
|
||||||
|
|
||||||
bool Initialize();
|
bool Initialize();
|
||||||
@@ -70,10 +66,7 @@ private:
  float tracked_thresh_;
  float min_box_area_;
  std::unique_ptr<JDETracker> jdeTracker_;
};

}  // namespace tracking
}  // namespace vision
}  // namespace fastdeploy
@@ -21,6 +21,7 @@ void BindClassification(pybind11::module& m);
void BindSegmentation(pybind11::module& m);
void BindMatting(pybind11::module& m);
void BindFaceDet(pybind11::module& m);
void BindFaceAlign(pybind11::module& m);
void BindFaceId(pybind11::module& m);
void BindOcr(pybind11::module& m);
void BindTracking(pybind11::module& m);
@@ -83,6 +84,18 @@ void BindVision(pybind11::module& m) {
      .def("__repr__", &vision::FaceDetectionResult::Str)
      .def("__str__", &vision::FaceDetectionResult::Str);

  pybind11::class_<vision::FaceAlignmentResult>(m, "FaceAlignmentResult")
      .def(pybind11::init())
      .def_readwrite("landmarks", &vision::FaceAlignmentResult::landmarks)
      .def("__repr__", &vision::FaceAlignmentResult::Str)
      .def("__str__", &vision::FaceAlignmentResult::Str);

  pybind11::class_<vision::FaceRecognitionResult>(m, "FaceRecognitionResult")
      .def(pybind11::init())
      .def_readwrite("embedding", &vision::FaceRecognitionResult::embedding)
      .def("__repr__", &vision::FaceRecognitionResult::Str)
      .def("__str__", &vision::FaceRecognitionResult::Str);

  pybind11::class_<vision::SegmentationResult>(m, "SegmentationResult")
      .def(pybind11::init())
      .def_readwrite("label_map", &vision::SegmentationResult::label_map)
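With the FaceAlignmentResult binding in place, the result object can be inspected directly from Python. A hedged sketch; the helper name is illustrative and the result is assumed to come from PFLD.predict() as shown elsewhere in this commit:

import numpy as np

def landmarks_as_array(result):
    # `result` is a FaceAlignmentResult; `landmarks` is bound via
    # .def_readwrite above and converts to a list of [x, y] pairs.
    return np.array(result.landmarks)

Printing the result goes through the bound __str__/__repr__, i.e. FaceAlignmentResult::Str.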
@@ -91,12 +104,6 @@ void BindVision(pybind11::module& m) {
      .def("__repr__", &vision::SegmentationResult::Str)
      .def("__str__", &vision::SegmentationResult::Str);

  pybind11::class_<vision::FaceRecognitionResult>(m, "FaceRecognitionResult")
      .def(pybind11::init())
      .def_readwrite("embedding", &vision::FaceRecognitionResult::embedding)
      .def("__repr__", &vision::FaceRecognitionResult::Str)
      .def("__str__", &vision::FaceRecognitionResult::Str);

  pybind11::class_<vision::MattingResult>(m, "MattingResult")
      .def(pybind11::init())
      .def_readwrite("alpha", &vision::MattingResult::alpha)
@@ -122,6 +129,7 @@ void BindVision(pybind11::module& m) {
  BindClassification(m);
  BindSegmentation(m);
  BindFaceDet(m);
  BindFaceAlign(m);
  BindFaceId(m);
  BindMatting(m);
  BindOcr(m);
43  fastdeploy/vision/visualize/face_alignment.cc  Normal file
@@ -0,0 +1,43 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifdef ENABLE_VISION_VISUALIZE

#include "fastdeploy/vision/visualize/visualize.h"
#include "opencv2/imgproc/imgproc.hpp"

namespace fastdeploy {

namespace vision {

cv::Mat VisFaceAlignment(const cv::Mat& im, const FaceAlignmentResult& result,
                         int line_size) {
  auto vis_im = im.clone();
  // vis landmarks
  cv::Scalar landmark_color = cv::Scalar(0, 255, 0);
  for (size_t i = 0; i < result.landmarks.size(); ++i) {
    cv::Point landmark;
    landmark.x = static_cast<int>(result.landmarks[i][0]);
    landmark.y = static_cast<int>(result.landmarks[i][1]);
    cv::circle(vis_im, landmark, line_size, landmark_color, -1);
  }
  return vis_im;
}

}  // namespace vision
}  // namespace fastdeploy

#endif
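The drawing logic of VisFaceAlignment is small: clone the image and stamp a filled circle at every landmark. The same loop, re-expressed as a Python/OpenCV sketch for readers who only use the Python API (function and variable names are illustrative, not part of this commit):

import cv2

def vis_face_alignment_py(im, landmarks, line_size=1):
    # im: BGR numpy image; landmarks: iterable of [x, y] pairs
    vis_im = im.copy()
    for x, y in landmarks:
        cv2.circle(vis_im, (int(x), int(y)), line_size, (0, 255, 0), -1)  # filled green dot
    return vis_im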
@@ -70,6 +70,9 @@ FASTDEPLOY_DECL cv::Mat VisFaceDetection(const cv::Mat& im,
                                         const FaceDetectionResult& result,
                                         int line_size = 1,
                                         float font_size = 0.5f);
FASTDEPLOY_DECL cv::Mat VisFaceAlignment(const cv::Mat& im,
                                         const FaceAlignmentResult& result,
                                         int line_size = 1);
FASTDEPLOY_DECL cv::Mat VisSegmentation(const cv::Mat& im,
                                        const SegmentationResult& result,
                                        float weight = 0.5);
@@ -36,6 +36,16 @@ void BindVisualize(pybind11::module& m) {
            vision::Mat(vis_im).ShareWithTensor(&out);
            return TensorToPyArray(out);
          })
      .def("vis_face_alignment",
           [](pybind11::array& im_data, vision::FaceAlignmentResult& result,
              int line_size) {
             auto im = PyArrayToCvMat(im_data);
             auto vis_im = vision::VisFaceAlignment(im, result, line_size);
             FDTensor out;
             vision::Mat(vis_im).ShareWithTensor(&out);
             return TensorToPyArray(out);
           })
      .def("vis_segmentation",
           [](pybind11::array& im_data, vision::SegmentationResult& result,
              float weight) {
@@ -20,6 +20,7 @@ from . import tracking
from . import keypointdetection
from . import matting
from . import facedet
from . import facealign
from . import faceid
from . import ocr
from . import evaluation
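After this import is added, the new sub-package is reachable from the top-level API like the other vision sub-packages; a quick hedged check:

import fastdeploy as fd

print(fd.vision.facealign.PFLD)  # exported by python/fastdeploy/vision/facealign/__init__.py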
16  python/fastdeploy/vision/facealign/__init__.py  Normal file
@@ -0,0 +1,16 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from .contrib.pfld import PFLD
15  python/fastdeploy/vision/facealign/contrib/__init__.py  Normal file
@@ -0,0 +1,15 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
68  python/fastdeploy/vision/facealign/contrib/pfld.py  Normal file
@@ -0,0 +1,68 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C


class PFLD(FastDeployModel):
    def __init__(self,
                 model_file,
                 params_file="",
                 runtime_option=None,
                 model_format=ModelFormat.ONNX):
        """Load a face alignment model exported by PFLD.

        :param model_file: (str)Path of model file, e.g pfld/pfld-106-v3.onnx
        :param params_file: (str)Path of parameters file; if the model_format is ModelFormat.ONNX, this parameter is ignored and can be left as an empty string
        :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference of this model; if it's None, the default backend on CPU will be used
        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model, default is ONNX
        """

        super(PFLD, self).__init__(runtime_option)

        assert model_format == ModelFormat.ONNX, "PFLD only supports model format of ModelFormat.ONNX now."
        self._model = C.vision.facealign.PFLD(
            model_file, params_file, self._runtime_option, model_format)
        assert self.initialized, "PFLD initialize failed."

    def predict(self, input_image):
        """Detect the face landmarks in an input image.

        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
        :return: FaceAlignmentResult
        """

        return self._model.predict(input_image)

    @property
    def size(self):
        """
        Returns the preprocess image size, default (112, 112)
        """
        return self._model.size

    @size.setter
    def size(self, wh):
        """
        Set the preprocess image size, default (112, 112)
        """
        assert isinstance(wh, (list, tuple)),\
            "The value to set `size` must be type of tuple or list."
        assert len(wh) == 2,\
            "The value to set `size` must contain 2 elements meaning [width, height], but now it contains {} elements.".format(
                len(wh))
        self._model.size = wh
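Putting the wrapper together with a runtime option and the size setter, a minimal end-to-end sketch; the ONNX file and image paths are placeholders (the test added at the end of this commit downloads a real pfld-106-lite.onnx):

import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_ort_backend()                 # same backend the new test uses

model = fd.vision.facealign.PFLD("pfld-106-lite.onnx", runtime_option=option)
model.size = [112, 112]                  # exercises the size setter above

im = cv2.imread("face.jpg")
result = model.predict(im)
print(result)                            # FaceAlignmentResult carrying `landmarks`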
10  python/fastdeploy/vision/visualize/__init__.py  Normal file → Executable file
@@ -36,6 +36,10 @@ def vis_face_detection(im_data, face_det_result, line_size=1, font_size=0.5):
                                     font_size)


def vis_face_alignment(im_data, face_align_result, line_size=1):
    return C.vision.vis_face_alignment(im_data, face_align_result, line_size)


def vis_segmentation(im_data, seg_result, weight=0.5):
    return C.vision.vis_segmentation(im_data, seg_result, weight)
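A hedged usage sketch for the new wrapper, assuming `im` is a BGR numpy image and `result` a FaceAlignmentResult from PFLD.predict(); saving with cv2.imwrite is just one way to consume the returned image array:

import cv2
import fastdeploy as fd

def save_vis(im, result, path="vis_result.jpg", line_size=2):
    # vis_face_alignment returns a numpy image with the landmarks drawn in
    vis_im = fd.vision.vis_face_alignment(im, result, line_size)
    cv2.imwrite(path, vis_im)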
@@ -63,12 +67,14 @@ def swap_background_matting(im_data,
        "DEPRECATED: fastdeploy.vision.swap_background_matting is deprecated, please use fastdeploy.vision.swap_background function instead."
    )
    assert isinstance(
        result,
        C.vision.MattingResult), "The result must be MattingResult type"
    return C.vision.Visualize.swap_background_matting(
        im_data, background, result, remove_small_connected_area)


def swap_background_segmentation(im_data, background, background_label,
                                 result):
    logging.warning(
        "DEPRECATED: fastdeploy.vision.swap_background_segmentation is deprecated, please use fastdeploy.vision.swap_background function instead."
    )
42  tests/eval_example/test_pfld.py  Normal file
@@ -0,0 +1,42 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import fastdeploy as fd
import cv2
import os
import numpy as np


def test_facealignment_pfld():
    model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx"
    input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png"
    output_url = "https://bj.bcebos.com/paddlehub/fastdeploy/result_landmarks.npy"
    fd.download(model_url, ".")
    fd.download(input_url, ".")
    fd.download(output_url, ".")
    model_path = "pfld-106-lite.onnx"
    # use ORT
    runtime_option = fd.RuntimeOption()
    runtime_option.use_ort_backend()
    model = fd.vision.facealign.PFLD(model_path, runtime_option=runtime_option)

    # compare diff
    im = cv2.imread("./facealign_input.png")
    result = model.predict(im.copy())
    expect = np.load("./result_landmarks.npy")

    diff = np.fabs(np.array(result.landmarks) - expect)
    thres = 1e-04
    assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
        diff.max(), thres)
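The test downloads the model, an input image, and reference landmarks, then requires the maximum absolute difference to stay below 1e-4. A hedged way to run just this case locally, assuming pytest drives the eval_example suite:

import pytest

raise SystemExit(pytest.main(["-q", "tests/eval_example/test_pfld.py"]))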