Mirror of https://github.com/PaddlePaddle/FastDeploy.git
[Model] add PFLD model (#433)
* support face alignment PFLD
* add PFLD demo
* fixed FaceAlignmentResult
* fixed bugs
* fixed img size
* fixed readme
* deal with comments
* fixed readme
* add pfld testcase
* update infer.py
* add gflags for example
* update c++ readme
* add gflags in example
* fixed for ci
* fixed gflags.cmake
* deal with comments
* update infer demo

Co-authored-by: Jason <jiangjiajun@baidu.com>
examples/vision/facealign/pfld/cpp/CMakeLists.txt (new file, 14 lines added)
@@ -0,0 +1,14 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED(VERSION 3.10)

# Specify the path of the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/utils/gflags.cmake)
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

# Add the FastDeploy dependency headers
include_directories(${FASTDEPLOY_INCS})

add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Link the FastDeploy library dependencies
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS} gflags pthread)
examples/vision/facealign/pfld/cpp/README.md (new file, 84 lines added)
@@ -0,0 +1,84 @@
# PFLD C++ Deployment Example

This directory provides `infer.cc`, a quick example of deploying PFLD on CPU/GPU, and on GPU with TensorRT acceleration.

Before deployment, confirm the following two steps:

- 1. The software and hardware environment meets the requirements, see [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the prebuilt deployment library and sample code matching your development environment, see [FastDeploy Prebuilt Libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

Taking CPU inference on Linux as an example, run the following commands in this directory to compile and test the demo. Make sure the FastDeploy version is 0.6.0 or above (x.x.x >= 0.6.0), which is required for the PFLD model.

```bash
mkdir build
cd build
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j

# Download the officially converted PFLD model file and a test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx
wget https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png

# CPU inference
./infer_demo --model pfld-106-lite.onnx --image facealign_input.png --device cpu
# GPU inference
./infer_demo --model pfld-106-lite.onnx --image facealign_input.png --device gpu
# TensorRT inference on GPU
./infer_demo --model pfld-106-lite.onnx --image facealign_input.png --device gpu --backend trt
```

The visualized result of a successful run is shown below:

<div width="500">
<img width="470" height="384" float="left" src="https://user-images.githubusercontent.com/19977378/197931737-c2d8e760-a76d-478a-a6c9-4574fb5c70eb.png">
</div>

The commands above only apply to Linux or macOS. For how to use the SDK on Windows, refer to:
- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)

## PFLD C++ Interface

### PFLD Class

```c++
fastdeploy::vision::facealign::PFLD(
        const string& model_file,
        const string& params_file = "",
        const RuntimeOption& runtime_option = RuntimeOption(),
        const ModelFormat& model_format = ModelFormat::ONNX)
```

Loads and initializes a PFLD model, where model_file is the exported model in ONNX format. A construction sketch follows the parameter list below.

**Parameters**

> * **model_file**(str): Path of the model file
> * **params_file**(str): Path of the parameters file; pass an empty string when the model format is ONNX
> * **runtime_option**(RuntimeOption): Backend inference configuration; if left unset, the default configuration is used
> * **model_format**(ModelFormat): Model format, ONNX by default
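As a reference, here is a minimal construction sketch; the GPU/TensorRT settings simply mirror what `infer.cc` does and are optional:

```c++
#include <iostream>

#include "fastdeploy/vision.h"

int main() {
  // Optional runtime configuration; skip these calls to use the default backend on CPU.
  fastdeploy::RuntimeOption option;
  option.UseGpu();                                      // run on GPU
  option.UseTrtBackend();                               // accelerate with TensorRT
  option.SetTrtInputShape("input", {1, 3, 112, 112});   // fixed input shape used by this demo

  // params_file is an empty string because the model format is ONNX.
  auto model = fastdeploy::vision::facealign::PFLD(
      "pfld-106-lite.onnx", "", option, fastdeploy::ModelFormat::ONNX);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize PFLD." << std::endl;
    return -1;
  }
  return 0;
}
```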
#### Predict Function

> ```c++
> PFLD::Predict(cv::Mat* im, FaceAlignmentResult* result)
> ```
>
> Model prediction interface: takes an input image and directly outputs the landmarks result (a usage sketch follows below).
>
> **Parameters**
>
> > * **im**: Input image, which must be in HWC, BGR format
> > * **result**: Landmarks result; see [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for a description of FaceAlignmentResult
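Continuing from the construction sketch above, a minimal prediction sketch; it assumes the `landmarks` member of `FaceAlignmentResult` holds the predicted (x, y) key points as described in the vision results documentation:

```c++
cv::Mat im = cv::imread("facealign_input.png");
fastdeploy::vision::FaceAlignmentResult res;
if (!model.Predict(&im, &res)) {
  std::cerr << "Failed to predict." << std::endl;
  return -1;
}
std::cout << res.Str() << std::endl;  // human-readable summary of the result

// Assumed layout: res.landmarks is a list of (x, y) points in the input image.
for (const auto& point : res.landmarks) {
  std::cout << "landmark: x=" << point[0] << " y=" << point[1] << std::endl;
}
```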
### Class Member Variables

Users can modify the following preprocessing parameter according to their actual needs, which affects the final inference and deployment result (a sketch follows below).

> > * **size**(vector<int>): Use this parameter to change the resize shape applied during preprocessing; it contains two integers representing [width, height] and defaults to [112, 112]
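A minimal sketch of overriding the resize shape before calling Predict; the value shown here is just the default, for illustration:

```c++
// Illustrative: override the preprocessing resize target as {width, height}.
model.size = {112, 112};
```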
- [Model Introduction](../../)
- [Python Deployment](../python)
- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
- [How to Switch the Model Inference Backend](../../../../../docs/cn/faq/how_to_change_backend.md)
examples/vision/facealign/pfld/cpp/infer.cc (new file, 110 lines added)
@@ -0,0 +1,110 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision.h"
#include "gflags/gflags.h"

DEFINE_string(model, "", "Path of the inference model file.");
DEFINE_string(image, "", "Path of the image file.");
DEFINE_string(device, "cpu",
              "Type of inference device, support 'cpu' or 'gpu'.");
DEFINE_string(backend, "default",
              "The inference runtime backend, support: ['default', 'ort', "
              "'paddle', 'ov', 'trt', 'paddle_trt']");
DEFINE_bool(use_fp16, false,
            "Whether to use FP16 mode, only support 'trt' and 'paddle_trt' backend");

void PrintUsage() {
  std::cout << "Usage: infer_demo --model model_path --image img_path "
               "--device [cpu|gpu] --backend "
               "[default|ort|paddle|ov|trt|paddle_trt] "
               "--use_fp16 false"
            << std::endl;
  std::cout << "Default value of device: cpu" << std::endl;
  std::cout << "Default value of backend: default" << std::endl;
  std::cout << "Default value of use_fp16: false" << std::endl;
}

// Build the RuntimeOption from the --device, --backend and --use_fp16 flags.
bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
  if (FLAGS_device == "gpu") {
    option->UseGpu();
    if (FLAGS_backend == "ort") {
      option->UseOrtBackend();
    } else if (FLAGS_backend == "paddle") {
      option->UsePaddleBackend();
    } else if (FLAGS_backend == "trt" || FLAGS_backend == "paddle_trt") {
      option->UseTrtBackend();
      option->SetTrtInputShape("input", {1, 3, 112, 112});
      if (FLAGS_backend == "paddle_trt") {
        option->EnablePaddleToTrt();
      }
      if (FLAGS_use_fp16) {
        option->EnableTrtFP16();
      }
    } else if (FLAGS_backend == "default") {
      return true;
    } else {
      std::cout << "While inference with GPU, only support "
                   "default/ort/paddle/trt/paddle_trt now, "
                << FLAGS_backend << " is not supported." << std::endl;
      return false;
    }
  } else if (FLAGS_device == "cpu") {
    if (FLAGS_backend == "ort") {
      option->UseOrtBackend();
    } else if (FLAGS_backend == "ov") {
      option->UseOpenVINOBackend();
    } else if (FLAGS_backend == "paddle") {
      option->UsePaddleBackend();
    } else if (FLAGS_backend == "default") {
      return true;
    } else {
      std::cout << "While inference with CPU, only support "
                   "default/ort/ov/paddle now, "
                << FLAGS_backend << " is not supported." << std::endl;
      return false;
    }
  } else {
    std::cerr << "Only support device CPU/GPU now, " << FLAGS_device
              << " is not supported." << std::endl;
    return false;
  }

  return true;
}

int main(int argc, char* argv[]) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  auto option = fastdeploy::RuntimeOption();
  if (!CreateRuntimeOption(&option)) {
    PrintUsage();
    return -1;
  }

  auto model = fastdeploy::vision::facealign::PFLD(FLAGS_model, "", option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return -1;
  }

  // Keep an unmodified copy of the image for visualization.
  auto im = cv::imread(FLAGS_image);
  auto im_bak = im.clone();

  fastdeploy::vision::FaceAlignmentResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return -1;
  }
  std::cout << res.Str() << std::endl;

  auto vis_im = fastdeploy::vision::VisFaceAlignment(im_bak, res);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;

  return 0;
}