Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-06 17:17:14 +08:00
Remove unused code and update the Python script
@@ -51,5 +51,5 @@ python tools/rknpu2/export.py --config_path tools/rknpu2/config/PP_TinyPose_256x
 ## Detailed Deployment Documentation

 - [Detailed model introduction](../README_CN.md)
-- [Python deployment](python)
-- [C++ deployment](cpp)
+- [Python deployment](./python)
+- [C++ deployment](./cpp)
@@ -0,0 +1,70 @@
[English](README.md) | Simplified Chinese

# PP-TinyPose Python Deployment Example

Before deployment, confirm the following two steps:

- 1. The software and hardware environment meets the requirements; see the [FastDeploy environment requirements](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the prebuilt deployment libraries and sample code for your development environment; see the [FastDeploy prebuilt libraries](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

This directory provides `pptinypose_infer.py`, a quick example of NPU-accelerated PP-TinyPose deployment for `single-image, single-person keypoint detection`. Run the following commands to complete it.

>> **Note**: The standalone PP-TinyPose model currently supports only single-image, single-person keypoint detection, so the input image should contain exactly one person or be cropped in advance. For multi-person keypoint detection, see the [PP-TinyPose Pipeline](../../../det_keypoint_unite/python/README.md)

```bash
# Download the test image (the RKNN model file is produced with tools/rknpu2/export.py)
wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg

# Run inference on the NPU
python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg
```

When the run finishes, the visualized result looks like the image below.

<div align="center">
<img src="https://user-images.githubusercontent.com/16222477/196386764-dd51ad56-c410-4c54-9580-643f282f5a83.jpeg" width="359px" height="423px" />
</div>

## PP-TinyPose Python Interface

```python
fd.vision.keypointdetection.PPTinyPose(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
```

Loads and initializes a PP-TinyPose model, where model_file, params_file, and config_file are the Paddle inference files exported from the trained model; see [Model Export](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/deploy/EXPORT_MODEL.md) for details.

**Parameters**

> * **model_file**(str): Path to the model file
> * **params_file**(str): Path to the parameters file
> * **config_file**(str): Path to the inference deployment configuration file
> * **runtime_option**(RuntimeOption): Backend inference configuration; defaults to None, i.e. the default configuration is used
> * **model_format**(ModelFormat): Model format; defaults to the Paddle format
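
For reference, below is a minimal initialization sketch in the spirit of the `pptinypose_infer.py` script added in this commit; the RKNN model filename, the empty params entry, and the `use_rknpu()` call mirror that script and may differ in other setups.

```python
import os
import fastdeploy as fd

# Target the RK NPU backend, as done in pptinypose_infer.py
option = fd.RuntimeOption()
option.use_rknpu()

# Illustrative paths; the .rknn filename follows the example script
model_dir = "PP_TinyPose_256x192_infer"
model = fd.vision.keypointdetection.PPTinyPose(
    os.path.join(model_dir, "PP_TinyPose_256x192_infer_rk3588_unquantized.rknn"),
    os.path.join(model_dir, ""),  # RKNN models have no separate params file
    os.path.join(model_dir, "infer_cfg.yml"),
    runtime_option=option,
    model_format=fd.ModelFormat.RKNN)
```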

### predict Function

> ```python
> PPTinyPose.predict(input_image)
> ```
>
> Model prediction interface; takes an input image and directly returns the detection result.
>
> **Parameters**
>
> > * **input_image**(np.ndarray): Input data; must be in HWC, BGR format

> **Returns**
>
> > Returns a `fastdeploy.vision.KeyPointDetectionResult` structure; see [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the structure description
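
As a short usage sketch (mirroring `pptinypose_infer.py` in this directory; `model` is an initialized PPTinyPose instance and the file names are illustrative):

```python
import cv2
import fastdeploy as fd

# Read the test image; OpenCV loads it as HWC, BGR, which predict expects
im = cv2.imread("hrnet_demo.jpg")
result = model.predict(im)  # fastdeploy.vision.KeyPointDetectionResult
print(result)

# Visualize keypoints above a confidence threshold, as in the example script
vis_im = fd.vision.vis_keypoint_detection(im, result, conf_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
```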

### Class Member Attributes
#### Post-processing Parameters
Users can modify the following post-processing parameter according to their actual needs, which affects the final inference and deployment results.

> > * **use_dark**(bool): Whether to use DARK post-processing ([reference paper](https://arxiv.org/abs/1910.06278))
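
A minimal sketch of adjusting this parameter, assuming the attribute is set directly on the model object as the list above suggests:

```python
# Assumed usage: disable DARK post-processing on an initialized PPTinyPose model
model.use_dark = False
```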

## Other Documents

- [PP-TinyPose model introduction](..)
- [PP-TinyPose C++ deployment](../cpp)
- [Model prediction result description](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)
@@ -0,0 +1,50 @@
import fastdeploy as fd
import cv2
import os


def parse_arguments():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tinypose_model_dir",
        required=True,
        help="path of PP-TinyPose model directory")
    parser.add_argument(
        "--image", required=True, help="path of test image file.")
    return parser.parse_args()


def build_tinypose_option(args):
    # Run inference on the RK NPU
    option = fd.RuntimeOption()
    option.use_rknpu()
    return option


args = parse_arguments()

tinypose_model_file = os.path.join(
    args.tinypose_model_dir, "PP_TinyPose_256x192_infer_rk3588_unquantized.rknn")
tinypose_params_file = os.path.join(args.tinypose_model_dir, "")
tinypose_config_file = os.path.join(args.tinypose_model_dir, "infer_cfg.yml")

# Configure the runtime and load the model
runtime_option = build_tinypose_option(args)
tinypose_model = fd.vision.keypointdetection.PPTinyPose(
    tinypose_model_file,
    tinypose_params_file,
    tinypose_config_file,
    runtime_option=runtime_option,
    model_format=fd.ModelFormat.RKNN)
# Normalize/permute are handled during RKNN conversion, so disable them here
tinypose_model.disable_normalize()
tinypose_model.disable_permute()

# Predict keypoints for the input image
im = cv2.imread(args.image)
tinypose_result = tinypose_model.predict(im)
print("Paddle TinyPose Result:\n", tinypose_result)

# Visualize the prediction result
vis_im = fd.vision.vis_keypoint_detection(
    im, tinypose_result, conf_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("TinyPose visualized result saved in ./visualized_result.jpg")
@@ -139,18 +139,12 @@ bool PPTinyPose::Postprocess(std::vector<FDTensor>& infer_result,
           "Only support batch = 1 in FastDeploy now.");
   result->Clear();
-  std::cout << "Postprocess" << std::endl;
-  std::cout << "infer_result.size() is " << infer_result.size() << std::endl;
   if (infer_result.size() == 1) {
     FDTensor result_copy = infer_result[0];
-    std::cout << "Reshape result_copy!" << std::endl;
     result_copy.Reshape({result_copy.shape[0], result_copy.shape[1],
                          result_copy.shape[2] * result_copy.shape[3]});
-    std::cout << "Resize infer_result!" << std::endl;
     infer_result.resize(2);
-    std::cout << "Do ArgMax!" << std::endl;
     function::ArgMax(result_copy,&infer_result[1],-1);
-    std::cout << "Done!" << std::endl;
   }

   // Calculate output length