Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-31 20:02:53 +08:00)
	Add external model's example code and Docs. (#102)
* YOLOv7: initial C++ implementation and pybind bindings, modified yolov7.cc, C++ and Python examples with READMEs, api.md updates, release links, copyright headers, .gitignore update; moved helper functions to private class members and made member variables const; removed the license text under fastdeploy/.
* Merged upstream develop repeatedly (#9, #11, #12, #13, #14, #16); each merge carried the same set of upstream changes: fix compile problems across Python versions (#26), add PaddleDetection/PPYOLOE support (#22), add a convert processor to vision (#27), add YOLOv6 and YOLOX C++/Python demos, add normalize with alpha and beta, add version notes for YOLOv5/YOLOv6/YOLOX, fix empty inference results and add multi-label support for YOLOv5 (#29), fix the option.trt_max_shape variable name in fastdeploy_runtime.cc, update runtime_option.md (ResNet dynamic shape input renamed from images to x), delete detection.py, and clean up examples/CMakeLists.txt.
* Added is_dynamic for the YOLO series (#22).
* Initial commits for additional external models: YOLOR, ScaledYOLOv4, YOLOv5Lite, RetinaFace, UltraFace, YOLOv5Face, MODNet, ArcFace, Partial FC, YOLOX, YOLOv6, NanoDet, SCRFD, and InsightFace.
* Documentation passes for YOLOv7, YOLOv5, and the face/InsightFace models; added result printing for detection demos and fixed wrong expressions across the docs.

Co-authored-by: Jason <jiangjiajun@baidu.com>
Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com>
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Co-authored-by: huangjianhui <852142024@qq.com>
Co-authored-by: Jason <928090362@qq.com>
@@ -10,24 +10,25 @@
```
# Download the RetinaFace model file and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_mobile0.25-640-640.onnx
wget todo
wget https://raw.githubusercontent.com/DefTruth/lite.ai.toolkit/main/examples/lite/resources/test_lite_face_detector_3.jpg

# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd examples/vison/detection/retinaface/python/
cd examples/vision/facedet/retinaface/python/

# CPU inference
python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image todo --device cpu
python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image test_lite_face_detector_3.jpg --device cpu
# GPU inference
python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image todo --device gpu
python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image test_lite_face_detector_3.jpg --device gpu
# TensorRT inference on GPU
python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image todo --device gpu --use_trt True
python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image test_lite_face_detector_3.jpg --device gpu --use_trt True
```

After the run completes, the visualized result is shown in the image below

<img width="640" src="https://user-images.githubusercontent.com/67993288/183847558-abcd9a57-9cd9-4891-b09a-710963c99b74.jpg">
<img width="640" src="https://user-images.githubusercontent.com/67993288/184301763-1b950047-c17f-4819-b175-c743b699c3b1.jpg">

## RetinaFace Python Interface

@@ -60,15 +61,17 @@ RetinaFace model loading and initialization, where model_file is the exported ONNX model format

> **Return**
>
> > Returns a `fastdeploy.vision.DetectionResult` struct; see the document [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for a description of the structure
> > Returns a `fastdeploy.vision.FaceDetectionResult` struct; see the document [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for a description of the structure

### Class Member Properties
#### Preprocessing Parameters
Users can modify the following preprocessing parameters according to their actual needs, which affects the final inference and deployment results (a usage sketch follows this list)

> > * **size**(list[int]): the target size used when resizing the image during preprocessing; contains two integers, [width, height]; default is [640, 640]
> > * **padding_value**(list[float]): the value used to pad the image during resizing; contains three floats, one per channel; default is [114, 114, 114]
> > * **is_no_pad**(bool): whether the image is resized without padding; `is_no_pad=True` means no padding is used; default is `is_no_pad=False`
> > * **is_mini_pad**(bool): whether to set the resized width and height to the values closest to the `size` member while keeping the number of padded pixels divisible by the `stride` member; default is `is_mini_pad=False`
> > * **stride**(int): used together with the `is_mini_pad` member; default is `stride=32`
> > * **variance**(list[float]): the variance values used in RetinaFace box decoding; default is [0.1, 0.2] and usually does not need to be changed
> > * **min_sizes**(list[list[int]]): the anchor width/height settings in RetinaFace; default is {{16, 32}, {64, 128}, {256, 512}}, corresponding to strides 8, 16, and 32 respectively
> > * **downsample_strides**(list[int]): the downsampling factors of the feature maps on which anchors are generated; contains three integers; default is [8, 16, 32]
> > * **landmarks_per_face**(int): the number of landmarks per detected face for the current model; default is 5
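If the Python binding exposes these preprocessing parameters as writable attributes on the model object, they can be adjusted before calling `predict()`. The snippet below is a minimal sketch under that assumption; the attribute names are taken from the list above, but whether they are plain attributes of `RetinaFace` should be verified against the installed FastDeploy version.

```python
import fastdeploy as fd

# A minimal sketch, assuming the preprocessing parameters listed above are
# exposed as writable attributes on the RetinaFace model object.
model = fd.vision.facedet.RetinaFace(
    "Pytorch_RetinaFace_mobile0.25-640-640.onnx")

model.size = [640, 640]                      # resize target, [width, height]
model.padding_value = [114.0, 114.0, 114.0]  # per-channel padding value
model.landmarks_per_face = 5                 # landmarks predicted per face

# Inspect the current settings (assumed readable attributes).
print(model.size, model.downsample_strides)
```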
							
								
								
									
examples/vision/facedet/retinaface/python/infer.py (new file, 51 lines)
@@ -0,0 +1,51 @@
import fastdeploy as fd
import cv2


def parse_arguments():
    import argparse
    import ast
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", required=True, help="Path of RetinaFace ONNX model.")
    parser.add_argument(
        "--image", required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu' or 'gpu'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
    return parser.parse_args()


def build_option(args):
    option = fd.RuntimeOption()

    if args.device.lower() == "gpu":
        option.use_gpu()

    if args.use_trt:
        option.use_trt_backend()
        option.set_trt_input_shape("images", [1, 3, 640, 640])
    return option


args = parse_arguments()

# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.facedet.RetinaFace(args.model, runtime_option=runtime_option)

# Run face detection on the input image
im = cv2.imread(args.image)
result = model.predict(im.copy())
print(result)

# Visualize the prediction result
vis_im = fd.vision.vis_face_detection(im, result)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")
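To consume the prediction programmatically rather than just printing it, the sketch below walks the returned result. The field names `boxes` and `scores` are assumptions modeled on other FastDeploy vision result structs; verify them against the `FaceDetectionResult` documentation for your installed version.

```python
import cv2
import fastdeploy as fd

# Standalone sketch: filter detections by confidence and print them.
# `result.boxes` ([x1, y1, x2, y2] per face) and `result.scores` are assumed
# field names, based on how other FastDeploy vision results are exposed.
model = fd.vision.facedet.RetinaFace("Pytorch_RetinaFace_mobile0.25-640-640.onnx")
im = cv2.imread("test_lite_face_detector_3.jpg")
result = model.predict(im)

conf_threshold = 0.5
for box, score in zip(result.boxes, result.scores):
    if score < conf_threshold:
        continue
    x1, y1, x2, y2 = box
    print(f"face: score={score:.3f}, box=({x1:.1f}, {y1:.1f}, {x2:.1f}, {y2:.1f})")
```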