Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-31 11:56:44 +08:00
			
		
		
		
	Add PaddleClas infer.py (#107)
* Update README.md
* Update README.md
* Update README.md
* Create README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Add evaluation calculate time and fix some bugs
* Update classification __init__
* Move to ppseg
* Add segmentation doc
* Add PaddleClas infer.py
* Update PaddleClas infer.py
* Delete .infer.py.swp

Co-authored-by: Jason <jiangjiajun@baidu.com>
@@ -1,5 +1,6 @@
 import fastdeploy as fd
 import cv2
+import os
 
 
 def parse_arguments():
@@ -9,7 +10,9 @@ def parse_arguments():
     parser.add_argument(
         "--model", required=True, help="Path of PaddleClas model.")
     parser.add_argument(
-        "--image", required=True, help="Path of test image file.")
+        "--image", type=str, required=True, help="Path of test image file.")
+    parser.add_argument(
+        "--topk", type=int, default=1, help="Return topk results.")
     parser.add_argument(
         "--device",
         type=str,
@@ -31,7 +34,8 @@ def build_option(args):
 
     if args.use_trt:
         option.use_trt_backend()
-        option.set_trt_input_shape("images", [1, 3, 640, 640])
+        option.set_trt_input_shape("inputs", [1, 3, 224, 224],
+                                   [1, 3, 224, 224], [1, 3, 224, 224])
     return option
 
 
@@ -39,9 +43,13 @@ args = parse_arguments()
 
 # Configure the runtime and load the model
 runtime_option = build_option(args)
-model = fd.vision.classification.PaddleClasModel(args.model, runtime_option=runtime_option)
+model_file = os.path.join(args.model, "inference.pdmodel")
+params_file = os.path.join(args.model, "inference.pdiparams")
+config_file = os.path.join(args.model, "inference_cls.yaml")
+model = fd.vision.classification.PaddleClasModel(
+    model_file, params_file, config_file, runtime_option=runtime_option)
 
 # Predict the image classification result
 im = cv2.imread(args.image)
-result = model.predict(im)
+result = model.predict(im, args.topk)
 print(result)
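For context, a minimal standalone sketch of the loading path introduced by this change; the model directory and image name are illustrative, taken from the Python example README in this commit:

```python
import os
import cv2
import fastdeploy as fd

# Illustrative paths; see the Python README in this commit for download commands.
model_dir = "ResNet50_vd_infer"
image_path = "ILSVRC2012_val_00000010.jpeg"

option = fd.RuntimeOption()  # default backend on CPU

# The model is now loaded from the three exported PaddleClas files
# instead of a single directory argument.
model = fd.vision.classification.PaddleClasModel(
    os.path.join(model_dir, "inference.pdmodel"),
    os.path.join(model_dir, "inference.pdiparams"),
    os.path.join(model_dir, "inference_cls.yaml"),
    runtime_option=option)

im = cv2.imread(image_path)
print(model.predict(im, 3))  # predict() now also accepts a topk argument
```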
							
								
								
									
examples/vision/segmentation/paddleseg/README.md (new file, 56 lines)
# PaddleClas Model Deployment

## Model Version

- [PaddleClas Release/2.4](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.4)

FastDeploy currently supports deployment of the following models:

- [PP-LCNet models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/PP-LCNet.md)
- [PP-LCNetV2 models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/PP-LCNetV2.md)
- [EfficientNet models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/EfficientNet_and_ResNeXt101_wsl.md)
- [GhostNet models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Mobile.md)
- [MobileNet models (v1, v2 and v3)](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Mobile.md)
- [ShuffleNet models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Mobile.md)
- [SqueezeNet models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Others.md)
- [Inception models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/Inception.md)
- [PP-HGNet models](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/models/PP-HGNet.md)
- [ResNet models (including the vd series)](https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/ResNet_and_vd.md)

## Prepare the PaddleClas Deployment Model

For exporting PaddleClas models, refer to the PaddleClas documentation: [Model Export](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/inference_deployment/export_model.md#2-%E5%88%86%E7%B1%BB%E6%A8%A1%E5%9E%8B%E5%AF%BC%E5%87%BA)

Note: an exported PaddleClas model contains only the two files `inference.pdmodel` and `inference.pdiparams`. For deployment you also need the generic [inference_cls.yaml](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/deploy/configs/inference_cls.yaml) file provided by PaddleClas; FastDeploy reads the preprocessing settings required at inference time from this yaml file, so you can download the file and use it directly. Adjust its parameters to your own needs by referring to the `Infer` section of the corresponding PaddleClas training [config](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.4/ppcls/configs/ImageNet).
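As a convenience, here is a small sketch (Python standard library only) of fetching one of the prepackaged models from the table in the next section and checking that the three files FastDeploy expects are present; the package URL and directory name are illustrative:

```python
import os
import tarfile
import urllib.request

# Illustrative package taken from the pretrained-model table below.
url = "https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz"
archive = "ResNet50_vd_infer.tgz"

urllib.request.urlretrieve(url, archive)
with tarfile.open(archive) as tar:
    tar.extractall()

# The deployment examples expect exactly these three files in the model directory.
model_dir = "ResNet50_vd_infer"
for name in ("inference.pdmodel", "inference.pdiparams", "inference_cls.yaml"):
    path = os.path.join(model_dir, name)
    print(path, "found" if os.path.exists(path) else "MISSING")
```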

## Download Pretrained Models

For developers' convenience, some models exported by PaddleClas (with the inference_cls.yaml file included) are listed below and can be downloaded and used directly.

| Model | Parameter file size | Input shape | Top1 | Top5 |
|:----- |:----- |:----- |:----- |:----- |
| [PPLCNet_x1_0](https://bj.bcebos.com/paddlehub/fastdeploy/PPLCNet_x1_0_infer.tgz) | 12MB | 224x224 | 71.32% | 90.03% |
| [PPLCNetV2_base](https://bj.bcebos.com/paddlehub/fastdeploy/PPLCNetV2_base_infer.tgz) | 26MB | 224x224 | 77.04% | 93.27% |
| [EfficientNetB7](https://bj.bcebos.com/paddlehub/fastdeploy/EfficientNetB7_infer.tgz) | 255MB | 600x600 | 84.3% | 96.9% |
| [EfficientNetB0_small](https://bj.bcebos.com/paddlehub/fastdeploy/EfficientNetB0_small_infer.tgz) | 18MB | 224x224 | 75.8% | 75.8% |
| [GhostNet_x1_3_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/GhostNet_x1_3_ssld_infer.tgz) | 29MB | 224x224 | 75.7% | 92.5% |
| [GhostNet_x0_5_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/GhostNet_x0_5_infer.tgz) | 10MB | 224x224 | 66.8% | 86.9% |
| [MobileNetV1_x0_25](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_x0_25_infer.tgz) | 1.9MB | 224x224 | 51.4% | 75.5% |
| [MobileNetV1_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_ssld_infer.tgz) | 17MB | 224x224 | 77.9% | 93.9% |
| [MobileNetV2_x0_25](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV2_x0_25_infer.tgz) | 5.9MB | 224x224 | 53.2% | 76.5% |
| [MobileNetV2_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV2_ssld_infer.tgz) | 14MB | 224x224 | 76.74% | 93.39% |
| [MobileNetV3_small_x0_35_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV3_small_x0_35_ssld_infer.tgz) | 6.4MB | 224x224 | 55.55% | 77.71% |
| [MobileNetV3_large_x1_0_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV3_large_x1_0_ssld_infer.tgz) | 22MB | 224x224 | 78.96% | 94.48% |
| [ShuffleNetV2_x0_25](https://bj.bcebos.com/paddlehub/fastdeploy/ShuffleNetV2_x0_25_infer.tgz) | 2.4MB | 224x224 | 49.9% | 73.79% |
| [ShuffleNetV2_x2_0](https://bj.bcebos.com/paddlehub/fastdeploy/ShuffleNetV2_x2_0_infer.tgz) | 29MB | 224x224 | 73.15% | 91.2% |
| [SqueezeNet1_1](https://bj.bcebos.com/paddlehub/fastdeploy/SqueezeNet1_1_infer.tgz) | 4.8MB | 224x224 | 60.1% | 81.9% |
| [InceptionV3](https://bj.bcebos.com/paddlehub/fastdeploy/InceptionV3_infer.tgz) | 92MB | 299x299 | 79.14% | 94.59% |
| [PPHGNet_tiny_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/PPHGNet_tiny_ssld_infer.tgz) | 57MB | 224x224 | 81.95% | 96.12% |
| [PPHGNet_base_ssld](https://bj.bcebos.com/paddlehub/fastdeploy/PPHGNet_base_ssld_infer.tgz) | 274MB | 224x224 | 85.0% | 97.35% |
| [ResNet50_vd](https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz) | 98MB | 224x224 | 79.12% | 94.44% |

## Detailed Deployment Documentation

- [Python deployment](python)
- [C++ deployment](cpp)
							
								
								
									
examples/vision/segmentation/paddleseg/cpp/CMakeLists.txt (new file, 14 lines)
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED(VERSION 3.12)

# Path to the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")

include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

# Add FastDeploy header directories
include_directories(${FASTDEPLOY_INCS})

add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Link against the FastDeploy libraries
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
							
								
								
									
examples/vision/segmentation/paddleseg/cpp/README.md (new file, 77 lines)
# YOLOv7 C++ Deployment Example

This directory provides `infer.cc` as a quick example of deploying YOLOv7 on CPU/GPU, and on GPU with TensorRT acceleration.

Before deployment, confirm the following two steps:

- 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/quick_start/requirements.md)
- 2. Download the prebuilt deployment library and samples code for your environment; see [FastDeploy prebuilt libraries](../../../../../docs/compile/prebuilt_libraries.md)

Taking CPU inference on Linux as an example, run the following commands in this directory to build and test:

```
mkdir build
cd build
wget https://xxx.tgz
tar xvf fastdeploy-linux-x64-0.2.0.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0
make -j

# Download the officially converted YOLOv7 model file and a test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7.onnx
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000087038.jpg


# CPU inference
./infer_demo yolov7.onnx 000000087038.jpg 0
# GPU inference
./infer_demo yolov7.onnx 000000087038.jpg 1
# TensorRT inference on GPU
./infer_demo yolov7.onnx 000000087038.jpg 2
```

## YOLOv7 C++ Interface

### YOLOv7 class

```
fastdeploy::vision::detection::YOLOv7(
        const string& model_file,
        const string& params_file = "",
        const RuntimeOption& runtime_option = RuntimeOption(),
        const Frontend& model_format = Frontend::ONNX)
```

Loads and initializes a YOLOv7 model, where model_file is the exported ONNX model.

**Parameters**

> * **model_file**(str): path to the model file
> * **params_file**(str): path to the parameter file; pass an empty string when the model format is ONNX
> * **runtime_option**(RuntimeOption): backend inference configuration; None means the default configuration is used
> * **model_format**(Frontend): model format, ONNX by default

#### Predict function

> ```
> YOLOv7::Predict(cv::Mat* im, DetectionResult* result,
>                 float conf_threshold = 0.25,
>                 float nms_iou_threshold = 0.5)
> ```
>
> Model prediction interface: takes an input image and directly outputs detection results.
>
> **Parameters**
>
> > * **im**: input image; note it must be in HWC, BGR format
> > * **result**: detection result, including detection boxes and the confidence of each box; see [Vision model prediction results](../../../../../docs/api/vision_results/) for the DetectionResult description
> > * **conf_threshold**: confidence threshold used to filter detection boxes
> > * **nms_iou_threshold**: IoU threshold used during NMS

### Class member variables

> > * **size**(vector<int>): modifies the resize size used during preprocessing; contains two integers representing [width, height]; default is [640, 640]

- [Model description](../../)
- [Python deployment](../python)
- [Vision model prediction results](../../../../../docs/api/vision_results/)
							
								
								
									
examples/vision/segmentation/paddleseg/cpp/infer.cc (new file, 114 lines)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision.h"
void CpuInfer(const std::string& model_file, const std::string& params_file,
              const std::string& config_file, const std::string& image_file) {
  auto option = fastdeploy::RuntimeOption();
  option.UseCpu();
  auto model = fastdeploy::vision::classification::PaddleClasModel(
      model_file, params_file, config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::ClassifyResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  // Print the classification result
  std::cout << res.Str() << std::endl;
}

void GpuInfer(const std::string& model_file, const std::string& params_file,
              const std::string& config_file, const std::string& image_file) {
  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  auto model = fastdeploy::vision::classification::PaddleClasModel(
      model_file, params_file, config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::ClassifyResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  // Print the classification result
  std::cout << res.Str() << std::endl;
}

void TrtInfer(const std::string& model_file, const std::string& params_file,
              const std::string& config_file, const std::string& image_file) {
  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  option.UseTrtBackend();
  option.SetTrtInputShape("inputs", {1, 3, 224, 224}, {1, 3, 224, 224},
                          {1, 3, 224, 224});
  auto model = fastdeploy::vision::classification::PaddleClasModel(
      model_file, params_file, config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::ClassifyResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  // Print the classification result
  std::cout << res.Str() << std::endl;
}

int main(int argc, char* argv[]) {
  if (argc < 4) {
    std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
                 "e.g. ./infer_demo ./ResNet50_vd ./test.jpeg 0"
              << std::endl;
    std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
                 "with gpu; 2: run with gpu and use tensorrt backend."
              << std::endl;
    return -1;
  }

  // The model directory is expected to contain the exported PaddleClas files.
  std::string model_file = std::string(argv[1]) + "/inference.pdmodel";
  std::string params_file = std::string(argv[1]) + "/inference.pdiparams";
  std::string config_file = std::string(argv[1]) + "/inference_cls.yaml";
  std::string image_file = argv[2];
  if (std::atoi(argv[3]) == 0) {
    CpuInfer(model_file, params_file, config_file, image_file);
  } else if (std::atoi(argv[3]) == 1) {
    GpuInfer(model_file, params_file, config_file, image_file);
  } else if (std::atoi(argv[3]) == 2) {
    TrtInfer(model_file, params_file, config_file, image_file);
  }
  return 0;
}
							
								
								
									
examples/vision/segmentation/paddleseg/python/README.md (new file, 75 lines)
# PaddleClas Model Python Deployment Example

Before deployment, confirm the following two steps:

- 1. The hardware and software environment meets the requirements; see [FastDeploy environment requirements](../../../../../docs/quick_start/requirements.md)
- 2. Install the FastDeploy Python whl package; see [FastDeploy Python installation](../../../../../docs/quick_start/install.md)

This directory provides `infer.py` as a quick example of deploying ResNet50_vd on CPU/GPU, and on GPU with TensorRT acceleration. Run the following script to complete it:

```
# Download the ResNet50_vd model files and a test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
tar -xvf ResNet50_vd_infer.tgz
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg


# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/classification/paddleclas/python

# CPU inference
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device cpu
# GPU inference
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu
# TensorRT inference on GPU (note: the first TensorRT run serializes the model, which takes some time; please be patient)
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --use_trt True
```

After the run completes, the result is returned as follows:
```
ClassifyResult(
label_ids: 153,
scores: 0.686229,
)
```

## PaddleClasModel Python Interface

```
fd.vision.classification.PaddleClasModel(model_file, params_file, config_file, runtime_option=None, model_format=Frontend.PADDLE)
```

Loads and initializes a PaddleClas model, where model_file and params_file are the Paddle inference files exported from the trained model; see [Model Export](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/inference_deployment/export_model.md#2-%E5%88%86%E7%B1%BB%E6%A8%A1%E5%9E%8B%E5%AF%BC%E5%87%BA) in the PaddleClas documentation for details.

**Parameters**

> * **model_file**(str): path to the model file
> * **params_file**(str): path to the parameter file
> * **config_file**(str): inference deployment configuration file
> * **runtime_option**(RuntimeOption): backend inference configuration; None means the default configuration is used
> * **model_format**(Frontend): model format, Paddle by default

### predict function

> ```
> PaddleClasModel.predict(input_image, topk=1)
> ```
>
> Model prediction interface: takes an input image and directly outputs classification results.
>
> **Parameters**
>
> > * **input_image**(np.ndarray): input data; note it must be in HWC, BGR format
> > * **topk**(int): return the topk classification results with the highest predicted probability, 1 by default

> **Returns**
>
> > Returns a `fastdeploy.vision.ClassifyResult` structure; see [Vision model prediction results](../../../../../docs/api/vision_results/) for its description
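For illustration, a short sketch of consuming the returned `ClassifyResult`; the field names follow the sample output above, and treating them as parallel Python list attributes is an assumption:

```python
import cv2
import fastdeploy as fd

# Assumes the ResNet50_vd_infer package and test image downloaded above.
model = fd.vision.classification.PaddleClasModel(
    "ResNet50_vd_infer/inference.pdmodel",
    "ResNet50_vd_infer/inference.pdiparams",
    "ResNet50_vd_infer/inference_cls.yaml")  # default runtime_option

im = cv2.imread("ILSVRC2012_val_00000010.jpeg")
result = model.predict(im, 5)  # top-5 predictions

# label_ids and scores are assumed to be parallel sequences, as in the sample output above.
for label_id, score in zip(result.label_ids, result.scores):
    print(f"class {label_id}: {score:.4f}")
```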

## Other Documents

- [PaddleClas model description](..)
- [PaddleClas C++ deployment](../cpp)
- [Model prediction result description](../../../../../docs/api/vision_results/)
							
								
								
									
examples/vision/segmentation/paddleseg/python/infer.py (new file, 56 lines)
import fastdeploy as fd
import cv2
import os


def parse_arguments():
    import argparse
    import ast
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", required=True, help="Path of PaddleClas model.")
    parser.add_argument(
        "--image", type=str, required=True, help="Path of test image file.")
    parser.add_argument(
        "--topk", type=int, default=1, help="Return topk results.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu' or 'gpu'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use tensorrt.")
    return parser.parse_args()


def build_option(args):
    option = fd.RuntimeOption()

    if args.device.lower() == "gpu":
        option.use_gpu()

    if args.use_trt:
        option.use_trt_backend()
        option.set_trt_input_shape("inputs", [1, 3, 224, 224],
                                   [1, 3, 224, 224], [1, 3, 224, 224])
    return option


args = parse_arguments()

# Configure the runtime and load the model
runtime_option = build_option(args)
model_file = os.path.join(args.model, "inference.pdmodel")
params_file = os.path.join(args.model, "inference.pdiparams")
config_file = os.path.join(args.model, "inference_cls.yaml")
#model = fd.vision.classification.PaddleClasModel(model_file, params_file, config_file, runtime_option=runtime_option)
model = fd.vision.classification.ResNet50vd(
    model_file, params_file, config_file, runtime_option=runtime_option)

# Predict the image classification result
im = cv2.imread(args.image)
result = model.predict(im, args.topk)
print(result)