# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import fastdeploy as fd
import cv2
import os

def parse_arguments():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cls_model",
        required=True,
        help="Path of the PPOCR classification model.")
    parser.add_argument(
        "--image", type=str, required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
    parser.add_argument(
        "--device_id",
        type=int,
        default=0,
        help="Define which GPU card is used to run the model.")
    return parser.parse_args()

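# build_option maps the --device flag onto a FastDeploy RuntimeOption.
# fd.RuntimeOption() targets the CPU by default, so only the GPU case needs
# an explicit device id here.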
def build_option(args):
    cls_option = fd.RuntimeOption()

    if args.device.lower() == "gpu":
        cls_option.use_gpu(args.device_id)

    return cls_option

args = parse_arguments()

# An exported Paddle inference model consists of a model file and a params file
cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")

# Set the runtime option
cls_option = build_option(args)

# Create the text direction classification model (cls_model)
cls_model = fd.vision.ocr.Classifier(
    cls_model_file, cls_params_file, runtime_option=cls_option)

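# Note: in the full PP-OCR pipeline (e.g. fd.vision.ocr.PPOCRv3), this
# classifier is combined with detection and recognition models; here it is
# used on its own.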
# Set the postprocessing parameters. cls_thresh is the confidence threshold
# of the text direction classifier; in the full PP-OCR pipeline an image is
# only rotated when the classifier score exceeds this value.
cls_model.postprocessor.cls_thresh = 0.9

# Read the image
im = cv2.imread(args.image)

# Predict and return the result
result = cls_model.predict(im)

# A batch of images can also be inferred with the following code:
# result = cls_model.batch_predict([im])

print(result)
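
# Example usage (the script name and paths below are illustrative; replace
# them with your own exported classification model directory and test image):
#   python infer_cls.py --cls_model ch_ppocr_mobile_v2.0_cls_infer \
#       --image doc_image.jpg --device cpu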