mirror of
				https://github.com/PaddlePaddle/FastDeploy.git
				synced 2025-10-31 03:46:40 +08:00 
			
		
		
		
	Add PaddleInference GPU backend for OCR-Rec model (#258)
* Add PaddleInference GPU backend for Rec model * Improve the order of default backends
This commit is contained in:
		| @@ -26,11 +26,12 @@ Classifier::Classifier(const std::string& model_file, | |||||||
|                        const RuntimeOption& custom_option, |                        const RuntimeOption& custom_option, | ||||||
|                        const Frontend& model_format) { |                        const Frontend& model_format) { | ||||||
|   if (model_format == Frontend::ONNX) { |   if (model_format == Frontend::ONNX) { | ||||||
|     valid_cpu_backends = {Backend::ORT, Backend::OPENVINO};  // 指定可用的CPU后端 |     valid_cpu_backends = {Backend::ORT, | ||||||
|  |                           Backend::OPENVINO};  // 指定可用的CPU后端 | ||||||
|     valid_gpu_backends = {Backend::ORT, Backend::TRT};  // 指定可用的GPU后端 |     valid_gpu_backends = {Backend::ORT, Backend::TRT};  // 指定可用的GPU后端 | ||||||
|   } else { |   } else { | ||||||
|     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO}; |     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO}; | ||||||
|     valid_gpu_backends = {Backend::PDINFER, Backend::TRT, Backend::ORT}; |     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT}; | ||||||
|   } |   } | ||||||
|   runtime_option = custom_option; |   runtime_option = custom_option; | ||||||
|   runtime_option.model_format = model_format; |   runtime_option.model_format = model_format; | ||||||
|   | |||||||
| @@ -44,12 +44,12 @@ Recognizer::Recognizer(const std::string& model_file, | |||||||
|                        const RuntimeOption& custom_option, |                        const RuntimeOption& custom_option, | ||||||
|                        const Frontend& model_format) { |                        const Frontend& model_format) { | ||||||
|   if (model_format == Frontend::ONNX) { |   if (model_format == Frontend::ONNX) { | ||||||
|     valid_cpu_backends = {Backend::ORT, Backend::OPENVINO};  // 指定可用的CPU后端 |     valid_cpu_backends = {Backend::ORT, | ||||||
|  |                           Backend::OPENVINO};  // 指定可用的CPU后端 | ||||||
|     valid_gpu_backends = {Backend::ORT, Backend::TRT};  // 指定可用的GPU后端 |     valid_gpu_backends = {Backend::ORT, Backend::TRT};  // 指定可用的GPU后端 | ||||||
|   } else { |   } else { | ||||||
|     // NOTE:此模型暂不支持paddle-inference-Gpu推理 |     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO}; | ||||||
|     valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::OPENVINO}; |     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT}; | ||||||
|     valid_gpu_backends = {Backend::ORT, Backend::TRT}; |  | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   runtime_option = custom_option; |   runtime_option = custom_option; | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user
	 yunyaoXYY
					yunyaoXYY