mirror of
				https://github.com/PaddlePaddle/FastDeploy.git
				synced 2025-10-31 11:56:44 +08:00 
			
		
		
		
	Add OpenVINO backend support (#148)
* Add OpenVINO backend support * fix pybind * fix python library path
This commit is contained in:
		| @@ -21,7 +21,7 @@ | ||||
|  | ||||
| namespace fastdeploy { | ||||
|  | ||||
| enum FASTDEPLOY_DECL Backend { UNKNOWN, ORT, TRT, PDINFER }; | ||||
| enum FASTDEPLOY_DECL Backend { UNKNOWN, ORT, TRT, PDINFER, OPENVINO }; | ||||
| // AUTOREC will decide which Frontend to use | ||||
| // according to the name of the model file | ||||
| enum FASTDEPLOY_DECL Frontend { AUTOREC, PADDLE, ONNX }; | ||||
| @@ -63,6 +63,9 @@ struct FASTDEPLOY_DECL RuntimeOption { | ||||
|   // use tensorrt backend | ||||
|   void UseTrtBackend(); | ||||
|  | ||||
|   // use openvino backend | ||||
|   void UseOpenVINOBackend(); | ||||
|  | ||||
|   // enable mkldnn while use paddle inference in CPU | ||||
|   void EnablePaddleMKLDNN(); | ||||
|   // disable mkldnn while use paddle inference in CPU | ||||
| @@ -97,7 +100,8 @@ struct FASTDEPLOY_DECL RuntimeOption { | ||||
|  | ||||
|   Backend backend = Backend::UNKNOWN; | ||||
|   // for cpu inference and preprocess | ||||
|   int cpu_thread_num = 8; | ||||
|   // default will let the backend choose its own default value | ||||
|   int cpu_thread_num = -1; | ||||
|   int device_id = 0; | ||||
|  | ||||
|   Device device = Device::CPU; | ||||
| @@ -152,6 +156,8 @@ struct FASTDEPLOY_DECL Runtime { | ||||
|  | ||||
|   void CreateTrtBackend(); | ||||
|  | ||||
|   void CreateOpenVINOBackend(); | ||||
|  | ||||
|   int NumInputs() { return backend_->NumInputs(); } | ||||
|   int NumOutputs() { return backend_->NumOutputs(); } | ||||
|   TensorInfo GetInputInfo(int index); | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Jason
					Jason