mirror of
				https://github.com/PaddlePaddle/FastDeploy.git
				synced 2025-10-27 02:20:31 +08:00 
			
		
		
		
	
		
			
				
	
	
		
			40 lines
		
	
	
		
			1.3 KiB
		
	
	
	
		
			C++
		
	
	
	
	
	
			
		
		
	
	
			40 lines
		
	
	
		
			1.3 KiB
		
	
	
	
		
			C++
		
	
	
	
	
	
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "fastdeploy/backends/backend.h"
#include "onnxruntime_cxx_api.h" // NOLINT

namespace fastdeploy {

// Convert FDDataType to the equivalent ONNX Runtime tensor element type
// (ONNXTensorElementDataType).
ONNXTensorElementDataType GetOrtDtype(const FDDataType& fd_dtype);

// Convert an ONNX Runtime tensor element type back to FDDataType.
// NOTE(review): takes a pointer, unlike GetOrtDtype which takes a const
// reference — presumably ort_dtype must be non-null; confirm against the
// implementation before relying on null-handling.
FDDataType GetFdDtype(const ONNXTensorElementDataType* ort_dtype);

// Create an Ort::Value wrapping the data of `tensor`.
// is_backend_cuda specifies whether onnxruntime uses the CUDAExecutionProvider.
// While is_backend_cuda = true, and tensor.device = Device::GPU,
// the CUDA memory held by `tensor` is shared directly with the returned
// Ort::Value (no copy). `tensor` is taken by non-const reference, so its
// buffer must outlive the returned value when data is shared.
Ort::Value CreateOrtValue(FDTensor& tensor, bool is_backend_cuda = false);

} // namespace fastdeploy
