Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-06 09:07:10 +08:00)
FDTensor support GPU device (#190)
* fdtensor support GPU
* TRT backend support GPU FDTensor
* FDHostAllocator add FASTDEPLOY_DECL
* fix FDTensor Data
* fix FDTensor dtype

Co-authored-by: Jason <jiangjiajun@baidu.com>
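In practical terms, "FDTensor support GPU device" means an FDTensor can now describe a buffer that lives on the GPU and be handed to a GPU-capable backend without an intermediate host copy. The following is only a rough usage sketch; the SetExternalData signature with a Device argument and the Device::GPU enum value are assumptions based on FastDeploy's public FDTensor API, not code taken from this commit.

#include "fastdeploy/core/fd_tensor.h"

// Sketch: wrap an existing CUDA buffer in an FDTensor so a GPU-aware backend
// (e.g. the TRT or Paddle backend) can consume it directly.
// `d_input` is a hypothetical device pointer obtained from cudaMalloc elsewhere.
void WrapGpuBuffer(float* d_input) {
  fastdeploy::FDTensor tensor;
  // Assumed signature: SetExternalData(shape, dtype, buffer, device)
  tensor.SetExternalData({1, 3, 224, 224}, fastdeploy::FDDataType::FP32,
                         d_input, fastdeploy::Device::GPU);
  // tensor.device is now Device::GPU, so backends that understand GPU FDTensors
  // can bind this memory without copying it back to the host first.
}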
@@ -44,8 +44,11 @@ struct PaddleBackendOption {
  std::vector<std::string> delete_pass_names = {};
};

// convert FD device to paddle place type
paddle_infer::PlaceType ConvertFDDeviceToPlace(Device device);

// Share memory buffer with paddle_infer::Tensor from fastdeploy::FDTensor
void ShareTensorFromCpu(paddle_infer::Tensor* tensor, FDTensor& fd_tensor);
void ShareTensorFromFDTensor(paddle_infer::Tensor* tensor, FDTensor& fd_tensor);

// Copy memory data from paddle_infer::Tensor to fastdeploy::FDTensor
void CopyTensorToCpu(std::unique_ptr<paddle_infer::Tensor>& tensor,
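The hunk above only shows declarations. For orientation, here is a minimal sketch of what a device-aware share helper such as ShareTensorFromFDTensor could look like against the Paddle Inference API; the dtype branches, the FDTensor members used (device, shape, dtype, Data()), and the reliance on paddle_infer::Tensor::ShareExternalData are assumptions about the surrounding code, not the commit's actual implementation.

#include <cstdint>
#include <vector>
#include "fastdeploy/core/fd_tensor.h"   // FDTensor, FDDataType, Device
#include "paddle_inference_api.h"        // paddle_infer::Tensor, PlaceType

// Sketch only: share the FDTensor buffer with a paddle_infer::Tensor without
// copying, letting ConvertFDDeviceToPlace pick kCPU or kGPU from the device.
void ShareTensorFromFDTensor(paddle_infer::Tensor* tensor,
                             fastdeploy::FDTensor& fd_tensor) {
  std::vector<int> shape(fd_tensor.shape.begin(), fd_tensor.shape.end());
  auto place = fastdeploy::ConvertFDDeviceToPlace(fd_tensor.device);
  if (fd_tensor.dtype == fastdeploy::FDDataType::FP32) {
    tensor->ShareExternalData(static_cast<const float*>(fd_tensor.Data()),
                              shape, place);
  } else if (fd_tensor.dtype == fastdeploy::FDDataType::INT32) {
    tensor->ShareExternalData(static_cast<const int32_t*>(fd_tensor.Data()),
                              shape, place);
  } else if (fd_tensor.dtype == fastdeploy::FDDataType::INT64) {
    tensor->ShareExternalData(static_cast<const int64_t*>(fd_tensor.Data()),
                              shape, place);
  }
  // Other dtypes would follow the same pattern; unsupported ones should error.
}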
@@ -72,6 +75,8 @@ class PaddleBackend : public BaseBackend {

  TensorInfo GetInputInfo(int index);
  TensorInfo GetOutputInfo(int index);
  std::vector<TensorInfo> GetInputInfo();
  std::vector<TensorInfo> GetOutputInfo();

 private:
  paddle_infer::Config config_;
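The GetInputInfo / GetOutputInfo pair (indexed and whole-vector overloads) is how callers inspect the model's I/O layout once the backend is initialized. A usage sketch follows; it assumes TensorInfo carries name/shape/dtype as in FastDeploy's backend interface and that the header lives at the path shown.

#include <iostream>
#include "fastdeploy/backends/paddle/paddle_backend.h"

// Illustrative only: print the I/O metadata an already-initialized
// PaddleBackend exposes through the accessors added above.
void PrintBackendIO(fastdeploy::PaddleBackend& backend) {
  for (const auto& info : backend.GetInputInfo()) {
    std::cout << "input:  " << info.name
              << " rank=" << info.shape.size() << std::endl;
  }
  for (const auto& info : backend.GetOutputInfo()) {
    std::cout << "output: " << info.name
              << " rank=" << info.shape.size() << std::endl;
  }
}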