Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-05 16:48:03 +08:00.
FDTensor support GPU device (#190)
* fdtensor support GPU
* TRT backend support GPU FDTensor
* FDHostAllocator add FASTDEPLOY_DECL
* fix FDTensor Data
* fix FDTensor dtype

Co-authored-by: Jason <jiangjiajun@baidu.com>
This commit is contained in:
@@ -71,14 +71,13 @@ void BindRuntime(pybind11::module& m) {
|
||||
std::vector<FDTensor> inputs(data.size());
|
||||
int index = 0;
|
||||
for (auto iter = data.begin(); iter != data.end(); ++iter) {
|
||||
inputs[index].dtype =
|
||||
NumpyDataTypeToFDDataType(iter->second.dtype());
|
||||
inputs[index].shape.insert(
|
||||
inputs[index].shape.begin(), iter->second.shape(),
|
||||
iter->second.shape() + iter->second.ndim());
|
||||
std::vector<int64_t> data_shape;
|
||||
data_shape.insert(data_shape.begin(), iter->second.shape(),
|
||||
iter->second.shape() + iter->second.ndim());
|
||||
auto dtype = NumpyDataTypeToFDDataType(iter->second.dtype());
|
||||
// TODO(jiangjiajun) Maybe skip memory copy is a better choice
|
||||
// use SetExternalData
|
||||
inputs[index].data.resize(iter->second.nbytes());
|
||||
inputs[index].Resize(data_shape, dtype);
|
||||
memcpy(inputs[index].MutableData(), iter->second.mutable_data(),
|
||||
iter->second.nbytes());
|
||||
inputs[index].name = iter->first;
|
||||
@@ -134,4 +133,4 @@ void BindRuntime(pybind11::module& m) {
|
||||
m.def("get_available_backends", []() { return GetAvailableBackends(); });
|
||||
}
|
||||
|
||||
} // namespace fastdeploy
|
||||
} // namespace fastdeploy
|
||||
|
Reference in New Issue
Block a user