mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-21 15:49:31 +08:00
yolov5 serving-deployment optimization (#262)
* yolov5 split pre and post process * yolov5 postprocess * yolov5 postprocess
This commit is contained in:
@@ -67,6 +67,12 @@ void BindRuntime(pybind11::module& m) {
|
||||
pybind11::class_<Runtime>(m, "Runtime")
|
||||
.def(pybind11::init())
|
||||
.def("init", &Runtime::Init)
|
||||
.def("infer",
|
||||
[](Runtime& self, std::vector<FDTensor>& inputs) {
|
||||
std::vector<FDTensor> outputs(self.NumOutputs());
|
||||
self.Infer(inputs, &outputs);
|
||||
return outputs;
|
||||
})
|
||||
.def("infer",
|
||||
[](Runtime& self, std::map<std::string, pybind11::array>& data) {
|
||||
std::vector<FDTensor> inputs(data.size());
|
||||
@@ -132,6 +138,32 @@ void BindRuntime(pybind11::module& m) {
|
||||
.value("FP64", FDDataType::FP64)
|
||||
.value("UINT8", FDDataType::UINT8);
|
||||
|
||||
// Expose FDTensor to Python. buffer_protocol() allows numpy on the Python
// side to consume the tensor's memory directly.
pybind11::class_<FDTensor>(m, "FDTensor", pybind11::buffer_protocol())
    .def(pybind11::init())
    // cpu_data(): return the tensor's CPU buffer as a numpy array.
    .def("cpu_data",
         [](FDTensor& self) {
           auto ptr = self.CpuData();
           auto dtype = FDDataTypeToNumpyDataType(self.dtype);
           // Passing `base` to the second array constructor makes the
           // returned array a non-owning VIEW over `ptr` rather than a copy.
           // NOTE(review): the view aliases the FDTensor's buffer, so it
           // presumably dangles if the tensor is destroyed or resized while
           // Python still holds the array — confirm intended lifetime.
           auto base = pybind11::array(dtype, self.shape);
           return pybind11::array(dtype, self.shape, ptr, base);
         })
    // Resize overload taking a single size_t — whether this is an element
    // count or a byte count is not visible here; see FDTensor::Resize.
    .def("resize", static_cast<void (FDTensor::*)(size_t)>(&FDTensor::Resize))
    // Resize by shape, keeping the current dtype.
    .def("resize",
         static_cast<void (FDTensor::*)(const std::vector<int64_t>&)>(
             &FDTensor::Resize))
    // Resize with explicit shape, dtype, name and target device.
    .def("resize",
         [](FDTensor& self, const std::vector<int64_t>& shape,
            const FDDataType& dtype, const std::string& name,
            const Device& device) { self.Resize(shape, dtype, name, device); })
    .def("numel", &FDTensor::Numel)
    .def("nbytes", &FDTensor::Nbytes)
    .def_readwrite("name", &FDTensor::name)
    .def_readonly("shape", &FDTensor::shape)
    .def_readonly("dtype", &FDTensor::dtype)
    .def_readonly("device", &FDTensor::device);
|
||||
|
||||
m.def("get_available_backends", []() { return GetAvailableBackends(); });
|
||||
}
|
||||
|
||||
|
@@ -73,6 +73,13 @@ void PyArrayToTensor(pybind11::array& pyarray, FDTensor* tensor,
|
||||
}
|
||||
}
|
||||
|
||||
// Convert a list of numpy arrays into a vector of FDTensors, one tensor per
// array, via element-wise PyArrayToTensor.
//
// pyarrays:     source numpy arrays.
// tensors:      destination vector; grown if it has fewer slots than
//               pyarrays (never shrunk, to stay compatible with callers
//               that pre-size it).
// share_buffer: forwarded to PyArrayToTensor; presumably selects aliasing
//               the numpy buffer instead of copying — confirm against
//               PyArrayToTensor's definition.
void PyArrayToTensorList(std::vector<pybind11::array>& pyarrays,
                         std::vector<FDTensor>* tensors, bool share_buffer) {
  // The original indexed (*tensors)[i] blindly, overrunning the vector when
  // the caller passed one with fewer elements than pyarrays.
  if (tensors->size() < pyarrays.size()) {
    tensors->resize(pyarrays.size());
  }
  // size_t index avoids the signed/unsigned comparison of `auto i = 0`.
  for (size_t i = 0; i < pyarrays.size(); ++i) {
    PyArrayToTensor(pyarrays[i], &(*tensors)[i], share_buffer);
  }
}
|
||||
|
||||
pybind11::array TensorToPyArray(const FDTensor& tensor) {
|
||||
auto numpy_dtype = FDDataTypeToNumpyDataType(tensor.dtype);
|
||||
auto out = pybind11::array(numpy_dtype, tensor.shape);
|
||||
|
@@ -42,6 +42,9 @@ FDDataType NumpyDataTypeToFDDataType(const pybind11::dtype& np_dtype);
|
||||
|
||||
// Convert a single numpy array into an FDTensor. When share_buffer is true
// the tensor presumably aliases the array's memory instead of copying —
// confirm against the definition in the .cc file.
void PyArrayToTensor(pybind11::array& pyarray, FDTensor* tensor,
                     bool share_buffer = false);
// Element-wise conversion of a list of numpy arrays into *tensor via
// PyArrayToTensor; share_buffer is forwarded to each conversion.
void PyArrayToTensorList(std::vector<pybind11::array>& pyarray,
                         std::vector<FDTensor>* tensor,
                         bool share_buffer = false);
// Convert an FDTensor into a newly allocated numpy array of matching dtype
// and shape (allocation is visible in the definition; whether the data is
// copied or shared is not visible from this header).
pybind11::array TensorToPyArray(const FDTensor& tensor);
|
||||
|
||||
#ifdef ENABLE_VISION
|
||||
|
Reference in New Issue
Block a user