[Model] Support YOLOv8 (#1137)

* add GPL license

* add GPL-3.0 license

* add GPL-3.0 license

* add GPL-3.0 license

* support yolov8

* add pybind for yolov8

* add yolov8 readme

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
This commit is contained in:
WJJ1995
2023-01-16 11:24:23 +08:00
committed by GitHub
parent a4b94b2c93
commit 02bd22422e
28 changed files with 1448 additions and 80 deletions

View File

@@ -16,54 +16,82 @@
namespace fastdeploy {
void BindYOLOv5(pybind11::module& m) {
pybind11::class_<vision::detection::YOLOv5Preprocessor>(
m, "YOLOv5Preprocessor")
pybind11::class_<vision::detection::YOLOv5Preprocessor>(m,
"YOLOv5Preprocessor")
.def(pybind11::init<>())
.def("run", [](vision::detection::YOLOv5Preprocessor& self, std::vector<pybind11::array>& im_list) {
std::vector<vision::FDMat> images;
for (size_t i = 0; i < im_list.size(); ++i) {
images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
}
std::vector<FDTensor> outputs;
std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
if (!self.Run(&images, &outputs, &ims_info)) {
throw std::runtime_error("Failed to preprocess the input data in PaddleClasPreprocessor.");
}
for (size_t i = 0; i < outputs.size(); ++i) {
outputs[i].StopSharing();
}
return make_pair(outputs, ims_info);
})
.def_property("size", &vision::detection::YOLOv5Preprocessor::GetSize, &vision::detection::YOLOv5Preprocessor::SetSize)
.def_property("padding_value", &vision::detection::YOLOv5Preprocessor::GetPaddingValue, &vision::detection::YOLOv5Preprocessor::SetPaddingValue)
.def_property("is_scale_up", &vision::detection::YOLOv5Preprocessor::GetScaleUp, &vision::detection::YOLOv5Preprocessor::SetScaleUp)
.def_property("is_mini_pad", &vision::detection::YOLOv5Preprocessor::GetMiniPad, &vision::detection::YOLOv5Preprocessor::SetMiniPad)
.def_property("stride", &vision::detection::YOLOv5Preprocessor::GetStride, &vision::detection::YOLOv5Preprocessor::SetStride);
.def(
"run",
[](vision::detection::YOLOv5Preprocessor& self,
std::vector<pybind11::array>& im_list) {
std::vector<vision::FDMat> images;
for (size_t i = 0; i < im_list.size(); ++i) {
images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
}
std::vector<FDTensor> outputs;
std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
if (!self.Run(&images, &outputs, &ims_info)) {
throw std::runtime_error(
"Failed to preprocess the input data in YOLOv5Preprocessor.");
}
for (size_t i = 0; i < outputs.size(); ++i) {
outputs[i].StopSharing();
}
return make_pair(outputs, ims_info);
})
.def_property("size", &vision::detection::YOLOv5Preprocessor::GetSize,
&vision::detection::YOLOv5Preprocessor::SetSize)
.def_property("padding_value",
&vision::detection::YOLOv5Preprocessor::GetPaddingValue,
&vision::detection::YOLOv5Preprocessor::SetPaddingValue)
.def_property("is_scale_up",
&vision::detection::YOLOv5Preprocessor::GetScaleUp,
&vision::detection::YOLOv5Preprocessor::SetScaleUp)
.def_property("is_mini_pad",
&vision::detection::YOLOv5Preprocessor::GetMiniPad,
&vision::detection::YOLOv5Preprocessor::SetMiniPad)
.def_property("stride", &vision::detection::YOLOv5Preprocessor::GetStride,
&vision::detection::YOLOv5Preprocessor::SetStride);
pybind11::class_<vision::detection::YOLOv5Postprocessor>(
m, "YOLOv5Postprocessor")
.def(pybind11::init<>())
.def("run", [](vision::detection::YOLOv5Postprocessor& self, std::vector<FDTensor>& inputs,
const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info) {
std::vector<vision::DetectionResult> results;
if (!self.Run(inputs, &results, ims_info)) {
throw std::runtime_error("Failed to postprocess the runtime result in YOLOv5Postprocessor.");
}
return results;
})
.def("run", [](vision::detection::YOLOv5Postprocessor& self, std::vector<pybind11::array>& input_array,
const std::vector<std::map<std::string, std::array<float, 2>>>& ims_info) {
std::vector<vision::DetectionResult> results;
std::vector<FDTensor> inputs;
PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
if (!self.Run(inputs, &results, ims_info)) {
throw std::runtime_error("Failed to postprocess the runtime result in YOLOv5Postprocessor.");
}
return results;
})
.def_property("conf_threshold", &vision::detection::YOLOv5Postprocessor::GetConfThreshold, &vision::detection::YOLOv5Postprocessor::SetConfThreshold)
.def_property("nms_threshold", &vision::detection::YOLOv5Postprocessor::GetNMSThreshold, &vision::detection::YOLOv5Postprocessor::SetNMSThreshold)
.def_property("multi_label", &vision::detection::YOLOv5Postprocessor::GetMultiLabel, &vision::detection::YOLOv5Postprocessor::SetMultiLabel);
.def("run",
[](vision::detection::YOLOv5Postprocessor& self,
std::vector<FDTensor>& inputs,
const std::vector<std::map<std::string, std::array<float, 2>>>&
ims_info) {
std::vector<vision::DetectionResult> results;
if (!self.Run(inputs, &results, ims_info)) {
throw std::runtime_error(
"Failed to postprocess the runtime result in "
"YOLOv5Postprocessor.");
}
return results;
})
.def("run",
[](vision::detection::YOLOv5Postprocessor& self,
std::vector<pybind11::array>& input_array,
const std::vector<std::map<std::string, std::array<float, 2>>>&
ims_info) {
std::vector<vision::DetectionResult> results;
std::vector<FDTensor> inputs;
PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
if (!self.Run(inputs, &results, ims_info)) {
throw std::runtime_error(
"Failed to postprocess the runtime result in "
"YOLOv5Postprocessor.");
}
return results;
})
.def_property("conf_threshold",
&vision::detection::YOLOv5Postprocessor::GetConfThreshold,
&vision::detection::YOLOv5Postprocessor::SetConfThreshold)
.def_property("nms_threshold",
&vision::detection::YOLOv5Postprocessor::GetNMSThreshold,
&vision::detection::YOLOv5Postprocessor::SetNMSThreshold)
.def_property("multi_label",
&vision::detection::YOLOv5Postprocessor::GetMultiLabel,
&vision::detection::YOLOv5Postprocessor::SetMultiLabel);
pybind11::class_<vision::detection::YOLOv5, FastDeployModel>(m, "YOLOv5")
.def(pybind11::init<std::string, std::string, RuntimeOption,
@@ -75,16 +103,20 @@ void BindYOLOv5(pybind11::module& m) {
self.Predict(mat, &res);
return res;
})
.def("batch_predict", [](vision::detection::YOLOv5& self, std::vector<pybind11::array>& data) {
std::vector<cv::Mat> images;
for (size_t i = 0; i < data.size(); ++i) {
images.push_back(PyArrayToCvMat(data[i]));
}
std::vector<vision::DetectionResult> results;
self.BatchPredict(images, &results);
return results;
})
.def_property_readonly("preprocessor", &vision::detection::YOLOv5::GetPreprocessor)
.def_property_readonly("postprocessor", &vision::detection::YOLOv5::GetPostprocessor);
.def("batch_predict",
[](vision::detection::YOLOv5& self,
std::vector<pybind11::array>& data) {
std::vector<cv::Mat> images;
for (size_t i = 0; i < data.size(); ++i) {
images.push_back(PyArrayToCvMat(data[i]));
}
std::vector<vision::DetectionResult> results;
self.BatchPredict(images, &results);
return results;
})
.def_property_readonly("preprocessor",
&vision::detection::YOLOv5::GetPreprocessor)
.def_property_readonly("postprocessor",
&vision::detection::YOLOv5::GetPostprocessor);
}
} // namespace fastdeploy