Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 16:48:03 +08:00)
[Model] Support YOLOv8 (#1137)
* add GPL license
* add GPL-3.0 license
* add GPL-3.0 license
* add GPL-3.0 license
* support yolov8
* add pybind for yolov8
* add yolov8 readme

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
@@ -25,5 +25,6 @@ from .contrib.yolov5lite import YOLOv5Lite
 from .contrib.yolov6 import YOLOv6
 from .contrib.yolov7end2end_trt import YOLOv7End2EndTRT
 from .contrib.yolov7end2end_ort import YOLOv7End2EndORT
+from .contrib.yolov8 import *
 from .ppdet import *
 from .contrib.rkyolo import *
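With the wildcard import added in the hunk above, the wrapper classes defined in the new yolov8.py (shown further down) become reachable from the detection namespace. A quick sketch, assuming the package is installed as `fastdeploy`:

import fastdeploy as fd

# the classes added by this commit are now exposed under fd.vision.detection
print(fd.vision.detection.YOLOv8)
print(fd.vision.detection.YOLOv8Preprocessor)
print(fd.vision.detection.YOLOv8Postprocessor)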
python/fastdeploy/vision/detection/contrib/yolov5seg.py: 4 changed lines (Normal file → Executable file)
@@ -180,7 +180,7 @@ class YOLOv5Seg(FastDeployModel):
             model_file, params_file, self._runtime_option, model_format)
         assert self.initialized, "YOLOv5Seg initialize failed."

-    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+    def predict(self, input_image):
         """Detect an input image

         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
@@ -189,8 +189,6 @@ class YOLOv5Seg(FastDeployModel):
         :return: DetectionResult
         """

-        self.postprocessor.conf_threshold = conf_threshold
-        self.postprocessor.nms_threshold = nms_iou_threshold
         return self._model.predict(input_image)

     def batch_predict(self, images):
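The removed lines mean predict no longer accepts threshold arguments; after this commit the thresholds are configured through the postprocessor properties instead. A minimal sketch of the adjusted call pattern (the model path and test image below are placeholders, not part of the commit):

import cv2
import fastdeploy as fd

model = fd.vision.detection.YOLOv5Seg("./yolov5s-seg.onnx")  # hypothetical local ONNX export
model.postprocessor.conf_threshold = 0.25  # previously predict(..., conf_threshold=0.25)
model.postprocessor.nms_threshold = 0.5    # previously predict(..., nms_iou_threshold=0.5)

im = cv2.imread("./test.jpg")              # placeholder test image
result = model.predict(im)
print(result)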
python/fastdeploy/vision/detection/contrib/yolov8.py: 217 added lines (new Executable file)
@@ -0,0 +1,217 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C


class YOLOv8Preprocessor:
    def __init__(self):
        """Create a preprocessor for YOLOv8
        """
        self._preprocessor = C.vision.detection.YOLOv8Preprocessor()

    def run(self, input_ims):
        """Preprocess input images for YOLOv8

        :param: input_ims: (list of numpy.ndarray)The input image
        :return: list of FDTensor
        """
        return self._preprocessor.run(input_ims)

    @property
    def size(self):
        """
        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
        """
        return self._preprocessor.size

    @property
    def padding_value(self):
        """
        padding value for preprocessing, default [114.0, 114.0, 114.0]
        """
        # padding value, size should be the same as channels
        return self._preprocessor.padding_value

    @property
    def is_scale_up(self):
        """
        is_scale_up for preprocessing; if False, the input image can only be scaled down (the maximum resize scale cannot exceed 1.0), default True
        """
        return self._preprocessor.is_scale_up

    @property
    def is_mini_pad(self):
        """
        is_mini_pad for preprocessing, pad to the minimum rectangle whose height and width are multiples of stride, default False
        """
        return self._preprocessor.is_mini_pad

    @property
    def stride(self):
        """
        stride for preprocessing, only used in mini_pad mode, default 32
        """
        return self._preprocessor.stride

    @size.setter
    def size(self, wh):
        assert isinstance(wh, (list, tuple)),\
            "The value to set `size` must be type of tuple or list."
        assert len(wh) == 2,\
            "The value to set `size` must contain 2 elements [width, height], but now it contains {} elements.".format(
                len(wh))
        self._preprocessor.size = wh

    @padding_value.setter
    def padding_value(self, value):
        assert isinstance(
            value,
            list), "The value to set `padding_value` must be type of list."
        self._preprocessor.padding_value = value

    @is_scale_up.setter
    def is_scale_up(self, value):
        assert isinstance(
            value,
            bool), "The value to set `is_scale_up` must be type of bool."
        self._preprocessor.is_scale_up = value

    @is_mini_pad.setter
    def is_mini_pad(self, value):
        assert isinstance(
            value,
            bool), "The value to set `is_mini_pad` must be type of bool."
        self._preprocessor.is_mini_pad = value

    @stride.setter
    def stride(self, value):
        assert isinstance(
            value, int), "The value to set `stride` must be type of int."
        self._preprocessor.stride = value


class YOLOv8Postprocessor:
    def __init__(self):
        """Create a postprocessor for YOLOv8
        """
        self._postprocessor = C.vision.detection.YOLOv8Postprocessor()

    def run(self, runtime_results, ims_info):
        """Postprocess the runtime results for YOLOv8

        :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
        :param: ims_info: (list of dict)Record input_shape and output_shape
        :return: list of DetectionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
        """
        return self._postprocessor.run(runtime_results, ims_info)

    @property
    def conf_threshold(self):
        """
        confidence threshold for postprocessing, default is 0.25
        """
        return self._postprocessor.conf_threshold

    @property
    def nms_threshold(self):
        """
        nms threshold for postprocessing, default is 0.5
        """
        return self._postprocessor.nms_threshold

    @property
    def multi_label(self):
        """
        multi_label for postprocessing, set to True for evaluation, default is True
        """
        return self._postprocessor.multi_label

    @conf_threshold.setter
    def conf_threshold(self, conf_threshold):
        assert isinstance(conf_threshold, float),\
            "The value to set `conf_threshold` must be type of float."
        self._postprocessor.conf_threshold = conf_threshold

    @nms_threshold.setter
    def nms_threshold(self, nms_threshold):
        assert isinstance(nms_threshold, float),\
            "The value to set `nms_threshold` must be type of float."
        self._postprocessor.nms_threshold = nms_threshold

    @multi_label.setter
    def multi_label(self, value):
        assert isinstance(
            value,
            bool), "The value to set `multi_label` must be type of bool."
        self._postprocessor.multi_label = value


class YOLOv8(FastDeployModel):
    def __init__(self,
                 model_file,
                 params_file="",
                 runtime_option=None,
                 model_format=ModelFormat.ONNX):
        """Load a YOLOv8 model.

        :param model_file: (str)Path of model file, e.g. ./yolov8s.onnx
        :param params_file: (str)Path of parameters file, e.g. ./yolov8s.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
        :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for running inference with this model, if it's None, will use the default backend on CPU
        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model
        """
        super(YOLOv8, self).__init__(runtime_option)

        self._model = C.vision.detection.YOLOv8(
            model_file, params_file, self._runtime_option, model_format)
        assert self.initialized, "YOLOv8 initialize failed."

    def predict(self, input_image):
        """Detect an input image

        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
        :return: DetectionResult

        The confidence threshold and NMS threshold are configured through the
        postprocessor's conf_threshold and nms_threshold properties rather than
        passed to this method.
        """

        return self._model.predict(input_image)

    def batch_predict(self, images):
        """Detect a batch of input images

        :param images: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
        :return list of DetectionResult
        """

        return self._model.batch_predict(images)

    @property
    def preprocessor(self):
        """Get YOLOv8Preprocessor object of the loaded model

        :return YOLOv8Preprocessor
        """
        return self._model.preprocessor

    @property
    def postprocessor(self):
        """Get YOLOv8Postprocessor object of the loaded model

        :return YOLOv8Postprocessor
        """
        return self._model.postprocessor
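For reference, a minimal end-to-end sketch of how the new wrapper is intended to be used. The file paths and the RuntimeOption choice are illustrative, not part of the commit:

import cv2
import fastdeploy as fd

# run on CPU with the default backend; other RuntimeOption settings work the same way
option = fd.RuntimeOption()
option.use_cpu()

model = fd.vision.detection.YOLOv8("./yolov8s.onnx", runtime_option=option)

# pre/postprocessing can be tuned through the exposed objects
model.preprocessor.size = [640, 640]
model.postprocessor.conf_threshold = 0.25
model.postprocessor.nms_threshold = 0.5

im = cv2.imread("./test.jpg")  # placeholder input image
result = model.predict(im)
print(result)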