mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-05 16:48:03 +08:00
[Model] Refactor YOLOv7 module (#611)
* add paddle_trt in benchmark * update benchmark in device * update benchmark * update result doc * fixed for CI * update python api_docs * update index.rst * add runtime cpp examples * deal with comments * Update infer_paddle_tensorrt.py * Add runtime quick start * deal with comments * fixed reused_input_tensors&&reused_output_tensors * fixed docs * fixed headpose typo * fixed typo * refactor yolov5 * update model infer * refactor pybind for yolov5 * rm origin yolov5 * fixed bugs * rm cuda preprocess * fixed bugs * fixed bugs * fixed bug * fixed bug * fix pybind * rm useless code * add convert_and_permute * fixed bugs * fixed im_info for bs_predict * fixed bug * add bs_predict for yolov5 * Add runtime test and batch eval * deal with comments * fixed bug * update testcase * fixed batch eval bug * fixed preprocess bug * refactor yolov7 * add yolov7 testcase * rm resize_after_load and add is_scale_up * fixed bug * set multi_label true Co-authored-by: Jason <928090362@qq.com>
This commit is contained in:
@@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import absolute_import
|
||||
from .contrib.yolov7 import YOLOv7
|
||||
from .contrib.yolov7 import *
|
||||
from .contrib.yolor import YOLOR
|
||||
from .contrib.scaled_yolov4 import ScaledYOLOv4
|
||||
from .contrib.nanodet_plus import NanoDetPlus
|
||||
|
@@ -41,9 +41,19 @@ class YOLOv5Preprocessor:
|
||||
|
||||
@property
def padding_value(self):
    """Padding color used when letterboxing the input image.

    One float per image channel; defaults to [114.0, 114.0, 114.0].
    The list length is expected to match the number of channels.
    """
    return self._preprocessor.padding_value
|
||||
|
||||
@property
def is_scale_up(self):
    """Whether the resize step may enlarge the input image.

    When False, the image can only be shrunk (resize scale capped at
    1.0). Defaults to True.
    """
    return self._preprocessor.is_scale_up
|
||||
|
||||
@size.setter
|
||||
def size(self, wh):
|
||||
assert isinstance(wh, (list, tuple)),\
|
||||
@@ -60,6 +70,13 @@ class YOLOv5Preprocessor:
|
||||
list), "The value to set `padding_value` must be type of list."
|
||||
self._preprocessor.padding_value = value
|
||||
|
||||
@is_scale_up.setter
def is_scale_up(self, value):
    # Reject non-bool values up front so the flag reaching the C++
    # preprocessor is always a genuine boolean.
    assert isinstance(value, bool), \
        "The value to set `is_scale_up` must be type of bool."
    self._preprocessor.is_scale_up = value
|
||||
|
||||
|
||||
class YOLOv5Postprocessor:
|
||||
def __init__(self):
|
||||
@@ -93,7 +110,7 @@ class YOLOv5Postprocessor:
|
||||
@property
def multi_label(self):
    """Whether one box may be reported once per qualifying class.

    Set to True for evaluation (the default is True).
    """
    return self._postprocessor.multi_label
|
||||
|
||||
|
@@ -18,6 +18,108 @@ from .... import FastDeployModel, ModelFormat
|
||||
from .... import c_lib_wrap as C
|
||||
|
||||
|
||||
class YOLOv7Preprocessor:
    """Python wrapper over the C++ YOLOv7 preprocessing stage.

    All state (size, padding_value, is_scale_up) lives in the underlying
    C object; the properties below merely validate and forward values.
    """

    def __init__(self):
        """Create a preprocessor for YOLOv7."""
        self._preprocessor = C.vision.detection.YOLOv7Preprocessor()

    def run(self, input_ims):
        """Preprocess input images for YOLOv7.

        :param: input_ims: (list of numpy.ndarray)The input images, each a
                3-D array with layout HWC, BGR format
        :return: list of FDTensor
        """
        return self._preprocessor.run(input_ims)

    @property
    def size(self):
        """
        Argument for image preprocessing step, the preprocess image size,
        tuple of (width, height), default size = [640, 640]
        """
        return self._preprocessor.size

    @property
    def padding_value(self):
        """
        padding value for preprocessing, default [114.0, 114.0, 114.0]
        """
        # padding value, size should be the same as channels
        return self._preprocessor.padding_value

    @property
    def is_scale_up(self):
        """
        is_scale_up for preprocessing, the input image only can be zoom out,
        the maximum resize scale cannot exceed 1.0, default true
        """
        return self._preprocessor.is_scale_up

    @size.setter
    def size(self, wh):
        assert isinstance(wh, (list, tuple)),\
            "The value to set `size` must be type of tuple or list."
        # FIX: original message read "must contatins 2 elements means ..."
        # (typo + grammar); corrected wording below.
        assert len(wh) == 2,\
            "The value to set `size` must contain 2 elements [width, height], but now it contains {} elements.".format(
                len(wh))
        self._preprocessor.size = wh

    @padding_value.setter
    def padding_value(self, value):
        assert isinstance(
            value,
            list), "The value to set `padding_value` must be type of list."
        self._preprocessor.padding_value = value

    @is_scale_up.setter
    def is_scale_up(self, value):
        assert isinstance(
            value,
            bool), "The value to set `is_scale_up` must be type of bool."
        self._preprocessor.is_scale_up = value
|
||||
|
||||
|
||||
class YOLOv7Postprocessor:
    """Python wrapper over the C++ YOLOv7 postprocessing stage.

    Thresholds live in the underlying C object; the properties below
    validate and forward values.
    """

    def __init__(self):
        """Create a postprocessor for YOLOv7."""
        self._postprocessor = C.vision.detection.YOLOv7Postprocessor()

    def run(self, runtime_results, ims_info):
        """Decode raw runtime tensors into detection results.

        :param: runtime_results: (list of FDTensor)The output FDTensor
                results from runtime
        :param: ims_info: (list of dict)Record input_shape and output_shape
        :return: list of DetectionResult — when the runtime results come
                 from a batched predict, the list length equals the batch
                 size
        """
        return self._postprocessor.run(runtime_results, ims_info)

    @property
    def conf_threshold(self):
        """Confidence threshold applied while filtering boxes, default 0.25."""
        return self._postprocessor.conf_threshold

    @property
    def nms_threshold(self):
        """IoU threshold used by non-maximum suppression, default 0.5."""
        return self._postprocessor.nms_threshold

    @conf_threshold.setter
    def conf_threshold(self, value):
        assert isinstance(value, float), \
            "The value to set `conf_threshold` must be type of float."
        self._postprocessor.conf_threshold = value

    @nms_threshold.setter
    def nms_threshold(self, value):
        assert isinstance(value, float), \
            "The value to set `nms_threshold` must be type of float."
        self._postprocessor.nms_threshold = value
|
||||
|
||||
|
||||
class YOLOv7(FastDeployModel):
|
||||
def __init__(self,
|
||||
model_file,
|
||||
@@ -35,6 +137,7 @@ class YOLOv7(FastDeployModel):
|
||||
# 初始化后的option保存在self._runtime_option
|
||||
super(YOLOv7, self).__init__(runtime_option)
|
||||
|
||||
assert model_format == ModelFormat.ONNX, "YOLOv7 only support model format of ModelFormat.ONNX now."
|
||||
self._model = C.vision.detection.YOLOv7(
|
||||
model_file, params_file, self._runtime_option, model_format)
|
||||
# 通过self.initialized判断整个模型的初始化是否成功
|
||||
@@ -44,96 +147,36 @@ class YOLOv7(FastDeployModel):
|
||||
"""Detect an input image
|
||||
|
||||
:param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
|
||||
:param conf_threshold: confidence threashold for postprocessing, default is 0.25
|
||||
:param nms_iou_threshold: iou threashold for NMS, default is 0.5
|
||||
:param conf_threshold: confidence threshold for postprocessing, default is 0.25
|
||||
:param nms_iou_threshold: iou threshold for NMS, default is 0.5
|
||||
:return: DetectionResult
|
||||
"""
|
||||
return self._model.predict(input_image, conf_threshold,
|
||||
nms_iou_threshold)
|
||||
|
||||
# 一些跟YOLOv7模型有关的属性封装
|
||||
# 多数是预处理相关,可通过修改如model.size = [1280, 1280]改变预处理时resize的大小(前提是模型支持)
|
||||
@property
|
||||
def size(self):
|
||||
self.postprocessor.conf_threshold = conf_threshold
|
||||
self.postprocessor.nms_threshold = nms_iou_threshold
|
||||
return self._model.predict(input_image)
|
||||
|
||||
def batch_predict(self, images):
|
||||
"""Classify a batch of input image
|
||||
|
||||
:param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
|
||||
:return list of DetectionResult
|
||||
"""
|
||||
Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
|
||||
|
||||
return self._model.batch_predict(images)
|
||||
|
||||
@property
|
||||
def preprocessor(self):
|
||||
"""Get YOLOv7Preprocessor object of the loaded model
|
||||
|
||||
:return YOLOv7Preprocessor
|
||||
"""
|
||||
return self._model.size
|
||||
return self._model.preprocessor
|
||||
|
||||
@property
|
||||
def padding_value(self):
|
||||
# padding value, size should be the same as channels
|
||||
return self._model.padding_value
|
||||
def postprocessor(self):
|
||||
"""Get YOLOv7Postprocessor object of the loaded model
|
||||
|
||||
@property
|
||||
def is_no_pad(self):
|
||||
# while is_mini_pad = false and is_no_pad = true, will resize the image to the set size
|
||||
return self._model.is_no_pad
|
||||
|
||||
@property
|
||||
def is_mini_pad(self):
|
||||
# only pad to the minimum rectange which height and width is times of stride
|
||||
return self._model.is_mini_pad
|
||||
|
||||
@property
|
||||
def is_scale_up(self):
|
||||
# if is_scale_up is false, the input image only can be zoom out, the maximum resize scale cannot exceed 1.0
|
||||
return self._model.is_scale_up
|
||||
|
||||
@property
|
||||
def stride(self):
|
||||
# padding stride, for is_mini_pad
|
||||
return self._model.stride
|
||||
|
||||
@property
|
||||
def max_wh(self):
|
||||
# for offseting the boxes by classes when using NMS
|
||||
return self._model.max_wh
|
||||
|
||||
@size.setter
|
||||
def size(self, wh):
|
||||
assert isinstance(wh, (list, tuple)),\
|
||||
"The value to set `size` must be type of tuple or list."
|
||||
assert len(wh) == 2,\
|
||||
"The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
|
||||
len(wh))
|
||||
self._model.size = wh
|
||||
|
||||
@padding_value.setter
|
||||
def padding_value(self, value):
|
||||
assert isinstance(
|
||||
value,
|
||||
list), "The value to set `padding_value` must be type of list."
|
||||
self._model.padding_value = value
|
||||
|
||||
@is_no_pad.setter
|
||||
def is_no_pad(self, value):
|
||||
assert isinstance(
|
||||
value, bool), "The value to set `is_no_pad` must be type of bool."
|
||||
self._model.is_no_pad = value
|
||||
|
||||
@is_mini_pad.setter
|
||||
def is_mini_pad(self, value):
|
||||
assert isinstance(
|
||||
value,
|
||||
bool), "The value to set `is_mini_pad` must be type of bool."
|
||||
self._model.is_mini_pad = value
|
||||
|
||||
@is_scale_up.setter
|
||||
def is_scale_up(self, value):
|
||||
assert isinstance(
|
||||
value,
|
||||
bool), "The value to set `is_scale_up` must be type of bool."
|
||||
self._model.is_scale_up = value
|
||||
|
||||
@stride.setter
|
||||
def stride(self, value):
|
||||
assert isinstance(
|
||||
value, int), "The value to set `stride` must be type of int."
|
||||
self._model.stride = value
|
||||
|
||||
@max_wh.setter
|
||||
def max_wh(self, value):
|
||||
assert isinstance(
|
||||
value, float), "The value to set `max_wh` must be type of float."
|
||||
self._model.max_wh = value
|
||||
:return YOLOv7Postprocessor
|
||||
"""
|
||||
return self._model.postprocessor
|
||||
|
Reference in New Issue
Block a user