[Model] Refactor PaddleDetection module (#575)

* Add namespace for functions

* Refactor PaddleDetection module

* Finish all single-image tests

* Update preprocessor.cc

* Fix some little details

* Add Python API

* Update postprocessor.cc
Jason committed on 2022-11-15 10:43:23 +08:00 (committed by GitHub)
parent aa21272eaa · commit beaa0fd190
39 changed files with 1282 additions and 1438 deletions


@@ -23,5 +23,4 @@ from .contrib.yolov5lite import YOLOv5Lite
from .contrib.yolov6 import YOLOv6
from .contrib.yolov7end2end_trt import YOLOv7End2EndTRT
from .contrib.yolov7end2end_ort import YOLOv7End2EndORT
from .ppdet import PPYOLOE, PPYOLO, PPYOLOv2, PaddleYOLOX, PicoDet, FasterRCNN, YOLOv3, MaskRCNN
from .rknpu2 import RKPicoDet
from .ppdet import *
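
With the explicit class list replaced by a wildcard import, a quick sanity check (a minimal sketch, assuming ppdet re-exports the same public names) could be:

import fastdeploy as fd

# The wildcard import should keep the public detection classes reachable
# under fastdeploy.vision.detection exactly as before (hypothetical check).
for name in ("PPYOLOE", "PPYOLO", "PPYOLOv2", "PaddleYOLOX",
             "PicoDet", "FasterRCNN", "YOLOv3", "MaskRCNN"):
    assert hasattr(fd.vision.detection, name), f"{name} is no longer exported"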


@@ -19,6 +19,40 @@ from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C
class PaddleDetPreprocessor:
def __init__(self, config_file):
"""Create a preprocessor for PaddleDetection Model from configuration file
:param config_file: (str) Path of configuration file, e.g. ppyoloe/infer_cfg.yml
"""
self._preprocessor = C.vision.detection.PaddleDetPreprocessor(
config_file)
def run(self, input_ims):
"""Preprocess input images for PaddleDetection Model
:param input_ims: (list of numpy.ndarray) The input images
:return: list of FDTensor, including image, scale_factor and im_shape
"""
return self._preprocessor.run(input_ims)
class PaddleDetPostprocessor:
def __init__(self):
"""Create a postprocessor for PaddleDetection Model
"""
self._postprocessor = C.vision.detection.PaddleDetPostprocessor()
def run(self, runtime_results):
"""Postprocess the runtime results for PaddleDetection Model
:param runtime_results: (list of FDTensor) The output FDTensor results from runtime
:return: list of DetectionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
"""
return self._postprocessor.run(runtime_results)
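
For orientation, a minimal usage sketch of the new processor pair, assuming a hypothetical ppyoloe export directory and test image; the runtime step in the middle is only indicated:

import cv2
import fastdeploy as fd

preprocessor = fd.vision.detection.PaddleDetPreprocessor("ppyoloe/infer_cfg.yml")
postprocessor = fd.vision.detection.PaddleDetPostprocessor()

im = cv2.imread("test.jpg")       # HWC, BGR numpy.ndarray
inputs = preprocessor.run([im])   # FDTensors: image, scale_factor, im_shape

# `inputs` would then be fed to an fd.Runtime holding the Paddle model, and the
# raw runtime outputs handed back to the postprocessor:
# results = postprocessor.run(runtime_outputs)   # list of DetectionResult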
class PPYOLOE(FastDeployModel):
def __init__(self,
model_file,
@@ -52,6 +86,31 @@ class PPYOLOE(FastDeployModel):
assert im is not None, "The input image data is None."
return self._model.predict(im)
def batch_predict(self, images):
"""Detect a batch of input image list
:param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
:return list of DetectionResult
"""
return self._model.batch_predict(images)
@property
def preprocessor(self):
"""Get PaddleDetPreprocessor object of the loaded model
:return PaddleDetPreprocessor
"""
return self._model.preprocessor
@property
def postprocessor(self):
"""Get PaddleDetPostprocessor object of the loaded model
:return PaddleDetPostprocessor
"""
return self._model.postprocessor
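
A hedged end-to-end sketch of the new batch interface and accessors on PPYOLOE (model paths and image file names are hypothetical):

import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_cpu()                    # or option.use_gpu(0)

model = fd.vision.detection.PPYOLOE(
    "ppyoloe/model.pdmodel",
    "ppyoloe/model.pdiparams",
    "ppyoloe/infer_cfg.yml",
    runtime_option=option)

ims = [cv2.imread("000000014439.jpg"), cv2.imread("000000570688.jpg")]
results = model.batch_predict(ims)  # one DetectionResult per input image

# The new properties expose the underlying processors for inspection.
print(model.preprocessor, model.postprocessor)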
class PPYOLO(PPYOLOE):
def __init__(self,
@@ -77,31 +136,6 @@ class PPYOLO(PPYOLOE):
assert self.initialized, "PPYOLO model initialize failed."
class PPYOLOv2(PPYOLOE):
def __init__(self,
model_file,
params_file,
config_file,
runtime_option=None,
model_format=ModelFormat.PADDLE):
"""Load a PPYOLOv2 model exported by PaddleDetection.
:param model_file: (str)Path of model file, e.g ppyolov2/model.pdmodel
:param params_file: (str)Path of parameters file, e.g ppyolov2/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param config_file: (str)Path of configuration file for deployment, e.g ppyoloe/infer_cfg.yml
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
super(PPYOLOE, self).__init__(runtime_option)
assert model_format == ModelFormat.PADDLE, "PPYOLOv2 model only support model format of ModelFormat.Paddle now."
self._model = C.vision.detection.PPYOLOv2(
model_file, params_file, config_file, self._runtime_option,
model_format)
assert self.initialized, "PPYOLOv2 model initialize failed."
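
The subclasses call super(PPYOLOE, self).__init__(runtime_option), which appears intended to skip PPYOLOE's own model construction while reusing its Python interface. For completeness, a hedged sketch of loading one of these wrappers on GPU (PPYOLOv2 shown; paths and device id are hypothetical):

import fastdeploy as fd

option = fd.RuntimeOption()
option.use_gpu(0)                   # hypothetical device id
# option.use_trt_backend()          # optionally switch the backend to TensorRT

model = fd.vision.detection.PPYOLOv2(
    "ppyolov2/model.pdmodel",
    "ppyolov2/model.pdiparams",
    "ppyolov2/infer_cfg.yml",
    runtime_option=option)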
class PaddleYOLOX(PPYOLOE):
def __init__(self,
model_file,
@@ -202,7 +236,7 @@ class YOLOv3(PPYOLOE):
assert self.initialized, "YOLOv3 model initialize failed."
class MaskRCNN(FastDeployModel):
class MaskRCNN(PPYOLOE):
def __init__(self,
model_file,
params_file,
@@ -211,14 +245,14 @@ class MaskRCNN(FastDeployModel):
model_format=ModelFormat.PADDLE):
"""Load a MaskRCNN model exported by PaddleDetection.
:param model_file: (str)Path of model file, e.g maskrcnn/model.pdmodel
:param params_file: (str)Path of parameters file, e.g maskrcnn/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param model_file: (str) Path of model file, e.g. fasterrcnn/model.pdmodel
:param params_file: (str) Path of parameters file, e.g. fasterrcnn/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
:param config_file: (str) Path of configuration file for deployment, e.g. ppyoloe/infer_cfg.yml
:param runtime_option: (fastdeploy.RuntimeOption) RuntimeOption for inferring this model; if it's None, the default backend on CPU will be used
:param model_format: (fastdeploy.ModelFormat) Model format of the loaded model
"""
super(MaskRCNN, self).__init__(runtime_option)
super(PPYOLOE, self).__init__(runtime_option)
assert model_format == ModelFormat.PADDLE, "MaskRCNN model only supports the model format ModelFormat.PADDLE now."
self._model = C.vision.detection.MaskRCNN(
@@ -226,6 +260,12 @@ class MaskRCNN(FastDeployModel):
model_format)
assert self.initialized, "MaskRCNN model initialize failed."
def predict(self, input_image):
assert input_image is not None, "The input image data is None."
return self._model.predict(input_image)
def batch_predict(self, images):
"""Detect a batch of input image list, batch_predict is not supported for maskrcnn now.
:param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
:return list of DetectionResult
"""
raise Exception(
"batch_predict is not supported for MaskRCNN model now.")