[Model] Support DINO & DETR and add PaddleDetectionModel class (#1837)

* 添加paddleclas模型

* 更新README_CN

* 更新README_CN

* 更新README

* update get_model.sh

* update get_models.sh

* update paddleseg models

* update paddle_seg models

* update paddle_seg models

* modified test resources

* update benchmark_gpu_trt.sh

* add paddle detection

* add paddledetection to benchmark

* modified benchmark cmakelists

* update benchmark scripts

* modified benchmark function calling

* modified paddledetection documents

* add PaddleDetectionModel

* reset examples/paddledetection

* resolve conflict

* update pybind

* resolve conflict

* fix bug

* delete debug mode

* update checkarch log

* update trt inputs example

* Update README.md

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
This commit is contained in:
linyangshi
2023-05-05 14:10:33 +08:00
committed by GitHub
parent 6d0261e9e4
commit 9164796645
11 changed files with 378 additions and 3 deletions

View File

@@ -266,6 +266,16 @@ class RuntimeOption:
"""
return self._option.use_ascend()
def disable_valid_backend_check(self):
    """Disable checking validity of backend during inference.

    Delegates to the underlying native runtime-option object.
    """
    option = self._option
    return option.disable_valid_backend_check()
def enable_valid_backend_check(self):
    """Enable checking validity of backend during inference.

    Delegates to the underlying native runtime-option object.
    """
    option = self._option
    return option.enable_valid_backend_check()
def set_cpu_thread_num(self, thread_num=-1):
"""Set number of threads if inference with CPU

View File

@@ -800,6 +800,78 @@ class GFL(PPYOLOE):
assert self.initialized, "GFL model initialize failed."
class PaddleDetectionModel(FastDeployModel):
    def __init__(self,
                 model_file,
                 params_file,
                 config_file,
                 runtime_option=None,
                 model_format=ModelFormat.PADDLE):
        """Load a detection model exported by PaddleDetection.

        :param model_file: (str)Path of model file, e.g ppyoloe/model.pdmodel
        :param params_file: (str)Path of parameters file, e.g ppyoloe/model.pdiparams, if the model_format is ModelFormat.ONNX, this param will be ignored, can be set as empty string
        :param config_file: (str)Path of configuration file for deployment, e.g ppyoloe/infer_cfg.yml
        :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model
        """
        super(PaddleDetectionModel, self).__init__(runtime_option)
        self._model = C.vision.detection.PaddleDetectionModel(
            model_file, params_file, config_file, self._runtime_option,
            model_format)
        assert self.initialized, "PaddleDetectionModel model initialize failed."

    def predict(self, im):
        """Detect an input image

        :param im: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
        :return: DetectionResult
        """
        assert im is not None, "The input image data is None."
        return self._model.predict(im)

    def batch_predict(self, images):
        """Detect a batch of input image list

        :param images: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
        :return: list of DetectionResult
        """
        return self._model.batch_predict(images)

    def clone(self):
        """Clone PaddleDetectionModel object

        :return: a new PaddleDetectionModel object
        """

        # Fix of a copy-paste defect: the original created a PPYOLOE-derived
        # clone, so the returned object was not a PaddleDetectionModel.
        class PaddleDetectionModelClone(PaddleDetectionModel):
            def __init__(self, model):
                # Wrap an already-cloned native model without re-loading
                # model files (skips the normal __init__ on purpose).
                self._model = model

        clone_model = PaddleDetectionModelClone(self._model.clone())
        return clone_model

    @property
    def preprocessor(self):
        """Get PaddleDetPreprocessor object of the loaded model

        :return PaddleDetPreprocessor
        """
        return self._model.preprocessor

    @property
    def postprocessor(self):
        """Get PaddleDetPostprocessor object of the loaded model

        :return PaddleDetPostprocessor
        """
        return self._model.postprocessor
class PPYOLOER(PPYOLOE):
def __init__(self,
model_file,