mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-07 01:22:59 +08:00
[Model] Add Paddle3D smoke model (#1766)
* add smoke model
* add 3d vis
* update code
* update doc
* mv paddle3d from detection to perception
* update result for velocity
* update code for CI
* add set input data for TRT backend
* add serving support for smoke model
* update code
* update code
* update code

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
@@ -484,6 +484,29 @@ class RuntimeOption:
        return self._option.trt_option.set_shape(tensor_name, min_shape,
                                                 opt_shape, max_shape)

    def set_trt_input_data(self,
                           tensor_name,
                           min_input_data,
                           opt_input_data=None,
                           max_input_data=None):
        """Set input data while using the TensorRT backend to load a model containing dynamic input shapes.

        :param tensor_name: (str)Name of the input which has a dynamic shape
        :param min_input_data: (list of int)Input data for the minimum shape of the input
        :param opt_input_data: (list of int)Input data for the optimal shape of the input; if set to None, it is kept the same as min_input_data
        :param max_input_data: (list of int)Input data for the maximum shape of the input; if set to None, it is kept the same as min_input_data
        """
        logging.warning(
            "`RuntimeOption.set_trt_input_data` will be deprecated in v1.2.0, please use `RuntimeOption.trt_option.set_input_data()` instead."
        )
        if opt_input_data is None and max_input_data is None:
            opt_input_data = min_input_data
            max_input_data = min_input_data
        else:
            assert opt_input_data is not None and max_input_data is not None, "Set min_input_data only, or set min_input_data, opt_input_data and max_input_data together."
        return self._option.trt_option.set_input_data(
            tensor_name, min_input_data, opt_input_data, max_input_data)

    def set_trt_cache_file(self, cache_file_path):
        """Set a cache file path while using the TensorRT backend. When loading a Paddle/ONNX model with set_trt_cache_file("./tensorrt_cache/model.trt"): if the file `./tensorrt_cache/model.trt` exists, building the TensorRT engine is skipped and the cache file is loaded directly; if it does not exist, the engine is built and saved to the cache file as a binary string.
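For orientation, a minimal sketch of how these TensorRT options fit together; the tensor name "images", the shapes, and the cache path are illustrative assumptions, not values taken from this diff:

    import fastdeploy as fd

    option = fd.RuntimeOption()
    option.use_gpu()
    option.use_trt_backend()
    # Declare the dynamic shape range of the hypothetical input tensor "images".
    option.trt_option.set_shape("images", [1, 3, 384, 1280], [1, 3, 384, 1280],
                                [1, 3, 384, 1280])
    # Representative input data for the dynamic-shape input; passing only
    # min_input_data reuses it for the opt and max profiles.
    option.set_trt_input_data("images", [0] * (1 * 3 * 384 * 1280))
    # Reuse a previously built engine if the cache file already exists.
    option.set_trt_cache_file("./tensorrt_cache/model.trt")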
@@ -27,6 +27,7 @@ from . import headpose
from . import sr
from . import evaluation
from . import generation
from . import perception
from .utils import fd_result_to_json
from .visualize import *
from .. import C
python/fastdeploy/vision/detection/ppdet/__init__.py (0 changes, Normal file → Executable file)
python/fastdeploy/vision/perception/__init__.py (16 lines, new Executable file)
@@ -0,0 +1,16 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from .paddle3d.smoke import *
python/fastdeploy/vision/perception/paddle3d/__init__.py (15 lines, new Normal file)
@@ -0,0 +1,15 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
python/fastdeploy/vision/perception/paddle3d/smoke.py (106 lines, new Executable file)
@@ -0,0 +1,106 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
import logging
from .... import FastDeployModel, ModelFormat
from .... import c_lib_wrap as C


class SmokePreprocessor:
    def __init__(self, config_file):
        """Create a preprocessor for Smoke
        """
        self._preprocessor = C.vision.perception.SmokePreprocessor(config_file)

    def run(self, input_ims):
        """Preprocess input images for Smoke

        :param input_ims: (list of numpy.ndarray)The input images
        :return: list of FDTensor
        """
        return self._preprocessor.run(input_ims)


class SmokePostprocessor:
    def __init__(self):
        """Create a postprocessor for Smoke
        """
        self._postprocessor = C.vision.perception.SmokePostprocessor()

    def run(self, runtime_results):
        """Postprocess the runtime results for Smoke

        :param runtime_results: (list of FDTensor)The output FDTensor results from the runtime
        :return: list of PerceptionResult (if runtime_results was predicted from batched samples, the length of this list equals the batch size)
        """
        return self._postprocessor.run(runtime_results)


class Smoke(FastDeployModel):
    def __init__(self,
                 model_file,
                 params_file,
                 config_file,
                 runtime_option=None,
                 model_format=ModelFormat.PADDLE):
        """Load a Smoke model exported by Paddle3D.

        :param model_file: (str)Path of the model file, e.g. ./smoke.pdmodel
        :param params_file: (str)Path of the parameters file, e.g. ./smoke.pdiparams
        :param config_file: (str)Path of the config file, e.g. ./infer_cfg.yaml
        :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference of this model; if None, the default backend on CPU is used
        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model
        """
        super(Smoke, self).__init__(runtime_option)

        self._model = C.vision.perception.Smoke(
            model_file, params_file, config_file, self._runtime_option,
            model_format)
        assert self.initialized, "Smoke initialize failed."

    def predict(self, input_image):
        """Detect an input image

        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
        :return: PerceptionResult
        """
        return self._model.predict(input_image)

    def batch_predict(self, images):
        """Detect a batch of input images

        :param images: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
        :return: list of PerceptionResult
        """

        return self._model.batch_predict(images)

    @property
    def preprocessor(self):
        """Get the SmokePreprocessor object of the loaded model

        :return: SmokePreprocessor
        """
        return self._model.preprocessor

    @property
    def postprocessor(self):
        """Get the SmokePostprocessor object of the loaded model

        :return: SmokePostprocessor
        """
        return self._model.postprocessor
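As a rough usage sketch for the class above (model paths and the test image are hypothetical placeholders, not files provided by this PR):

    import cv2
    import fastdeploy as fd

    # Load a SMOKE model exported by Paddle3D (hypothetical paths).
    model = fd.vision.perception.Smoke("smoke.pdmodel", "smoke.pdiparams",
                                       "infer_cfg.yaml")

    im = cv2.imread("test_image.png")
    result = model.predict(im)
    # PerceptionResult carries scores, label_ids, boxes, center,
    # observation_angle, yaw_angle and velocity for each detected object.
    print(result)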
python/fastdeploy/vision/utils.py (27 changes, Normal file → Executable file)
@@ -38,6 +38,19 @@ def detection_to_json(result):
    return json.dumps(r_json)


def perception_to_json(result):
    r_json = {
        "scores": result.scores,
        "label_ids": result.label_ids,
        "boxes": result.boxes,
        "center": result.center,
        "observation_angle": result.observation_angle,
        "yaw_angle": result.yaw_angle,
        "velocity": result.velocity
    }
    return json.dumps(r_json)


def classify_to_json(result):
    r_json = {
        "label_ids": result.label_ids,
@@ -151,6 +164,8 @@ def fd_result_to_json(result):
        return matting_to_json(result)
    elif isinstance(result, C.vision.HeadPoseResult):
        return head_pose_to_json(result)
    elif isinstance(result, C.vision.PerceptionResult):
        return perception_to_json(result)
    else:
        assert False, "{} Conversion to JSON format is not supported".format(
            type(result))
@@ -177,6 +192,18 @@ def json_to_detection(result):
    return det_result


def json_to_perception(result):
    perception_result = C.vision.PerceptionResult()
    perception_result.scores = result['scores']
    perception_result.label_ids = result['label_ids']
    perception_result.boxes = result['boxes']
    perception_result.center = result['center']
    perception_result.observation_angle = result['observation_angle']
    perception_result.yaw_angle = result['yaw_angle']
    perception_result.velocity = result['velocity']
    return perception_result


def json_to_classify(result):
    cls_result = C.vision.ClassifyResult()
    cls_result.label_ids = result['label_ids']
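A small sketch of a JSON round trip with the new helpers; the model and image paths are again placeholders:

    import json
    import cv2
    import fastdeploy as fd
    from fastdeploy.vision.utils import fd_result_to_json, json_to_perception

    model = fd.vision.perception.Smoke("smoke.pdmodel", "smoke.pdiparams",
                                       "infer_cfg.yaml")
    result = model.predict(cv2.imread("test_image.png"))

    # Serialize the PerceptionResult to a JSON string (e.g. for a serving response) ...
    json_str = fd_result_to_json(result)
    # ... and rebuild a C.vision.PerceptionResult from the parsed payload.
    restored = json_to_perception(json.loads(json_str))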
@@ -38,6 +38,26 @@ def vis_detection(im_data,
                                   line_size, font_size)


def vis_perception(im_data,
                   det_result,
                   config_file,
                   score_threshold=0.0,
                   line_size=1,
                   font_size=0.5):
    """Show the visualized results for 3D detection models

    :param im_data: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
    :param det_result: the result produced by the model
    :param config_file: the config file for detection and visualization
    :param score_threshold: (float) score threshold for result scores; a bounding box will not be shown if its score is less than score_threshold
    :param line_size: (float) line size for bounding boxes
    :param font_size: (float) font size for text
    :return: (numpy.ndarray) image with visualized results
    """
    return C.vision.vis_perception(im_data, det_result, config_file,
                                   score_threshold, line_size, font_size)


def vis_keypoint_detection(im_data, keypoint_det_result, conf_threshold=0.5):
    """Show the visualized results for keypoint detection models
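Finally, a short sketch of visualizing a perception result with the new helper; paths and the score threshold are illustrative:

    import cv2
    import fastdeploy as fd

    model = fd.vision.perception.Smoke("smoke.pdmodel", "smoke.pdiparams",
                                       "infer_cfg.yaml")
    im = cv2.imread("test_image.png")
    result = model.predict(im)

    # Draw the predicted 3D boxes; boxes scoring below 0.3 are not shown.
    vis_im = fd.vision.vis_perception(im, result, "infer_cfg.yaml",
                                      score_threshold=0.3)
    cv2.imwrite("visualized_result.png", vis_im)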