[Model] Add Paddle3D smoke model (#1766)

* add smoke model

* add 3d vis

* update code

* update doc

* mv paddle3d from detection to perception

* update result for velocity

* update code for CI

* add set input data for TRT backend

* add serving support for smoke model

* update code

* update code

* update code

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
This commit is contained in:
yeliang2258
2023-04-14 16:30:56 +08:00
committed by GitHub
parent 81fbd54c9d
commit a509dd8ec1
53 changed files with 2610 additions and 26 deletions

View File

@@ -484,6 +484,29 @@ class RuntimeOption:
return self._option.trt_option.set_shape(tensor_name, min_shape,
opt_shape, max_shape)
def set_trt_input_data(self,
                       tensor_name,
                       min_input_data,
                       opt_input_data=None,
                       max_input_data=None):
    """Set input data while using TensorRT backend with loading a model that contains dynamic input shape.

    :param tensor_name: (str)Name of input which has dynamic shape
    :param min_input_data: (list of int)Input data for Minimum shape of the input.
    :param opt_input_data: (list of int)Input data for Optimize shape of the input, if set to None, it will keep same with min_input_data
    :param max_input_data: (list of int)Input data for Maximum shape of the input, if set to None, it will keep same with the min_input_data
    """
    logging.warning(
        "`RuntimeOption.set_trt_input_data` will be deprecated in v1.2.0, please use `RuntimeOption.trt_option.set_input_data()` instead."
    )
    if opt_input_data is None and max_input_data is None:
        # Neither optional profile was given: fall back to the minimum
        # input data for both, as the docstring promises.
        opt_input_data = min_input_data
        # Bug fix: the original assigned opt_input_data twice and left
        # max_input_data as None, which was then forwarded downstream.
        max_input_data = min_input_data
    else:
        # Callers must provide either only min_input_data, or all three.
        assert opt_input_data is not None and max_input_data is not None, "Set min_input_data only, or set min_input_data, opt_input_data, max_input_data both."
    return self._option.trt_option.set_input_data(
        tensor_name, min_input_data, opt_input_data, max_input_data)
def set_trt_cache_file(self, cache_file_path):
"""Set a cache file path while using TensorRT backend. While loading a Paddle/ONNX model with set_trt_cache_file("./tensorrt_cache/model.trt"), if file `./tensorrt_cache/model.trt` exists, it will skip building tensorrt engine and load the cache file directly; if file `./tensorrt_cache/model.trt` doesn't exist, it will building tensorrt engine and save the engine as binary string to the cache file.