[Backend] Add pybind & PaddleDetection example for TVM (#1998)

* update

* update

* Update infer_ppyoloe_demo.cc

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Author: Zheng-Bicheng
Date: 2023-06-04 13:26:47 +08:00
Committed by: GitHub
Parent: c634a9260d
Commit: 8d357814e8
10 changed files with 189 additions and 24 deletions


@@ -244,10 +244,9 @@ class RuntimeOption:
         :param enable_multi_stream: (bool)Whether to enable the multi stream of KunlunXin XPU.
         :param gm_default_size The default size of context global memory of KunlunXin XPU.
         """
-        return self._option.use_kunlunxin(device_id, l3_workspace_size, locked,
-                                          autotune, autotune_file, precision,
-                                          adaptive_seqlen, enable_multi_stream,
-                                          gm_default_size)
+        return self._option.use_kunlunxin(
+            device_id, l3_workspace_size, locked, autotune, autotune_file,
+            precision, adaptive_seqlen, enable_multi_stream, gm_default_size)
 
     def use_cpu(self):
         """Inference with CPU
@@ -271,7 +270,7 @@ class RuntimeOption:
     def disable_valid_backend_check(self):
         """ Disable checking validity of backend during inference
-        """
+        """
         return self._option.disable_valid_backend_check()
 
     def enable_valid_backend_check(self):
@@ -316,6 +315,11 @@ class RuntimeOption:
         """
         return self._option.use_ort_backend()
 
+    def use_tvm_backend(self):
+        """Use TVM Runtime backend, support inference TVM model on CPU.
+        """
+        return self._option.use_tvm_backend()
+
     def use_trt_backend(self):
         """Use TensorRT backend, support inference Paddle/ONNX model on Nvidia GPU.
         """


@@ -57,12 +57,18 @@ setup_configs = dict()
 setup_configs["LIBRARY_NAME"] = PACKAGE_NAME
 setup_configs["PY_LIBRARY_NAME"] = PACKAGE_NAME + "_main"
 # Backend options
-setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND", "OFF")
-setup_configs["ENABLE_SOPHGO_BACKEND"] = os.getenv("ENABLE_SOPHGO_BACKEND", "OFF")
+setup_configs["ENABLE_TVM_BACKEND"] = os.getenv("ENABLE_TVM_BACKEND", "OFF")
+setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND",
+                                                   "OFF")
+setup_configs["ENABLE_SOPHGO_BACKEND"] = os.getenv("ENABLE_SOPHGO_BACKEND",
+                                                   "OFF")
 setup_configs["ENABLE_ORT_BACKEND"] = os.getenv("ENABLE_ORT_BACKEND", "OFF")
-setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND", "OFF")
-setup_configs["ENABLE_PADDLE_BACKEND"] = os.getenv("ENABLE_PADDLE_BACKEND", "OFF")
-setup_configs["ENABLE_POROS_BACKEND"] = os.getenv("ENABLE_POROS_BACKEND", "OFF")
+setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND",
+                                                     "OFF")
+setup_configs["ENABLE_PADDLE_BACKEND"] = os.getenv("ENABLE_PADDLE_BACKEND",
+                                                   "OFF")
+setup_configs["ENABLE_POROS_BACKEND"] = os.getenv("ENABLE_POROS_BACKEND",
+                                                  "OFF")
 setup_configs["ENABLE_TRT_BACKEND"] = os.getenv("ENABLE_TRT_BACKEND", "OFF")
 setup_configs["ENABLE_LITE_BACKEND"] = os.getenv("ENABLE_LITE_BACKEND", "OFF")
 setup_configs["ENABLE_VISION"] = os.getenv("ENABLE_VISION", "OFF")
@@ -82,11 +88,14 @@ setup_configs["WITH_KUNLUNXIN"] = os.getenv("WITH_KUNLUNXIN", "OFF")
 setup_configs["RKNN2_TARGET_SOC"] = os.getenv("RKNN2_TARGET_SOC", "")
 # Custom deps settings
 setup_configs["TRT_DIRECTORY"] = os.getenv("TRT_DIRECTORY", "UNDEFINED")
-setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY", "/usr/local/cuda")
+setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY",
+                                            "/usr/local/cuda")
 setup_configs["OPENCV_DIRECTORY"] = os.getenv("OPENCV_DIRECTORY", "")
 setup_configs["ORT_DIRECTORY"] = os.getenv("ORT_DIRECTORY", "")
-setup_configs["PADDLEINFERENCE_DIRECTORY"] = os.getenv("PADDLEINFERENCE_DIRECTORY", "")
-setup_configs["PADDLEINFERENCE_VERSION"] = os.getenv("PADDLEINFERENCE_VERSION", "")
+setup_configs["PADDLEINFERENCE_DIRECTORY"] = os.getenv(
+    "PADDLEINFERENCE_DIRECTORY", "")
+setup_configs["PADDLEINFERENCE_VERSION"] = os.getenv("PADDLEINFERENCE_VERSION",
+                                                     "")
 setup_configs["PADDLEINFERENCE_URL"] = os.getenv("PADDLEINFERENCE_URL", "")
 setup_configs["PADDLE2ONNX_URL"] = os.getenv("PADDLE2ONNX_URL", "")
 setup_configs["PADDLELITE_URL"] = os.getenv("PADDLELITE_URL", "")