[Backend] Integrate TensorRT in Paddle Inference backend by default (#381)

* Modify the filter for TensorRT libraries

* Add a note about the TensorRT libraries

* Use paddle inference with trt package

Co-authored-by: Jason <jiangjiajun@baidu.com>
This commit is contained in:
Jack Zhou
2022-10-18 15:29:06 +08:00
committed by GitHub
parent 2665933b7c
commit 173bbf5af4
2 changed files with 17 additions and 9 deletions

View File

@@ -69,7 +69,7 @@ else()
else()
set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-${PADDLEINFERENCE_VERSION}.tgz")
if(WITH_GPU)
set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-gpu-${PADDLEINFERENCE_VERSION}.tgz")
set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-gpu-trt-${PADDLEINFERENCE_VERSION}.tgz")
endif()
endif()
endif()

View File

@@ -182,13 +182,21 @@ def process_libraries(current_dir):
if f.count(flt) > 0:
remain = False
filename = os.path.split(f)[-1]
if filename in [
"libnvinfer_plugin.so", "libnvinfer_plugin.so.8.4.1",
"libnvinfer.so", "libnvinfer.so.8.4.1", "libnvonnxparser.so",
"libnvonnxparser.so.8.4.1", "libnvparsers.so",
"libnvparsers.so.8.4.1"
]:
continue
# Note(zhoushunjie): Adding the TensorRT libs below would increase the size of the whl package by 450M.
# if filename in [
# "libnvinfer_plugin.so",
# "libnvinfer.so", "libnvonnxparser.so",
# "libnvparsers.so", "libnvcaffe_parser.so"
# ]:
# continue
for lib_prefix in ["libnvinfer_plugin.so.8.",
"libnvinfer.so.8.", "libnvonnxparser.so.8.",
"libnvparsers.so.8.", "libnvcaffe_parser.so.8."]:
if filename.startswith(lib_prefix):
remain = False
break
if remain:
package_data.append(
os.path.relpath(f, os.path.join(current_dir, "fastdeploy")))