diff --git a/CMakeLists.txt b/CMakeLists.txt index 38a28deae..7a4fddcff 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -374,7 +374,7 @@ if(ENABLE_TRT_BACKEND) endif() if(UNIX AND (NOT APPLE) AND (NOT ANDROID)) execute_process(COMMAND sh -c "ls *.so*" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib - COMMAND sh -c "xargs ${PATCHELF_EXE} --set-rpath '$ORIGIN'" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib + COMMAND sh -c "xargs ${PATCHELF_EXE} --force-rpath --set-rpath '$ORIGIN'" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib RESULT_VARIABLE result OUTPUT_VARIABLE curr_out ERROR_VARIABLE curr_out) diff --git a/cmake/paddle_inference.cmake b/cmake/paddle_inference.cmake index 4e62d8330..93ed43db4 100755 --- a/cmake/paddle_inference.cmake +++ b/cmake/paddle_inference.cmake @@ -80,7 +80,7 @@ if(PADDLEINFERENCE_DIRECTORY) endif() else() set(PADDLEINFERENCE_URL_BASE "https://bj.bcebos.com/fastdeploy/third_libs/") - set(PADDLEINFERENCE_VERSION "2.4-dev5") + set(PADDLEINFERENCE_VERSION "2.4-dev6") if(WIN32) if (WITH_GPU) set(PADDLEINFERENCE_FILE "paddle_inference-win-x64-gpu-trt-${PADDLEINFERENCE_VERSION}.zip") diff --git a/python/fastdeploy/__init__.py b/python/fastdeploy/__init__.py index fdee0244c..d7b3dac7a 100755 --- a/python/fastdeploy/__init__.py +++ b/python/fastdeploy/__init__.py @@ -16,6 +16,32 @@ import logging import os import sys +# Create a symbolic link to tensorrt library. 
+trt_directory = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "libs/third_libs/tensorrt/lib/") +if os.name != "nt" and os.path.exists(trt_directory): + logging.basicConfig(level=logging.INFO) + for trt_lib in [ + "libnvcaffe_parser.so", "libnvinfer_plugin.so", "libnvinfer.so", + "libnvonnxparser.so", "libnvparsers.so" + ]: + dst = os.path.join(trt_directory, trt_lib) + src = os.path.join(trt_directory, trt_lib + ".8") + if not os.path.exists(dst): + try: + os.symlink(src, dst) + logging.info( + f"Create a symbolic link pointing to {src} named {dst}.") + except OSError as e: + logging.warning( + f"Failed to create a symbolic link pointing to {src} by an unprivileged user. " + "It may fail when you use the Paddle TensorRT backend. " + "Please use administrator privileges to import fastdeploy the first time." + ) + break + logging.basicConfig(level=logging.NOTSET) + # Note(zhoushunjie): Fix the import order of paddle and fastdeploy library. # This solution will be removed it when the confilct of paddle and # fastdeploy is fixed. diff --git a/python/scripts/process_libraries.py.in b/python/scripts/process_libraries.py.in index 25b7a5e4b..e30e7a92d 100644 --- a/python/scripts/process_libraries.py.in +++ b/python/scripts/process_libraries.py.in @@ -183,12 +183,12 @@ def process_libraries(current_dir): remain = False filename = os.path.split(f)[-1] # Note(zhoushunjie): To add the trt libs below will increase the size of whl package by 450M. 
-# if filename in [ -# "libnvinfer_plugin.so", -# "libnvinfer.so", "libnvonnxparser.so", -# "libnvparsers.so", "libnvcaffe_parser.so" -# ]: -# continue + if filename in [ + "libnvinfer_plugin.so", + "libnvinfer.so", "libnvonnxparser.so", + "libnvparsers.so", "libnvcaffe_parser.so" + ]: + continue for lib_prefix in ["libnvinfer_plugin.so.8.", "libnvinfer.so.8.", "libnvonnxparser.so.8.", diff --git a/scripts/patch_paddle_inference.py b/scripts/patch_paddle_inference.py index f46ab2491..e85071ffd 100644 --- a/scripts/patch_paddle_inference.py +++ b/scripts/patch_paddle_inference.py @@ -30,8 +30,8 @@ def process_paddle_inference(paddle_inference_so_file): ] patchelf_exe = os.getenv("PATCHELF_EXE", "patchelf") - command = "{} --set-rpath '{}' {}".format(patchelf_exe, ":".join(rpaths), - paddle_inference_so_file) + command = "{} --force-rpath --set-rpath '{}' {}".format( + patchelf_exe, ":".join(rpaths), paddle_inference_so_file) if platform.machine() != 'sw_64' and platform.machine() != 'mips64': assert os.system( command) == 0, "patchelf {} failed, the command: {}".format(