diff --git a/CMakeLists.txt b/CMakeLists.txt index 2c4a5fd03..aafe424c6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,6 +28,8 @@ if(NOT PY_LIBRARY_NAME) set(PY_LIBRARY_NAME "fastdeploy_main") endif() include(ExternalProject) +set(THIRD_PARTY_PATH ${CMAKE_CURRENT_BINARY_DIR}/third_libs) + add_subdirectory(${CSRCS_DIR_NAME}/fastdeploy) include(${PROJECT_SOURCE_DIR}/cmake/utils.cmake) @@ -37,6 +39,10 @@ if(NOT MSVC) set(CMAKE_CXX_FLAGS "-Wno-format") endif(NOT MSVC) +if(UNIX) + include(${PROJECT_SOURCE_DIR}/cmake/patchelf.cmake) +endif() + if(ANDROID) # To reduce the volume of the library set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g0 -Os -ffunction-sections -fdata-sections") @@ -168,7 +174,6 @@ set(DEPEND_LIBS "") file(READ "${PROJECT_SOURCE_DIR}/VERSION_NUMBER" FASTDEPLOY_VERSION) string(STRIP "${FASTDEPLOY_VERSION}" FASTDEPLOY_VERSION) -set(THIRD_PARTY_PATH ${CMAKE_CURRENT_BINARY_DIR}/third_libs) # Add eigen lib include_directories(${PROJECT_SOURCE_DIR}/third_party/eigen) @@ -278,10 +283,13 @@ if(ENABLE_TRT_BACKEND) execute_process(COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/scripts/copy_directory.py ${TRT_DIRECTORY}/lib ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib) if(UNIX) execute_process(COMMAND sh -c "ls *.so*" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib - COMMAND sh -c "xargs patchelf --set-rpath '$ORIGIN'" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib + COMMAND sh -c "xargs ${PATCHELF_EXE} --set-rpath '$ORIGIN'" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib RESULT_VARIABLE result OUTPUT_VARIABLE curr_out ERROR_VARIABLE curr_out) + if(NOT result EQUAL "0") + message(FATAL_ERROR "Failed to patchelf tensorrt libraries.") + endif() message(STATUS "result:${result} out:${curr_out}") endif() endif() diff --git a/cmake/paddle_inference.cmake b/cmake/paddle_inference.cmake index 0bed5a05a..43c726a23 100644 --- a/cmake/paddle_inference.cmake +++
b/cmake/paddle_inference.cmake @@ -89,7 +89,7 @@ ExternalProject_Add( BUILD_BYPRODUCTS ${PADDLEINFERENCE_COMPILE_LIB}) if(UNIX) - add_custom_target(patchelf_paddle_inference ALL COMMAND bash -c "python ${PROJECT_SOURCE_DIR}/scripts/patch_paddle_inference.py ${PADDLEINFERENCE_INSTALL_DIR}/paddle/lib/libpaddle_inference.so" DEPENDS ${LIBRARY_NAME}) + add_custom_target(patchelf_paddle_inference ALL COMMAND bash -c "PATCHELF_EXE=${PATCHELF_EXE} python ${PROJECT_SOURCE_DIR}/scripts/patch_paddle_inference.py ${PADDLEINFERENCE_INSTALL_DIR}/paddle/lib/libpaddle_inference.so" DEPENDS ${LIBRARY_NAME}) endif() add_library(external_paddle_inference STATIC IMPORTED GLOBAL) diff --git a/cmake/patchelf.cmake b/cmake/patchelf.cmake new file mode 100644 index 000000000..3e8441fc3 --- /dev/null +++ b/cmake/patchelf.cmake @@ -0,0 +1,26 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if(UNIX) + set(PATCHELF_EXE "patchelf") + if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64") + set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/patchelf-0.15.0-aarch64.tar.gz) + download_and_decompress(${PATCHELF_URL} ${CMAKE_CURRENT_BINARY_DIR}/patchelf-0.15.0-aarch64.tar.gz ${THIRD_PARTY_PATH}/patchelf) + set(PATCHELF_EXE ${THIRD_PARTY_PATH}/patchelf/bin/patchelf) + else() + set(PATCHELF_URL https://bj.bcebos.com/fastdeploy/third_libs/patchelf-0.15.0-x86_64.tar.gz) + download_and_decompress(${PATCHELF_URL} ${CMAKE_CURRENT_BINARY_DIR}/patchelf-0.15.0-x86_64.tar.gz ${THIRD_PARTY_PATH}/patchelf) + set(PATCHELF_EXE ${THIRD_PARTY_PATH}/patchelf/bin/patchelf) + endif() +endif() diff --git a/scripts/patch_paddle_inference.py b/scripts/patch_paddle_inference.py index ad01a975d..9a141d0d8 100644 --- a/scripts/patch_paddle_inference.py +++ b/scripts/patch_paddle_inference.py @@ -17,15 +17,23 @@ import sys import shutil import subprocess import platform + def process_paddle_inference(paddle_inference_so_file): rpaths = [ - "$ORIGIN", - "$ORIGIN/../../third_party/install/mkldnn/lib/", - "$ORIGIN/../../third_party/install/mklml/lib/", - "$ORIGIN/../../../tensorrt/lib" + "$ORIGIN", "$ORIGIN/../../third_party/install/mkldnn/lib/", + "$ORIGIN/../../third_party/install/mklml/lib/", + "$ORIGIN/../../../tensorrt/lib" ] - command = "patchelf --set-rpath '{}' {}".format(":".join(rpaths), paddle_inference_so_file) + patchelf_exe = os.getenv("PATCHELF_EXE", "patchelf") + command = "{} --set-rpath '{}' {}".format(patchelf_exe, ":".join(rpaths), + paddle_inference_so_file) if platform.machine() != 'sw_64' and platform.machine() != 'mips64': - assert os.system(command) == 0, "patchelf {} failed, the command: {}".format(paddle_inference_so_file, command) + assert os.system( + command) == 0, "patchelf {} failed, the command: {}".format( + paddle_inference_so_file, command) + + +if __name__ == "__main__": + process_paddle_inference(sys.argv[1])