diff --git a/.gitignore b/.gitignore index d907876dc..14b5cd3c4 100644 --- a/.gitignore +++ b/.gitignore @@ -42,4 +42,5 @@ examples/vision/collect_quantize_cc.sh examples/vision/tests_quantize fastdeploy/LICENSE fastdeploy/ThirdPartyNotices.txt -FastDeployCSharp.cmake \ No newline at end of file +FastDeployCSharp.cmake +python/fastdeploy/code_version.py \ No newline at end of file diff --git a/cmake/paddle_inference.cmake b/cmake/paddle_inference.cmake index 944adcd17..1aad4dfda 100755 --- a/cmake/paddle_inference.cmake +++ b/cmake/paddle_inference.cmake @@ -24,9 +24,13 @@ set(PADDLEINFERENCE_PREFIX_DIR ${THIRD_PARTY_PATH}/paddle_inference) set(PADDLEINFERENCE_SOURCE_DIR ${THIRD_PARTY_PATH}/paddle_inference/src/${PADDLEINFERENCE_PROJECT}) set(PADDLEINFERENCE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/paddle_inference) -set(PADDLEINFERENCE_INC_DIR - "${PADDLEINFERENCE_INSTALL_DIR}/paddle/include" - CACHE PATH "paddle_inference include directory." FORCE) +# set(PADDLEINFERENCE_INC_DIR +# "${PADDLEINFERENCE_INSTALL_DIR}/paddle/include" +# CACHE PATH "paddle_inference include directory." FORCE) +# NOTE: The header path needed by paddle inference is xxx/paddle_inference, +# not xxx/paddle_inference/paddle/include +set(PADDLEINFERENCE_INC_DIR "${PADDLEINFERENCE_INSTALL_DIR}" + CACHE PATH "paddle_inference include directory." FORCE) set(PADDLEINFERENCE_LIB_DIR "${PADDLEINFERENCE_INSTALL_DIR}/paddle/lib/" CACHE PATH "paddle_inference lib directory." FORCE) @@ -34,7 +38,8 @@ set(CMAKE_BUILD_RPATH "${CMAKE_BUILD_RPATH}" "${PADDLEINFERENCE_LIB_DIR}") if(PADDLEINFERENCE_DIRECTORY) - set(PADDLEINFERENCE_INC_DIR ${PADDLEINFERENCE_DIRECTORY}/paddle/include) + # set(PADDLEINFERENCE_INC_DIR ${PADDLEINFERENCE_DIRECTORY}/paddle/include) + set(PADDLEINFERENCE_INC_DIR ${PADDLEINFERENCE_DIRECTORY}) endif() include_directories(${PADDLEINFERENCE_INC_DIR}) @@ -66,49 +71,56 @@ endif(WIN32) if(PADDLEINFERENCE_DIRECTORY) + # Use custom Paddle Inference libs. 
if(EXISTS "${THIRD_PARTY_PATH}/install/paddle_inference") file(REMOVE_RECURSE "${THIRD_PARTY_PATH}/install/paddle_inference") endif() find_package(Python COMPONENTS Interpreter Development REQUIRED) message(STATUS "Copying ${PADDLEINFERENCE_DIRECTORY} to ${THIRD_PARTY_PATH}/install/paddle_inference ...") if(WIN32) - message(FATAL_ERROR "Define PADDLEINFERENCE_DIRECTORY is not supported on Windows platform.") + execute_process(COMMAND mkdir -p ${THIRD_PARTY_PATH}/install) + execute_process(COMMAND cp -r ${PADDLEINFERENCE_DIRECTORY} ${THIRD_PARTY_PATH}/install/paddle_inference) else() execute_process(COMMAND mkdir -p ${THIRD_PARTY_PATH}/install) execute_process(COMMAND cp -r ${PADDLEINFERENCE_DIRECTORY} ${THIRD_PARTY_PATH}/install/paddle_inference) execute_process(COMMAND rm -rf ${THIRD_PARTY_PATH}/install/paddle_inference/paddle/lib/*.a) endif() else() + # Use default Paddle Inference libs. set(PADDLEINFERENCE_URL_BASE "https://bj.bcebos.com/fastdeploy/third_libs/") - set(PADDLEINFERENCE_VERSION "2.4-dev7") if(WIN32) - set(PADDLEINFERENCE_VERSION "2.4-dev6") # dev7 for win is not ready now! 
if (WITH_GPU) - # set(PADDLEINFERENCE_FILE "paddle_inference-win-x64-gpu-trt-${PADDLEINFERENCE_VERSION}.zip") - set(PADDLEINFERENCE_FILE "paddle_inference-win-x64-gpu-trt-2.4-dev-20230410.zip") + set(PADDLEINFERENCE_FILE "paddle_inference-win-x64-gpu-trt8.5.2.2-mkl-avx-0.0.0.575cafb44b.zip") + set(PADDLEINFERENCE_VERSION "0.0.0.575cafb44b") else() - set(PADDLEINFERENCE_FILE "paddle_inference-win-x64-${PADDLEINFERENCE_VERSION}.zip") + set(PADDLEINFERENCE_FILE "paddle_inference-win-x64-mkl-avx-0.0.0.cbdba50933.zip") + set(PADDLEINFERENCE_VERSION "0.0.0.cbdba50933") endif() elseif(APPLE) if(CURRENT_OSX_ARCH MATCHES "arm64") message(FATAL_ERROR "Paddle Backend doesn't support Mac OSX with Arm64 now.") - set(PADDLEINFERENCE_FILE "paddle_inference-osx-arm64-${PADDLEINFERENCE_VERSION}.tgz") else() - set(PADDLEINFERENCE_FILE "paddle_inference-osx-x86_64-${PADDLEINFERENCE_VERSION}.tgz") + # TODO(qiuyanjun): Should remove this old paddle inference lib + set(PADDLEINFERENCE_FILE "paddle_inference-osx-x86_64-2.4-dev3.tgz") + # TODO(qiuyanjun): Should use the commit id to tag the version + set(PADDLEINFERENCE_VERSION "2.4-dev3") endif() else() + # Linux with x86 CPU/Arm CPU/GPU/IPU ... 
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64") message(FATAL_ERROR "Paddle Backend doesn't support linux aarch64 now.") - set(PADDLEINFERENCE_FILE "paddle_inference-linux-aarch64-${PADDLEINFERENCE_VERSION}.tgz") else() - set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-${PADDLEINFERENCE_VERSION}.tgz") if(WITH_GPU) - #set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-gpu-trt-${PADDLEINFERENCE_VERSION}.tgz") - set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-gpu-trt-2.4-dev-20230408.tgz") + set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-gpu-trt8.5.2.2-mkl-avx-0.0.0.660f781b77.tgz") + set(PADDLEINFERENCE_VERSION "0.0.0.660f781b77") + else() + set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-mkl-avx-0.0.0.660f781b77.tgz") + set(PADDLEINFERENCE_VERSION "0.0.0.660f781b77") endif() if (WITH_IPU) - set(PADDLEINFERENCE_VERSION "2.4-dev1") - set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-ipu-${PADDLEINFERENCE_VERSION}.tgz") + set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-ipu-2.4-dev1.tgz") + # TODO(qiuyanjun): Should use the commit id to tag the version + set(PADDLEINFERENCE_VERSION "2.4-dev1") endif() if(NEED_ABI0) @@ -116,12 +128,12 @@ else() message(WARNING "While NEED_ABI0=ON, only support CPU now, will fallback to CPU.") endif() set(PADDLEINFERENCE_FILE "paddle_inference-linux-x64-2.4.0-abi0.tgz") + set(PADDLEINFERENCE_VERSION "2.4.0-abi0") endif() endif() endif() set(PADDLEINFERENCE_URL "${PADDLEINFERENCE_URL_BASE}${PADDLEINFERENCE_FILE}") - ExternalProject_Add( ${PADDLEINFERENCE_PROJECT} ${EXTERNAL_PROJECT_LOG_ARGS} @@ -134,8 +146,10 @@ else() INSTALL_COMMAND ${CMAKE_COMMAND} -E copy_directory ${PADDLEINFERENCE_SOURCE_DIR} ${PADDLEINFERENCE_INSTALL_DIR} BUILD_BYPRODUCTS ${PADDLEINFERENCE_COMPILE_LIB}) + endif(PADDLEINFERENCE_DIRECTORY) +# Patch Paddle Inference ELF lib file if(UNIX AND (NOT APPLE) AND (NOT ANDROID)) add_custom_target(patchelf_paddle_inference ALL COMMAND bash -c "PATCHELF_EXE=${PATCHELF_EXE} python 
${PROJECT_SOURCE_DIR}/scripts/patch_paddle_inference.py ${PADDLEINFERENCE_INSTALL_DIR}/paddle/lib/libpaddle_inference.so" DEPENDS ${LIBRARY_NAME}) endif() diff --git a/fastdeploy/runtime/backends/paddle/paddle_backend.h b/fastdeploy/runtime/backends/paddle/paddle_backend.h index 7ca8c0b14..8bd6d05f4 100755 --- a/fastdeploy/runtime/backends/paddle/paddle_backend.h +++ b/fastdeploy/runtime/backends/paddle/paddle_backend.h @@ -25,7 +25,7 @@ #include "paddle2onnx/converter.h" #endif #include "fastdeploy/utils/unique_ptr.h" -#include "paddle_inference_api.h" // NOLINT +#include "paddle/include/paddle_inference_api.h" // NOLINT namespace fastdeploy { diff --git a/python/fastdeploy/__init__.py b/python/fastdeploy/__init__.py index 360db8e5a..e865b07d8 100755 --- a/python/fastdeploy/__init__.py +++ b/python/fastdeploy/__init__.py @@ -45,6 +45,10 @@ if os.name != "nt" and os.path.exists(trt_directory): # Note(zhoushunjie): Fix the import order of paddle and fastdeploy library. # This solution will be removed it when the confilct of paddle and # fastdeploy is fixed. +try: + import paddle +except: + pass from .c_lib_wrap import ( ModelFormat, diff --git a/python/setup.py b/python/setup.py index df617287f..ba904edbc 100755 --- a/python/setup.py +++ b/python/setup.py @@ -86,6 +86,8 @@ setup_configs["OPENCV_DIRECTORY"] = os.getenv("OPENCV_DIRECTORY", "") setup_configs["ORT_DIRECTORY"] = os.getenv("ORT_DIRECTORY", "") setup_configs["PADDLEINFERENCE_DIRECTORY"] = os.getenv( "PADDLEINFERENCE_DIRECTORY", "") +setup_configs["PADDLEINFERENCE_VERSION"] = os.getenv( + "PADDLEINFERENCE_VERSION", "") setup_configs["RKNN2_TARGET_SOC"] = os.getenv("RKNN2_TARGET_SOC", "") if setup_configs["RKNN2_TARGET_SOC"] != "" or setup_configs[ @@ -376,6 +378,10 @@ if sys.version_info[0] == 3: package_data = {PACKAGE_NAME: ["LICENSE", "ThirdPartyNotices.txt"]} +extra_version_info = "" +if setup_configs["PADDLEINFERENCE_VERSION"] != "": + extra_version_info += ("." 
+ setup_configs["PADDLEINFERENCE_VERSION"]) + if sys.argv[1] == "install" or sys.argv[1] == "bdist_wheel": shutil.copy( os.path.join(TOP_DIR, "ThirdPartyNotices.txt"), @@ -395,7 +401,7 @@ if sys.argv[1] == "install" or sys.argv[1] == "bdist_wheel": package_data[PACKAGE_NAME].extend(all_lib_data) setuptools.setup( name=wheel_name, - version=VersionInfo.version, + version=VersionInfo.version + extra_version_info, ext_modules=ext_modules, description="Deploy Kit Tool For Deeplearning models.", packages=packages, @@ -416,7 +422,7 @@ if sys.argv[1] == "install" or sys.argv[1] == "bdist_wheel": else: setuptools.setup( name=wheel_name, - version=VersionInfo.version, + version=VersionInfo.version + extra_version_info, description="Deploy Kit Tool For Deeplearning models.", ext_modules=ext_modules, cmdclass=cmdclass,