[Python] Add backward compatible for paddle2.4.2 (#1929)

* [Python] Add backward compatible for paddle2.4.2

* [Python] Add backward compatible for paddle2.4.2

* [scripts] update linux build scripts
This commit is contained in:
DefTruth
2023-05-11 19:34:06 +08:00
committed by GitHub
parent 643f41e711
commit 15c29f7e49
6 changed files with 214 additions and 25 deletions

View File

@@ -15,6 +15,7 @@ from __future__ import absolute_import
import logging
import os
import sys
import platform
# Create a symbol link to tensorrt library.
trt_directory = os.path.join(
@@ -42,23 +43,82 @@ if os.name != "nt" and os.path.exists(trt_directory):
break
logging.basicConfig(level=logging.NOTSET)
from .code_version import version, git_version, extra_version_info
from .code_version import enable_trt_backend, enable_paddle_backend, with_gpu
# Note(zhoushunjie): Fix the import order of paddle and fastdeploy library.
# This workaround will be removed when the conflict between paddle and
# fastdeploy is fixed.
try:
# import paddle
import platform
# Note(qiuyanjun): Add backward compatible for paddle 2.4.x
sys_platform = platform.platform().lower()
def get_paddle_version():
    """Return the installed paddlepaddle version string, e.g. "2.4.2".

    The GPU wheel ("paddlepaddle-gpu") is checked first, then the CPU
    wheel ("paddlepaddle"). Any ".postXXX" suffix is stripped so the
    result compares cleanly against plain release numbers. Returns ""
    when neither package is installed or the metadata cannot be read.
    """
    try:
        import pkg_resources
    except Exception:
        # pkg_resources (setuptools) unavailable: cannot query metadata.
        return ""
    for package_name in ("paddlepaddle-gpu", "paddlepaddle"):
        try:
            return pkg_resources.require(
                package_name)[0].version.split(".post")[0]
        except Exception:
            # Package not installed (DistributionNotFound) or metadata
            # unreadable; fall through to the next candidate.
            continue
    return ""
def should_import_paddle():
    """Return True when paddle must be imported before fastdeploy.

    Only applies to builds tagged with "paddle2.4" in extra_version_info:
    for installed paddle releases <= 2.4.2 the import order matters, so
    the caller imports paddle first. Unknown ("") and dev ("0.0.0")
    versions never force the import.
    """
    if "paddle2.4" not in extra_version_info:
        return False
    paddle_version = get_paddle_version()
    if paddle_version in ("", "0.0.0"):
        return False
    # Compare numerically: the previous lexicographic string comparison
    # (paddle_version <= '2.4.2') would mis-order versions such as
    # "2.10.0" (string-wise <= "2.4.2", numerically greater).
    try:
        version_tuple = tuple(int(p) for p in paddle_version.split(".")[:3])
    except ValueError:
        # Non-numeric version component: treat as "do not force import".
        return False
    return version_tuple <= (2, 4, 2)
def should_set_tensorrt():
    """Whether this build expects TensorRT libraries at runtime.

    True only for GPU builds compiled with both the Paddle backend and
    the TensorRT backend enabled.
    """
    required_flags = (with_gpu, enable_paddle_backend, enable_trt_backend)
    return all(flag == 'ON' for flag in required_flags)
def tensorrt_is_avaliable():
    """Return True if libnvinfer.so is reachable via LD_LIBRARY_PATH.

    Note(qiuyanjun): Only support linux now.
    """
    if 'linux' not in sys_platform:
        return False
    if 'LD_LIBRARY_PATH' not in os.environ.keys():
        return False
    return any(
        os.path.exists(os.path.join(lib_dir, 'libnvinfer.so'))
        for lib_dir in os.environ['LD_LIBRARY_PATH'].split(':'))
# Best-effort environment setup at import time: import paddle in the right
# order and warn when the expected TensorRT libs are missing. Any failure
# here is deliberately swallowed so `import fastdeploy` still succeeds.
try:
    # windows: no conflict between fastdeploy and paddle.
    # linux: must import paddle first to solve the conflict.
    # macos: still can not solve the conflict between fastdeploy and paddle,
    # due to the global flags redefined in paddle/paddle_inference so.
    # we got the error (ERROR: flag 'xxx' was defined more than once).
    if "linux" in sys_platform:
        import paddle
    # NOTE(review): paddle may already be imported above on linux, making
    # this guarded import redundant — confirm the intended nesting.
    if should_import_paddle():
        import paddle  # need import paddle first for paddle2.4.x
    # check whether tensorrt in LD_LIBRARY_PATH for fastdeploy
    if should_set_tensorrt() and (not tensorrt_is_avaliable()):
        # Warn only; missing TensorRT is not fatal at import time.
        if os.path.exists(trt_directory):
            # A bundled TensorRT exists: tell the user how to export it.
            logging.info(
                "\n[WARNING] Can not find TensorRT lib in LD_LIBRARY_PATH for FastDeploy! \
\n[WARNING] Please export [ YOUR CUSTOM TensorRT ] lib path to LD_LIBRARY_PATH first, or run the command: \
\n[WARNING] Linux: 'export LD_LIBRARY_PATH=$(python -c 'from fastdeploy import trt_directory; print(trt_directory)'):$LD_LIBRARY_PATH'")
        else:
            logging.info(
                "\n[WARNING] Can not find TensorRT lib in LD_LIBRARY_PATH for FastDeploy! \
\n[WARNING] Please export [YOUR CUSTOM TensorRT] lib path to LD_LIBRARY_PATH first.")
# Bare except is intentional: the whole block is best-effort and must never
# block importing fastdeploy.
except:
    pass
from .c_lib_wrap import (
ModelFormat,
Backend,
@@ -92,5 +152,5 @@ from . import text
from . import encryption
from .download import download, download_and_decompress, download_model, get_model_list
from . import serving
from .code_version import version, git_version
__version__ = version

View File

@@ -58,7 +58,7 @@ def get_default_cuda_major_version() -> str:
def find_cudart(search_dir: str) -> bool:
if search_dir is None:
print("[FastDeploy][ERROR]: search_dir can not be NoneTpye.")
logging.info("[FastDeploy][ERROR]: search_dir can not be NoneTpye.")
return False
# TODO(qiuyanjun): add Linux cudart *.so check
cudart_lib_name = f"cudart64_{get_default_cuda_major_version()}0.dll"
@@ -71,7 +71,7 @@ def find_cudart_from_sys() -> bool:
sys_paths = os.environ["path"].strip().split(";")
for sys_path in sys_paths:
if find_cudart(sys_path):
print(f"[FastDeploy][INFO]: Successfully found CUDA ToolKit from system PATH env -> {sys_path}")
logging.info(f"[FastDeploy][INFO]: Successfully found CUDA ToolKit from system PATH env -> {sys_path}")
return True
return False
@@ -137,7 +137,7 @@ def add_custom_cuda_path():
\n--- this path should look like: {default_cuda_dir}. \
\n--- Check FAQ: {base_url + 'develop/docs/FAQ.md'}")
return
print(f"[FastDeploy][INFO]: Successfully found CUDA ToolKit from -> {cuda_shared_lib_dir}")
logging.info(f"[FastDeploy][INFO]: Successfully found CUDA ToolKit from -> {cuda_shared_lib_dir}")
if os.name == "nt":
@@ -162,8 +162,8 @@ if os.name == "nt":
try:
from .libs.@PY_LIBRARY_NAME@ import *
except:
raise RuntimeError("FastDeploy initalized failed!")
except Exception as e:
raise RuntimeError(f"FastDeploy initalized failed! Error: {e}")
def TensorInfoStr(tensor_info):

View File

@@ -64,7 +64,8 @@ setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND",
"OFF")
setup_configs["ENABLE_PADDLE_BACKEND"] = os.getenv("ENABLE_PADDLE_BACKEND",
"OFF")
setup_configs["ENABLE_POROS_BACKEND"] = os.getenv("ENABLE_POROS_BACKEND", "OFF")
setup_configs["ENABLE_POROS_BACKEND"] = os.getenv("ENABLE_POROS_BACKEND",
"OFF")
setup_configs["ENABLE_TRT_BACKEND"] = os.getenv("ENABLE_TRT_BACKEND", "OFF")
setup_configs["ENABLE_LITE_BACKEND"] = os.getenv("ENABLE_LITE_BACKEND", "OFF")
setup_configs["PADDLELITE_URL"] = os.getenv("PADDLELITE_URL", "OFF")
@@ -79,15 +80,16 @@ setup_configs["WITH_IPU"] = os.getenv("WITH_IPU", "OFF")
setup_configs["WITH_KUNLUNXIN"] = os.getenv("WITH_KUNLUNXIN", "OFF")
setup_configs["BUILD_ON_JETSON"] = os.getenv("BUILD_ON_JETSON", "OFF")
setup_configs["TRT_DIRECTORY"] = os.getenv("TRT_DIRECTORY", "UNDEFINED")
setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY", "/usr/local/cuda")
setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY",
"/usr/local/cuda")
setup_configs["LIBRARY_NAME"] = PACKAGE_NAME
setup_configs["PY_LIBRARY_NAME"] = PACKAGE_NAME + "_main"
setup_configs["OPENCV_DIRECTORY"] = os.getenv("OPENCV_DIRECTORY", "")
setup_configs["ORT_DIRECTORY"] = os.getenv("ORT_DIRECTORY", "")
setup_configs["PADDLEINFERENCE_DIRECTORY"] = os.getenv(
"PADDLEINFERENCE_DIRECTORY", "")
setup_configs["PADDLEINFERENCE_VERSION"] = os.getenv(
"PADDLEINFERENCE_VERSION", "")
setup_configs["PADDLEINFERENCE_VERSION"] = os.getenv("PADDLEINFERENCE_VERSION",
"")
setup_configs["RKNN2_TARGET_SOC"] = os.getenv("RKNN2_TARGET_SOC", "")
if setup_configs["RKNN2_TARGET_SOC"] != "" or setup_configs[
@@ -121,7 +123,8 @@ extras_require = {}
# Default value is set to TRUE\1 to keep the settings same as the current ones.
# However going forward the recomemded way to is to set this to False\0
USE_MSVC_STATIC_RUNTIME = bool(os.getenv('USE_MSVC_STATIC_RUNTIME', '1') == '1')
USE_MSVC_STATIC_RUNTIME = bool(
os.getenv('USE_MSVC_STATIC_RUNTIME', '1') == '1')
ONNX_NAMESPACE = os.getenv('ONNX_NAMESPACE', 'paddle2onnx')
################################################################################
# Version
@@ -133,9 +136,20 @@ try:
except (OSError, subprocess.CalledProcessError):
git_version = None
extra_version_info = ""
if setup_configs["PADDLEINFERENCE_VERSION"] != "":
extra_version_info += ("." + setup_configs["PADDLEINFERENCE_VERSION"])
with open(os.path.join(TOP_DIR, 'VERSION_NUMBER')) as version_file:
VersionInfo = namedtuple('VersionInfo', ['version', 'git_version'])(
version=version_file.read().strip(), git_version=git_version)
VersionInfo = namedtuple('VersionInfo', [
'version', 'git_version', 'extra_version_info', 'enable_trt_backend',
'enable_paddle_backend', 'with_gpu'
])(version=version_file.read().strip(),
git_version=git_version,
extra_version_info=extra_version_info.strip("."),
enable_trt_backend=setup_configs["ENABLE_TRT_BACKEND"],
enable_paddle_backend=setup_configs["ENABLE_PADDLE_BACKEND"],
with_gpu=setup_configs["WITH_GPU"])
################################################################################
# Pre Check
@@ -151,7 +165,8 @@ assert CMAKE, 'Could not find "cmake" executable!'
@contextmanager
def cd(path):
if not os.path.isabs(path):
raise RuntimeError('Can only cd to absolute path, got: {}'.format(path))
raise RuntimeError('Can only cd to absolute path, got: {}'.format(
path))
orig_path = os.getcwd()
os.chdir(path)
try:
@@ -196,6 +211,10 @@ class create_version(ONNXCommand):
from __future__ import unicode_literals
version = '{version}'
git_version = '{git_version}'
extra_version_info = '{extra_version_info}'
enable_trt_backend = '{enable_trt_backend}'
enable_paddle_backend = '{enable_paddle_backend}'
with_gpu = '{with_gpu}'
'''.format(**dict(VersionInfo._asdict()))))
@@ -378,10 +397,6 @@ if sys.version_info[0] == 3:
package_data = {PACKAGE_NAME: ["LICENSE", "ThirdPartyNotices.txt"]}
extra_version_info = ""
if setup_configs["PADDLEINFERENCE_VERSION"] != "":
extra_version_info += ("." + setup_configs["PADDLEINFERENCE_VERSION"])
if sys.argv[1] == "install" or sys.argv[1] == "bdist_wheel":
shutil.copy(
os.path.join(TOP_DIR, "ThirdPartyNotices.txt"),

View File

@@ -8,6 +8,13 @@ set +x
readonly ROOT_PATH=$(pwd)
readonly BUILD_ROOT=build/Linux
readonly BUILD_DIR=${BUILD_ROOT}/x86_64
readonly PADDLEINFERENCE_DIRECTORY=$1
readonly PADDLEINFERENCE_VERSION=$2
BUILD_WITH_CUSTOM_PADDLE='OFF'
if [[ "$PADDLEINFERENCE_DIRECTORY" != "" ]]; then
BUILD_WITH_CUSTOM_PADDLE='ON'
fi
# -------------------------------------------------------------------------------
# tasks
@@ -69,10 +76,39 @@ __build_fastdeploy_linux_x86_64_shared() {
echo "-- [INFO][built][x86_64]][${BUILD_DIR}/install]"
}
__build_fastdeploy_linux_x86_64_shared_custom_paddle() {
  # Build FastDeploy (CPU-only) against a caller-supplied Paddle Inference
  # library (PADDLEINFERENCE_DIRECTORY / PADDLEINFERENCE_VERSION set from
  # the script's positional args) instead of the default prebuilt one.
  local FASDEPLOY_INSTALL_DIR="${ROOT_PATH}/${BUILD_DIR}/install"
  cd "${BUILD_DIR}" && echo "-- [INFO] Working Dir: ${PWD}"
  # Quote all -D value expansions: unquoted ${VAR} would undergo word
  # splitting and break when a path contains spaces.
  cmake -DCMAKE_BUILD_TYPE=Release \
        -DWITH_GPU=OFF \
        -DENABLE_ORT_BACKEND=ON \
        -DENABLE_PADDLE_BACKEND=ON \
        -DENABLE_OPENVINO_BACKEND=ON \
        -DENABLE_PADDLE2ONNX=ON \
        -DENABLE_VISION=ON \
        -DENABLE_BENCHMARK=ON \
        -DPADDLEINFERENCE_DIRECTORY="${PADDLEINFERENCE_DIRECTORY}" \
        -DPADDLEINFERENCE_VERSION="${PADDLEINFERENCE_VERSION}" \
        -DENABLE_FLYCV=OFF \
        -DWITH_FLYCV_STATIC=OFF \
        -DBUILD_EXAMPLES=ON \
        -DCMAKE_INSTALL_PREFIX="${FASDEPLOY_INSTALL_DIR}" \
        -Wno-dev ../../.. && make -j8 && make install
  echo "-- [INFO][built][x86_64]][${BUILD_DIR}/install]"
  echo "-- [INFO][${PADDLEINFERENCE_VERSION}][${PADDLEINFERENCE_DIRECTORY}]"
}
main() {
  # Prepare the build tree and validate compiler env vars, then dispatch
  # to either the custom-Paddle build or the default build.
  __make_build_dir
  __check_cxx_envs
  case "${BUILD_WITH_CUSTOM_PADDLE}" in
    ON) __build_fastdeploy_linux_x86_64_shared_custom_paddle ;;
    *)  __build_fastdeploy_linux_x86_64_shared ;;
  esac
  exit 0
}
@@ -80,3 +116,4 @@ main
# Usage:
# ./scripts/linux/build_linux_x86_64_cpp_cpu.sh
# ./scripts/linux/build_linux_x86_64_cpp_cpu.sh paddle_inference-linux-x64-mkl-avx-2.4.2 paddle2.4.2

View File

@@ -8,6 +8,13 @@ set +x
readonly ROOT_PATH=$(pwd)
readonly BUILD_ROOT=build/Linux
readonly BUILD_DIR="${BUILD_ROOT}/x86_64_gpu"
readonly PADDLEINFERENCE_DIRECTORY=$1
readonly PADDLEINFERENCE_VERSION=$2
BUILD_WITH_CUSTOM_PADDLE='OFF'
if [[ "$PADDLEINFERENCE_DIRECTORY" != "" ]]; then
BUILD_WITH_CUSTOM_PADDLE='ON'
fi
# -------------------------------------------------------------------------------
# tasks
@@ -70,10 +77,40 @@ __build_fastdeploy_linux_x86_64_gpu_shared() {
echo "-- [INFO][built][x86_64_gpu}][${BUILD_DIR}/install]"
}
__build_fastdeploy_linux_x86_64_gpu_shared_custom_paddle() {
  # Build FastDeploy (GPU, TensorRT enabled) against a caller-supplied
  # Paddle Inference library (set from the script's positional args).
  # Relies on TRT_DIRECTORY / CUDA_DIRECTORY from the environment.
  local FASDEPLOY_INSTALL_DIR="${ROOT_PATH}/${BUILD_DIR}/install"
  cd "${BUILD_DIR}" && echo "-- [INFO] Working Dir: ${PWD}"
  # Quote all -D value expansions: unquoted ${VAR} would undergo word
  # splitting and break when a path contains spaces.
  cmake -DCMAKE_BUILD_TYPE=Release \
        -DWITH_GPU=ON \
        -DTRT_DIRECTORY="${TRT_DIRECTORY}" \
        -DCUDA_DIRECTORY="${CUDA_DIRECTORY}" \
        -DENABLE_ORT_BACKEND=ON \
        -DENABLE_TRT_BACKEND=ON \
        -DENABLE_PADDLE_BACKEND=ON \
        -DPADDLEINFERENCE_DIRECTORY="${PADDLEINFERENCE_DIRECTORY}" \
        -DPADDLEINFERENCE_VERSION="${PADDLEINFERENCE_VERSION}" \
        -DENABLE_OPENVINO_BACKEND=ON \
        -DENABLE_PADDLE2ONNX=ON \
        -DENABLE_VISION=ON \
        -DENABLE_BENCHMARK=OFF \
        -DBUILD_EXAMPLES=ON \
        -DCMAKE_INSTALL_PREFIX="${FASDEPLOY_INSTALL_DIR}" \
        -Wno-dev ../../.. && make -j8 && make install
  echo "-- [INFO][built][x86_64_gpu}][${BUILD_DIR}/install]"
  echo "-- [INFO][${PADDLEINFERENCE_VERSION}][${PADDLEINFERENCE_DIRECTORY}]"
}
main() {
  # Prepare the build tree and validate compiler env vars, then dispatch
  # to either the custom-Paddle GPU build or the default GPU build.
  __make_build_dir
  __check_cxx_envs
  case "${BUILD_WITH_CUSTOM_PADDLE}" in
    ON) __build_fastdeploy_linux_x86_64_gpu_shared_custom_paddle ;;
    *)  __build_fastdeploy_linux_x86_64_gpu_shared ;;
  esac
  exit 0
}
@@ -81,3 +118,4 @@ main
# Usage:
# ./scripts/linux/build_linux_x86_64_cpp_gpu.sh
# ./scripts/linux/build_linux_x86_64_cpp_gpu.sh paddle_inference-linux-x64-gpu-trt8.5.2.2-mkl-avx-2.4.2 paddle2.4.2

View File

@@ -8,6 +8,13 @@ set +x
readonly ROOT_PATH=$(pwd)
readonly BUILD_ROOT=build/Linux
readonly BUILD_DIR="${BUILD_ROOT}/x86_64_gpu"
readonly PADDLEINFERENCE_DIRECTORY=$1
readonly PADDLEINFERENCE_VERSION=$2
BUILD_WITH_CUSTOM_PADDLE='OFF'
if [[ "$PADDLEINFERENCE_DIRECTORY" != "" ]]; then
BUILD_WITH_CUSTOM_PADDLE='ON'
fi
# -------------------------------------------------------------------------------
# tasks
@@ -70,10 +77,41 @@ __build_fastdeploy_linux_x86_64_gpu_shared() {
echo "-- [INFO][built][x86_64_gpu}][${BUILD_DIR}/install]"
}
__build_fastdeploy_linux_x86_64_gpu_shared_custom_paddle() {
  # Build FastDeploy (GPU, TensorRT and benchmark enabled) against a
  # caller-supplied Paddle Inference library (set from positional args).
  # Relies on TRT_DIRECTORY / CUDA_DIRECTORY from the environment.
  local FASDEPLOY_INSTALL_DIR="${ROOT_PATH}/${BUILD_DIR}/install"
  cd "${BUILD_DIR}" && echo "-- [INFO] Working Dir: ${PWD}"
  # Quote all -D value expansions: unquoted ${VAR} would undergo word
  # splitting and break when a path contains spaces.
  cmake -DCMAKE_BUILD_TYPE=Release \
        -DWITH_GPU=ON \
        -DTRT_DIRECTORY="${TRT_DIRECTORY}" \
        -DCUDA_DIRECTORY="${CUDA_DIRECTORY}" \
        -DENABLE_ORT_BACKEND=ON \
        -DENABLE_TRT_BACKEND=ON \
        -DENABLE_PADDLE_BACKEND=ON \
        -DPADDLEINFERENCE_DIRECTORY="${PADDLEINFERENCE_DIRECTORY}" \
        -DPADDLEINFERENCE_VERSION="${PADDLEINFERENCE_VERSION}" \
        -DENABLE_OPENVINO_BACKEND=ON \
        -DENABLE_PADDLE2ONNX=ON \
        -DENABLE_VISION=ON \
        -DENABLE_BENCHMARK=ON \
        -DBUILD_EXAMPLES=ON \
        -DCMAKE_INSTALL_PREFIX="${FASDEPLOY_INSTALL_DIR}" \
        -Wno-dev ../../.. && make -j8 && make install
  echo "-- [INFO][built][x86_64_gpu}][${BUILD_DIR}/install]"
  echo "-- [INFO][${PADDLEINFERENCE_VERSION}][${PADDLEINFERENCE_DIRECTORY}]"
}
main() {
  # Prepare the build tree and validate compiler env vars, then dispatch
  # to either the custom-Paddle GPU build or the default GPU build.
  __make_build_dir
  __check_cxx_envs
  case "${BUILD_WITH_CUSTOM_PADDLE}" in
    ON) __build_fastdeploy_linux_x86_64_gpu_shared_custom_paddle ;;
    *)  __build_fastdeploy_linux_x86_64_gpu_shared ;;
  esac
  exit 0
}
@@ -81,3 +119,4 @@ main
# Usage:
# ./scripts/linux/build_linux_x86_64_cpp_gpu.sh
# ./scripts/linux/build_linux_x86_64_cpp_gpu.sh paddle_inference-linux-x64-gpu-trt8.5.2.2-mkl-avx-2.4.2 paddle2.4.2