[Python] Add backward compatible for paddle2.4.2 (#1929)
* [Python] Add backward compatible for paddle2.4.2
* [Python] Add backward compatible for paddle2.4.2
* [scripts] update linux build scripts
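In outline, this change makes fastdeploy/__init__.py stop importing paddle unconditionally on Linux: paddle is now imported early only when the wheel was built against paddle 2.4.x and an installed paddle no newer than 2.4.2 is detected, and a warning is logged when the Paddle and TensorRT backends are enabled but no TensorRT library is visible on LD_LIBRARY_PATH. A minimal standalone sketch of the version gate (the helper name and arguments here are illustrative, not the identifiers used in the diff below):

    # Sketch only: mirrors the intent of the gate added by this commit.
    # built_for_paddle24  -> True if the FastDeploy wheel carries a "paddle2.4" build tag
    # installed_version   -> version string of the locally installed paddlepaddle(-gpu)
    def need_early_paddle_import(built_for_paddle24, installed_version):
        if not built_for_paddle24:
            return False
        # Skip empty or placeholder versions; only old 2.4.x installs need the early import.
        return installed_version not in ("", "0.0.0") and installed_version <= "2.4.2"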
@@ -15,6 +15,7 @@ from __future__ import absolute_import
import logging
import os
import sys
+import platform

# Create a symbol link to tensorrt library.
trt_directory = os.path.join(
@@ -42,23 +43,82 @@ if os.name != "nt" and os.path.exists(trt_directory):
        break
logging.basicConfig(level=logging.NOTSET)


from .code_version import version, git_version, extra_version_info
from .code_version import enable_trt_backend, enable_paddle_backend, with_gpu

# Note(zhoushunjie): Fix the import order of paddle and fastdeploy library.
# This solution will be removed it when the confilct of paddle and
# fastdeploy is fixed.

# Note(qiuyanjun): Add backward compatible for paddle 2.4.x
sys_platform = platform.platform().lower()

def get_paddle_version():
    paddle_version = ""
    try:
        import pkg_resources
        paddle_version = pkg_resources.require(
            "paddlepaddle-gpu")[0].version.split(".post")[0]
    except:
        try:
            paddle_version = pkg_resources.require(
                "paddlepaddle")[0].version.split(".post")[0]
        except:
            pass
    return paddle_version
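For reference, pkg_resources.require("paddlepaddle-gpu")[0].version returns the full installed wheel version; GPU wheels can carry a ".post" CUDA suffix, so the split(".post")[0] above keeps only the plain major.minor.patch part before it is compared. A small standalone illustration of that normalization (the version strings are made-up examples):

    # Strip an optional ".postNNN" suffix, as get_paddle_version() does.
    for raw in ("2.4.2.post117", "2.4.2", "0.0.0"):
        print(raw, "->", raw.split(".post")[0])   # e.g. "2.4.2.post117 -> 2.4.2"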
def should_import_paddle():
    if "paddle2.4" in extra_version_info:
        paddle_version = get_paddle_version()
        if paddle_version != "" and paddle_version <= '2.4.2' and paddle_version != "0.0.0":
            return True
    return False


def should_set_tensorrt():
    if with_gpu == 'ON' and enable_paddle_backend == 'ON' and enable_trt_backend == 'ON':
        return True
    return False


def tensorrt_is_avaliable():
    # Note(qiuyanjun): Only support linux now.
    found_trt_lib = False
    if ('linux' in sys_platform) and ('LD_LIBRARY_PATH' in os.environ.keys()):
        for lib_path in os.environ['LD_LIBRARY_PATH'].split(':'):
            if os.path.exists(os.path.join(lib_path, 'libnvinfer.so')):
                found_trt_lib = True
                break
    return found_trt_lib

try:
    # import paddle
    import platform
    sys_platform = platform.platform().lower()
    # windows: no conflict between fastdeploy and paddle.
    # linux: must import paddle first to solve the conflict.
    # macos: still can not solve the conflict between fastdeploy and paddle,
    # due to the global flags redefined in paddle/paddle_inference so.
    # we got the error (ERROR: flag 'xxx' was defined more than once).
-    if "linux" in sys_platform:
-        import paddle
+    if should_import_paddle():
+        import paddle  # need import paddle first for paddle2.4.x
+    # check whether tensorrt in LD_LIBRARY_PATH for fastdeploy
+    if should_set_tensorrt() and (not tensorrt_is_avaliable()):
+        if os.path.exists(trt_directory):
+            logging.info(
+                "\n[WARNING] Can not find TensorRT lib in LD_LIBRARY_PATH for FastDeploy! \
+                \n[WARNING] Please export [ YOUR CUSTOM TensorRT ] lib path to LD_LIBRARY_PATH first, or run the command: \
+                \n[WARNING] Linux: 'export LD_LIBRARY_PATH=$(python -c 'from fastdeploy import trt_directory; print(trt_directory)'):$LD_LIBRARY_PATH'")
+        else:
+            logging.info(
+                "\n[WARNING] Can not find TensorRT lib in LD_LIBRARY_PATH for FastDeploy! \
+                \n[WARNING] Please export [YOUR CUSTOM TensorRT] lib path to LD_LIBRARY_PATH first.")
except:
    pass

from .c_lib_wrap import (
    ModelFormat,
    Backend,
@@ -92,5 +152,5 @@ from . import text
from . import encryption
from .download import download, download_and_decompress, download_model, get_model_list
from . import serving
from .code_version import version, git_version

__version__ = version
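If the TensorRT warning added above fires at import time, the TensorRT directory bundled with the wheel can be queried from Python and prepended to LD_LIBRARY_PATH before launching the process that imports fastdeploy, which is exactly what the command embedded in the warning does:

    # Print the bundled TensorRT directory; its value is what the warning above
    # tells users to prepend to LD_LIBRARY_PATH (the variable must be set before
    # the interpreter that imports fastdeploy starts).
    from fastdeploy import trt_directory
    print(trt_directory)

The remaining hunks below touch the Windows CUDA runtime discovery helpers and the native-library import, switching their diagnostics from print to logging.info and surfacing the original load error.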
@@ -58,7 +58,7 @@ def get_default_cuda_major_version() -> str:

def find_cudart(search_dir: str) -> bool:
    if search_dir is None:
-        print("[FastDeploy][ERROR]: search_dir can not be NoneTpye.")
+        logging.info("[FastDeploy][ERROR]: search_dir can not be NoneTpye.")
        return False
    # TODO(qiuyanjun): add Linux cudart *.so check
    cudart_lib_name = f"cudart64_{get_default_cuda_major_version()}0.dll"
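The DLL name assembled here follows the Windows naming of the CUDA runtime, which embeds the CUDA major version in the file name. Assuming get_default_cuda_major_version() returns "11", the f-string expands as in this small illustration (not part of the diff):

    # Hypothetical expansion of the cudart DLL name searched for by find_cudart().
    major = "11"                                # assumed return value
    cudart_lib_name = f"cudart64_{major}0.dll"  # -> "cudart64_110.dll"
    print(cudart_lib_name)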
@@ -71,7 +71,7 @@ def find_cudart_from_sys() -> bool:
    sys_paths = os.environ["path"].strip().split(";")
    for sys_path in sys_paths:
        if find_cudart(sys_path):
-            print(f"[FastDeploy][INFO]: Successfully found CUDA ToolKit from system PATH env -> {sys_path}")
+            logging.info(f"[FastDeploy][INFO]: Successfully found CUDA ToolKit from system PATH env -> {sys_path}")
            return True
    return False
@@ -137,7 +137,7 @@ def add_custom_cuda_path():
            \n--- this path should look like: {default_cuda_dir}. \
            \n--- Check FAQ: {base_url + 'develop/docs/FAQ.md'}")
        return
-    print(f"[FastDeploy][INFO]: Successfully found CUDA ToolKit from -> {cuda_shared_lib_dir}")
+    logging.info(f"[FastDeploy][INFO]: Successfully found CUDA ToolKit from -> {cuda_shared_lib_dir}")


if os.name == "nt":
@@ -162,8 +162,8 @@ if os.name == "nt":

try:
    from .libs.@PY_LIBRARY_NAME@ import *
-except:
-    raise RuntimeError("FastDeploy initalized failed!")
+except Exception as e:
+    raise RuntimeError(f"FastDeploy initalized failed! Error: {e}")

def TensorInfoStr(tensor_info):
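The last hunk replaces a bare except with except Exception as e so that the underlying loader error (for example, a missing CUDA or TensorRT dependency) is surfaced in the raised message. The same pattern in isolation (the import target below is a hypothetical stand-in for the templated .libs.@PY_LIBRARY_NAME@ module):

    # Re-raise with the original error text, as the updated except clause does.
    try:
        import _some_missing_native_module  # hypothetical name
    except Exception as e:
        raise RuntimeError(f"FastDeploy initialization failed! Error: {e}")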