mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-05 16:48:03 +08:00
[fix][win] fixed windows python wheel cudart link (#98)
* [fix][win] fixed windows python wheel cudart link * remove unneeded funcs * format not-found cuda dir warnings * fix typos * fix typos * fix typos
This commit is contained in:
@@ -16,7 +16,10 @@ import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
from .c_lib_wrap import Frontend, Backend, FDDataType, TensorInfo, Device
|
||||
from .c_lib_wrap import (Frontend, Backend, FDDataType, TensorInfo, Device,
|
||||
is_built_with_gpu, is_built_with_ort,
|
||||
is_built_with_paddle, is_built_with_trt,
|
||||
get_default_cuda_directory)
|
||||
from .runtime import Runtime, RuntimeOption
|
||||
from .model import FastDeployModel
|
||||
from . import c_lib_wrap as C
|
||||
|
@@ -16,6 +16,29 @@ import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
def is_built_with_gpu() -> bool:
    """Return True if FastDeploy was compiled with GPU (CUDA) support.

    ``@WITH_GPU@`` is substituted by CMake at build time with ``ON``/``OFF``.
    """
    # A comparison already yields a bool; no need for `True if ... else False`.
    return "@WITH_GPU@" == "ON"
|
||||
|
||||
|
||||
def is_built_with_ort() -> bool:
    """Return True if the ONNX Runtime backend was enabled at build time.

    ``@ENABLE_ORT_BACKEND@`` is substituted by CMake with ``ON``/``OFF``.
    """
    # A comparison already yields a bool; no need for `True if ... else False`.
    return "@ENABLE_ORT_BACKEND@" == "ON"
|
||||
|
||||
|
||||
def is_built_with_trt() -> bool:
    """Return True if the TensorRT backend was enabled at build time.

    ``@ENABLE_TRT_BACKEND@`` is substituted by CMake with ``ON``/``OFF``.
    """
    # A comparison already yields a bool; no need for `True if ... else False`.
    return "@ENABLE_TRT_BACKEND@" == "ON"
|
||||
|
||||
|
||||
def is_built_with_paddle() -> bool:
    """Return True if the Paddle Inference backend was enabled at build time.

    ``@ENABLE_PADDLE_BACKEND@`` is substituted by CMake with ``ON``/``OFF``.
    """
    # A comparison already yields a bool; no need for `True if ... else False`.
    return "@ENABLE_PADDLE_BACKEND@" == "ON"
|
||||
|
||||
|
||||
def get_default_cuda_directory() -> str:
    """Return the CUDA toolkit directory recorded by CMake at build time.

    ``@CUDA_DIRECTORY@`` is filled in during configuration; an empty
    string is returned for CPU-only builds.
    """
    if is_built_with_gpu():
        # Raw string: Windows CUDA paths contain backslashes.
        return r"@CUDA_DIRECTORY@".strip()
    return ""
|
||||
|
||||
|
||||
def add_dll_search_dir(dir_path):
    """Make DLLs under *dir_path* discoverable by the Windows loader.

    Prepends *dir_path* to the ``path`` environment variable and to
    ``sys.path``; callers pass directories that contain native
    dependencies of the FastDeploy extension module.
    """
    # NOTE(review): os.environ keys are case-insensitive on Windows, so
    # "path" updates the usual "PATH" variable there.
    os.environ["path"] = dir_path + ";" + os.environ["path"]
    sys.path.insert(0, dir_path)
|
||||
@@ -23,7 +46,41 @@ def add_dll_search_dir(dir_path):
|
||||
os.add_dll_directory(dir_path)
|
||||
|
||||
|
||||
def add_cuda_shared_lib_dir_windows():
    """On Windows, add the CUDA ``bin`` directory to the DLL search path.

    FastDeploy.dll links against cudart, so a GPU-enabled wheel must be
    able to locate the CUDA runtime DLLs.  The build-time default
    directory is tried first; if it does not exist, a set of common
    environment variables is probed.  When no valid directory is found a
    warning is logged and nothing is added — the user must then set one
    of the probed variables manually.
    """
    if not is_built_with_gpu():
        # CPU-only build: nothing to link against cudart.
        return
    default_cuda_dir = get_default_cuda_directory()
    cuda_shared_lib_dir = os.path.join(default_cuda_dir, "bin")
    # TODO: add FAQ docs reference.
    if not os.path.exists(cuda_shared_lib_dir):
        # The default location is missing: fall back to the user's
        # environment.  Only the first entry of a ';'-separated value is
        # considered.
        custom_cuda_envs = ["CUDA_DIRECTORY", "CUDA_HOME", "CUDA_ROOT", "CUDA_PATH"]
        custom_cuda_dir = ""
        for custom_env in custom_cuda_envs:
            candidate = os.getenv(custom_env, "").strip().split(";")[0]
            if candidate and os.path.exists(candidate):
                custom_cuda_dir = candidate
                break
        if not custom_cuda_dir:
            # BUG FIX: was `logging.warnings.warn(...)`, which only works
            # because the logging module happens to import warnings
            # internally; use the public logging API instead.
            logging.warning(
                f"\n--- FastDeploy was built with gpu, "
                f"\n--- but the default cuda directory does not exist. "
                f"\n--- Please setup one of {custom_cuda_envs} manually, "
                f"\n--- this path should look like: {default_cuda_dir}")
            return
        cuda_shared_lib_dir = os.path.join(custom_cuda_dir, "bin")
    add_dll_search_dir(cuda_shared_lib_dir)
    # Typo fix in the user-facing message: "directroy" -> "directory".
    print(f"[FastDeploy][CUDA]: Found valid cuda directory and added it: -> {cuda_shared_lib_dir}")
|
||||
|
||||
|
||||
if os.name == "nt":
|
||||
add_cuda_shared_lib_dir_windows()
|
||||
current_path = os.path.abspath(__file__)
|
||||
dirname = os.path.dirname(current_path)
|
||||
third_libs_dir = os.path.join(dirname, "libs")
|
||||
@@ -33,7 +90,12 @@ if os.name == "nt":
|
||||
if d == "lib" or d == "bin":
|
||||
add_dll_search_dir(os.path.join(dirname, root, d))
|
||||
|
||||
from .@PY_LIBRARY_NAME@ import *
|
||||
|
||||
try:
|
||||
from .@PY_LIBRARY_NAME@ import *
|
||||
except:
|
||||
raise RuntimeError("FastDeploy initalized failed!")
|
||||
|
||||
|
||||
def TensorInfoStr(tensor_info):
|
||||
message = "TensorInfo(name : '{}', dtype : '{}', shape : '{}')".format(
|
||||
|
Reference in New Issue
Block a user