mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-31 11:56:44 +08:00)
139 lines · 4.9 KiB · Python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import os
import sys
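
# Note: the tokens of the form @WITH_GPU@, @ENABLE_ORT_BACKEND@,
# @PY_LIBRARY_NAME@, etc. below are placeholders; this file appears to be a
# template that is configured at build time (e.g. via CMake's configure_file),
# which rewrites each @VAR@ token to "ON"/"OFF" or to the name of the
# generated extension module.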


def is_built_with_gpu() -> bool:
    return "@WITH_GPU@" == "ON"


def is_built_with_ort() -> bool:
    return "@ENABLE_ORT_BACKEND@" == "ON"


def is_built_with_trt() -> bool:
    return "@ENABLE_TRT_BACKEND@" == "ON"


def is_built_with_paddle() -> bool:
    return "@ENABLE_PADDLE_BACKEND@" == "ON"


def get_default_cuda_directory() -> str:
    if not is_built_with_gpu():
        return ""
    return r"@CUDA_DIRECTORY@".strip()
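
# Example (illustrative sketch only): the helpers above let callers pick a
# backend that was actually compiled into this build, e.g.
#
#   option = RuntimeOption()
#   if is_built_with_trt():
#       option.use_trt_backend()
#   elif is_built_with_ort():
#       option.use_ort_backend()
#
# (RuntimeOption and its use_*_backend methods come from the compiled
# extension imported further below; shown here only as an illustration.)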


def add_dll_search_dir(dir_path):
    # Prepend the directory to PATH and sys.path; on Python 3.8+ also register
    # it via os.add_dll_directory(), since 3.8 stopped using PATH to resolve
    # the dependent DLLs of extension modules on Windows.
    os.environ["path"] = dir_path + ";" + os.environ["path"]
    sys.path.insert(0, dir_path)
    if sys.version_info[:2] >= (3, 8):
        os.add_dll_directory(dir_path)


def add_cuda_shared_lib_dir_windows():
    if is_built_with_gpu():
        # If FastDeploy was built with GPU support and runs on Windows, the
        # CUDA directory must be added to the DLL search paths so that
        # fastdeploy.dll can link against cudart correctly. The default path
        # is searched first; users should set it manually if the CUDA toolkit
        # is not located in the assumed default path.
        default_cuda_dir = get_default_cuda_directory()
        cuda_shared_lib_dir = os.path.join(default_cuda_dir, "bin")
        # TODO: add FAQ docs reference.
        if not os.path.exists(cuda_shared_lib_dir):
            # Try to get the CUDA directory from the user's local environment.
            custom_cuda_dir = "NOTFOUNDED"
            custom_cuda_envs = [
                "CUDA_DIRECTORY", "CUDA_HOME", "CUDA_ROOT", "CUDA_PATH"
            ]
            for custom_env in custom_cuda_envs:
                custom_cuda_dir = os.getenv(custom_env, "NOTFOUNDED")
                custom_cuda_dir = custom_cuda_dir.strip().split(";")[0]
                if os.path.exists(custom_cuda_dir) and custom_cuda_dir != "NOTFOUNDED":
                    break
            if not os.path.exists(custom_cuda_dir) or custom_cuda_dir == "NOTFOUNDED":
                logging.warning(
                    f"\n--- FastDeploy was built with GPU support, "
                    f"\n--- but the default CUDA directory does not exist. "
                    f"\n--- Please set one of {custom_cuda_envs} manually; "
                    f"\n--- the path should look like: {default_cuda_dir}")
                return
            cuda_shared_lib_dir = os.path.join(custom_cuda_dir, "bin")
        add_dll_search_dir(cuda_shared_lib_dir)
        print(
            f"[FastDeploy][CUDA]: Found valid CUDA directory and added it: -> {cuda_shared_lib_dir}"
        )
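
# Example (illustrative only): when the CUDA toolkit is installed in a
# non-default location, one of the environment variables above can be set
# before importing fastdeploy, e.g. on Windows:
#
#   set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.6
#
# (the exact version directory depends on the local installation).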


def try_pre_load_fastdeploy_dll(dll_name=None):
    # Pre-load the fastdeploy dll on Windows to make sure the added custom
    # dll directories have been activated. Reference:
    # [1] https://github.com/conda/conda/issues/10897
    # [2] https://github.com/dhermes/bezier/issues/237
    from ctypes import cdll
    if dll_name is None:
        dll_name = "fastdeploy.dll"
    try:
        dll = cdll.LoadLibrary(dll_name)
        del dll
    except Exception as e:
        raise RuntimeError(f"Can not pre-load dll: {dll_name}. {e}")


if os.name == "nt":
    # On Windows, register the CUDA and bundled third-party library
    # directories before the compiled extension is imported.
    add_cuda_shared_lib_dir_windows()
    current_path = os.path.abspath(__file__)
    dirname = os.path.dirname(current_path)
    third_libs_dir = os.path.join(dirname, "libs")
    add_dll_search_dir(third_libs_dir)
    for root, dirs, filenames in os.walk(third_libs_dir):
        for d in dirs:
            if d in ("lib", "bin"):
                add_dll_search_dir(os.path.join(root, d))
    try_pre_load_fastdeploy_dll()


try:
    from .@PY_LIBRARY_NAME@ import *
except Exception as e:
    raise RuntimeError("FastDeploy initialization failed!") from e
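
# The wildcard import above is expected to provide, among other things, the
# TensorInfo and RuntimeOption classes whose __repr__ is patched below
# (assuming the build produced the pybind extension named by @PY_LIBRARY_NAME@).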


def TensorInfoStr(tensor_info):
    message = "TensorInfo(name : '{}', dtype : '{}', shape : '{}')".format(
        tensor_info.name, tensor_info.dtype, tensor_info.shape)
    return message


def RuntimeOptionStr(runtime_option):
    attrs = dir(runtime_option)
    message = "RuntimeOption(\n"
    for attr in attrs:
        # Skip dunder attributes and bound methods; only data members are shown.
        if attr.startswith("__"):
            continue
        if hasattr(getattr(runtime_option, attr), "__call__"):
            continue
        message += "  {} : {}\t\n".format(attr, getattr(runtime_option, attr))
    message = message.strip("\n")
    message += ")"
    return message


TensorInfo.__repr__ = TensorInfoStr
RuntimeOption.__repr__ = RuntimeOptionStr
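
# Example (sketch; assumes a built fastdeploy wheel is installed):
#
#   import fastdeploy as fd
#   option = fd.RuntimeOption()
#   print(option)   # pretty-printed via RuntimeOptionStr (__repr__)
#
# TensorInfo objects (e.g. those describing a Runtime's inputs and outputs)
# are printed through TensorInfoStr in the same way.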