diff --git a/cmake/paddle_inference.cmake b/cmake/paddle_inference.cmake index 3f7183eff..0bed5a05a 100644 --- a/cmake/paddle_inference.cmake +++ b/cmake/paddle_inference.cmake @@ -88,6 +88,10 @@ ExternalProject_Add( ${CMAKE_COMMAND} -E copy_directory ${PADDLEINFERENCE_SOURCE_DIR} ${PADDLEINFERENCE_INSTALL_DIR} BUILD_BYPRODUCTS ${PADDLEINFERENCE_COMPILE_LIB}) +if(UNIX) + add_custom_target(patchelf_paddle_inference ALL COMMAND bash -c "python ${PROJECT_SOURCE_DIR}/scripts/patch_paddle_inference.py ${PADDLEINFERENCE_INSTALL_DIR}/paddle/lib/libpaddle_inference.so" DEPENDS ${LIBRARY_NAME}) +endif() + add_library(external_paddle_inference STATIC IMPORTED GLOBAL) set_property(TARGET external_paddle_inference PROPERTY IMPORTED_LOCATION ${PADDLEINFERENCE_COMPILE_LIB}) diff --git a/fastdeploy/backends/openvino/ov_backend.cc b/fastdeploy/backends/openvino/ov_backend.cc index 66b08fc6c..a5c11696d 100644 --- a/fastdeploy/backends/openvino/ov_backend.cc +++ b/fastdeploy/backends/openvino/ov_backend.cc @@ -39,6 +39,8 @@ FDDataType OpenVINODataTypeToFD(const ov::element::Type& type) { return FDDataType::FP64; } else if (type == ov::element::i8) { return FDDataType::INT8; + } else if (type == ov::element::u8) { + return FDDataType::UINT8; } else if (type == ov::element::i32) { return FDDataType::INT32; } else if (type == ov::element::i64) { @@ -56,12 +58,14 @@ ov::element::Type FDDataTypeToOV(const FDDataType& type) { return ov::element::f64; } else if (type == FDDataType::INT8) { return ov::element::i8; + } else if (type == FDDataType::UINT8) { + return ov::element::u8; } else if (type == FDDataType::INT32) { return ov::element::i32; } else if (type == FDDataType::INT64) { return ov::element::i64; } - FDASSERT(false, "Only support float/double/int8/int32/int64 now."); + FDASSERT(false, "Only support float/double/int8/uint8/int32/int64 now."); return ov::element::f32; } diff --git a/fastdeploy/backends/ort/utils.cc b/fastdeploy/backends/ort/utils.cc index ae3e45b86..a661c1c9f 
100644 --- a/fastdeploy/backends/ort/utils.cc +++ b/fastdeploy/backends/ort/utils.cc @@ -26,6 +26,10 @@ ONNXTensorElementDataType GetOrtDtype(const FDDataType& fd_dtype) { return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32; } else if (fd_dtype == FDDataType::INT64) { return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64; + } else if (fd_dtype == FDDataType::UINT8) { + return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8; + } else if (fd_dtype == FDDataType::INT8) { + return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8; } FDERROR << "Unrecognized fastdeply data type:" << Str(fd_dtype) << "." << std::endl; diff --git a/fastdeploy/backends/paddle/util.cc b/fastdeploy/backends/paddle/util.cc index 4faba0039..42a506d38 100644 --- a/fastdeploy/backends/paddle/util.cc +++ b/fastdeploy/backends/paddle/util.cc @@ -75,7 +75,9 @@ FDDataType PaddleDataTypeToFD(const paddle_infer::DataType& dtype) { fd_dtype = FDDataType::INT32; } else if (dtype == paddle_infer::UINT8) { fd_dtype = FDDataType::UINT8; - } else { + } else if (dtype == paddle_infer::INT8) { + fd_dtype = FDDataType::INT8; + } else { FDASSERT( false, "Unexpected data type: %d while call CopyTensorToCpu in PaddleBackend.", diff --git a/fastdeploy/vision/detection/ppdet/ppyoloe.cc b/fastdeploy/vision/detection/ppdet/ppyoloe.cc index ff2fb0e4e..69aacb3f1 100644 --- a/fastdeploy/vision/detection/ppdet/ppyoloe.cc +++ b/fastdeploy/vision/detection/ppdet/ppyoloe.cc @@ -157,6 +157,8 @@ bool PPYOLOE::Preprocess(Mat* mat, std::vector* outputs) { return false; } } + + Cast::Run(mat, "float"); outputs->resize(2); (*outputs)[0].name = InputInfoOfRuntime(0).name; diff --git a/python/scripts/__init__.py b/python/scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/scripts/build_gpu.sh b/python/scripts/build_gpu.sh new file mode 100644 index 000000000..492256378 --- /dev/null +++ b/python/scripts/build_gpu.sh @@ -0,0 +1,12 @@ +export ENABLE_ORT_BACKEND=ON +export ENABLE_OPENVINO_BACKEND=ON +export ENABLE_PADDLE_BACKEND=ON +export 
ENABLE_TRT_BACKEND=ON +export TRT_DIRECTORY=/fastdeploy/libs/TensorRT-8.4.1.5 +export CUDA_DIRECTORY=/usr/local/cuda +export ENABLE_VISION=ON +export WITH_GPU=ON +export CMAKE_CXX_COMPILER=/usr/local/gcc-8.2/bin/g++ + +python setup.py build +python setup.py bdist_wheel diff --git a/scripts/process_libraries.py b/python/scripts/process_libraries.py similarity index 100% rename from scripts/process_libraries.py rename to python/scripts/process_libraries.py diff --git a/python/setup.py b/python/setup.py index b08a57858..dac6fe58e 100644 --- a/python/setup.py +++ b/python/setup.py @@ -358,7 +358,6 @@ if sys.argv[1] == "install" or sys.argv[1] == "bdist_wheel": "Didn't detect path: fastdeploy/libs/third_libs exist, please execute `python setup.py build` first" ) sys.exit(0) - sys.path.append(TOP_DIR) from scripts.process_libraries import process_libraries all_lib_data = process_libraries( os.path.split(os.path.abspath(__file__))[0]) diff --git a/scripts/patch_paddle_inference.py b/scripts/patch_paddle_inference.py new file mode 100644 index 000000000..7fdbeae0b --- /dev/null +++ b/scripts/patch_paddle_inference.py @@ -0,0 +1,31 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys +import shutil +import subprocess +import platform + +def process_paddle_inference(paddle_inference_so_file): + rpaths = [ + "$ORIGIN", + "$ORIGIN/../../third_party/install/mkldnn/lib/", + "$ORIGIN/../../third_party/install/mklml/lib/", + "$ORIGIN/../../../tensorrt/lib" + ] + + command = "patchelf --set-rpath '{}' {}".format(":".join(rpaths), paddle_inference_so_file) + if platform.machine() != 'sw_64' and platform.machine() != 'mips64': + assert subprocess.call(command, shell=True) == 0, "patchelf {} failed, the command: {}".format(paddle_inference_so_file, command)