Fix bug for part of detection model (#243)

* fix error for part of detection model

* fix error for part of detection model

* add patch paddle inference
This commit is contained in:
Jason
2022-09-16 16:31:34 +08:00
committed by GitHub
parent 4d2fbcb030
commit 5046574b95
10 changed files with 61 additions and 3 deletions

View File

@@ -88,6 +88,10 @@ ExternalProject_Add(
${CMAKE_COMMAND} -E copy_directory ${PADDLEINFERENCE_SOURCE_DIR} ${PADDLEINFERENCE_INSTALL_DIR}
BUILD_BYPRODUCTS ${PADDLEINFERENCE_COMPILE_LIB})
# On UNIX, patch the RPATH of the downloaded libpaddle_inference.so after the
# external project is installed, so its bundled deps resolve via $ORIGIN paths.
# NOTE(review): add_custom_target(... ALL) is always considered out of date and
# re-runs this script on every build; if that becomes costly, consider
# add_custom_command(OUTPUT ...) keyed on the patched library instead.
if(UNIX)
add_custom_target(patchelf_paddle_inference ALL COMMAND bash -c "python ${PROJECT_SOURCE_DIR}/scripts/patch_paddle_inference.py ${PADDLEINFERENCE_INSTALL_DIR}/paddle/lib/libpaddle_inference.so" DEPENDS ${LIBRARY_NAME})
endif()
# Expose the prebuilt Paddle Inference library to the rest of the build as an
# imported target (location points at the library copied by ExternalProject).
add_library(external_paddle_inference STATIC IMPORTED GLOBAL)
set_property(TARGET external_paddle_inference PROPERTY IMPORTED_LOCATION
${PADDLEINFERENCE_COMPILE_LIB})

View File

@@ -39,6 +39,8 @@ FDDataType OpenVINODataTypeToFD(const ov::element::Type& type) {
return FDDataType::FP64;
} else if (type == ov::element::i8) {
return FDDataType::INT8;
} else if (type == ov::element::u8) {
return FDDataType::UINT8;
} else if (type == ov::element::i32) {
return FDDataType::INT32;
} else if (type == ov::element::i64) {
@@ -56,12 +58,14 @@ ov::element::Type FDDataTypeToOV(const FDDataType& type) {
return ov::element::f64;
} else if (type == FDDataType::INT8) {
return ov::element::i8;
} else if (type == FDDataType::UINT8) {
return ov::element::u8;
} else if (type == FDDataType::INT32) {
return ov::element::i32;
} else if (type == FDDataType::INT64) {
return ov::element::i64;
}
FDASSERT(false, "Only support float/double/int8/int32/int64 now.");
FDASSERT(false, "Only support float/double/int8/uint8/int32/int64 now.");
return ov::element::f32;
}

View File

@@ -26,6 +26,10 @@ ONNXTensorElementDataType GetOrtDtype(const FDDataType& fd_dtype) {
return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32;
} else if (fd_dtype == FDDataType::INT64) {
return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
} else if (fd_dtype == FDDataType::UINT8) {
return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
} else if (fd_dtype == FDDataType::INT8) {
return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8;
}
FDERROR << "Unrecognized fastdeply data type:" << Str(fd_dtype) << "."
<< std::endl;

View File

@@ -75,7 +75,9 @@ FDDataType PaddleDataTypeToFD(const paddle_infer::DataType& dtype) {
fd_dtype = FDDataType::INT32;
} else if (dtype == paddle_infer::UINT8) {
fd_dtype = FDDataType::UINT8;
} else {
} else if (dtype == paddle_infer::INT8) {
fd_dtype = FDDataType::INT8;
}else {
FDASSERT(
false,
"Unexpected data type: %d while call CopyTensorToCpu in PaddleBackend.",

View File

@@ -157,6 +157,8 @@ bool PPYOLOE::Preprocess(Mat* mat, std::vector<FDTensor>* outputs) {
return false;
}
}
Cast::Run(mat, "float");
outputs->resize(2);
(*outputs)[0].name = InputInfoOfRuntime(0).name;

View File

View File

@@ -0,0 +1,12 @@
# Build configuration for the FastDeploy Python wheel: enable all inference
# backends (ONNX Runtime, OpenVINO, Paddle, TensorRT) plus vision and GPU.
export ENABLE_ORT_BACKEND=ON
export ENABLE_OPENVINO_BACKEND=ON
export ENABLE_PADDLE_BACKEND=ON
export ENABLE_TRT_BACKEND=ON
# Paths to local TensorRT and CUDA installs — assumes this exact container
# layout; adjust for other machines. TODO confirm versions match the wheel.
export TRT_DIRECTORY=/fastdeploy/libs/TensorRT-8.4.1.5
export CUDA_DIRECTORY=/usr/local/cuda
export ENABLE_VISION=ON
export WITH_GPU=ON
# Pin the C++ compiler (gcc 8.2) used by setup.py's CMake invocation.
export CMAKE_CXX_COMPILER=/usr/local/gcc-8.2/bin/g++
# Compile the native libraries, then package them into a wheel.
python setup.py build
python setup.py bdist_wheel

View File

@@ -358,7 +358,6 @@ if sys.argv[1] == "install" or sys.argv[1] == "bdist_wheel":
"Didn't detect path: fastdeploy/libs/third_libs exist, please execute `python setup.py build` first"
)
sys.exit(0)
sys.path.append(TOP_DIR)
from scripts.process_libraries import process_libraries
all_lib_data = process_libraries(
os.path.split(os.path.abspath(__file__))[0])

View File

@@ -0,0 +1,31 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import shutil
import subprocess
import platform
def process_paddle_inference(paddle_inference_so_file):
    """Rewrite the RPATH of libpaddle_inference.so via patchelf.

    Sets $ORIGIN-relative RPATH entries so the library's bundled
    dependencies (mkldnn, mklml, tensorrt) resolve relative to the
    library's own install location at load time.

    Args:
        paddle_inference_so_file: path to libpaddle_inference.so.

    Raises:
        AssertionError: if patchelf exits with a non-zero status.
    """
    rpaths = [
        "$ORIGIN",
        "$ORIGIN/../../third_party/install/mkldnn/lib/",
        "$ORIGIN/../../third_party/install/mklml/lib/",
        "$ORIGIN/../../../tensorrt/lib"
    ]
    command = "patchelf --set-rpath '{}' {}".format(":".join(rpaths),
                                                    paddle_inference_so_file)
    # patchelf is not usable on sw_64/mips64, so skip patching there.
    if platform.machine() != 'sw_64' and platform.machine() != 'mips64':
        # BUG FIX: the original asserted `subprocess.Popen(...) != 0`, which
        # compares a Popen *object* to 0 (always true, so the check could
        # never fail) and never waited for the command to finish; its message
        # also referenced an undefined name `lib`. Run the command to
        # completion and verify its real exit status instead.
        result = subprocess.run(command, shell=True)
        assert result.returncode == 0, \
            "patchelf failed, the command: {}".format(command)