Polish cmake files and runtime apis (#36)
* Add custom operator for onnxruntime and fix paddle backend
* Polish cmake files and runtime apis
* Remove copy libraries
* fix some issues
* fix bug
* fix bug

CMakeLists.txt
@@ -15,8 +15,20 @@
 PROJECT(fastdeploy C CXX)
 CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
 
+option(CSRCS_DIR_NAME "Name of source code directory")
+option(LIBRARY_NAME "Name of build library name")
+option(PY_LIBRARY_NAME "Name of build python library name")
+if(NOT CSRCS_DIR_NAME)
+  set(CSRCS_DIR_NAME "./")
+endif()
+if(NOT LIBRARY_NAME)
+  set(LIBRARY_NAME "fastdeploy")
+endif()
+if(NOT PY_LIBRARY_NAME)
+  set(PY_LIBRARY_NAME "fastdeploy_main")
+endif()
 include(ExternalProject)
-add_subdirectory(fastdeploy)
+add_subdirectory(${CSRCS_DIR_NAME}/fastdeploy)
 include(external/utils.cmake)
 
 # Set C++11 as standard for the whole project
@@ -51,7 +63,8 @@ endif()
 
 option(BUILD_FASTDEPLOY_PYTHON "if build python lib for fastdeploy." OFF)
 
-include_directories(${PROJECT_SOURCE_DIR})
+set(HEAD_DIR "${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}")
+include_directories(${HEAD_DIR})
 include_directories(${CMAKE_CURRENT_BINARY_DIR})
 
 if (WITH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples)
@@ -62,12 +75,12 @@ if (WITH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples)
 endif()
 
 add_definitions(-DFASTDEPLOY_LIB)
-file(GLOB_RECURSE ALL_DEPLOY_SRCS ${PROJECT_SOURCE_DIR}/fastdeploy/*.cc)
-file(GLOB_RECURSE DEPLOY_ORT_SRCS ${PROJECT_SOURCE_DIR}/fastdeploy/backends/ort/*.cc)
-file(GLOB_RECURSE DEPLOY_PADDLE_SRCS ${PROJECT_SOURCE_DIR}/fastdeploy/backends/paddle/*.cc)
-file(GLOB_RECURSE DEPLOY_TRT_SRCS ${PROJECT_SOURCE_DIR}/fastdeploy/backends/tensorrt/*.cc ${PROJECT_SOURCE_DIR}/fastdeploy/backends/tensorrt/*.cpp)
-file(GLOB_RECURSE DEPLOY_VISION_SRCS ${PROJECT_SOURCE_DIR}/fastdeploy/vision/*.cc)
-file(GLOB_RECURSE DEPLOY_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/fastdeploy/pybind/*.cc ${PROJECT_SOURCE_DIR}/fastdeploy/*_pybind.cc)
+file(GLOB_RECURSE ALL_DEPLOY_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/*.cc)
+file(GLOB_RECURSE DEPLOY_ORT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/ort/*.cc)
+file(GLOB_RECURSE DEPLOY_PADDLE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/paddle/*.cc)
+file(GLOB_RECURSE DEPLOY_TRT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/tensorrt/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/tensorrt/*.cpp)
+file(GLOB_RECURSE DEPLOY_VISION_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/vision/*.cc)
+file(GLOB_RECURSE DEPLOY_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/pybind/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/*_pybind.cc)
 list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS} ${DEPLOY_PADDLE_SRCS} ${DEPLOY_TRT_SRCS} ${DEPLOY_VISION_SRCS})
 
 set(DEPEND_LIBS "")
@@ -117,7 +130,7 @@ if(ENABLE_TRT_BACKEND)
   endif()
   add_definitions(-DENABLE_TRT_BACKEND)
   include_directories(${TRT_DIRECTORY}/include)
-  include_directories(${PROJECT_SOURCE_DIR}/fastdeploy/backends/tensorrt/common)
+  include_directories(${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/tensorrt/common)
   list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_TRT_SRCS})
   find_library(TRT_INFER_LIB nvinfer ${TRT_DIRECTORY}/lib)
   find_library(TRT_ONNX_LIB nvonnxparser ${TRT_DIRECTORY}/lib)
@@ -125,12 +138,16 @@ if(ENABLE_TRT_BACKEND)
   find_library(TRT_PLUGIN_LIB nvinfer_plugin ${TRT_DIRECTORY}/lib)
   list(APPEND DEPEND_LIBS ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_CAFFE_LIB} ${TRT_PLUGIN_LIB})
 
-  # copy tensorrt libraries to third lib
-  # if(EXISTS "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
-  # file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib")
-  # endif()
-  # file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib")
-  # file(COPY ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_CAFFE_LIB} ${TRT_PLUGIN_LIB} DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib" FOLLOW_SYMLINK_CHAIN)
+  if(NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
+    file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
+  endif()
+  if(EXISTS "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib")
+    file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib")
+  endif()
+  find_package(Python COMPONENTS Interpreter Development REQUIRED)
+  message(STATUS "Copying ${TRT_DIRECTORY}/lib to ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib ...")
+  execute_process(COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/copy_directory.py ${TRT_DIRECTORY}/lib ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib)
 
 endif()
 
 if(ENABLE_VISION)
@@ -157,37 +174,37 @@ else()
   endif()
 endif()
 
-configure_file(${PROJECT_SOURCE_DIR}/fastdeploy/core/config.h.in ${PROJECT_SOURCE_DIR}/fastdeploy/core/config.h)
+configure_file(${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/core/config.h.in ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/core/config.h)
 configure_file(${PROJECT_SOURCE_DIR}/FastDeploy.cmake.in ${PROJECT_SOURCE_DIR}/FastDeploy.cmake @ONLY)
 
 list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_PYBIND_SRCS})
 
-add_library(fastdeploy SHARED ${ALL_DEPLOY_SRCS})
-redefine_file_macro(fastdeploy)
-set_target_properties(fastdeploy PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
+add_library(${LIBRARY_NAME} SHARED ${ALL_DEPLOY_SRCS})
+redefine_file_macro(${LIBRARY_NAME})
+set_target_properties(${LIBRARY_NAME} PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
 if(NOT APPLE)
-  set_target_properties(fastdeploy PROPERTIES LINK_FLAGS "-Wl,--start-group,--exclude-libs,ALL")
+  set_target_properties(${LIBRARY_NAME} PROPERTIES LINK_FLAGS "-Wl,--start-group,--exclude-libs,ALL")
 endif()
-set_target_properties(fastdeploy PROPERTIES LINK_FLAGS_RELEASE -s)
+set_target_properties(${LIBRARY_NAME} PROPERTIES LINK_FLAGS_RELEASE -s)
 
 file(READ "${PROJECT_SOURCE_DIR}/VERSION_NUMBER" FASTDEPLOY_VERSION)
 string(STRIP "${FASTDEPLOY_VERSION}" FASTDEPLOY_VERSION)
 if (APPLE)
   # set_target_properties(fastdeploy PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
-  set_target_properties(fastdeploy PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
+  set_target_properties(${LIBRARY_NAME} PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
 elseif(MSVC)
 else()
-  set_target_properties(fastdeploy PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
-  set_target_properties(fastdeploy PROPERTIES LINK_FLAGS "-Wl,--exclude-libs,ALL")
-  set_target_properties(fastdeploy PROPERTIES LINK_FLAGS_RELEASE -s)
+  set_target_properties(${LIBRARY_NAME} PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
+  set_target_properties(${LIBRARY_NAME} PROPERTIES LINK_FLAGS "-Wl,--exclude-libs,ALL")
+  set_target_properties(${LIBRARY_NAME} PROPERTIES LINK_FLAGS_RELEASE -s)
 endif()
 
 find_package(OpenMP)
 if(OpenMP_CXX_FOUND)
   list(APPEND DEPEND_LIBS OpenMP::OpenMP_CXX)
 endif()
-set_target_properties(fastdeploy PROPERTIES VERSION ${FASTDEPLOY_VERSION})
-target_link_libraries(fastdeploy ${DEPEND_LIBS})
+set_target_properties(${LIBRARY_NAME} PROPERTIES VERSION ${FASTDEPLOY_VERSION})
+target_link_libraries(${LIBRARY_NAME} ${DEPEND_LIBS})
 
 # add examples after prepare include paths for third-parties
 if (WITH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples)
@@ -200,15 +217,15 @@ include(external/summary.cmake)
 fastdeploy_summary()
 
 install(
-  TARGETS fastdeploy
+  TARGETS ${LIBRARY_NAME}
   LIBRARY DESTINATION lib
 )
 install(
-  DIRECTORY ${PROJECT_SOURCE_DIR}/fastdeploy
+  DIRECTORY ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy
   DESTINATION ${CMAKE_INSTALL_PREFIX}/include
   FILES_MATCHING
   PATTERN "*.h"
-  PATTERN "${PROJECT_SOURCE_DIR}/fastdeploy/backends/*/*.h"
+  PATTERN "${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/*/*.h"
 )
 install(
   DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install
@@ -243,40 +260,34 @@ if(BUILD_FASTDEPLOY_PYTHON)
   endif()
 
   if(NOT ENABLE_VISION)
-    file(GLOB_RECURSE VISION_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/fastdeploy/vision/*_pybind.cc)
+    file(GLOB_RECURSE VISION_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/vision/*_pybind.cc)
     list(REMOVE_ITEM DEPLOY_PYBIND_SRCS ${VISION_PYBIND_SRCS})
   endif()
-  add_library(fastdeploy_main MODULE ${DEPLOY_PYBIND_SRCS})
-  redefine_file_macro(fastdeploy_main)
-  set_target_properties(fastdeploy_main PROPERTIES PREFIX "")
-  set_target_properties(fastdeploy_main
+  add_library(${PY_LIBRARY_NAME} MODULE ${DEPLOY_PYBIND_SRCS})
+  redefine_file_macro(${PY_LIBRARY_NAME})
+  set_target_properties(${PY_LIBRARY_NAME} PROPERTIES PREFIX "")
+  set_target_properties(${PY_LIBRARY_NAME}
                         PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
-  set_target_properties(fastdeploy_main PROPERTIES SUFFIX ${PY_EXT_SUFFIX})
-  set_target_properties(fastdeploy_main
+  set_target_properties(${PY_LIBRARY_NAME} PROPERTIES SUFFIX ${PY_EXT_SUFFIX})
+  set_target_properties(${PY_LIBRARY_NAME}
                         PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
-  target_include_directories(fastdeploy_main PRIVATE
+  target_include_directories(${PY_LIBRARY_NAME} PRIVATE
                              $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
                              $<INSTALL_INTERFACE:include>
                              ${PYTHON_INCLUDE_DIR})
 
-  target_include_directories(fastdeploy_main PUBLIC ${PROJECT_SOURCE_DIR}/third_party/pybind11/include)
+  target_include_directories(${PY_LIBRARY_NAME} PUBLIC ${PROJECT_SOURCE_DIR}/third_party/pybind11/include)
 
   if(APPLE)
-    set_target_properties(fastdeploy_main
+    set_target_properties(${PY_LIBRARY_NAME}
                           PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
   endif()
 
-  if(APPLE)
-    target_link_libraries(fastdeploy_main PUBLIC fastdeploy)
-  elseif(WIN32)
-    target_link_libraries(fastdeploy_main PUBLIC fastdeploy)
-  else()
-    target_link_libraries(fastdeploy_main PUBLIC fastdeploy)
-  endif()
+  target_link_libraries(${PY_LIBRARY_NAME} PUBLIC ${LIBRARY_NAME})
 
   if(MSVC)
-    target_link_libraries(fastdeploy_main PRIVATE ${PYTHON_LIBRARIES})
-    target_compile_options(fastdeploy_main
+    target_link_libraries(${PY_LIBRARY_NAME} PRIVATE ${PYTHON_LIBRARIES})
+    target_compile_options(${PY_LIBRARY_NAME}
                            PRIVATE /MP
                            /wd4244 # 'argument': conversion from 'google::
                                    # protobuf::uint64' to 'int', possible
@@ -285,7 +296,7 @@ if(BUILD_FASTDEPLOY_PYTHON)
                                    # possible loss of data
                            /wd4996 # The second parameter is ignored.
                            ${EXTRA_FLAGS})
-    target_compile_options(fastdeploy_main PRIVATE $<$<NOT:$<CONFIG:Debug>>:/MT> $<$<CONFIG:Debug>:/MTd>)
+    target_compile_options(${PY_LIBRARY_NAME} PRIVATE $<$<NOT:$<CONFIG:Debug>>:/MT> $<$<CONFIG:Debug>:/MTd>)
   endif()
 
 endif(BUILD_FASTDEPLOY_PYTHON)

FastDeploy.cmake.in
@@ -8,6 +8,7 @@ set(ENABLE_TRT_BACKEND @ENABLE_TRT_BACKEND@)
 set(ENABLE_PADDLE_FRONTEND @ENABLE_PADDLE_FRONTEND@)
 set(ENABLE_VISION @ENABLE_VISION@)
 set(ENABLE_OPENCV_CUDA @ENABLE_OPENCV_CUDA@)
+set(LIBRARY_NAME @LIBRARY_NAME@)
 
 set(FASTDEPLOY_LIBS "")
 set(FASTDEPLOY_INCS "")
@@ -17,7 +18,7 @@ if(NOT CMAKE_CXX_STANDARD)
   set(CMAKE_CXX_STANDARD 11)
 endif()
 
-find_library(FDLIB fastdeploy ${CMAKE_CURRENT_LIST_DIR}/lib)
+find_library(FDLIB ${LIBRARY_NAME} ${CMAKE_CURRENT_LIST_DIR}/lib)
 list(APPEND FASTDEPLOY_LIBS ${FDLIB})
 
 if(ENABLE_ORT_BACKEND)
@@ -51,13 +52,17 @@ if(WITH_GPU)
   list(APPEND FASTDEPLOY_LIBS ${CUDA_LIB})
 
   if (ENABLE_TRT_BACKEND)
-    if (NOT TRT_DIRECTORY)
-      message(FATAL_ERROR "[FastDeploy] Please define TRT_DIRECTORY, e.g -DTRT_DIRECTORY=/usr/downloads/TensorRT-8.4.1.0")
-    endif()
-    find_library(TRT_INFER_LIB nvinfer ${TRT_DIRECTORY}/lib)
-    find_library(TRT_ONNX_LIB nvonnxparser ${TRT_DIRECTORY}/lib)
-    find_library(TRT_CAFFE_LIB nvcaffe_parser ${TRT_DIRECTORY}/lib)
-    find_library(TRT_PLUGIN_LIB nvinfer_plugin ${TRT_DIRECTORY}/lib)
+    # if (NOT TRT_DIRECTORY)
+    #   message(FATAL_ERROR "[FastDeploy] Please define TRT_DIRECTORY, e.g -DTRT_DIRECTORY=/usr/downloads/TensorRT-8.4.1.0")
+    # endif()
+    # find_library(TRT_INFER_LIB nvinfer ${TRT_DIRECTORY}/lib)
+    # find_library(TRT_ONNX_LIB nvonnxparser ${TRT_DIRECTORY}/lib)
+    # find_library(TRT_CAFFE_LIB nvcaffe_parser ${TRT_DIRECTORY}/lib)
+    # find_library(TRT_PLUGIN_LIB nvinfer_plugin ${TRT_DIRECTORY}/lib)
+    find_library(TRT_INFER_LIB nvinfer ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib)
+    find_library(TRT_ONNX_LIB nvonnxparser ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib)
+    find_library(TRT_CAFFE_LIB nvcaffe_parser ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib)
+    find_library(TRT_PLUGIN_LIB nvinfer_plugin ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib)
     list(APPEND FASTDEPLOY_LIBS ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_CAFFE_LIB} ${TRT_PLUGIN_LIB})
   endif()
 endif()

copy_directory.py (new file)
@@ -0,0 +1,32 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import shutil
+import os
+import sys
+
+
+def copy_directory(src, dst):
+    if os.path.exists(dst):
+        raise Exception("Destination {} is already exist.".format(dst))
+    if not os.path.exists(src):
+        raise Exception("Source {} is not exist.".format(src))
+    try:
+        shutil.copytree(src, dst, symlinks=True)
+    except:
+        raise Exception("Copy {} to {} failed.".format(src, dst))
+
+
+if __name__ == "__main__":
+    copy_directory(sys.argv[1], sys.argv[2])
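
For reference, the execute_process() call in CMakeLists.txt above drives this script. Invoked by hand it behaves like the following sketch (both paths are placeholders for illustration):

    from copy_directory import copy_directory

    # Same effect as the execute_process() call: copy the TensorRT libs into
    # the build tree, preserving symlinks; raises if the destination exists.
    copy_directory("/usr/downloads/TensorRT-8.4.1.0/lib",
                   "build/third_libs/install/tensorrt/lib")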

fastdeploy/backends/ort/ort_backend.cc
@@ -81,10 +81,10 @@ void OrtBackend::BuildOption(const OrtBackendOption& option) {
     }
   }
   if (!support_cuda) {
-    FDLogger() << "[WARN] Compiled fastdeploy with onnxruntime doesn't "
+    FDWARNING << "Compiled fastdeploy with onnxruntime doesn't "
                  "support GPU, the available providers are "
               << providers_msg << "will fallback to CPUExecutionProvider."
               << std::endl;
     option_.use_gpu = false;
   } else {
     FDASSERT(option.gpu_id == 0, "Requires gpu_id == 0, but now gpu_id = " +

fastdeploy/backends/tensorrt/trt_backend.cc
@@ -52,6 +52,61 @@ std::vector<int> toVec(const nvinfer1::Dims& dim) {
   return out;
 }
 
+bool CheckDynamicShapeConfig(const paddle2onnx::OnnxReader& reader,
+                             const TrtBackendOption& option) {
+  paddle2onnx::ModelTensorInfo inputs[reader.NumInputs()];
+  std::string input_shapes[reader.NumInputs()];
+  for (int i = 0; i < reader.NumInputs(); ++i) {
+    reader.GetInputInfo(i, &inputs[i]);
+
+    // change 0 to -1, when input_dim is a string, onnx will make it to zero
+    for (int j = 0; j < inputs[i].rank; ++j) {
+      if (inputs[i].shape[j] <= 0) {
+        inputs[i].shape[j] = -1;
+      }
+    }
+
+    input_shapes[i] = "";
+    for (int j = 0; j < inputs[i].rank; ++j) {
+      if (j != inputs[i].rank - 1) {
+        input_shapes[i] += (std::to_string(inputs[i].shape[j]) + ", ");
+      } else {
+        input_shapes[i] += std::to_string(inputs[i].shape[j]);
+      }
+    }
+  }
+
+  bool all_check_passed = true;
+  for (int i = 0; i < reader.NumInputs(); ++i) {
+    bool contain_unknown_dim = false;
+    for (int j = 0; j < inputs[i].rank; ++j) {
+      if (inputs[i].shape[j] < 0) {
+        contain_unknown_dim = true;
+      }
+    }
+
+    std::string name(inputs[i].name, strlen(inputs[i].name));
+    FDINFO << "The loaded model's input tensor:" << name
+           << " has shape [" + input_shapes[i] << "]." << std::endl;
+    if (contain_unknown_dim) {
+      auto iter1 = option.min_shape.find(name);
+      auto iter2 = option.max_shape.find(name);
+      auto iter3 = option.opt_shape.find(name);
+      if (iter1 == option.min_shape.end() || iter2 == option.max_shape.end() ||
+          iter3 == option.opt_shape.end()) {
+        FDERROR << "The loaded model's input tensor:" << name
+                << " has dynamic shape [" + input_shapes[i] +
+                       "], but didn't configure it's shape for tensorrt with "
+                       "SetTrtInputShape correctly."
+                << std::endl;
+        all_check_passed = false;
+      }
+    }
+  }
+
+  return all_check_passed;
+}
+
 bool TrtBackend::InitFromTrt(const std::string& trt_engine_file,
                              const TrtBackendOption& option) {
   if (initialized_) {
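
For orientation: this check refuses to build an engine until every dynamic input has min/opt/max profiles registered. A minimal sketch of satisfying it through the Python bindings added later in this diff (the import style and the tensor name "image" are assumptions for illustration):

    import fastdeploy as fd

    option = fd.RuntimeOption()
    option.use_gpu(0)
    option.use_trt_backend()
    # one call per dynamic input: name, min_shape, opt_shape, max_shape
    option.set_trt_input_shape("image", [1, 3, 224, 224],
                               [4, 3, 224, 224], [8, 3, 224, 224])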
@@ -167,13 +222,17 @@ bool TrtBackend::InitFromOnnx(const std::string& model_file,
         onnx_reader.output_names[i] + strlen(onnx_reader.output_names[i]));
     outputs_order_[name] = i;
   }
+  if (!CheckDynamicShapeConfig(onnx_reader, option)) {
+    FDERROR << "TrtBackend::CheckDynamicShapeConfig failed." << std::endl;
+    return false;
+  }
+
   if (option.serialize_file != "") {
     std::ifstream fin(option.serialize_file, std::ios::binary | std::ios::in);
     if (fin) {
-      FDLogger() << "Detect serialized TensorRT Engine file in "
+      FDINFO << "Detect serialized TensorRT Engine file in "
              << option.serialize_file << ", will load it directly."
              << std::endl;
       fin.close();
       return InitFromTrt(option.serialize_file);
     }
@@ -311,9 +370,9 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model,
 
   if (option.enable_fp16) {
     if (!builder->platformHasFastFp16()) {
-      FDLogger() << "[WARN] Detected FP16 is not supported in the current GPU, "
+      FDWARNING << "Detected FP16 is not supported in the current GPU, "
                    "will use FP32 instead."
                 << std::endl;
     } else {
       config->setFlag(nvinfer1::BuilderFlag::kFP16);
     }
@@ -330,33 +389,13 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model,
     return false;
   }
 
-  FDLogger() << "Start to building TensorRT Engine..." << std::endl;
+  FDINFO << "Start to building TensorRT Engine..." << std::endl;
   bool fp16 = builder->platformHasFastFp16();
   builder->setMaxBatchSize(option.max_batch_size);
 
   config->setMaxWorkspaceSize(option.max_workspace_size);
 
-  if (option.fixed_shape.size() > 0) {
-    auto profile = builder->createOptimizationProfile();
-    for (auto& item : option.fixed_shape) {
-      FDASSERT(profile->setDimensions(item.first.c_str(),
-                                      nvinfer1::OptProfileSelector::kMIN,
-                                      sample::toDims(item.second)),
-               "[TrtBackend] Failed to set min_shape for input: " + item.first +
-                   " in TrtBackend.");
-      FDASSERT(profile->setDimensions(item.first.c_str(),
-                                      nvinfer1::OptProfileSelector::kOPT,
-                                      sample::toDims(item.second)),
-               "[TrtBackend] Failed to set min_shape for input: " + item.first +
-                   " in TrtBackend.");
-      FDASSERT(profile->setDimensions(item.first.c_str(),
-                                      nvinfer1::OptProfileSelector::kMAX,
-                                      sample::toDims(item.second)),
-               "[TrtBackend] Failed to set min_shape for input: " + item.first +
-                   " in TrtBackend.");
-    }
-    config->addOptimizationProfile(profile);
-  } else if (option.max_shape.size() > 0) {
+  if (option.max_shape.size() > 0) {
     auto profile = builder->createOptimizationProfile();
     FDASSERT(option.max_shape.size() == option.min_shape.size() &&
                  option.min_shape.size() == option.opt_shape.size(),
@@ -416,10 +455,10 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model,
     return false;
   }
 
-  FDLogger() << "TensorRT Engine is built succussfully." << std::endl;
+  FDINFO << "TensorRT Engine is built succussfully." << std::endl;
   if (option.serialize_file != "") {
-    FDLogger() << "Serialize TensorRTEngine to local file "
-               << option.serialize_file << "." << std::endl;
+    FDINFO << "Serialize TensorRTEngine to local file " << option.serialize_file
+           << "." << std::endl;
     std::ofstream engine_file(option.serialize_file.c_str());
     if (!engine_file) {
       FDERROR << "Failed to open " << option.serialize_file << " to write."
@@ -428,11 +467,11 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model,
     }
     engine_file.write(static_cast<char*>(plan->data()), plan->size());
     engine_file.close();
-    FDLogger() << "TensorRTEngine is serialized to local file "
+    FDINFO << "TensorRTEngine is serialized to local file "
            << option.serialize_file
           << ", we can load this model from the seralized engine "
             "directly next time."
          << std::endl;
   }
   return true;
 }

fastdeploy/backends/tensorrt/trt_backend.h
@@ -46,7 +46,6 @@ struct TrtBackendOption {
   bool enable_int8 = false;
   size_t max_batch_size = 32;
   size_t max_workspace_size = 1 << 30;
-  std::map<std::string, std::vector<int32_t>> fixed_shape;
   std::map<std::string, std::vector<int32_t>> max_shape;
   std::map<std::string, std::vector<int32_t>> min_shape;
   std::map<std::string, std::vector<int32_t>> opt_shape;

fastdeploy/fastdeploy_model.cc
@@ -132,9 +132,9 @@ void FastDeployModel::EnableDebug() {
 #ifdef FASTDEPLOY_DEBUG
   debug_ = true;
 #else
-  FDLogger() << "The compile FastDeploy is not with -DENABLE_DEBUG=ON, so "
+  FDWARNING << "The compile FastDeploy is not with -DENABLE_DEBUG=ON, so "
                "cannot enable debug mode."
             << std::endl;
   debug_ = false;
 #endif
 }

fastdeploy/fastdeploy_runtime.cc
@@ -77,23 +77,23 @@ bool CheckModelFormat(const std::string& model_file,
   if (model_format == Frontend::PADDLE) {
     if (model_file.size() < 8 ||
         model_file.substr(model_file.size() - 8, 8) != ".pdmodel") {
-      FDLogger() << "With model format of Frontend::PADDLE, the model file "
+      FDERROR << "With model format of Frontend::PADDLE, the model file "
                  "should ends with `.pdmodel`, but now it's "
               << model_file << std::endl;
       return false;
     }
   } else if (model_format == Frontend::ONNX) {
     if (model_file.size() < 5 ||
         model_file.substr(model_file.size() - 5, 5) != ".onnx") {
-      FDLogger() << "With model format of Frontend::ONNX, the model file "
+      FDERROR << "With model format of Frontend::ONNX, the model file "
                  "should ends with `.onnx`, but now it's "
               << model_file << std::endl;
       return false;
     }
   } else {
-    FDLogger() << "Only support model format with frontend Frontend::PADDLE / "
+    FDERROR << "Only support model format with frontend Frontend::PADDLE / "
                "Frontend::ONNX."
             << std::endl;
     return false;
   }
   return true;
@@ -116,6 +116,101 @@ Frontend GuessModelFormat(const std::string& model_file) {
   return Frontend::PADDLE;
 }
 
+void RuntimeOption::SetModelPath(const std::string& model_path,
+                                 const std::string& params_path,
+                                 const std::string& _model_format) {
+  if (_model_format == "paddle") {
+    model_file = model_path;
+    params_file = params_path;
+    model_format = Frontend::PADDLE;
+  } else if (_model_format == "onnx") {
+    model_file = model_path;
+    model_format = Frontend::ONNX;
+  } else {
+    FDASSERT(false, "The model format only can be 'paddle' or 'onnx'.");
+  }
+}
+
+void RuntimeOption::UseGpu(int gpu_id) {
+#ifdef WITH_GPU
+  device = Device::GPU;
+  device_id = gpu_id;
+#else
+  FDWARNING << "The FastDeploy didn't compile with GPU, will force to use CPU."
+            << std::endl;
+  device = Device::CPU;
+#endif
+}
+
+void RuntimeOption::UseCpu() { device = Device::CPU; }
+
+void RuntimeOption::SetCpuThreadNum(int thread_num) {
+  FDASSERT(thread_num > 0, "The thread_num must be greater than 0.");
+  cpu_thread_num = thread_num;
+}
+
+// use paddle inference backend
+void RuntimeOption::UsePaddleBackend() {
+#ifdef ENABLE_PADDLE_BACKEND
+  backend = Backend::PDINFER;
+#else
+  FDASSERT(false, "The FastDeploy didn't compile with Paddle Inference.");
+#endif
+}
+
+// use onnxruntime backend
+void RuntimeOption::UseOrtBackend() {
+#ifdef ENABLE_ORT_BACKEND
+  backend = Backend::ORT;
+#else
+  FDASSERT(false, "The FastDeploy didn't compile with OrtBackend.");
+#endif
+}
+
+void RuntimeOption::UseTrtBackend() {
+#ifdef ENABLE_TRT_BACKEND
+  backend = Backend::TRT;
+#else
+  FDASSERT(false, "The FastDeploy didn't compile with TrtBackend.");
+#endif
+}
+
+void RuntimeOption::EnablePaddleMKLDNN() { pd_enable_mkldnn = true; }
+
+void RuntimeOption::DisablePaddleMKLDNN() { pd_enable_mkldnn = false; }
+
+void RuntimeOption::SetPaddleMKLDNNCacheSize(int size) {
+  FDASSERT(size > 0, "Parameter size must greater than 0.");
+  pd_mkldnn_cache_size = size;
+}
+
+void RuntimeOption::SetTrtInputShape(const std::string& input_name,
+                                     const std::vector<int32_t>& min_shape,
+                                     const std::vector<int32_t>& opt_shape,
+                                     const std::vector<int32_t>& max_shape) {
+  trt_min_shape[input_name].clear();
+  trt_max_shape[input_name].clear();
+  trt_opt_shape[input_name].clear();
+  trt_min_shape[input_name].assign(min_shape.begin(), min_shape.end());
+  if (opt_shape.size() == 0) {
+    trt_opt_shape[input_name].assign(min_shape.begin(), min_shape.end());
+  } else {
+    trt_opt_shape[input_name].assign(opt_shape.begin(), opt_shape.end());
+  }
+  if (max_shape.size() == 0) {
+    trt_max_shape[input_name].assign(min_shape.begin(), min_shape.end());
+  } else {
+    trt_max_shape[input_name].assign(max_shape.begin(), max_shape.end());
+  }
+  FDINFO << trt_min_shape[input_name].size() << " "
+         << trt_opt_shape[input_name].size() << " "
+         << trt_max_shape[input_name].size() << std::endl;
+}
+
+void RuntimeOption::EnableTrtFP16() { trt_enable_fp16 = true; }
+
+void RuntimeOption::DisableTrtFP16() { trt_enable_fp16 = false; }
+
 bool Runtime::Init(const RuntimeOption& _option) {
   option = _option;
   if (option.model_format == Frontend::AUTOREC) {
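
Taken together, these methods replace direct field assignment with a small configuration API. A sketch of the intended call pattern from Python, via the bindings added below (top-level re-exports and file names are assumptions for illustration):

    import fastdeploy as fd

    option = fd.RuntimeOption()
    option.set_model_path("model.pdmodel", "model.pdiparams")  # 'paddle' format
    option.use_gpu(0)           # falls back to CPU when built without WITH_GPU
    option.use_ort_backend()    # asserts if ENABLE_ORT_BACKEND was off at build time
    option.set_cpu_thread_num(4)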
@@ -229,7 +324,6 @@ void Runtime::CreateTrtBackend() {
   trt_option.enable_int8 = option.trt_enable_int8;
   trt_option.max_batch_size = option.trt_max_batch_size;
   trt_option.max_workspace_size = option.trt_max_workspace_size;
-  trt_option.fixed_shape = option.trt_fixed_shape;
   trt_option.max_shape = option.trt_max_shape;
   trt_option.min_shape = option.trt_min_shape;
   trt_option.opt_shape = option.trt_opt_shape;

fastdeploy/fastdeploy_runtime.h
@@ -36,8 +36,58 @@ bool CheckModelFormat(const std::string& model_file,
 Frontend GuessModelFormat(const std::string& model_file);
 
 struct FASTDEPLOY_DECL RuntimeOption {
-  Backend backend = Backend::UNKNOWN;
+  // set path of model file and params file
+  // for onnx, only need to define model_file, but also need to
+  // define model_format
+  // model_format support 'paddle' / 'onnx' now.
+  void SetModelPath(const std::string& model_path,
+                    const std::string& params_path = "",
+                    const std::string& _model_format = "paddle");
+
+  // set model inference in CPU
+  void UseCpu();
+
+  // set model inference in GPU
+  void UseGpu(int gpu_id = 0);
+
+  // set number of thread while inference in CPU
+  void SetCpuThreadNum(int thread_num);
+
+  // use paddle inference backend
+  void UsePaddleBackend();
+
+  // use onnxruntime backend
+  void UseOrtBackend();
+
+  // use tensorrt backend
+  void UseTrtBackend();
+
+  // enable mkldnn while use paddle inference in CPU
+  void EnablePaddleMKLDNN();
+  // disable mkldnn while use paddle inference in CPU
+  void DisablePaddleMKLDNN();
+
+  // set size of cached shape while enable mkldnn with paddle inference backend
+  void SetPaddleMKLDNNCacheSize(int size);
+
+  // set tensorrt shape while the inputs of model contain dynamic shape
+  // min_shape: the minimum shape
+  // opt_shape: the most common shape while inference, default be empty
+  // max_shape: the maximum shape, default be empty
+  // if opt_shape, max_shape are empty, they will keep same with the min_shape
+  // which means the shape will be fixed as min_shape while inference
+  void SetTrtInputShape(
+      const std::string& input_name, const std::vector<int32_t>& min_shape,
+      const std::vector<int32_t>& opt_shape = std::vector<int32_t>(),
+      const std::vector<int32_t>& max_shape = std::vector<int32_t>());
+
+  // enable half precision while use tensorrt backend
+  void EnableTrtFP16();
+  // disable half precision, change to full precision(float32)
+  void DisableTrtFP16();
+
+  Backend backend = Backend::UNKNOWN;
   // for cpu inference and preprocess
   int cpu_thread_num = 8;
   int device_id = 0;
@@ -62,7 +112,6 @@ struct FASTDEPLOY_DECL RuntimeOption {
   int pd_mkldnn_cache_size = 1;
 
   // ======Only for Trt Backend=======
-  std::map<std::string, std::vector<int32_t>> trt_fixed_shape;
   std::map<std::string, std::vector<int32_t>> trt_max_shape;
   std::map<std::string, std::vector<int32_t>> trt_min_shape;
   std::map<std::string, std::vector<int32_t>> trt_opt_shape;
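
Note that the removed trt_fixed_shape map has no direct replacement: per the comments in the header above, passing only min_shape to SetTrtInputShape now pins an input to a fixed shape, because opt_shape and max_shape default to min_shape. For example (the input name is illustrative):

    # min_shape only: opt and max default to it, so "x" stays fixed at 1x3x640x640
    option.set_trt_input_shape("x", [1, 3, 640, 640])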
@@ -55,27 +55,33 @@ class FastDeployModel:
         return self._model.initialized()
 
 
-class FastDeployRuntime:
+class Runtime:
     def __init__(self, runtime_option):
-        self._runtime = C.Runtime();
-        assert self._runtime.init(runtime_option), "Initialize FastDeployRuntime Failed!"
+        self._runtime = C.Runtime()
+        assert self._runtime.init(runtime_option), "Initialize Runtime Failed!"
 
     def infer(self, data):
         assert isinstance(data, dict), "The input data should be type of dict."
         return self._runtime.infer(data)
 
     def num_inputs(self):
-        return self._runtime.num_inputs();
+        return self._runtime.num_inputs()
 
     def num_outputs(self):
-        return self._runtime.num_outputs();
+        return self._runtime.num_outputs()
 
     def get_input_info(self, index):
-        assert isinstance(index, int), "The input parameter index should be type of int."
-        assert index < self.num_inputs(), "The input parameter index:{} should less than number of inputs:{}.".format(index, self.num_inputs)
+        assert isinstance(
+            index, int), "The input parameter index should be type of int."
+        assert index < self.num_inputs(
+        ), "The input parameter index:{} should less than number of inputs:{}.".format(
+            index, self.num_inputs)
         return self._runtime.get_input_info(index)
 
     def get_output_info(self, index):
-        assert isinstance(index, int), "The input parameter index should be type of int."
-        assert index < self.num_outputs(), "The input parameter index:{} should less than number of outputs:{}.".format(index, self.num_outputs)
+        assert isinstance(
+            index, int), "The input parameter index should be type of int."
+        assert index < self.num_outputs(
+        ), "The input parameter index:{} should less than number of outputs:{}.".format(
+            index, self.num_outputs)
        return self._runtime.get_output_info(index)
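
After the rename, user code constructs Runtime directly; infer() takes a dict mapping input names to numpy arrays and returns a list of numpy arrays (see the pybind lambda below). A sketch reusing the option object from the earlier examples (the top-level export of Runtime is an assumption):

    import numpy as np

    runtime = fd.Runtime(option)
    print(runtime.num_inputs(), runtime.num_outputs())
    outputs = runtime.infer({"image": np.zeros((1, 3, 224, 224), np.float32)})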
@@ -19,6 +19,20 @@ namespace fastdeploy {
 void BindRuntime(pybind11::module& m) {
   pybind11::class_<RuntimeOption>(m, "RuntimeOption")
       .def(pybind11::init())
+      .def("set_model_path", &RuntimeOption::SetModelPath)
+      .def("use_gpu", &RuntimeOption::UseGpu)
+      .def("use_cpu", &RuntimeOption::UseCpu)
+      .def("set_cpu_thread_num", &RuntimeOption::SetCpuThreadNum)
+      .def("use_paddle_backend", &RuntimeOption::UsePaddleBackend)
+      .def("use_ort_backend", &RuntimeOption::UseOrtBackend)
+      .def("use_trt_backend", &RuntimeOption::UseTrtBackend)
+      .def("enable_paddle_mkldnn", &RuntimeOption::EnablePaddleMKLDNN)
+      .def("disable_paddle_mkldnn", &RuntimeOption::DisablePaddleMKLDNN)
+      .def("set_paddle_mkldnn_cache_size",
+           &RuntimeOption::SetPaddleMKLDNNCacheSize)
+      .def("set_trt_input_shape", &RuntimeOption::SetTrtInputShape)
+      .def("enable_trt_fp16", &RuntimeOption::EnableTrtFP16)
+      .def("disable_trt_fp16", &RuntimeOption::DisableTrtFP16)
       .def_readwrite("model_file", &RuntimeOption::model_file)
       .def_readwrite("params_file", &RuntimeOption::params_file)
       .def_readwrite("model_format", &RuntimeOption::model_format)
@@ -30,7 +44,6 @@ void BindRuntime(pybind11::module& m) {
       .def_readwrite("ort_inter_op_num_threads",
                      &RuntimeOption::ort_inter_op_num_threads)
       .def_readwrite("ort_execution_mode", &RuntimeOption::ort_execution_mode)
-      .def_readwrite("trt_fixed_shape", &RuntimeOption::trt_fixed_shape)
       .def_readwrite("trt_max_shape", &RuntimeOption::trt_max_shape)
       .def_readwrite("trt_opt_shape", &RuntimeOption::trt_opt_shape)
       .def_readwrite("trt_min_shape", &RuntimeOption::trt_min_shape)
@@ -49,41 +62,43 @@ void BindRuntime(pybind11::module& m) {
   pybind11::class_<Runtime>(m, "Runtime")
       .def(pybind11::init())
      .def("init", &Runtime::Init)
-      .def("infer", [](Runtime& self,
-                       std::map<std::string, pybind11::array>& data) {
-        std::vector<FDTensor> inputs(data.size());
-        int index = 0;
-        for (auto iter = data.begin(); iter != data.end(); ++iter) {
-          inputs[index].dtype = NumpyDataTypeToFDDataType(iter->second.dtype());
-          inputs[index].shape.insert(
-              inputs[index].shape.begin(), iter->second.shape(),
-              iter->second.shape() + iter->second.ndim());
-          // TODO(jiangjiajun) Maybe skip memory copy is a better choice
-          // use SetExternalData
-          inputs[index].data.resize(iter->second.nbytes());
-          memcpy(inputs[index].data.data(), iter->second.mutable_data(),
-                 iter->second.nbytes());
-          inputs[index].name = iter->first;
-        }
-
-        std::vector<FDTensor> outputs(self.NumOutputs());
-        self.Infer(inputs, &outputs);
-
-        std::vector<pybind11::array> results;
-        results.reserve(outputs.size());
-        for (size_t i = 0; i < outputs.size(); ++i) {
-          auto numpy_dtype = FDDataTypeToNumpyDataType(outputs[i].dtype);
-          results.emplace_back(pybind11::array(numpy_dtype, outputs[i].shape));
-          memcpy(results[i].mutable_data(), outputs[i].data.data(),
-                 outputs[i].Numel() * FDDataTypeSize(outputs[i].dtype));
-        }
-        return results;
-      })
+      .def("infer",
+           [](Runtime& self, std::map<std::string, pybind11::array>& data) {
+             std::vector<FDTensor> inputs(data.size());
+             int index = 0;
+             for (auto iter = data.begin(); iter != data.end(); ++iter) {
+               inputs[index].dtype =
+                   NumpyDataTypeToFDDataType(iter->second.dtype());
+               inputs[index].shape.insert(
+                   inputs[index].shape.begin(), iter->second.shape(),
+                   iter->second.shape() + iter->second.ndim());
+               // TODO(jiangjiajun) Maybe skip memory copy is a better choice
+               // use SetExternalData
+               inputs[index].data.resize(iter->second.nbytes());
+               memcpy(inputs[index].data.data(), iter->second.mutable_data(),
+                      iter->second.nbytes());
+               inputs[index].name = iter->first;
+             }
+
+             std::vector<FDTensor> outputs(self.NumOutputs());
+             self.Infer(inputs, &outputs);
+
+             std::vector<pybind11::array> results;
+             results.reserve(outputs.size());
+             for (size_t i = 0; i < outputs.size(); ++i) {
+               auto numpy_dtype = FDDataTypeToNumpyDataType(outputs[i].dtype);
+               results.emplace_back(
+                   pybind11::array(numpy_dtype, outputs[i].shape));
+               memcpy(results[i].mutable_data(), outputs[i].data.data(),
+                      outputs[i].Numel() * FDDataTypeSize(outputs[i].dtype));
+             }
+             return results;
+           })
       .def("num_inputs", &Runtime::NumInputs)
       .def("num_outputs", &Runtime::NumOutputs)
       .def("get_input_info", &Runtime::GetInputInfo)
      .def("get_output_info", &Runtime::GetOutputInfo)
      .def_readonly("option", &Runtime::option);
 
   pybind11::enum_<Backend>(m, "Backend", pybind11::arithmetic(),
                            "Backend for inference.")

fastdeploy/utils/utils.cc
@@ -31,4 +31,4 @@ FDLogger& FDLogger::operator<<(std::ostream& (*os)(std::ostream&)) {
   return *this;
 }
 
 }  // namespace fastdeploy

fastdeploy/utils/utils.h
@@ -69,13 +69,17 @@ class FASTDEPLOY_DECL FDLogger {
 #define __REL_FILE__ __FILE__
 #endif
 
 #define FDERROR \
   FDLogger(true, "[ERROR]") \
       << __REL_FILE__ << "(" << __LINE__ << ")::" << __FUNCTION__ << "\t"
 
-#define FDERROR \
-  FDLogger(true, "[ERROR]") << __REL_FILE__ << "(" << __LINE__ \
-                            << ")::" << __FUNCTION__ << "\t"
+#define FDWARNING \
+  FDLogger(true, "[WARNING]") \
+      << __REL_FILE__ << "(" << __LINE__ << ")::" << __FUNCTION__ << "\t"
+
+#define FDINFO \
+  FDLogger(true, "[INFO]") << __REL_FILE__ << "(" << __LINE__ \
+                           << ")::" << __FUNCTION__ << "\t"
 
 #define FDASSERT(condition, message) \
   if (!(condition)) { \

fastdeploy/vision/common/processors/cast.cc
@@ -29,9 +29,8 @@ bool Cast::CpuRun(Mat* mat) {
       im->convertTo(*im, CV_64FC(c));
     }
   } else {
-    FDLogger() << "[WARN] Cast not support for " << dtype_
-               << " now! will skip this operation."
-               << std::endl;
+    FDWARNING << "Cast not support for " << dtype_
+              << " now! will skip this operation." << std::endl;
   }
   return true;
 }
@@ -49,9 +48,8 @@ bool Cast::GpuRun(Mat* mat) {
       im->convertTo(*im, CV_64FC(c));
     }
   } else {
-    FDLogger() << "[WARN] Cast not support for " << dtype_
-               << " now! will skip this operation."
-               << std::endl;
+    FDWARNING << "Cast not support for " << dtype_
+              << " now! will skip this operation." << std::endl;
   }
   return true;
 }
@@ -62,5 +60,5 @@ bool Cast::Run(Mat* mat, const std::string& dtype, ProcLib lib) {
   return c(mat, lib);
 }
 
 }  // namespace vision
 }  // namespace fastdeploy
@@ -5,7 +5,7 @@ CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
 # add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
 
 # Specify the path of the downloaded and extracted fastdeploy library
-set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.3.0/)
+set(FASTDEPLOY_INSTALL_DIR /fastdeploy/CustomOp/FastDeploy/build1/fastdeploy-linux-x64-gpu-0.3.0)
 
 include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
 
@@ -23,11 +23,7 @@ int main() {
   std::string img_path = "000000014439_640x640.jpg";
   std::string vis_path = "vis.jpeg";
 
-  auto option = fastdeploy::RuntimeOption();
-  option.device = fastdeploy::Device::CPU;
-  option.backend = fastdeploy::Backend::PDINFER;
-  auto model =
-      vis::ppdet::PPYOLOE(model_file, params_file, config_file, option);
+  auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file);
   if (!model.Initialized()) {
     std::cerr << "Init Failed." << std::endl;
     return -1;
@@ -5,7 +5,8 @@ CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
 # add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
 
 # Specify the path of the downloaded and extracted fastdeploy library
-set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.3.0/)
+set(FASTDEPLOY_INSTALL_DIR /fastdeploy/CustomOp/FastDeploy/build1/fastdeploy-linux-x64-gpu-0.3.0)
+
 
 include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
 
@@ -5,7 +5,7 @@ CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
 # add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
 
 # Specify the path of the downloaded and extracted fastdeploy library
-set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.0.3/)
+set(FASTDEPLOY_INSTALL_DIR /fastdeploy/CustomOp/FastDeploy/build1/fastdeploy-linux-x64-gpu-0.3.0)
 
 include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
 
@@ -20,4 +20,3 @@ cv2.imwrite("vis_result.jpg", im)
 
 # Print the prediction results
 print(result)
-print(model.runtime_option)

setup.py
@@ -126,7 +126,7 @@ class ONNXCommand(setuptools.Command):
         pass
 
 
-def GetAllFiles(dirname):
+def get_all_files(dirname):
     files = list()
     for root, dirs, filenames in os.walk(dirname):
         for f in filenames:
@@ -353,23 +353,22 @@ if sys.argv[1] == "install" or sys.argv[1] == "bdist_wheel":
 
     if os.path.exists("fastdeploy/libs/third_libs"):
         shutil.rmtree("fastdeploy/libs/third_libs")
-    shutil.copytree(
-        ".setuptools-cmake-build/third_libs/install",
-        "fastdeploy/libs/third_libs",
-        symlinks=True)
-
-    all_files = GetAllFiles("fastdeploy/libs")
-    for f in all_files:
-        package_data[PACKAGE_NAME].append(os.path.relpath(f, "fastdeploy"))
+    # shutil.copytree(
+    #     ".setuptools-cmake-build/third_libs/install",
+    #     "fastdeploy/libs/third_libs",
+    #     symlinks=True)
 
     if platform.system().lower() == "linux":
         rpaths = ["${ORIGIN}"]
-        for root, dirs, files in os.walk("fastdeploy/libs/third_libs"):
+        for root, dirs, files in os.walk(
+                ".setuptools-cmake-build/third_libs/install"):
             for d in dirs:
                 if d == "lib":
                     path = os.path.relpath(
-                        os.path.join(root, d), "fastdeploy/libs")
-                    rpaths.append("${ORIGIN}/" + format(path))
+                        os.path.join(root, d),
+                        ".setuptools-cmake-build/third_libs/install")
+                    rpaths.append("${ORIGIN}/" + os.path.join(
+                        "libs/third_libs", path))
         rpaths = ":".join(rpaths)
         command = "patchelf --set-rpath '{}' ".format(rpaths) + os.path.join(
             "fastdeploy/libs", pybind_so_file)
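
For an assumed layout with onnxruntime and tensorrt under .setuptools-cmake-build/third_libs/install, the loop above resolves to rpath entries like the following (illustrative values; "onnxruntime/lib" is the relpath of root/lib against the install dir):

    rpaths = ["${ORIGIN}",
              "${ORIGIN}/libs/third_libs/onnxruntime/lib",
              "${ORIGIN}/libs/third_libs/tensorrt/lib"]
    # joined with ":" and written into the pybind .so via patchelf --set-rpath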
@@ -379,6 +378,12 @@ if sys.argv[1] == "install" or sys.argv[1] == "bdist_wheel":
             command) == 0, "patchelf {} failed, the command: {}".format(
                 command, pybind_so_file)
 
+    all_files = get_all_files("fastdeploy/libs")
+    for f in all_files:
+        if f.count("third_libs") > 0:
+            continue
+        package_data[PACKAGE_NAME].append(os.path.relpath(f, "fastdeploy"))
+
 setuptools.setup(
     name=PACKAGE_NAME,
     version=VersionInfo.version,