Merge remote-tracking branch 'upstream/develop' into doc

This commit is contained in:
felixhjh
2023-02-15 07:21:05 +00:00
30 changed files with 1741 additions and 79 deletions

View File

@@ -74,7 +74,7 @@ option(WITH_TIMVX "Whether to compile for TIMVX deploy." OFF)
option(WITH_KUNLUNXIN "Whether to compile for KunlunXin XPU deploy." OFF)
option(WITH_TESTING "Whether to compile with unittest." OFF)
option(WITH_CAPI "Whether to compile with c api." OFF)
option(WITH_CSHARPAPI "Whether to compile with c# api" OFF)
############################# Options for Android cross compiling #########################
if(ANDROID)
option(WITH_OPENCV_STATIC "Whether to use OpenCV static lib for Android." OFF)
@@ -424,8 +424,15 @@ if(WITH_CAPI)
endif()
endif()
if(WITH_CSHARPAPI)
if(MSVC)
add_subdirectory(${PROJECT_SOURCE_DIR}/csharp)
endif()
endif()
configure_file(${PROJECT_SOURCE_DIR}/FastDeploy.cmake.in ${PROJECT_SOURCE_DIR}/FastDeploy.cmake @ONLY)
configure_file(${PROJECT_SOURCE_DIR}/FastDeployCSharp.cmake.in ${PROJECT_SOURCE_DIR}/FastDeployCSharp.cmake @ONLY)
configure_file(${PROJECT_SOURCE_DIR}/python/fastdeploy/c_lib_wrap.py.in ${PROJECT_SOURCE_DIR}/python/fastdeploy/c_lib_wrap.py)
configure_file(${PROJECT_SOURCE_DIR}/python/scripts/process_libraries.py.in ${PROJECT_SOURCE_DIR}/python/scripts/process_libraries.py)
@@ -678,6 +685,7 @@ install(
${PROJECT_SOURCE_DIR}/ThirdPartyNotices.txt
${PROJECT_SOURCE_DIR}/VERSION_NUMBER
${PROJECT_SOURCE_DIR}/FastDeploy.cmake
${PROJECT_SOURCE_DIR}/FastDeployCSharp.cmake
${PROJECT_SOURCE_DIR}/cmake/FastDeployConfig.cmake
${PROJECT_SOURCE_DIR}/cmake/utils.cmake
${PROJECT_SOURCE_DIR}/cmake/openmp.cmake

14
FastDeployCSharp.cmake Normal file
View File

@@ -0,0 +1,14 @@
# References needed by consumer projects that link the FastDeploy C# binding:
# the standard .NET Framework assemblies plus the prebuilt binding DLL that is
# installed next to this file under csharp_lib/.
# NOTE(review): this file appears to be generated from FastDeployCSharp.cmake.in
# via configure_file() (see the root CMakeLists.txt); prefer editing the .in
# template so the two do not drift apart — confirm before hand-editing.
list(APPEND FASTDEPLOY_DOTNET_REFERENCES
     "Microsoft.CSharp"
     "System"
     "System.Core"
     "System.Data"
     "System.Deployment"
     "System.Drawing"
     "System.Net.Http"
     "System.Xml"
     "System.Reflection"
     "${CMAKE_CURRENT_LIST_DIR}/csharp_lib/fastdeploy_csharp.dll")
# NuGet package references in the "name_version" form expected by the
# VS_PACKAGE_REFERENCES target property (underscore separates name and version).
set(FASTDEPLOY_PACKAGE_REFERENCES "OpenCvSharp4_4.7.0.20230115;OpenCvSharp4.runtime.win_4.7.0.20230115")

13
FastDeployCSharp.cmake.in Normal file
View File

@@ -0,0 +1,13 @@
# Template (processed by configure_file() with @ONLY in the root CMakeLists.txt)
# for FastDeployCSharp.cmake. It collects the references a consumer project
# needs to use the FastDeploy C# binding: the standard .NET Framework
# assemblies plus the prebuilt binding DLL installed under csharp_lib/.
# ${CMAKE_CURRENT_LIST_DIR} is intentionally NOT expanded here (@ONLY); it is
# resolved when the generated file is include()d by the consumer.
list(APPEND FASTDEPLOY_DOTNET_REFERENCES
     "Microsoft.CSharp"
     "System"
     "System.Core"
     "System.Data"
     "System.Deployment"
     "System.Drawing"
     "System.Net.Http"
     "System.Xml"
     "System.Reflection"
     "${CMAKE_CURRENT_LIST_DIR}/csharp_lib/fastdeploy_csharp.dll")
# NuGet package references in the "name_version" form expected by the
# VS_PACKAGE_REFERENCES target property (underscore separates name and version).
set(FASTDEPLOY_PACKAGE_REFERENCES "OpenCvSharp4_4.7.0.20230115;OpenCvSharp4.runtime.win_4.7.0.20230115")

58
csharp/CMakeLists.txt Normal file
View File

@@ -0,0 +1,58 @@
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##################################### Building: FastDeploy C# API #######################################
# Builds the FastDeploy C# binding assembly (fastdeploy_csharp.dll) using
# CMake's CSharp language support (Visual Studio generators only) and installs
# it into csharp_lib/.
#
# Fixes vs. original: cmake_minimum_required() must run BEFORE project() so
# that policy defaults are established first; list(REMOVE_ITEM) is guarded
# because it errors out when handed an empty value list; legacy uppercase
# command names lowercased.
cmake_minimum_required(VERSION 3.10)
project(fastdeploy_csharp CSharp)

option(ENABLE_VISION "Whether to enable vision models usage." OFF)

message("fastdeploy_csharp_SOURCE_DIR: ${fastdeploy_csharp_SOURCE_DIR}")

# NOTE(review): GLOB_RECURSE will not notice newly added .cs files until the
# next re-configure; an explicit source list (or CONFIGURE_DEPENDS, 3.12+)
# would be more robust.
file(GLOB_RECURSE DEPLOY_CSHARPAPI_SRCS ${fastdeploy_csharp_SOURCE_DIR}/fastdeploy/*.cs)
if(NOT ENABLE_VISION)
  file(GLOB_RECURSE DEPLOY_VISION_CSHARPAPI_SRCS ${fastdeploy_csharp_SOURCE_DIR}/fastdeploy/vision/*.cs)
  # list(REMOVE_ITEM) requires at least one value; skip when the glob is empty.
  if(DEPLOY_VISION_CSHARPAPI_SRCS)
    list(REMOVE_ITEM DEPLOY_CSHARPAPI_SRCS ${DEPLOY_VISION_CSHARPAPI_SRCS})
  endif()
endif()

# Define the DLL target, including all relevant project files.
add_library(${PROJECT_NAME} SHARED ${DEPLOY_CSHARPAPI_SRCS})

# Set the C# language version (defaults to 3.0 if not set).
set(CMAKE_CSharp_FLAGS "/langversion:10")

# .NET Framework assemblies referenced by the generated Visual Studio project.
set_property(TARGET ${PROJECT_NAME} PROPERTY VS_DOTNET_REFERENCES
  "Microsoft.CSharp"
  "System"
  "System.Core"
  "System.Data"
  "System.Deployment"
  "System.Drawing"
  "System.Net.Http"
  "System.Xml"
)
# NuGet package reference ("name_version" format required by this property).
set_property(TARGET ${PROJECT_NAME}
  PROPERTY VS_PACKAGE_REFERENCES "OpenCvSharp4_4.7.0.20230115"
)
##################################### Installing: FastDeploy C# API #######################################
install(
  TARGETS ${PROJECT_NAME}
  LIBRARY DESTINATION csharp_lib
  ARCHIVE DESTINATION csharp_lib
  RUNTIME DESTINATION csharp_lib
)

View File

@@ -0,0 +1,53 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
namespace fastdeploy {

/// <summary>Model file formats accepted by FastDeploy model constructors.</summary>
public enum ModelFormat {
  AUTOREC,      ///< Auto recognize the model format by model file name
  PADDLE,       ///< Model with paddlepaddle format
  ONNX,         ///< Model with ONNX format
  RKNN,         ///< Model with RKNN format
  TORCHSCRIPT,  ///< Model with TorchScript format
  SOPHGO,       ///< Model with SOPHGO format
}

/// <summary>Target Rockchip SoC for the RKNPU2 backend.</summary>
public enum rknpu2_CpuName {
  RK356X = 0, /* run on RK356X. */
  RK3588 = 1, /* default, run on RK3588. */
  UNDEFINED,
}

/// <summary>
/// NPU core selection for RKNPU2. Single-core values are bit flags
/// (1, 2, 4) so the combined entries below are OR-ed masks.
/// </summary>
public enum rknpu2_CoreMask {
  RKNN_NPU_CORE_AUTO = 0, //< default, run on NPU core randomly.
  RKNN_NPU_CORE_0 = 1,    //< run on NPU core 0.
  RKNN_NPU_CORE_1 = 2,    //< run on NPU core 1.
  RKNN_NPU_CORE_2 = 4,    //< run on NPU core 2.
  RKNN_NPU_CORE_0_1 =
      RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1,   //< run on NPU core 0 and core 1.
  RKNN_NPU_CORE_0_1_2 =
      RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, //< run on NPU cores 0, 1 and 2.
  RKNN_NPU_CORE_UNDEFINED,
}

/// <summary>Power mode used by the Paddle Lite backend (see SetLitePowerMode).</summary>
public enum LitePowerMode {
  LITE_POWER_HIGH = 0,      ///< Use Lite Backend with high power mode
  LITE_POWER_LOW = 1,       ///< Use Lite Backend with low power mode
  LITE_POWER_FULL = 2,      ///< Use Lite Backend with full power mode
  LITE_POWER_NO_BIND = 3,   ///< Use Lite Backend with no bind power mode
  LITE_POWER_RAND_HIGH = 4, ///< Use Lite Backend with rand high mode
  LITE_POWER_RAND_LOW = 5   ///< Use Lite Backend with rand low power mode
}
}

View File

@@ -0,0 +1,541 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
namespace fastdeploy {
/// <summary>
/// Managed wrapper around the FastDeploy C-API runtime option object
/// (FD_C_RuntimeOptionWrapper). Every public method is a thin 1:1 forwarder
/// to the matching FD_C_RuntimeOptionWrapper* entry point in fastdeploy.dll;
/// no logic lives on the managed side.
/// </summary>
public class RuntimeOption {

  /// <summary>Allocates the underlying native runtime-option wrapper.</summary>
  public RuntimeOption() {
    fd_runtime_option_wrapper = FD_C_CreateRuntimeOptionWrapper();
  }

  // Finalizer frees the native wrapper.
  // NOTE(review): anything that captured GetWrapperPtr() (e.g. model
  // constructors below) must not outlive this instance, or it holds a
  // dangling native pointer once the finalizer runs — verify the native
  // side copies the options. Consider IDisposable for deterministic cleanup.
  ~RuntimeOption() {
    FD_C_DestroyRuntimeOptionWrapper(fd_runtime_option_wrapper);
  }

  // ---- model source ----------------------------------------------------

  /// <summary>Set paths of the model file and (optionally) parameter file.</summary>
  public void SetModelPath(string model_path, string params_path = "",
                           ModelFormat format = ModelFormat.PADDLE) {
    FD_C_RuntimeOptionWrapperSetModelPath(fd_runtime_option_wrapper, model_path,
                                          params_path, format);
  }

  /// <summary>Load the model/parameters from in-memory buffers instead of files.</summary>
  public void SetModelBuffer(string model_buffer, string params_buffer = "",
                             ModelFormat format = ModelFormat.PADDLE) {
    FD_C_RuntimeOptionWrapperSetModelBuffer(
        fd_runtime_option_wrapper, model_buffer, params_buffer, format);
  }

  // ---- device selection ------------------------------------------------

  public void UseCpu() {
    FD_C_RuntimeOptionWrapperUseCpu(fd_runtime_option_wrapper);
  }

  public void UseGpu(int gpu_id = 0) {
    FD_C_RuntimeOptionWrapperUseGpu(fd_runtime_option_wrapper, gpu_id);
  }

  public void
  UseRKNPU2(rknpu2_CpuName rknpu2_name = rknpu2_CpuName.RK3588,
            rknpu2_CoreMask rknpu2_core = rknpu2_CoreMask.RKNN_NPU_CORE_0) {
    FD_C_RuntimeOptionWrapperUseRKNPU2(fd_runtime_option_wrapper, rknpu2_name,
                                       rknpu2_core);
  }

  public void UseTimVX() {
    FD_C_RuntimeOptionWrapperUseTimVX(fd_runtime_option_wrapper);
  }

  public void UseAscend() {
    FD_C_RuntimeOptionWrapperUseAscend(fd_runtime_option_wrapper);
  }

  /// <summary>Run on KunlunXin XPU; parameters mirror the C/C++ API of the same name.</summary>
  public void
  UseKunlunXin(int kunlunxin_id = 0, int l3_workspace_size = 0xfffc00,
               bool locked = false, bool autotune = true,
               string autotune_file = "", string precision = "int16",
               bool adaptive_seqlen = false, bool enable_multi_stream = false) {
    FD_C_RuntimeOptionWrapperUseKunlunXin(
        fd_runtime_option_wrapper, kunlunxin_id, l3_workspace_size, locked,
        autotune, autotune_file, precision, adaptive_seqlen,
        enable_multi_stream);
  }

  public void UseSophgo() {
    FD_C_RuntimeOptionWrapperUseSophgo(fd_runtime_option_wrapper);
  }

  public void SetExternalStream(IntPtr external_stream) {
    FD_C_RuntimeOptionWrapperSetExternalStream(fd_runtime_option_wrapper,
                                               external_stream);
  }

  public void SetCpuThreadNum(int thread_num) {
    FD_C_RuntimeOptionWrapperSetCpuThreadNum(fd_runtime_option_wrapper,
                                             thread_num);
  }

  public void SetOrtGraphOptLevel(int level = -1) {
    FD_C_RuntimeOptionWrapperSetOrtGraphOptLevel(fd_runtime_option_wrapper,
                                                 level);
  }

  // ---- inference backend selection ------------------------------------

  public void UsePaddleBackend() {
    FD_C_RuntimeOptionWrapperUsePaddleBackend(fd_runtime_option_wrapper);
  }

  public void UsePaddleInferBackend() {
    FD_C_RuntimeOptionWrapperUsePaddleInferBackend(fd_runtime_option_wrapper);
  }

  public void UseOrtBackend() {
    FD_C_RuntimeOptionWrapperUseOrtBackend(fd_runtime_option_wrapper);
  }

  public void UseSophgoBackend() {
    FD_C_RuntimeOptionWrapperUseSophgoBackend(fd_runtime_option_wrapper);
  }

  public void UseTrtBackend() {
    FD_C_RuntimeOptionWrapperUseTrtBackend(fd_runtime_option_wrapper);
  }

  public void UsePorosBackend() {
    FD_C_RuntimeOptionWrapperUsePorosBackend(fd_runtime_option_wrapper);
  }

  public void UseOpenVINOBackend() {
    FD_C_RuntimeOptionWrapperUseOpenVINOBackend(fd_runtime_option_wrapper);
  }

  public void UseLiteBackend() {
    FD_C_RuntimeOptionWrapperUseLiteBackend(fd_runtime_option_wrapper);
  }

  public void UsePaddleLiteBackend() {
    FD_C_RuntimeOptionWrapperUsePaddleLiteBackend(fd_runtime_option_wrapper);
  }

  // ---- Paddle Inference backend options -------------------------------

  public void SetPaddleMKLDNN(bool pd_mkldnn = true) {
    FD_C_RuntimeOptionWrapperSetPaddleMKLDNN(fd_runtime_option_wrapper,
                                             pd_mkldnn);
  }

  public void EnablePaddleToTrt() {
    FD_C_RuntimeOptionWrapperEnablePaddleToTrt(fd_runtime_option_wrapper);
  }

  public void DeletePaddleBackendPass(string delete_pass_name) {
    FD_C_RuntimeOptionWrapperDeletePaddleBackendPass(fd_runtime_option_wrapper,
                                                     delete_pass_name);
  }

  public void EnablePaddleLogInfo() {
    FD_C_RuntimeOptionWrapperEnablePaddleLogInfo(fd_runtime_option_wrapper);
  }

  public void DisablePaddleLogInfo() {
    FD_C_RuntimeOptionWrapperDisablePaddleLogInfo(fd_runtime_option_wrapper);
  }

  public void SetPaddleMKLDNNCacheSize(int size) {
    FD_C_RuntimeOptionWrapperSetPaddleMKLDNNCacheSize(fd_runtime_option_wrapper,
                                                      size);
  }

  public void SetOpenVINODevice(string name = "CPU") {
    FD_C_RuntimeOptionWrapperSetOpenVINODevice(fd_runtime_option_wrapper, name);
  }

  // ---- Paddle Lite backend options ------------------------------------

  public void SetLiteOptimizedModelDir(string optimized_model_dir) {
    FD_C_RuntimeOptionWrapperSetLiteOptimizedModelDir(fd_runtime_option_wrapper,
                                                      optimized_model_dir);
  }

  public void SetLiteSubgraphPartitionPath(
      string nnadapter_subgraph_partition_config_path) {
    FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionPath(
        fd_runtime_option_wrapper, nnadapter_subgraph_partition_config_path);
  }

  public void SetLiteSubgraphPartitionConfigBuffer(
      string nnadapter_subgraph_partition_config_buffer) {
    FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionConfigBuffer(
        fd_runtime_option_wrapper, nnadapter_subgraph_partition_config_buffer);
  }

  public void SetLiteContextProperties(string nnadapter_context_properties) {
    FD_C_RuntimeOptionWrapperSetLiteContextProperties(
        fd_runtime_option_wrapper, nnadapter_context_properties);
  }

  public void SetLiteModelCacheDir(string nnadapter_model_cache_dir) {
    FD_C_RuntimeOptionWrapperSetLiteModelCacheDir(fd_runtime_option_wrapper,
                                                  nnadapter_model_cache_dir);
  }

  public void SetLiteMixedPrecisionQuantizationConfigPath(
      string nnadapter_mixed_precision_quantization_config_path) {
    FD_C_RuntimeOptionWrapperSetLiteMixedPrecisionQuantizationConfigPath(
        fd_runtime_option_wrapper,
        nnadapter_mixed_precision_quantization_config_path);
  }

  public void EnableLiteFP16() {
    FD_C_RuntimeOptionWrapperEnableLiteFP16(fd_runtime_option_wrapper);
  }

  public void DisableLiteFP16() {
    FD_C_RuntimeOptionWrapperDisableLiteFP16(fd_runtime_option_wrapper);
  }

  public void EnableLiteInt8() {
    FD_C_RuntimeOptionWrapperEnableLiteInt8(fd_runtime_option_wrapper);
  }

  public void DisableLiteInt8() {
    FD_C_RuntimeOptionWrapperDisableLiteInt8(fd_runtime_option_wrapper);
  }

  public void SetLitePowerMode(LitePowerMode mode) {
    FD_C_RuntimeOptionWrapperSetLitePowerMode(fd_runtime_option_wrapper, mode);
  }

  // ---- TensorRT backend options ---------------------------------------

  public void EnableTrtFP16() {
    FD_C_RuntimeOptionWrapperEnableTrtFP16(fd_runtime_option_wrapper);
  }

  public void DisableTrtFP16() {
    FD_C_RuntimeOptionWrapperDisableTrtFP16(fd_runtime_option_wrapper);
  }

  public void SetTrtCacheFile(string cache_file_path) {
    FD_C_RuntimeOptionWrapperSetTrtCacheFile(fd_runtime_option_wrapper,
                                             cache_file_path);
  }

  public void EnablePinnedMemory() {
    FD_C_RuntimeOptionWrapperEnablePinnedMemory(fd_runtime_option_wrapper);
  }

  public void DisablePinnedMemory() {
    FD_C_RuntimeOptionWrapperDisablePinnedMemory(fd_runtime_option_wrapper);
  }

  public void EnablePaddleTrtCollectShape() {
    FD_C_RuntimeOptionWrapperEnablePaddleTrtCollectShape(
        fd_runtime_option_wrapper);
  }

  public void DisablePaddleTrtCollectShape() {
    FD_C_RuntimeOptionWrapperDisablePaddleTrtCollectShape(
        fd_runtime_option_wrapper);
  }

  public void SetOpenVINOStreams(int num_streams) {
    FD_C_RuntimeOptionWrapperSetOpenVINOStreams(fd_runtime_option_wrapper,
                                                num_streams);
  }

  public void UseIpu(int device_num = 1, int micro_batch_size = 1,
                     bool enable_pipelining = false, int batches_per_step = 1) {
    FD_C_RuntimeOptionWrapperUseIpu(fd_runtime_option_wrapper, device_num,
                                    micro_batch_size, enable_pipelining,
                                    batches_per_step);
  }

  /// <summary>Raw native handle; used by model wrappers at construction time.</summary>
  public IntPtr GetWrapperPtr() { return fd_runtime_option_wrapper; }

  // Below are underlying C api
  private IntPtr fd_runtime_option_wrapper;

  // NOTE(review): C# `bool` in these signatures marshals as a 4-byte Win32
  // BOOL by default; if the native header declares C99 `bool` (1 byte) the
  // marshaling is mismatched — verify against the fastdeploy C headers.

  [DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateRuntimeOptionWrapper")]
  private static extern IntPtr FD_C_CreateRuntimeOptionWrapper();

  [DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyRuntimeOptionWrapper")]
  private static extern void
  FD_C_DestroyRuntimeOptionWrapper(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetModelPath")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetModelPath(IntPtr fd_runtime_option_wrapper,
                                        string model_path, string params_path,
                                        ModelFormat format);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetModelBuffer")]
  private static extern void FD_C_RuntimeOptionWrapperSetModelBuffer(
      IntPtr fd_runtime_option_wrapper, string model_buffer,
      string params_buffer, ModelFormat format);

  [DllImport("fastdeploy.dll", EntryPoint = "FD_C_RuntimeOptionWrapperUseCpu")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseCpu(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll", EntryPoint = "FD_C_RuntimeOptionWrapperUseGpu")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseGpu(IntPtr fd_runtime_option_wrapper, int gpu_id);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseRKNPU2")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseRKNPU2(IntPtr fd_runtime_option_wrapper,
                                     rknpu2_CpuName rknpu2_name,
                                     rknpu2_CoreMask rknpu2_core);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseTimVX")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseTimVX(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseAscend")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseAscend(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseKunlunXin")]
  private static extern void FD_C_RuntimeOptionWrapperUseKunlunXin(
      IntPtr fd_runtime_option_wrapper, int kunlunxin_id, int l3_workspace_size,
      bool locked, bool autotune, string autotune_file, string precision,
      bool adaptive_seqlen, bool enable_multi_stream);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseSophgo")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseSophgo(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetExternalStream")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetExternalStream(IntPtr fd_runtime_option_wrapper,
                                             IntPtr external_stream);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetCpuThreadNum")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetCpuThreadNum(IntPtr fd_runtime_option_wrapper,
                                           int thread_num);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetOrtGraphOptLevel")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetOrtGraphOptLevel(IntPtr fd_runtime_option_wrapper,
                                               int level);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUsePaddleBackend")]
  private static extern void
  FD_C_RuntimeOptionWrapperUsePaddleBackend(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUsePaddleInferBackend")]
  private static extern void FD_C_RuntimeOptionWrapperUsePaddleInferBackend(
      IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseOrtBackend")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseOrtBackend(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseSophgoBackend")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseSophgoBackend(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseTrtBackend")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseTrtBackend(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUsePorosBackend")]
  private static extern void
  FD_C_RuntimeOptionWrapperUsePorosBackend(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseOpenVINOBackend")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseOpenVINOBackend(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUseLiteBackend")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseLiteBackend(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperUsePaddleLiteBackend")]
  private static extern void FD_C_RuntimeOptionWrapperUsePaddleLiteBackend(
      IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetPaddleMKLDNN")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetPaddleMKLDNN(IntPtr fd_runtime_option_wrapper,
                                           bool pd_mkldnn);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperEnablePaddleToTrt")]
  private static extern void
  FD_C_RuntimeOptionWrapperEnablePaddleToTrt(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperDeletePaddleBackendPass")]
  private static extern void FD_C_RuntimeOptionWrapperDeletePaddleBackendPass(
      IntPtr fd_runtime_option_wrapper, string delete_pass_name);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperEnablePaddleLogInfo")]
  private static extern void FD_C_RuntimeOptionWrapperEnablePaddleLogInfo(
      IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperDisablePaddleLogInfo")]
  private static extern void FD_C_RuntimeOptionWrapperDisablePaddleLogInfo(
      IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetPaddleMKLDNNCacheSize")]
  private static extern void FD_C_RuntimeOptionWrapperSetPaddleMKLDNNCacheSize(
      IntPtr fd_runtime_option_wrapper, int size);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetOpenVINODevice")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetOpenVINODevice(IntPtr fd_runtime_option_wrapper,
                                             string name);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetLiteOptimizedModelDir")]
  private static extern void FD_C_RuntimeOptionWrapperSetLiteOptimizedModelDir(
      IntPtr fd_runtime_option_wrapper, string optimized_model_dir);

  [DllImport("fastdeploy.dll",
             EntryPoint =
                 "FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionPath")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionPath(
      IntPtr fd_runtime_option_wrapper,
      string nnadapter_subgraph_partition_config_path);

  [DllImport(
      "fastdeploy.dll",
      EntryPoint =
          "FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionConfigBuffer")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionConfigBuffer(
      IntPtr fd_runtime_option_wrapper,
      string nnadapter_subgraph_partition_config_buffer);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetLiteContextProperties")]
  private static extern void FD_C_RuntimeOptionWrapperSetLiteContextProperties(
      IntPtr fd_runtime_option_wrapper, string nnadapter_context_properties);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetLiteModelCacheDir")]
  private static extern void FD_C_RuntimeOptionWrapperSetLiteModelCacheDir(
      IntPtr fd_runtime_option_wrapper, string nnadapter_model_cache_dir);

  [DllImport(
      "fastdeploy.dll",
      EntryPoint =
          "FD_C_RuntimeOptionWrapperSetLiteMixedPrecisionQuantizationConfigPath")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetLiteMixedPrecisionQuantizationConfigPath(
      IntPtr fd_runtime_option_wrapper,
      string nnadapter_mixed_precision_quantization_config_path);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperEnableLiteFP16")]
  private static extern void
  FD_C_RuntimeOptionWrapperEnableLiteFP16(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperDisableLiteFP16")]
  private static extern void
  FD_C_RuntimeOptionWrapperDisableLiteFP16(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperEnableLiteInt8")]
  private static extern void
  FD_C_RuntimeOptionWrapperEnableLiteInt8(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperDisableLiteInt8")]
  private static extern void
  FD_C_RuntimeOptionWrapperDisableLiteInt8(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetLitePowerMode")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetLitePowerMode(IntPtr fd_runtime_option_wrapper,
                                            LitePowerMode mode);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperEnableTrtFP16")]
  private static extern void
  FD_C_RuntimeOptionWrapperEnableTrtFP16(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperDisableTrtFP16")]
  private static extern void
  FD_C_RuntimeOptionWrapperDisableTrtFP16(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetTrtCacheFile")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetTrtCacheFile(IntPtr fd_runtime_option_wrapper,
                                           string cache_file_path);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperEnablePinnedMemory")]
  private static extern void
  FD_C_RuntimeOptionWrapperEnablePinnedMemory(IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperDisablePinnedMemory")]
  private static extern void FD_C_RuntimeOptionWrapperDisablePinnedMemory(
      IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint =
                 "FD_C_RuntimeOptionWrapperEnablePaddleTrtCollectShape")]
  private static extern void
  FD_C_RuntimeOptionWrapperEnablePaddleTrtCollectShape(
      IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint =
                 "FD_C_RuntimeOptionWrapperDisablePaddleTrtCollectShape")]
  private static extern void
  FD_C_RuntimeOptionWrapperDisablePaddleTrtCollectShape(
      IntPtr fd_runtime_option_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_RuntimeOptionWrapperSetOpenVINOStreams")]
  private static extern void
  FD_C_RuntimeOptionWrapperSetOpenVINOStreams(IntPtr fd_runtime_option_wrapper,
                                              int num_streams);

  [DllImport("fastdeploy.dll", EntryPoint = "FD_C_RuntimeOptionWrapperUseIpu")]
  private static extern void
  FD_C_RuntimeOptionWrapperUseIpu(IntPtr fd_runtime_option_wrapper,
                                  int device_num, int micro_batch_size,
                                  bool enable_pipelining, int batches_per_step);
}
}

View File

@@ -0,0 +1,125 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using fastdeploy.vision;
namespace fastdeploy {
namespace types_internal_c {

// Blittable mirror structs for the FastDeploy C-API result types. Field order
// and types must match the native C structs exactly — do not reorder fields.
// `data` members are raw native pointers; the element type each one points to
// is noted alongside. Ownership stays with the native side (freed via the
// FD_C_Destroy* functions), so these structs must not outlive their source.

[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayUint8 {
  public nuint size;
  public IntPtr data; // byte[]
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayInt32 {
  public nuint size;
  public IntPtr data; // int[]
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArraySize {
  public nuint size;
  public IntPtr data; // nuint[]
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayInt64 {
  public nuint size;
  public IntPtr data; // long[]
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayFloat {
  public nuint size;
  public IntPtr data; // float[]
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_Cstr {
  public nuint size;
  public string data;
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayCstr {
  public nuint size;
  public IntPtr data; // FD_Cstr[]
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_TwoDimArraySize {
  public nuint size;
  public IntPtr data; // FD_OneDimArraySize[]
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_TwoDimArrayFloat {
  public nuint size;
  public IntPtr data; // FD_OneDimArrayFloat[]
}

// Discriminator carried inside each result struct; mirrors the C enum.
public enum FD_ResultType {
  UNKNOWN_RESULT,
  CLASSIFY,
  DETECTION,
  SEGMENTATION,
  OCR,
  MOT,
  FACE_DETECTION,
  FACE_ALIGNMENT,
  FACE_RECOGNITION,
  MATTING,
  MASK,
  KEYPOINT_DETECTION,
  HEADPOSE
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_ClassifyResult {
  public FD_OneDimArrayInt32 label_ids;
  public FD_OneDimArrayFloat scores;
  public FD_ResultType type;
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_Mask {
  public FD_OneDimArrayUint8 data;
  public FD_OneDimArrayInt64 shape;
  public FD_ResultType type;
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimMask {
  // NOTE(review): `size` is signed `nint` here while every sibling array
  // struct uses unsigned `nuint` — confirm against the native FD_OneDimMask
  // declaration; a signedness mismatch would be benign in layout but is
  // suspicious.
  public nint size;
  public IntPtr data; // FD_Mask*
}

[StructLayout(LayoutKind.Sequential)]
public struct FD_DetectionResult {
  public FD_TwoDimArrayFloat boxes;
  public FD_OneDimArrayFloat scores;
  public FD_OneDimArrayInt32 label_ids;
  public FD_OneDimMask masks;
  [MarshalAs(UnmanagedType.U1)]
  public bool contain_masks;
  public FD_ResultType type;
}
}
}

View File

@@ -0,0 +1,100 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using System.Collections.Generic;
using OpenCvSharp;
using fastdeploy.types_internal_c;
namespace fastdeploy {
namespace vision {
namespace classification {
/// <summary>
/// C# binding for the FastDeploy PaddleClas image-classification model.
/// Owns a native FD_C_PaddleClasModelWrapper created from the given model
/// files and forwards Predict() to the C API in fastdeploy.dll.
/// </summary>
// Fix: declared `public` — the original had no access modifier, which makes
// the class internal and unusable by consumers, inconsistent with the sibling
// `public class PPYOLOE` and `public class RuntimeOption`.
public class PaddleClasModel {

  /// <param name="model_file">Path of the model file.</param>
  /// <param name="params_file">Path of the parameter file.</param>
  /// <param name="config_file">Path of the inference configuration file.</param>
  /// <param name="custom_option">Runtime options; a default-constructed
  /// RuntimeOption is used when null.</param>
  /// <param name="model_format">Format of the model file.</param>
  public PaddleClasModel(string model_file, string params_file,
                         string config_file, RuntimeOption custom_option = null,
                         ModelFormat model_format = ModelFormat.PADDLE) {
    if (custom_option == null) {
      custom_option = new RuntimeOption();
    }
    fd_paddleclas_model_wrapper = FD_C_CreatePaddleClasModelWrapper(
        model_file, params_file, config_file, custom_option.GetWrapperPtr(),
        model_format);
  }

  // Finalizer releases the native model wrapper.
  ~PaddleClasModel() {
    FD_C_DestroyPaddleClasModelWrapper(fd_paddleclas_model_wrapper);
  }

  /// <summary>
  /// Run classification on one image and return the managed result.
  /// The intermediate native result objects are freed before returning.
  /// </summary>
  /// <param name="img">Input image (OpenCvSharp Mat, HWC/BGR as produced by imread).</param>
  public ClassifyResult Predict(Mat img) {
    IntPtr fd_classify_result_wrapper_ptr = FD_C_CreateClassifyResultWrapper();
    // NOTE(review): the bool return of the native predict call is ignored;
    // consider surfacing a failure instead of converting a garbage result.
    FD_C_PaddleClasModelWrapperPredict(
        fd_paddleclas_model_wrapper, img.CvPtr,
        fd_classify_result_wrapper_ptr); // predict
    IntPtr fd_classify_result_ptr = FD_C_ClassifyResultWrapperGetData(
        fd_classify_result_wrapper_ptr); // get result from wrapper
    FD_ClassifyResult fd_classify_result =
        (FD_ClassifyResult)Marshal.PtrToStructure(fd_classify_result_ptr,
                                                  typeof(FD_ClassifyResult));
    ClassifyResult classify_result =
        ConvertResult.ConvertCResultToClassifyResult(fd_classify_result);
    FD_C_DestroyClassifyResultWrapper(
        fd_classify_result_wrapper_ptr); // free fd_classify_result_wrapper_ptr
    FD_C_DestroyClassifyResult(
        fd_classify_result_ptr); // free fd_classify_result_ptr
    return classify_result;
  }

  // below are underlying C api
  private IntPtr fd_paddleclas_model_wrapper;

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_CreatePaddleClasModelWrapper")]
  private static extern IntPtr FD_C_CreatePaddleClasModelWrapper(
      string model_file, string params_file, string config_file,
      IntPtr fd_runtime_option_wrapper, ModelFormat model_format);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_DestroyPaddleClasModelWrapper")]
  private static extern void
  FD_C_DestroyPaddleClasModelWrapper(IntPtr fd_paddleclas_model_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_PaddleClasModelWrapperPredict")]
  private static extern bool
  FD_C_PaddleClasModelWrapperPredict(IntPtr fd_paddleclas_model_wrapper,
                                     IntPtr img,
                                     IntPtr fd_classify_result_wrapper);

  [DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateClassifyResultWrapper")]
  private static extern IntPtr FD_C_CreateClassifyResultWrapper();

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_DestroyClassifyResultWrapper")]
  private static extern void
  FD_C_DestroyClassifyResultWrapper(IntPtr fd_classify_result_wrapper);

  [DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyClassifyResult")]
  private static extern void
  FD_C_DestroyClassifyResult(IntPtr fd_classify_result);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_ClassifyResultWrapperGetData")]
  private static extern IntPtr
  FD_C_ClassifyResultWrapperGetData(IntPtr fd_classify_result_wrapper);

  [DllImport("fastdeploy.dll",
             EntryPoint = "FD_C_CreateClassifyResultWrapperFromData")]
  private static extern IntPtr
  FD_C_CreateClassifyResultWrapperFromData(IntPtr fd_classify_result);
}
}
}
}

View File

@@ -0,0 +1,94 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using System.Collections.Generic;
using OpenCvSharp;
using fastdeploy.types_internal_c;
namespace fastdeploy {
namespace vision {
namespace detection {
/// <summary>
/// C# binding for the PaddleDetection PP-YOLOE model, backed by the
/// FastDeploy C API (fastdeploy.dll) via P/Invoke.
/// </summary>
public class PPYOLOE {
/// <summary>Creates the native PP-YOLOE wrapper from Paddle model files.</summary>
/// <param name="model_file">Model file path.</param>
/// <param name="params_file">Parameters file path.</param>
/// <param name="config_file">Deployment yaml config file path.</param>
/// <param name="custom_option">Runtime options; a default RuntimeOption is created when null.</param>
/// <param name="model_format">Model format; Paddle by default.</param>
public PPYOLOE(string model_file, string params_file, string config_file,
RuntimeOption custom_option = null,
ModelFormat model_format = ModelFormat.PADDLE) {
if (custom_option == null) {
custom_option = new RuntimeOption();
}
// Note: the native entry point really is named "Creates..." (with an s);
// the string must match the exported symbol exactly.
fd_ppyoloe_wrapper =
FD_C_CreatesPPYOLOEWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
// Finalizer releases the native model wrapper created in the constructor.
~PPYOLOE() { FD_C_DestroyPPYOLOEWrapper(fd_ppyoloe_wrapper); }
/// <summary>
/// Runs detection on one image and returns a managed DetectionResult.
/// A native result wrapper is created, filled by Predict, converted into
/// managed data, then both native objects are destroyed — the ordering of
/// these calls is significant.
/// </summary>
/// <param name="img">Input image (OpenCvSharp Mat).</param>
public DetectionResult Predict(Mat img) {
IntPtr fd_detection_result_wrapper_ptr =
FD_C_CreateDetectionResultWrapper();
FD_C_PPYOLOEWrapperPredict(fd_ppyoloe_wrapper, img.CvPtr,
fd_detection_result_wrapper_ptr); // predict
IntPtr fd_detection_result_ptr = FD_C_DetectionResultWrapperGetData(
fd_detection_result_wrapper_ptr); // get result from wrapper
FD_DetectionResult fd_detection_result =
(FD_DetectionResult)Marshal.PtrToStructure(fd_detection_result_ptr,
typeof(FD_DetectionResult));
DetectionResult detection_result =
ConvertResult.ConvertCResultToDetectionResult(fd_detection_result);
FD_C_DestroyDetectionResultWrapper(
fd_detection_result_wrapper_ptr); // free fd_detection_result_wrapper_ptr
FD_C_DestroyDetectionResult(
fd_detection_result_ptr); // free fd_detection_result_ptr
return detection_result;
}
// below are underlying C api
private IntPtr fd_ppyoloe_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPPYOLOEWrapper")]
private static extern IntPtr FD_C_CreatesPPYOLOEWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPPYOLOEWrapper")]
private static extern void
FD_C_DestroyPPYOLOEWrapper(IntPtr fd_ppyoloe_wrapper);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_PPYOLOEWrapperPredict")]
private static extern bool
FD_C_PPYOLOEWrapperPredict(IntPtr fd_ppyoloe_wrapper, IntPtr img,
IntPtr fd_detection_result_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_CreateDetectionResultWrapper")]
private static extern IntPtr FD_C_CreateDetectionResultWrapper();
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_DestroyDetectionResultWrapper")]
private static extern void
FD_C_DestroyDetectionResultWrapper(IntPtr fd_detection_result_wrapper);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyDetectionResult")]
private static extern void
FD_C_DestroyDetectionResult(IntPtr fd_detection_result);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_DetectionResultWrapperGetData")]
private static extern IntPtr
FD_C_DetectionResultWrapperGetData(IntPtr fd_detection_result_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_CreateDetectionResultWrapperFromData")]
private static extern IntPtr
FD_C_CreateDetectionResultWrapperFromData(IntPtr fd_detection_result);
}
}
}
}

View File

@@ -0,0 +1,272 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using System.Collections.Generic;
using fastdeploy.types_internal_c;
namespace fastdeploy {
namespace vision {
/// <summary>
/// Kind tag carried by vision result structures; values mirror the
/// FD_ResultType enum of the FastDeploy C API (conversion is done by a
/// direct numeric cast, so the member order must stay in sync).
/// </summary>
public enum ResultType {
UNKNOWN_RESULT,
CLASSIFY,
DETECTION,
SEGMENTATION,
OCR,
MOT,
FACE_DETECTION,
FACE_ALIGNMENT,
FACE_RECOGNITION,
MATTING,
MASK,
KEYPOINT_DETECTION,
HEADPOSE
}
/// <summary>
/// Managed counterpart of FD_Mask: a flattened mask buffer plus its shape.
/// </summary>
public struct Mask {
// Flattened mask bytes.
public List<byte> data;
// Dimensions of the mask buffer.
public List<long> shape;
// Always ResultType.MASK for this struct.
public ResultType type;
public Mask() {
this.data = new List<byte>();
this.shape = new List<long>();
this.type = ResultType.MASK;
}
}
/// <summary>
/// Managed classification result: parallel lists of label ids and scores.
/// </summary>
public struct ClassifyResult {
// Predicted class ids; index-aligned with scores.
public List<int> label_ids;
// Confidence for each predicted class.
public List<float> scores;
// Always ResultType.CLASSIFY for this struct.
public ResultType type;
public ClassifyResult() {
this.label_ids = new List<int>();
this.scores = new List<float>();
this.type = ResultType.CLASSIFY;
}
}
/// <summary>
/// Managed detection result: per-detection boxes, scores, label ids and
/// (optionally) instance masks. All lists are index-aligned.
/// </summary>
public struct DetectionResult {
// One float[] per detection; coordinates of the bounding box.
public List<float[]> boxes;
// Confidence for each detection.
public List<float> scores;
// Class id for each detection.
public List<int> label_ids;
// Instance masks; only meaningful when contain_masks is true.
public List<Mask> masks;
// Whether masks were produced by the model.
public bool contain_masks;
// Always ResultType.DETECTION for this struct.
public ResultType type;
public DetectionResult() {
this.boxes = new List<float[]>();
this.scores = new List<float>();
this.label_ids = new List<int>();
this.masks = new List<Mask>();
this.contain_masks = false;
this.type = ResultType.DETECTION;
}
}
/// <summary>
/// Converts between managed vision result structs and their C-ABI
/// counterparts (FD_*). The "...ToCResult" directions allocate unmanaged
/// (HGlobal) buffers; the caller owns that memory and must release it via
/// the corresponding C API destroy calls.
/// Fixes vs. previous version: ConvertCResultToDetectionResult now actually
/// stores the copied mask data/shape into the managed Mask (they were
/// discarded before), a leftover debug Console.WriteLine was removed, and
/// empty lists no longer crash on arr[0] / Marshal.Copy with a null pointer.
/// </summary>
public class ConvertResult {
/// <summary>Managed ClassifyResult -> C FD_ClassifyResult (allocates unmanaged buffers).</summary>
public static FD_ClassifyResult
ConvertClassifyResultToCResult(ClassifyResult classify_result) {
FD_ClassifyResult fd_classify_result = new FD_ClassifyResult();
// copy label_ids into unmanaged memory
fd_classify_result.label_ids.size = (uint)classify_result.label_ids.Count;
int[] label_ids = new int[fd_classify_result.label_ids.size];
classify_result.label_ids.CopyTo(label_ids);
if (label_ids.Length > 0) {  // guard: label_ids[0] would throw on empty input
int size = Marshal.SizeOf(label_ids[0]) * label_ids.Length;
fd_classify_result.label_ids.data = Marshal.AllocHGlobal(size);
Marshal.Copy(label_ids, 0, fd_classify_result.label_ids.data,
label_ids.Length);
}
// copy scores into unmanaged memory
fd_classify_result.scores.size = (uint)classify_result.scores.Count;
float[] scores = new float[fd_classify_result.scores.size];
classify_result.scores.CopyTo(scores);
if (scores.Length > 0) {
int size = Marshal.SizeOf(scores[0]) * scores.Length;
fd_classify_result.scores.data = Marshal.AllocHGlobal(size);
Marshal.Copy(scores, 0, fd_classify_result.scores.data, scores.Length);
}
fd_classify_result.type = (FD_ResultType)classify_result.type;
return fd_classify_result;
}
/// <summary>C FD_ClassifyResult -> managed ClassifyResult (copies out of unmanaged memory).</summary>
public static ClassifyResult
ConvertCResultToClassifyResult(FD_ClassifyResult fd_classify_result) {
ClassifyResult classify_result = new ClassifyResult();
// copy label_ids
int[] label_ids = new int[fd_classify_result.label_ids.size];
if (label_ids.Length > 0) {  // guard: Marshal.Copy rejects a null source even for 0 elements
Marshal.Copy(fd_classify_result.label_ids.data, label_ids, 0,
label_ids.Length);
}
classify_result.label_ids = new List<int>(label_ids);
// copy scores
float[] scores = new float[fd_classify_result.scores.size];
if (scores.Length > 0) {
Marshal.Copy(fd_classify_result.scores.data, scores, 0, scores.Length);
}
classify_result.scores = new List<float>(scores);
classify_result.type = (ResultType)fd_classify_result.type;
return classify_result;
}
/// <summary>Managed DetectionResult -> C FD_DetectionResult (allocates unmanaged buffers).</summary>
public static FD_DetectionResult
ConvertDetectionResultToCResult(DetectionResult detection_result) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
// copy boxes: each box becomes an FD_OneDimArraySize whose data field
// points at an unmanaged float buffer.
// NOTE(review): each box is copied into a buffer of exactly 4 floats —
// assumes every detection_result.boxes[i] has 4 coordinates; confirm.
int boxes_coordinate_dim = 4;
int size;
fd_detection_result.boxes.size = (uint)detection_result.boxes.Count;
FD_OneDimArraySize[] boxes =
new FD_OneDimArraySize[fd_detection_result.boxes.size];
for (int i = 0; i < (int)fd_detection_result.boxes.size; i++) {
boxes[i].size = (uint)detection_result.boxes[i].Length;
float[] boxes_i = new float[boxes_coordinate_dim];
detection_result.boxes[i].CopyTo(boxes_i, 0);
size = Marshal.SizeOf(boxes_i[0]) * boxes_i.Length;
boxes[i].data = Marshal.AllocHGlobal(size);
Marshal.Copy(boxes_i, 0, boxes[i].data, boxes_i.Length);
}
if (boxes.Length > 0) {  // guard: boxes[0] would throw on an empty result
size = Marshal.SizeOf(boxes[0]) * boxes.Length;
fd_detection_result.boxes.data = Marshal.AllocHGlobal(size);
for (int i = 0; i < boxes.Length; i++) {
Marshal.StructureToPtr(
boxes[i],
fd_detection_result.boxes.data + i * Marshal.SizeOf(boxes[0]), true);
}
}
// copy scores
fd_detection_result.scores.size = (uint)detection_result.scores.Count;
float[] scores = new float[fd_detection_result.scores.size];
detection_result.scores.CopyTo(scores);
if (scores.Length > 0) {
size = Marshal.SizeOf(scores[0]) * scores.Length;
fd_detection_result.scores.data = Marshal.AllocHGlobal(size);
Marshal.Copy(scores, 0, fd_detection_result.scores.data, scores.Length);
}
// copy label_ids
fd_detection_result.label_ids.size = (uint)detection_result.label_ids.Count;
int[] label_ids = new int[fd_detection_result.label_ids.size];
detection_result.label_ids.CopyTo(label_ids);
if (label_ids.Length > 0) {
size = Marshal.SizeOf(label_ids[0]) * label_ids.Length;
fd_detection_result.label_ids.data = Marshal.AllocHGlobal(size);
Marshal.Copy(label_ids, 0, fd_detection_result.label_ids.data,
label_ids.Length);
}
// copy masks
fd_detection_result.masks.size = detection_result.masks.Count;
FD_Mask[] masks = new FD_Mask[fd_detection_result.masks.size];
for (int i = 0; i < (int)fd_detection_result.masks.size; i++) {
// copy data in mask
masks[i].data.size = (uint)detection_result.masks[i].data.Count;
byte[] masks_data_i = new byte[masks[i].data.size];
detection_result.masks[i].data.CopyTo(masks_data_i);
size = Marshal.SizeOf(masks_data_i[0]) * masks_data_i.Length;
masks[i].data.data = Marshal.AllocHGlobal(size);
Marshal.Copy(masks_data_i, 0, masks[i].data.data, masks_data_i.Length);
// copy shape in mask
masks[i].shape.size = (uint)detection_result.masks[i].shape.Count;
long[] masks_shape_i = new long[masks[i].shape.size];
detection_result.masks[i].shape.CopyTo(masks_shape_i);
size = Marshal.SizeOf(masks_shape_i[0]) * masks_shape_i.Length;
masks[i].shape.data = Marshal.AllocHGlobal(size);
Marshal.Copy(masks_shape_i, 0, masks[i].shape.data, masks_shape_i.Length);
// copy type
masks[i].type = (FD_ResultType)detection_result.masks[i].type;
}
if (fd_detection_result.masks.size != 0) {
size = Marshal.SizeOf(masks[0]) * masks.Length;
fd_detection_result.masks.data = Marshal.AllocHGlobal(size);
for (int i = 0; i < masks.Length; i++) {
Marshal.StructureToPtr(masks[i],
fd_detection_result.masks.data +
i * Marshal.SizeOf(masks[0]),
true);
}
}
fd_detection_result.contain_masks = detection_result.contain_masks;
fd_detection_result.type = (FD_ResultType)detection_result.type;
return fd_detection_result;
}
/// <summary>C FD_DetectionResult -> managed DetectionResult (copies out of unmanaged memory).</summary>
public static DetectionResult
ConvertCResultToDetectionResult(FD_DetectionResult fd_detection_result) {
DetectionResult detection_result = new DetectionResult();
// copy boxes
detection_result.boxes = new List<float[]>();
FD_OneDimArraySize[] boxes =
new FD_OneDimArraySize[fd_detection_result.boxes.size];
for (int i = 0; i < (int)fd_detection_result.boxes.size; i++) {
boxes[i] = (FD_OneDimArraySize)Marshal.PtrToStructure(
fd_detection_result.boxes.data + i * Marshal.SizeOf(boxes[0]),
typeof(FD_OneDimArraySize));
float[] box_i = new float[boxes[i].size];
Marshal.Copy(boxes[i].data, box_i, 0, box_i.Length);
detection_result.boxes.Add(box_i);
}
// copy scores
float[] scores = new float[fd_detection_result.scores.size];
if (scores.Length > 0) {
Marshal.Copy(fd_detection_result.scores.data, scores, 0, scores.Length);
}
detection_result.scores = new List<float>(scores);
// copy label_ids
int[] label_ids = new int[fd_detection_result.label_ids.size];
if (label_ids.Length > 0) {
Marshal.Copy(fd_detection_result.label_ids.data, label_ids, 0,
label_ids.Length);
}
detection_result.label_ids = new List<int>(label_ids);
// copy masks
// BUGFIX: previously the copied data/shape arrays were discarded and the
// managed Mask was added empty; they are now stored into the Mask.
detection_result.masks = new List<Mask>();
FD_Mask[] fd_masks = new FD_Mask[fd_detection_result.masks.size];
for (int i = 0; i < (int)fd_detection_result.masks.size; i++) {
fd_masks[i] = (FD_Mask)Marshal.PtrToStructure(
fd_detection_result.masks.data + i * Marshal.SizeOf(fd_masks[0]),
typeof(FD_Mask));
Mask mask_i = new Mask();
byte[] mask_i_data = new byte[fd_masks[i].data.size];
Marshal.Copy(fd_masks[i].data.data, mask_i_data, 0, mask_i_data.Length);
long[] mask_i_shape = new long[fd_masks[i].shape.size];
Marshal.Copy(fd_masks[i].shape.data, mask_i_shape, 0,
mask_i_shape.Length);
mask_i.data = new List<byte>(mask_i_data);
mask_i.shape = new List<long>(mask_i_shape);
mask_i.type = (ResultType)fd_masks[i].type;
detection_result.masks.Add(mask_i);
}
detection_result.contain_masks = fd_detection_result.contain_masks;
detection_result.type = (ResultType)fd_detection_result.type;
return detection_result;
}
}
}
}

View File

@@ -0,0 +1,45 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using System.Collections.Generic;
using OpenCvSharp;
using fastdeploy.types_internal_c;
namespace fastdeploy {
namespace vision {
/// <summary>Visualization helpers backed by the FastDeploy C API.</summary>
public class Visualize {
/// <summary>
/// Draws detection results onto an image via the native FD_C_VisDetection
/// call and returns the rendered image as a new Mat.
/// </summary>
/// <param name="im">Input image (OpenCvSharp Mat).</param>
/// <param name="detection_result">Detections to draw.</param>
/// <param name="score_threshold">Minimum score for a box to be drawn.</param>
/// <param name="line_size">Box line thickness.</param>
/// <param name="font_size">Label font scale.</param>
// NOTE(review): ConvertDetectionResultToCResult allocates unmanaged
// (HGlobal) buffers inside fd_detection_result that are never freed here —
// confirm whether the C side takes ownership; otherwise this leaks per call.
public static Mat VisDetection(Mat im, DetectionResult detection_result,
float score_threshold = 0.0f,
int line_size = 1, float font_size = 0.5f) {
FD_DetectionResult fd_detection_result =
ConvertResult.ConvertDetectionResultToCResult(detection_result);
IntPtr result_ptr =
FD_C_VisDetection(im.CvPtr, ref fd_detection_result, score_threshold,
line_size, font_size);
return new Mat(result_ptr);
}
// Native renderer: returns a pointer to a newly created cv::Mat.
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_VisDetection")]
private static extern IntPtr
FD_C_VisDetection(IntPtr im, ref FD_DetectionResult fd_detection_result,
float score_threshold, int line_size, float font_size);
}
}
}

View File

@@ -2,7 +2,7 @@
# 在 Windows 使用 FastDeploy C++ SDK
【**注意**】**编译只支持Release模式不支持Debug模式**
【**注意**】**编译只支持Release模式不支持Debug模式**
## 1. 准备环境和Windows部署库
<div id="Environment"></div>

View File

@@ -0,0 +1,23 @@
# CMake project for the FastDeploy PP-YOLOE C# inference demo.
# cmake_minimum_required must precede project() so policy defaults are set
# before any project configuration runs.
cmake_minimum_required(VERSION 3.10)
project(infer_demo CSharp)

# Set the C# language version (defaults to 3.0 if not set) and the .NET
# SDK/target framework used for the generated Visual Studio project.
set(CMAKE_CSharp_FLAGS "/langversion:10")
set(CMAKE_DOTNET_TARGET_FRAMEWORK "net6.0")
set(CMAKE_DOTNET_SDK "Microsoft.NET.Sdk")

# Path of the downloaded/extracted FastDeploy SDK.
# option() is for booleans only; a user-supplied path belongs in a CACHE PATH
# variable (still settable with -DFASTDEPLOY_INSTALL_DIR=...).
set(FASTDEPLOY_INSTALL_DIR "" CACHE PATH "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeployCSharp.cmake)

add_executable(infer_ppyoloe_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe.cs)
# Wire up the .NET assembly references exported by the FastDeploy SDK.
set_property(TARGET infer_ppyoloe_demo PROPERTY VS_DOTNET_REFERENCES
  ${FASTDEPLOY_DOTNET_REFERENCES}
)
# NuGet package references (restored via `nuget restore`).
set_property(TARGET infer_ppyoloe_demo
  PROPERTY VS_PACKAGE_REFERENCES ${FASTDEPLOY_PACKAGE_REFERENCES}
)

View File

@@ -0,0 +1,97 @@
English | [简体中文](README_CN.md)
# PaddleDetection C# Deployment Example
This directory provides `infer_xxx.cs` examples to quickly finish the deployment of PaddleDetection models, including PPYOLOE, on CPU/GPU.
Before deployment, two steps require confirmation
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
Please follow below instructions to compile and test in Windows. FastDeploy version 1.0.4 or above (x.x.x>=1.0.4) is required to support this model.
## 1. Download C# package management tool nuget client
> https://dist.nuget.org/win-x86-commandline/v6.4.0/nuget.exe
Add nuget program into system variable **PATH**
## 2. Download model and image for test
> https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz # (unzip it after download)
> https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
## 3. Compile example code
Open `x64 Native Tools Command Prompt for VS 2019` command tool on Windows, cd to the demo path of ppyoloe and execute commands
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\detection\paddledetection\csharp
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=D:\Download\fastdeploy-win-x64-gpu-x.x.x -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2"
nuget restore
msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
```
For more information about how to use FastDeploy SDK to compile a project with Visual Studio 2019. Please refer to
- [Using the FastDeploy C++ SDK on Windows Platform](../../../../../docs/en/faq/use_sdk_on_windows.md)
## 4. Execute compiled program
fastdeploy.dll and related dynamic libraries are required by the program. FastDeploy provide a script to copy all required dll to your program path.
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x
fastdeploy_init.bat install %cd% D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\detection\paddledetection\csharp\build\Release
```
Then you can run your program and test the model with image
```shell
cd Release
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 # CPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1 # GPU
```
## PaddleDetection C# Interface
### Model Class
```c#
fastdeploy.vision.detection.PPYOLOE(
string model_file,
string params_file,
string config_file,
fastdeploy.RuntimeOption runtime_option = null,
fastdeploy.ModelFormat model_format = ModelFormat.PADDLE)
```
> PaddleDetection PPYOLOE initialization.
> **Params**
>> * **model_file**(str): Model file path
>> * **params_file**(str): Parameter file path
>> * **config_file**(str): Configuration file path, which is the deployment yaml file exported by PaddleDetection
>> * **runtime_option**(RuntimeOption): Backend inference configuration. null by default, which is the default configuration
>> * **model_format**(ModelFormat): Model format. Paddle format by default
#### Predict Function
```c#
fastdeploy.DetectionResult Predict(OpenCvSharp.Mat im)
```
> Model prediction interface. Input images and output results directly.
>
> **Params**
>
>> * **im**(Mat): Input images in HWC or BGR format
>
> **Return**
>
>> * **result**(DetectionResult): Detection result, including detection box and confidence of each box. Refer to [Vision Model Prediction Result](../../../../../docs/api/vision_results/) for DetectionResult
- [Model Description](../../)
- [Python Deployment](../python)
- [Vision Model prediction results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../docs/en/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,99 @@
[English](README.md) | 简体中文
# PaddleDetection C#部署示例
本目录下提供`infer_xxx.cs`来调用C# API快速完成PaddleDetection模型PPYOLOE在CPU/GPU上部署的示例。
在部署前,需确认以下两个步骤
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. 根据开发环境下载预编译部署库和samples代码参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
在Windows下执行如下操作完成编译测试支持此模型需保证FastDeploy版本1.0.4以上(x.x.x>=1.0.4)
## 1. 下载C#包管理程序nuget客户端
> https://dist.nuget.org/win-x86-commandline/v6.4.0/nuget.exe
下载完成后将该程序添加到环境变量**PATH**中
## 2. 下载模型文件和测试图片
> https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz # (下载后解压缩)
> https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
## 3. 编译示例代码
本文档编译的示例代码可在解压的库中找到编译工具依赖VS 2019的安装**Windows打开x64 Native Tools Command Prompt for VS 2019命令工具**,通过如下命令开始编译
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\detection\paddledetection\csharp
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=D:\Download\fastdeploy-win-x64-gpu-x.x.x -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2"
nuget restore
msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
```
关于使用Visual Studio 2019创建sln工程或者CMake工程等方式编译的更详细信息可参考如下文档
- [在 Windows 使用 FastDeploy C++ SDK](../../../../../docs/cn/faq/use_sdk_on_windows.md)
- [FastDeploy C++库在Windows上的多种使用方式](../../../../../docs/cn/faq/use_sdk_on_windows_build.md)
## 4. 运行可执行程序
注意Windows上运行时需要将FastDeploy依赖的库拷贝至可执行程序所在目录, 或者配置环境变量。FastDeploy提供了工具帮助我们快速将所有依赖库拷贝至可执行程序所在目录,通过如下命令将所有依赖的dll文件拷贝至可执行程序所在的目录
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x
fastdeploy_init.bat install %cd% D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\detection\paddledetection\csharp\build\Release
```
将dll拷贝到当前路径后准备好模型和图片使用如下命令运行可执行程序即可
```shell
cd Release
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 # CPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1 # GPU
```
## PaddleDetection C#接口
### 模型
```c#
fastdeploy.vision.detection.PPYOLOE(
string model_file,
string params_file,
string config_file,
fastdeploy.RuntimeOption runtime_option = null,
fastdeploy.ModelFormat model_format = ModelFormat.PADDLE)
```
> PaddleDetection PPYOLOE模型加载和初始化。
> **参数**
>> * **model_file**(str): 模型文件路径
>> * **params_file**(str): 参数文件路径
>> * **config_file**(str): 配置文件路径即PaddleDetection导出的部署yaml文件
>> * **runtime_option**(RuntimeOption): 后端推理配置默认为null即采用默认配置
>> * **model_format**(ModelFormat): 模型格式默认为PADDLE格式
#### Predict函数
```c#
fastdeploy.DetectionResult Predict(OpenCvSharp.Mat im)
```
> 模型预测接口,输入图像直接输出检测结果。
>
> **参数**
>
>> * **im**(Mat): 输入图像注意需为HWCBGR格式
>
> **返回值**
>
>> * **result**(DetectionResult): 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
- [模型介绍](../../)
- [Python部署](../python)
- [视觉模型预测结果](../../../../../docs/api/vision_results/)
- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,57 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using OpenCvSharp;
using fastdeploy;
namespace Test
{
/// <summary>
/// Command-line demo: runs PP-YOLOE detection on one image and shows the
/// visualized result. Args: model_dir image_path device(0=CPU, other=GPU).
/// </summary>
public class TestPPYOLOE
{
  public static void Main(string[] args)
  {
    // Guard clause: require model dir, image path and device selector.
    if (args.Length < 3)
    {
      Console.WriteLine(
          "Usage: infer_demo path/to/model_dir path/to/image run_option, " +
          "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0");
      Console.WriteLine("The data type of run_option is int, 0: run with cpu; 1: run with gpu");
      return;
    }

    // Resolve the Paddle model artifacts inside the model directory.
    string modelDir = args[0];
    string imagePath = args[1];
    string modelFile = modelDir + "\\" + "model.pdmodel";
    string paramsFile = modelDir + "\\" + "model.pdiparams";
    string configFile = modelDir + "\\" + "infer_cfg.yml";

    // Select the inference device: 0 -> CPU, anything else -> GPU.
    RuntimeOption runtimeOption = new RuntimeOption();
    int deviceOption = Int32.Parse(args[2]);
    if (deviceOption == 0)
    {
      runtimeOption.UseCpu();
    }
    else
    {
      runtimeOption.UseGpu();
    }

    // Build the model, run prediction and display the visualized detections.
    vision.detection.PPYOLOE model = new vision.detection.PPYOLOE(
        modelFile, paramsFile, configFile, runtimeOption, ModelFormat.PADDLE);
    Mat image = Cv2.ImRead(imagePath);
    vision.DetectionResult result = model.Predict(image);
    Mat rendered = vision.Visualize.VisDetection(image, result, 0, 1, 0.5f);
    Cv2.ImShow("result.png", rendered);
    Cv2.WaitKey(0);
  }
}
}

View File

@@ -106,9 +106,9 @@ void GpuInfer(const std::string& model_dir, const std::string& image_file,
auto vis_im = fastdeploy::vision::VisMatting(im, res);
auto vis_im_with_bg = fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
cv::imwrite("visualized_result_fg.png", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
"and ./visualized_result_fg.jpg"
"and ./visualized_result_fg.png"
<< std::endl;
}

View File

@@ -67,8 +67,8 @@ print(result)
# 可视化结果
vis_im = fd.vision.vis_matting(im, result)
vis_im_with_bg = fd.vision.swap_background(im, bg, result)
cv2.imwrite("visualized_result_fg.jpg", vis_im)
cv2.imwrite("visualized_result_fg.png", vis_im)
cv2.imwrite("visualized_result_replaced_bg.jpg", vis_im_with_bg)
print(
"Visualized result save in ./visualized_result_replaced_bg.jpg and ./visualized_result_fg.jpg"
"Visualized result save in ./visualized_result_replaced_bg.jpg and ./visualized_result_fg.png"
)

View File

@@ -34,4 +34,19 @@ typedef enum _rknpu2_core_mask {
RKNN_NPU_CORE_UNDEFINED,
} CoreMask;
} // namespace rknpu2
// Configuration options for the RKNPU2 inference backend.
struct RKNPU2BackendOption {
  // Target Rockchip CPU; defaults to RK3588.
  rknpu2::CpuName cpu_name = rknpu2::CpuName::RK3588;

  // The specification of NPU core setting. It has the following choices:
  // RKNN_NPU_CORE_AUTO : Referring to automatic mode, meaning that it will
  // select the idle core inside the NPU.
  // RKNN_NPU_CORE_0 : Running on the NPU0 core
  // RKNN_NPU_CORE_1: Running on the NPU1 core
  // RKNN_NPU_CORE_2: Running on the NPU2 core
  // RKNN_NPU_CORE_0_1: Running on both NPU0 and NPU1 core simultaneously.
  // RKNN_NPU_CORE_0_1_2: Running on both NPU0, NPU1 and NPU2 simultaneously.
  rknpu2::CoreMask core_mask = rknpu2::CoreMask::RKNN_NPU_CORE_AUTO;
};
} // namespace fastdeploy

View File

@@ -76,17 +76,24 @@ void RKNPU2Backend::BuildOption(const RKNPU2BackendOption& option) {
}
/***************************************************************
* @name InitFromRKNN
* @name Init
* @brief Initialize RKNN model
* @param model_file: Binary data for the RKNN model or the path of RKNN
*model. params_file: None option: config
* @return bool
* @note None
***************************************************************/
bool RKNPU2Backend::InitFromRKNN(const std::string& model_file,
const RKNPU2BackendOption& option) {
bool RKNPU2Backend::Init(const RuntimeOption& runtime_option) {
if (!(Supported(runtime_option.model_format, Backend::RKNPU2) && Supported(runtime_option.device, Backend::RKNPU2))) {
return false;
}
if (runtime_option.model_from_memory_) {
FDERROR << "RKNPU2 backend doesn't support load model from memory, please load model from disk." << std::endl;
return false;
}
// LoadModel
if (!this->LoadModel((char*)model_file.data())) {
if (!this->LoadModel((char*)runtime_option.model_file.data())) {
FDERROR << "load model failed" << std::endl;
return false;
}
@@ -98,7 +105,7 @@ bool RKNPU2Backend::InitFromRKNN(const std::string& model_file,
}
// BuildOption
this->BuildOption(option);
this->BuildOption(runtime_option.rknpu2_option);
// SetCoreMask if RK3588
if (this->option_.cpu_name == rknpu2::CpuName::RK3588) {
@@ -124,7 +131,7 @@ bool RKNPU2Backend::InitFromRKNN(const std::string& model_file,
* @return bool
* @note Only support RK3588
***************************************************************/
bool RKNPU2Backend::SetCoreMask(const rknpu2::CoreMask& core_mask) {
bool RKNPU2Backend::SetCoreMask(const rknpu2::CoreMask& core_mask) const {
int ret = rknn_set_core_mask(ctx, static_cast<rknn_core_mask>(core_mask));
if (ret != RKNN_SUCC) {
FDERROR << "rknn_set_core_mask fail! ret=" << ret << std::endl;

View File

@@ -24,40 +24,13 @@
#include <vector>
namespace fastdeploy {
struct RKNPU2BackendOption {
rknpu2::CpuName cpu_name = rknpu2::CpuName::RK356X;
// The specification of NPU core setting.It has the following choices :
// RKNN_NPU_CORE_AUTO : Referring to automatic mode, meaning that it will
// select the idle core inside the NPU.
// RKNN_NPU_CORE_0 : Running on the NPU0 core
// RKNN_NPU_CORE_1: Runing on the NPU1 core
// RKNN_NPU_CORE_2: Runing on the NPU2 core
// RKNN_NPU_CORE_0_1: Running on both NPU0 and NPU1 core simultaneously.
// RKNN_NPU_CORE_0_1_2: Running on both NPU0, NPU1 and NPU2 simultaneously.
rknpu2::CoreMask core_mask = rknpu2::CoreMask::RKNN_NPU_CORE_AUTO;
};
class RKNPU2Backend : public BaseBackend {
public:
RKNPU2Backend() = default;
virtual ~RKNPU2Backend();
// RKNN API
bool LoadModel(void* model);
bool GetSDKAndDeviceVersion();
bool SetCoreMask(const rknpu2::CoreMask& core_mask);
bool GetModelInputOutputInfos();
// BaseBackend API
void BuildOption(const RKNPU2BackendOption& option);
bool InitFromRKNN(const std::string& model_file,
const RKNPU2BackendOption& option = RKNPU2BackendOption());
bool Init(const RuntimeOption& runtime_option);
int NumInputs() const override {
return static_cast<int>(inputs_desc_.size());
@@ -75,6 +48,18 @@ class RKNPU2Backend : public BaseBackend {
bool copy_to_fd = true) override;
private:
// BaseBackend API
void BuildOption(const RKNPU2BackendOption& option);
// RKNN API
bool LoadModel(void* model);
bool GetSDKAndDeviceVersion();
bool SetCoreMask(const rknpu2::CoreMask& core_mask) const;
bool GetModelInputOutputInfos();
// The object of rknn context.
rknn_context ctx{};
// The structure rknn_sdk_version is used to indicate the version

View File

@@ -96,6 +96,37 @@ static std::map<Device, std::vector<Backend>>
{Device::SOPHGOTPUD, {Backend::SOPHGOTPU}}
};
// Returns true when `backend` is registered for the given model `format`;
// otherwise logs the set of supported backends and returns false.
inline bool Supported(ModelFormat format, Backend backend) {
  const auto it = s_default_backends_by_format.find(format);
  if (it == s_default_backends_by_format.end()) {
    FDERROR << "Didn't find format is registered in s_default_backends_by_format." << std::endl;
    return false;
  }
  // Accept only a backend listed for this format.
  for (const auto& candidate : it->second) {
    if (candidate == backend) {
      return true;
    }
  }
  const std::string supported = Str(it->second);
  FDERROR << backend << " only supports " << supported << ", but now it's " << format << "." << std::endl;
  return false;
}
// Returns true when `backend` is registered for the given `device`;
// otherwise logs the set of supported backends and returns false.
inline bool Supported(Device device, Backend backend) {
  const auto it = s_default_backends_by_device.find(device);
  if (it == s_default_backends_by_device.end()) {
    FDERROR << "Didn't find device is registered in s_default_backends_by_device." << std::endl;
    return false;
  }
  // Accept only a backend listed for this device.
  for (const auto& candidate : it->second) {
    if (candidate == backend) {
      return true;
    }
  }
  const std::string supported = Str(it->second);
  FDERROR << backend << " only supports " << supported << ", but now it's " << device << "." << std::endl;
  return false;
}
FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& o, const Backend& b);
FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& o, const Device& d);

View File

@@ -169,6 +169,7 @@ bool Runtime::Init(const RuntimeOption& _option) {
<< std::endl;
return false;
}
backend_->benchmark_option_ = option.benchmark_option;
return true;
}
@@ -282,7 +283,6 @@ void Runtime::CreatePaddleBackend() {
option.paddle_infer_option.trt_option.gpu_id = option.device_id;
backend_ = utils::make_unique<PaddleBackend>();
auto casted_backend = dynamic_cast<PaddleBackend*>(backend_.get());
casted_backend->benchmark_option_ = option.benchmark_option;
if (option.model_from_memory_) {
FDASSERT(
@@ -313,7 +313,6 @@ void Runtime::CreatePaddleBackend() {
void Runtime::CreateOpenVINOBackend() {
#ifdef ENABLE_OPENVINO_BACKEND
backend_ = utils::make_unique<OpenVINOBackend>();
backend_->benchmark_option_ = option.benchmark_option;
FDASSERT(backend_->Init(option), "Failed to initialize OpenVINOBackend.");
#else
FDASSERT(false,
@@ -327,7 +326,6 @@ void Runtime::CreateOpenVINOBackend() {
void Runtime::CreateOrtBackend() {
#ifdef ENABLE_ORT_BACKEND
backend_ = utils::make_unique<OrtBackend>();
backend_->benchmark_option_ = option.benchmark_option;
FDASSERT(backend_->Init(option), "Failed to initialize Backend::ORT.");
#else
@@ -348,7 +346,6 @@ void Runtime::CreateTrtBackend() {
option.trt_option.enable_pinned_memory = option.enable_pinned_memory;
option.trt_option.external_stream_ = option.external_stream_;
backend_ = utils::make_unique<TrtBackend>();
backend_->benchmark_option_ = option.benchmark_option;
FDASSERT(backend_->Init(option), "Failed to initialize TensorRT backend.");
#else
FDASSERT(false,
@@ -362,7 +359,6 @@ void Runtime::CreateTrtBackend() {
void Runtime::CreateLiteBackend() {
#ifdef ENABLE_LITE_BACKEND
backend_ = utils::make_unique<LiteBackend>();
backend_->benchmark_option_ = option.benchmark_option;
FDASSERT(backend_->Init(option),
"Load model from nb file failed while initializing LiteBackend.");
@@ -376,20 +372,9 @@ void Runtime::CreateLiteBackend() {
}
void Runtime::CreateRKNPU2Backend() {
FDASSERT(option.model_from_memory_ == false,
"RKNPU2Backend don't support to load model from memory");
FDASSERT(option.device == Device::RKNPU,
"Backend::RKNPU2 only supports Device::RKNPU2");
FDASSERT(option.model_format == ModelFormat::RKNN,
"RKNPU2Backend only support model format of ModelFormat::RKNN");
#ifdef ENABLE_RKNPU2_BACKEND
auto rknpu2_option = RKNPU2BackendOption();
rknpu2_option.cpu_name = option.rknpu2_cpu_name_;
rknpu2_option.core_mask = option.rknpu2_core_mask_;
backend_ = utils::make_unique<RKNPU2Backend>();
auto casted_backend = dynamic_cast<RKNPU2Backend*>(backend_.get());
FDASSERT(casted_backend->InitFromRKNN(option.model_file, rknpu2_option),
"Load model from nb file failed while initializing LiteBackend.");
FDASSERT(backend_->Init(option), "Failed to initialize RKNPU2 backend.");
#else
FDASSERT(false,
"RKNPU2Backend is not available, please compiled with "

View File

@@ -60,8 +60,8 @@ void RuntimeOption::UseCpu() { device = Device::CPU; }
// Configure the runtime to deploy on RKNPU2 (Rockchip NPU).
// \param rknpu2_name target SoC CPU name (defaulting behavior set by caller).
// \param rknpu2_core NPU core mask selecting which cores to run on.
void RuntimeOption::UseRKNPU2(fastdeploy::rknpu2::CpuName rknpu2_name,
                              fastdeploy::rknpu2::CoreMask rknpu2_core) {
  // Settings now live in the RKNPU2BackendOption struct; the deprecated
  // rknpu2_cpu_name_/rknpu2_core_mask_ members were removed from
  // RuntimeOption in this change, so the stale assignments to them are gone.
  rknpu2_option.cpu_name = rknpu2_name;
  rknpu2_option.core_mask = rknpu2_core;
  device = Device::RKNPU;
}

View File

@@ -151,6 +151,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
OpenVINOBackendOption openvino_option;
/// Option to configure Paddle Lite backend
LiteBackendOption paddle_lite_option;
/// Option to configure RKNPU2 backend
RKNPU2BackendOption rknpu2_option;
/** \brief Set the profile mode as 'true'.
*
@@ -199,12 +201,6 @@ struct FASTDEPLOY_DECL RuntimeOption {
bool enable_pinned_memory = false;
// ======Only for RKNPU2 Backend=======
fastdeploy::rknpu2::CpuName rknpu2_cpu_name_ =
fastdeploy::rknpu2::CpuName::RK3588;
fastdeploy::rknpu2::CoreMask rknpu2_core_mask_ =
fastdeploy::rknpu2::CoreMask::RKNN_NPU_CORE_AUTO;
// *** The belowing api are deprecated, will be removed in v1.2.0
// *** Do not use it anymore
void SetPaddleMKLDNN(bool pd_mkldnn = true);

View File

@@ -207,7 +207,7 @@ template <typename T>
std::string Str(const std::vector<T>& shape) {
std::ostringstream oss;
oss << "[ " << shape[0];
for (int i = 1; i < shape.size(); ++i) {
for (size_t i = 1; i < shape.size(); ++i) {
oss << " ," << shape[i];
}
oss << " ]";

View File

@@ -19,11 +19,13 @@ namespace fastdeploy {
namespace vision {
cv::Mat VisMatting(const cv::Mat& im, const MattingResult& result,
bool transparent_background, float transparent_threshold,
bool remove_small_connected_area) {
FDASSERT((!im.empty()), "im can't be empty!");
FDASSERT((im.channels() == 3), "Only support 3 channels mat!");
auto vis_img = im.clone();
cv::Mat transparent_vis_mat;
int channel = im.channels();
int out_h = static_cast<int>(result.shape[0]);
int out_w = static_cast<int>(result.shape[1]);
int height = im.rows;
@@ -43,6 +45,14 @@ cv::Mat VisMatting(const cv::Mat& im, const MattingResult& result,
(vis_img).convertTo((vis_img), CV_8UC3);
}
if (transparent_background) {
if (vis_img.channels() != 4) {
cv::cvtColor(vis_img, transparent_vis_mat, cv::COLOR_BGR2BGRA);
vis_img = transparent_vis_mat;
channel = 4;
}
}
uchar* vis_data = static_cast<uchar*>(vis_img.data);
uchar* im_data = static_cast<uchar*>(im.data);
float* alpha_data = reinterpret_cast<float*>(alpha.data);
@@ -50,15 +60,35 @@ cv::Mat VisMatting(const cv::Mat& im, const MattingResult& result,
for (size_t i = 0; i < height; ++i) {
for (size_t j = 0; j < width; ++j) {
float alpha_val = alpha_data[i * width + j];
vis_data[i * width * 3 + j * 3 + 0] = cv::saturate_cast<uchar>(
static_cast<float>(im_data[i * width * 3 + j * 3 + 0]) * alpha_val +
(1.f - alpha_val) * 153.f);
vis_data[i * width * 3 + j * 3 + 1] = cv::saturate_cast<uchar>(
static_cast<float>(im_data[i * width * 3 + j * 3 + 1]) * alpha_val +
(1.f - alpha_val) * 255.f);
vis_data[i * width * 3 + j * 3 + 2] = cv::saturate_cast<uchar>(
static_cast<float>(im_data[i * width * 3 + j * 3 + 2]) * alpha_val +
(1.f - alpha_val) * 120.f);
if (transparent_background ) {
if (alpha_val < transparent_threshold) {
vis_data[i * width * channel + j * channel + 3] =
cv::saturate_cast<uchar>(0.f);
} else {
vis_data[i * width * channel + j * channel + 0] =
cv::saturate_cast<uchar>(
static_cast<float>(im_data[i * width * 3 + j * 3 + 0]));
vis_data[i * width * channel + j * channel + 1] =
cv::saturate_cast<uchar>(
static_cast<float>(im_data[i * width * 3 + j * 3 + 1]));
vis_data[i * width * channel + j * channel + 2] =
cv::saturate_cast<uchar>(
static_cast<float>(im_data[i * width * 3 + j * 3 + 2]));
}
} else {
vis_data[i * width * channel + j * channel + 0] =
cv::saturate_cast<uchar>(
static_cast<float>(im_data[i * width * 3 + j * 3 + 0]) *
alpha_val + (1.f - alpha_val) * 153.f);
vis_data[i * width * channel + j * channel + 1] =
cv::saturate_cast<uchar>(
static_cast<float>(im_data[i * width * 3 + j * 3 + 1]) *
alpha_val + (1.f - alpha_val) * 255.f);
vis_data[i * width * channel + j * channel + 2] =
cv::saturate_cast<uchar>(
static_cast<float>(im_data[i * width * 3 + j * 3 + 2]) *
alpha_val + (1.f - alpha_val) * 120.f);
}
}
}
return vis_img;

View File

@@ -143,11 +143,15 @@ FASTDEPLOY_DECL cv::Mat VisSegmentation(const cv::Mat& im,
*
* \param[in] im the input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
* \param[in] result the result produced by model
* \param[in] transparent_background if transparent_background==true, the background will with transparent color
* \param[in] transparent_threshold since the alpha value in MattingResult is a float between [0, 1], transparent_threshold is used to filter background pixels
* \param[in] remove_small_connected_area if remove_small_connected_area==true, the visualized result will not include the small connected areas
* \return cv::Mat type stores the visualized results
*/
// Declaration only; see the Doxygen block above for full parameter semantics.
// Defaults: opaque background, alpha threshold 0.999, keep small connected areas.
FASTDEPLOY_DECL cv::Mat VisMatting(const cv::Mat& im,
const MattingResult& result,
bool transparent_background = false,
float transparent_threshold = 0.999,
bool remove_small_connected_area = false);
/** \brief Show the visualized results for Ocr models
*
@@ -184,6 +188,7 @@ FASTDEPLOY_DECL cv::Mat SwapBackground(const cv::Mat& im,
const cv::Mat& background,
const SegmentationResult& result,
int background_label);
/** \brief Show the visualized results for key point detection models
*
* \param[in] im the input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format

View File

@@ -102,10 +102,12 @@ void BindVisualize(pybind11::module& m) {
})
.def("vis_matting",
[](pybind11::array& im_data, vision::MattingResult& result,
bool transparent_background, float transparent_threshold,
bool remove_small_connected_area) {
cv::Mat im = PyArrayToCvMat(im_data);
auto vis_im =
vision::VisMatting(im, result, remove_small_connected_area);
auto vis_im = vision::VisMatting(
im, result, transparent_background, transparent_threshold,
remove_small_connected_area);
FDTensor out;
vision::Mat(vis_im).ShareWithTensor(&out);
return TensorToPyArray(out);

View File

@@ -95,15 +95,22 @@ def vis_matting_alpha(im_data,
remove_small_connected_area)
def vis_matting(im_data,
                matting_result,
                transparent_background=False,
                transparent_threshold=0.99,
                remove_small_connected_area=False):
    """Show the visualized results for matting models.

    Thin wrapper that forwards every argument to the C extension
    ``C.vision.vis_matting``.

    :param im_data: (numpy.ndarray) The input image data, 3-D array with layout HWC, BGR format
    :param matting_result: the result produced by the model
    :param transparent_background: whether to visualize the matting result with a transparent background
    :param transparent_threshold: since the alpha value in MattingResult is a float between [0, 1], transparent_threshold is used to filter background pixels
    :param remove_small_connected_area: (bool) if True, the visualized result will not include the small connected areas
    :return: (numpy.ndarray) image with visualized results
    """
    # NOTE: the duplicated stale one-line `def` signature left over from the
    # merge has been removed; only this merged signature remains.
    return C.vision.vis_matting(im_data, matting_result,
                                transparent_background, transparent_threshold,
                                remove_small_connected_area)