Merge branch 'develop' of https://github.com/PaddlePaddle/FastDeploy into develop

jiangjiajun
2023-02-17 06:43:12 +00:00
236 changed files with 10419 additions and 2454 deletions


@@ -70,11 +70,12 @@ option(ENABLE_CVCUDA "Whether to enable NVIDIA CV-CUDA to boost image preprocess
option(ENABLE_ENCRYPTION "Whether to enable ENCRYPTION." OFF)
option(ENABLE_BENCHMARK "Whether to enable Benchmark mode." OFF)
option(WITH_ASCEND "Whether to compile for Huawei Ascend deploy." OFF)
option(WITH_DIRECTML "Whether to compile for onnxruntime DirectML deploy." OFF)
option(WITH_TIMVX "Whether to compile for TIMVX deploy." OFF)
option(WITH_KUNLUNXIN "Whether to compile for KunlunXin XPU deploy." OFF)
option(WITH_TESTING "Whether to compile with unittest." OFF)
option(WITH_CAPI "Whether to compile with c api." OFF)
option(WITH_CSHARPAPI "Whether to compile with c# api" OFF)
############################# Options for Android cross compiling #########################
if(ANDROID)
option(WITH_OPENCV_STATIC "Whether to use OpenCV static lib for Android." OFF)
@@ -300,10 +301,16 @@ if(WITH_GPU)
include_directories(${CUDA_DIRECTORY}/include)
if(WIN32)
find_library(CUDA_LIB cudart ${CUDA_DIRECTORY}/lib/x64)
find_library(NVJPEG_LIB nvjpeg ${CUDA_DIRECTORY}/lib/x64)
add_definitions(-DENABLE_NVJPEG)
else()
find_library(CUDA_LIB cudart ${CUDA_DIRECTORY}/lib64)
if(NOT BUILD_ON_JETSON)
find_library(NVJPEG_LIB nvjpeg ${CUDA_DIRECTORY}/lib64)
add_definitions(-DENABLE_NVJPEG)
endif()
endif()
list(APPEND DEPEND_LIBS ${CUDA_LIB})
list(APPEND DEPEND_LIBS ${CUDA_LIB} ${NVJPEG_LIB})
# Build CUDA source files in fastdeploy; these include CUDA preprocessing, TRT plugins, etc.
enable_language(CUDA)
@@ -424,8 +431,15 @@ if(WITH_CAPI)
endif()
endif()
if(WITH_CSHARPAPI)
if(MSVC)
add_subdirectory(${PROJECT_SOURCE_DIR}/csharp)
endif()
endif()
configure_file(${PROJECT_SOURCE_DIR}/FastDeploy.cmake.in ${PROJECT_SOURCE_DIR}/FastDeploy.cmake @ONLY)
configure_file(${PROJECT_SOURCE_DIR}/FastDeployCSharp.cmake.in ${PROJECT_SOURCE_DIR}/FastDeployCSharp.cmake @ONLY)
configure_file(${PROJECT_SOURCE_DIR}/python/fastdeploy/c_lib_wrap.py.in ${PROJECT_SOURCE_DIR}/python/fastdeploy/c_lib_wrap.py)
configure_file(${PROJECT_SOURCE_DIR}/python/scripts/process_libraries.py.in ${PROJECT_SOURCE_DIR}/python/scripts/process_libraries.py)
@@ -678,6 +692,7 @@ install(
${PROJECT_SOURCE_DIR}/ThirdPartyNotices.txt
${PROJECT_SOURCE_DIR}/VERSION_NUMBER
${PROJECT_SOURCE_DIR}/FastDeploy.cmake
${PROJECT_SOURCE_DIR}/FastDeployCSharp.cmake
${PROJECT_SOURCE_DIR}/cmake/FastDeployConfig.cmake
${PROJECT_SOURCE_DIR}/cmake/utils.cmake
${PROJECT_SOURCE_DIR}/cmake/openmp.cmake


@@ -169,21 +169,25 @@ if(ENABLE_POROS_BACKEND)
endif()
if(WITH_GPU)
if (NOT CUDA_DIRECTORY)
if(NOT CUDA_DIRECTORY)
set(CUDA_DIRECTORY "/usr/local/cuda")
endif()
if(WIN32)
find_library(CUDA_LIB cudart ${CUDA_DIRECTORY}/lib/x64)
find_library(NVJPEG_LIB nvjpeg ${CUDA_DIRECTORY}/lib/x64)
else()
find_library(CUDA_LIB cudart ${CUDA_DIRECTORY}/lib64)
if(NOT BUILD_ON_JETSON)
find_library(NVJPEG_LIB nvjpeg ${CUDA_DIRECTORY}/lib64)
endif()
endif()
if(NOT CUDA_LIB)
message(FATAL_ERROR "[FastDeploy] Cannot find library cudart in ${CUDA_DIRECTORY}, Please define CUDA_DIRECTORY, e.g -DCUDA_DIRECTORY=/path/to/cuda")
endif()
list(APPEND FASTDEPLOY_LIBS ${CUDA_LIB})
list(APPEND FASTDEPLOY_LIBS ${CUDA_LIB} ${NVJPEG_LIB})
list(APPEND FASTDEPLOY_INCS ${CUDA_DIRECTORY}/include)
if (ENABLE_TRT_BACKEND)
if(ENABLE_TRT_BACKEND)
if(BUILD_ON_JETSON)
find_library(TRT_INFER_LIB nvinfer /usr/lib/aarch64-linux-gnu/)
find_library(TRT_ONNX_LIB nvonnxparser /usr/lib/aarch64-linux-gnu/)

FastDeployCSharp.cmake (new normal file, 13 lines)

@@ -0,0 +1,13 @@
list(APPEND FASTDEPLOY_DOTNET_REFERENCES
"Microsoft.CSharp"
"System"
"System.Core"
"System.Data"
"System.Deployment"
"System.Drawing"
"System.Net.Http"
"System.Xml"
"System.Reflection"
"${CMAKE_CURRENT_LIST_DIR}/csharp_lib/fastdeploy_csharp.dll")
set(FASTDEPLOY_PACKAGE_REFERENCES "OpenCvSharp4_4.7.0.20230115;OpenCvSharp4.runtime.win_4.7.0.20230115")

FastDeployCSharp.cmake.in (new normal file, 13 lines)

@@ -0,0 +1,13 @@
list(APPEND FASTDEPLOY_DOTNET_REFERENCES
"Microsoft.CSharp"
"System"
"System.Core"
"System.Data"
"System.Deployment"
"System.Drawing"
"System.Net.Http"
"System.Xml"
"System.Reflection"
"${CMAKE_CURRENT_LIST_DIR}/csharp_lib/fastdeploy_csharp.dll")
set(FASTDEPLOY_PACKAGE_REFERENCES "OpenCvSharp4_4.7.0.20230115;OpenCvSharp4.runtime.win_4.7.0.20230115")

benchmark/cpp/CMakeLists.txt (executable file → normal file, 6 lines)

@@ -10,11 +10,17 @@ include_directories(${FASTDEPLOY_INCS})
add_executable(benchmark_yolov5 ${PROJECT_SOURCE_DIR}/benchmark_yolov5.cc)
add_executable(benchmark_ppyolov8 ${PROJECT_SOURCE_DIR}/benchmark_ppyolov8.cc)
add_executable(benchmark_ppcls ${PROJECT_SOURCE_DIR}/benchmark_ppcls.cc)
add_executable(benchmark_precision_ppyolov8 ${PROJECT_SOURCE_DIR}/benchmark_precision_ppyolov8.cc)
if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
target_link_libraries(benchmark_yolov5 ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_ppyolov8 ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_ppcls ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_precision_ppyolov8 ${FASTDEPLOY_LIBS} gflags pthread)
else()
target_link_libraries(benchmark_yolov5 ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_ppyolov8 ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_ppcls ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_precision_ppyolov8 ${FASTDEPLOY_LIBS} gflags)
endif()


@@ -0,0 +1,36 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "flags.h"
#include "macros.h"
#include "option.h"
int main(int argc, char* argv[]) {
// Initialization
auto option = fastdeploy::RuntimeOption();
if (!CreateRuntimeOption(&option, argc, argv, true)) {
return -1;
}
auto im = cv::imread(FLAGS_image);
// Set max_batch_size 1 for best performance
option.trt_option.max_batch_size = 1;
auto model_file = FLAGS_model + sep + "inference.pdmodel";
auto params_file = FLAGS_model + sep + "inference.pdiparams";
auto config_file = FLAGS_model + sep + "inference_cls.yaml";
auto model_ppcls = fastdeploy::vision::classification::PaddleClasModel(
model_file, params_file, config_file, option);
fastdeploy::vision::ClassifyResult res;
BENCHMARK_MODEL(model_ppcls, model_ppcls.Predict(im, &res))
return 0;
}

benchmark/cpp/benchmark_ppyolov8.cc (executable file → normal file, 11 lines)

@@ -12,20 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "macros.h"
#include "flags.h"
#include "macros.h"
#include "option.h"
int main(int argc, char* argv[]) {
google::ParseCommandLineFlags(&argc, &argv, true);
auto im = cv::imread(FLAGS_image);
// Initialization
auto option = fastdeploy::RuntimeOption();
if (!CreateRuntimeOption(&option)) {
PrintUsage();
return false;
if (!CreateRuntimeOption(&option, argc, argv, true)) {
return -1;
}
PrintBenchmarkInfo();
auto im = cv::imread(FLAGS_image);
auto model_file = FLAGS_model + sep + "model.pdmodel";
auto params_file = FLAGS_model + sep + "model.pdiparams";
auto config_file = FLAGS_model + sep + "infer_cfg.yml";


@@ -0,0 +1,87 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "flags.h"
#include "macros.h"
#include "option.h"
namespace vision = fastdeploy::vision;
namespace benchmark = fastdeploy::benchmark;
int main(int argc, char* argv[]) {
#if defined(ENABLE_BENCHMARK) && defined(ENABLE_VISION)
// Initialization
auto option = fastdeploy::RuntimeOption();
if (!CreateRuntimeOption(&option, argc, argv, true)) {
return -1;
}
auto im = cv::imread(FLAGS_image);
auto model_file = FLAGS_model + sep + "model.pdmodel";
auto params_file = FLAGS_model + sep + "model.pdiparams";
auto config_file = FLAGS_model + sep + "infer_cfg.yml";
auto model_ppyolov8 = vision::detection::PaddleYOLOv8(model_file, params_file,
config_file, option);
vision::DetectionResult res;
// Run once at least
model_ppyolov8.Predict(im, &res);
// 1. Test result diff
std::cout << "=============== Test result diff =================\n";
// Save result to -> disk.
std::string det_result_path = "ppyolov8_result.txt";
benchmark::ResultManager::SaveDetectionResult(res, det_result_path);
// Load result from <- disk.
vision::DetectionResult res_loaded;
benchmark::ResultManager::LoadDetectionResult(&res_loaded, det_result_path);
// Calculate diff between two results.
auto det_diff =
benchmark::ResultManager::CalculateDiffStatis(&res, &res_loaded);
std::cout << "diff: mean=" << det_diff.mean << ",max=" << det_diff.max
<< ",min=" << det_diff.min << std::endl;
// 2. Test tensor diff
std::cout << "=============== Test tensor diff =================\n";
std::vector<vision::DetectionResult> bacth_res;
std::vector<fastdeploy::FDTensor> input_tensors, output_tensors;
std::vector<cv::Mat> imgs;
imgs.push_back(im);
std::vector<vision::FDMat> fd_images = vision::WrapMat(imgs);
model_ppyolov8.GetPreprocessor().Run(&fd_images, &input_tensors);
input_tensors[0].name = "image";
input_tensors[1].name = "scale_factor";
input_tensors[2].name = "im_shape";
input_tensors.pop_back();
model_ppyolov8.Infer(input_tensors, &output_tensors);
model_ppyolov8.GetPostprocessor().Run(output_tensors, &batch_res);
// Save tensor to -> disk.
auto& tensor_dump = output_tensors[0];
std::string det_tensor_path = "ppyolov8_tensor.txt";
benchmark::ResultManager::SaveFDTensor(tensor_dump, det_tensor_path);
// Load tensor from <- disk.
fastdeploy::FDTensor tensor_loaded;
benchmark::ResultManager::LoadFDTensor(&tensor_loaded, det_tensor_path);
// Calculate diff between two tensors.
auto det_tensor_diff = benchmark::ResultManager::CalculateDiffStatis(
&tensor_dump, &tensor_loaded);
std::cout << "diff: mean=" << det_tensor_diff.mean
<< ",max=" << det_tensor_diff.max << ",min=" << det_tensor_diff.min
<< std::endl;
// 3. Run profiling
BENCHMARK_MODEL(model_ppyolov8, model_ppyolov8.Predict(im, &res))
auto vis_im = vision::VisDetection(im, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
#endif
return 0;
}

benchmark/cpp/benchmark_yolov5.cc (executable file → normal file, 13 lines)

@@ -12,20 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "macros.h"
#include "flags.h"
#include "macros.h"
#include "option.h"
int main(int argc, char* argv[]) {
google::ParseCommandLineFlags(&argc, &argv, true);
auto im = cv::imread(FLAGS_image);
// Initialization
auto option = fastdeploy::RuntimeOption();
if (!CreateRuntimeOption(&option)) {
PrintUsage();
return false;
if (!CreateRuntimeOption(&option, argc, argv, true)) {
return -1;
}
PrintBenchmarkInfo();
auto im = cv::imread(FLAGS_image);
auto model_yolov5 =
fastdeploy::vision::detection::YOLOv5(FLAGS_model, "", option);
fastdeploy::vision::DetectionResult res;
@@ -34,4 +31,4 @@ int main(int argc, char* argv[]) {
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
return 0;
}
}


@@ -15,11 +15,12 @@
#pragma once
#include "gflags/gflags.h"
#include "fastdeploy/benchmark/utils.h"
#ifdef WIN32
const char sep = '\\';
static const char sep = '\\';
#else
const char sep = '/';
static const char sep = '/';
#endif
DEFINE_string(model, "", "Directory of the inference model.");
@@ -44,7 +45,7 @@ DEFINE_bool(
collect_memory_info, false, "Whether to collect memory info");
DEFINE_int32(sampling_interval, 50, "How often to collect memory info (ms).");
void PrintUsage() {
static void PrintUsage() {
std::cout << "Usage: infer_demo --model model_path --image img_path --device "
"[cpu|gpu|xpu] --backend "
"[default|ort|paddle|ov|trt|paddle_trt|lite] "
@@ -55,7 +56,7 @@ void PrintUsage() {
std::cout << "Default value of use_fp16: false" << std::endl;
}
void PrintBenchmarkInfo() {
static void PrintBenchmarkInfo() {
// Get model name
std::vector<std::string> model_names;
fastdeploy::benchmark::Split(FLAGS_model, model_names, sep);
@@ -76,7 +77,9 @@ void PrintBenchmarkInfo() {
ss << "device_id: " << FLAGS_device_id << std::endl;
}
ss << "backend: " << FLAGS_backend << std::endl;
ss << "cpu_thread_nums: " << FLAGS_cpu_thread_nums << std::endl;
if (FLAGS_device == "cpu") {
ss << "cpu_thread_nums: " << FLAGS_cpu_thread_nums << std::endl;
}
ss << "use_fp16: " << FLAGS_use_fp16 << std::endl;
ss << "collect_memory_info: " << FLAGS_collect_memory_info << std::endl;
if (FLAGS_collect_memory_info) {


@@ -16,7 +16,9 @@
#include "fastdeploy/vision.h"
static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
int argc, char* argv[], bool remove_flags) {
google::ParseCommandLineFlags(&argc, &argv, remove_flags);
if (FLAGS_profile_mode == "runtime") {
option->EnableProfiling(FLAGS_include_h2d_d2h, FLAGS_repeat, FLAGS_warmup);
}
@@ -29,10 +31,11 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
} else if (FLAGS_backend == "trt" || FLAGS_backend == "paddle_trt") {
option->UseTrtBackend();
if (FLAGS_backend == "paddle_trt") {
option->EnablePaddleToTrt();
option->UsePaddleInferBackend();
option->paddle_infer_option.enable_trt = true;
}
if (FLAGS_use_fp16) {
option->EnableTrtFP16();
option->trt_option.enable_fp16 = true;
}
} else if (FLAGS_backend == "default") {
return true;
@@ -40,6 +43,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
std::cout << "While inference with GPU, only support "
"default/ort/paddle/trt/paddle_trt now, "
<< FLAGS_backend << " is not supported." << std::endl;
PrintUsage();
return false;
}
} else if (FLAGS_device == "cpu") {
@@ -53,7 +57,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
} else if (FLAGS_backend == "lite") {
option->UsePaddleLiteBackend();
if (FLAGS_use_fp16) {
option->EnableLiteFP16();
option->paddle_lite_option.enable_fp16 = true;
}
} else if (FLAGS_backend == "default") {
return true;
@@ -61,6 +65,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
std::cout << "While inference with CPU, only support "
"default/ort/ov/paddle/lite now, "
<< FLAGS_backend << " is not supported." << std::endl;
PrintUsage();
return false;
}
} else if (FLAGS_device == "xpu") {
@@ -72,7 +77,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
} else if (FLAGS_backend == "lite") {
option->UsePaddleLiteBackend();
if (FLAGS_use_fp16) {
option->EnableLiteFP16();
option->paddle_lite_option.enable_fp16 = true;
}
} else if (FLAGS_backend == "default") {
return true;
@@ -80,13 +85,15 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
std::cout << "While inference with XPU, only support "
"default/ort/paddle/lite now, "
<< FLAGS_backend << " is not supported." << std::endl;
PrintUsage();
return false;
}
} else {
std::cerr << "Only support device CPU/GPU/XPU now, " << FLAGS_device
<< " is not supported." << std::endl;
PrintUsage();
return false;
}
PrintBenchmarkInfo();
return true;
}
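Taken together, the hunks above migrate the benchmark helper away from the deprecated EnablePaddleToTrt()/EnableTrtFP16()/EnableLiteFP16() calls to struct-style option fields. A minimal sketch of the new configuration style, assuming GPU device 0 and the standard FastDeploy runtime header:

#include "fastdeploy/runtime.h"

// Sketch: build a RuntimeOption for the paddle_trt path with the
// struct-style fields used above (device id 0 is an assumption).
fastdeploy::RuntimeOption MakePaddleTrtOption() {
  fastdeploy::RuntimeOption option;
  option.UseGpu(0);
  option.UsePaddleInferBackend();                // Paddle Inference backend
  option.paddle_infer_option.enable_trt = true;  // run TensorRT inside it
  option.trt_option.enable_fp16 = true;          // FP16 via the TRT options
  option.trt_option.max_batch_size = 1;          // as in the benchmarks above
  return option;
}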


@@ -102,7 +102,10 @@ def build_option(args):
elif backend in ["trt", "paddle_trt"]:
option.use_trt_backend()
if backend == "paddle_trt":
option.enable_paddle_to_trt()
option.use_paddle_infer_backend()
option.paddle_infer_option.enable_trt = True
# Set max_batch_size 1 for best performance
option.trt_option.max_batch_size = 1
if enable_trt_fp16:
option.enable_trt_fp16()
elif backend == "default":


@@ -115,7 +115,8 @@ def build_option(args):
elif backend in ["trt", "paddle_trt"]:
option.use_trt_backend()
if backend == "paddle_trt":
option.enable_paddle_to_trt()
option.use_paddle_infer_backend()
option.paddle_infer_option.enable_trt = True
if enable_trt_fp16:
option.enable_trt_fp16()
elif backend == "default":

benchmark/python/benchmark_ppocr.py (normal file → executable file, 5 lines)

@@ -92,8 +92,9 @@ def build_option(args):
elif backend in ["trt", "paddle_trt"]:
option.use_trt_backend()
if backend == "paddle_trt":
option.enable_paddle_trt_collect_shape()
option.enable_paddle_to_trt()
option.paddle_infer_option.collect_trt_shape = True
option.use_paddle_infer_backend()
option.paddle_infer_option.enable_trt = True
if enable_trt_fp16:
option.enable_trt_fp16()
elif backend == "default":


@@ -96,8 +96,9 @@ def build_option(args):
option.set_trt_input_shape("x", [1, 3, 192, 192],
[1, 3, 192, 192], [1, 3, 192, 192])
if backend == "paddle_trt":
option.enable_paddle_trt_collect_shape()
option.enable_paddle_to_trt()
option.paddle_infer_option.collect_trt_shape = True
option.use_paddle_infer_backend()
option.paddle_infer_option.enable_trt = True
if enable_trt_fp16:
option.enable_trt_fp16()
elif backend == "default":

benchmark/python/benchmark_uie.py (normal file → executable file, 5 lines)

@@ -76,8 +76,9 @@ def build_option(args):
else:
option.use_trt_backend()
if args.backend == 'paddle_trt':
option.enable_paddle_to_trt()
option.enable_paddle_trt_collect_shape()
option.paddle_infer_option.collect_trt_shape = True
option.use_paddle_infer_backend()
option.paddle_infer_option.enable_trt = True
trt_file = os.path.join(args.model_dir, "infer.trt")
option.set_trt_input_shape(
'input_ids',


@@ -85,7 +85,8 @@ def build_option(args):
elif backend in ["trt", "paddle_trt"]:
option.use_trt_backend()
if backend == "paddle_trt":
option.enable_paddle_to_trt()
option.use_paddle_infer_backend()
option.paddle_infer_option.enable_trt = True
if enable_trt_fp16:
option.enable_trt_fp16()
elif backend == "default":


@@ -67,6 +67,11 @@ typedef struct FD_C_TwoDimArrayFloat {
typedef void* FD_C_Mat;
typedef struct FD_C_OneDimMat {
size_t size;
FD_C_Mat* data;
} FD_C_OneDimMat;
#ifdef __cplusplus
extern "C" {
#endif


@@ -18,38 +18,108 @@ namespace fastdeploy {
#ifdef ENABLE_VISION
std::unique_ptr<fastdeploy::vision::classification::PaddleClasModel>&
FD_C_CheckAndConvertPaddleClasModelWrapper(
FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper) {
FDASSERT(
fd_c_paddleclas_model_wrapper != nullptr,
"The pointer of fd_c_paddleclas_model_wrapper shouldn't be nullptr.");
return fd_c_paddleclas_model_wrapper->paddleclas_model;
}
// results:
std::unique_ptr<fastdeploy::vision::detection::PPYOLOE>&
FD_C_CheckAndConvertPPYOLOEWrapper(FD_C_PPYOLOEWrapper* fd_c_ppyoloe_wrapper) {
FDASSERT(fd_c_ppyoloe_wrapper != nullptr,
"The pointer of fd_c_ppyoloe_wrapper shouldn't be nullptr.");
return fd_c_ppyoloe_wrapper->ppyoloe_model;
}
// ClassifyResult
DECL_AND_IMPLEMENT_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(
ClassifyResult, fd_classify_result_wrapper, classify_result)
// DetectionResult
DECL_AND_IMPLEMENT_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(
DetectionResult, fd_detection_result_wrapper, detection_result)
std::unique_ptr<fastdeploy::vision::ClassifyResult>&
FD_C_CheckAndConvertClassifyResultWrapper(
FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper) {
FDASSERT(fd_c_classify_result_wrapper != nullptr,
"The pointer of fd_c_classify_result_wrapper shouldn't be nullptr.");
return fd_c_classify_result_wrapper->classify_result;
}
// Models:
// Classification
// PaddleClasModel
DECL_AND_IMPLEMENT_CLASSIFICATION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PaddleClasModel, fd_paddleclas_model_wrapper, paddleclas_model)
// detection models:
// PPYOLOE
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PPYOLOE, fd_ppyoloe_wrapper, ppyoloe_model)
// PicoDet
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PicoDet, fd_picodet_wrapper, picodet_model)
// PPYOLO
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PPYOLO, fd_ppyolo_wrapper, ppyolo_model)
// YOLOv3
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
YOLOv3, fd_yolov3_wrapper, yolov3_model)
// PaddleYOLOX
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PaddleYOLOX, fd_paddleyolox_wrapper, paddleyolox_model)
// FasterRCNN
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
FasterRCNN, fd_fasterrcnn_wrapper, fasterrcnn_model)
// MaskRCNN
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
MaskRCNN, fd_maskrcnn_wrapper, maskrcnn_model)
// SSD
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(SSD,
fd_ssd_wrapper,
ssd_model)
// PaddleYOLOv5
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PaddleYOLOv5, fd_paddleyolov5_wrapper, paddleyolov5_model)
// PaddleYOLOv6
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PaddleYOLOv6, fd_paddleyolov6_wrapper, paddleyolov6_model)
// PaddleYOLOv7
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PaddleYOLOv7, fd_paddleyolov7_wrapper, paddleyolov7_model)
// PaddleYOLOv8
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PaddleYOLOv8, fd_paddleyolov8_wrapper, paddleyolov8_model)
// RTMDet
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
RTMDet, fd_rtmdet_wrapper, rtmdet_model)
// CascadeRCNN
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
CascadeRCNN, fd_cascadercnn_wrapper, cascadercnn_model)
// PSSDet
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PSSDet, fd_pssdet_wrapper, pssdet_model)
// RetinaNet
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
RetinaNet, fd_retinanet_wrapper, retinanet_model)
// FCOS
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
FCOS, fd_fcos_wrapper, fcos_model)
// TTFNet
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
TTFNet, fd_ttfnet_wrapper, ttfnet_model)
// TOOD
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
TOOD, fd_tood_wrapper, tood_model)
// GFL
DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(GFL,
fd_gfl_wrapper,
gfl_model)
std::unique_ptr<fastdeploy::vision::DetectionResult>&
FD_C_CheckAndConvertDetectionResultWrapper(
FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper) {
FDASSERT(
fd_c_detection_result_wrapper != nullptr,
"The pointer of fd_c_detection_result_wrapper shouldn't be nullptr.");
return fd_c_detection_result_wrapper->detection_result;
}
#endif
std::unique_ptr<fastdeploy::RuntimeOption>&


@@ -23,39 +23,246 @@
#include "fastdeploy/vision/common/result.h"
#include "fastdeploy/vision/detection/ppdet/model.h"
typedef struct FD_C_ClassifyResultWrapper {
std::unique_ptr<fastdeploy::vision::ClassifyResult> classify_result;
} FD_C_ClassifyResultWrapper;
#define DEFINE_RESULT_WRAPPER_STRUCT(typename, varname) typedef struct FD_C_##typename##Wrapper { \
std::unique_ptr<fastdeploy::vision::typename> varname; \
} FD_C_##typename##Wrapper
typedef struct FD_C_DetectionResultWrapper {
std::unique_ptr<fastdeploy::vision::DetectionResult> detection_result;
} FD_C_DetectionResultWrapper;
#define DEFINE_CLASSIFICATION_MODEL_WRAPPER_STRUCT(typename, varname) typedef struct FD_C_##typename##Wrapper { \
std::unique_ptr<fastdeploy::vision::classification::typename> \
varname; \
} FD_C_##typename##Wrapper
typedef struct FD_C_PaddleClasModelWrapper {
std::unique_ptr<fastdeploy::vision::classification::PaddleClasModel>
paddleclas_model;
} FD_C_PaddleClasModelWrapper;
#define DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(typename, varname) typedef struct FD_C_##typename##Wrapper { \
std::unique_ptr<fastdeploy::vision::detection::typename> varname; \
} FD_C_##typename##Wrapper
typedef struct FD_C_PPYOLOEWrapper {
std::unique_ptr<fastdeploy::vision::detection::PPYOLOE> ppyoloe_model;
} FD_C_PPYOLOEWrapper;
// ------------- below are the wrapper struct definitions --------------------- //
// Results:
// ClassifyResult
DEFINE_RESULT_WRAPPER_STRUCT(ClassifyResult, classify_result);
// DetectionResult
DEFINE_RESULT_WRAPPER_STRUCT(DetectionResult, detection_result);
// Models:
// Classification
// PaddleClasModel
DEFINE_CLASSIFICATION_MODEL_WRAPPER_STRUCT(PaddleClasModel, paddleclas_model);
// Detection
// PPYOLOE
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PPYOLOE, ppyoloe_model);
// PicoDet
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PicoDet, picodet_model);
// PPYOLO
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PPYOLO, ppyolo_model);
// YOLOv3
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(YOLOv3, yolov3_model);
// PaddleYOLOX
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PaddleYOLOX, paddleyolox_model);
// FasterRCNN
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(FasterRCNN, fasterrcnn_model);
// MaskRCNN
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(MaskRCNN, maskrcnn_model);
// SSD
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(SSD, ssd_model);
// PaddleYOLOv5
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PaddleYOLOv5, paddleyolov5_model);
// PaddleYOLOv6
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PaddleYOLOv6, paddleyolov6_model);
// PaddleYOLOv7
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PaddleYOLOv7, paddleyolov7_model);
// PaddleYOLOv8
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PaddleYOLOv8, paddleyolov8_model);
// RTMDet
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(RTMDet, rtmdet_model);
// CascadeRCNN
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(CascadeRCNN, cascadercnn_model);
// PSSDet
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PSSDet, pssdet_model);
// RetinaNet
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(RetinaNet, retinanet_model);
// FCOS
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(FCOS, fcos_model);
// TTFNet
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(TTFNet, ttfnet_model);
// TOOD
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(TOOD, tood_model);
// GFL
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(GFL, gfl_model);
// -------- below are function declarations for getting the ptr from a wrapper -------- //
#define DECLARE_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, varname) std::unique_ptr<fastdeploy::vision::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* varname)
#define DECLARE_CLASSIFICATION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, varname) std::unique_ptr<fastdeploy::vision::classification::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* varname)
#define DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, varname) std::unique_ptr<fastdeploy::vision::detection::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* varname)
namespace fastdeploy {
std::unique_ptr<fastdeploy::vision::ClassifyResult>&
FD_C_CheckAndConvertClassifyResultWrapper(
FD_C_ClassifyResultWrapper* fd_classify_result_wrapper);
std::unique_ptr<fastdeploy::vision::DetectionResult>&
FD_C_CheckAndConvertDetectionResultWrapper(
FD_C_DetectionResultWrapper* fd_detection_result_wrapper);
std::unique_ptr<fastdeploy::vision::classification::PaddleClasModel>&
FD_C_CheckAndConvertPaddleClasModelWrapper(
FD_C_PaddleClasModelWrapper* fd_paddleclas_model_wrapper);
std::unique_ptr<fastdeploy::vision::detection::PPYOLOE>&
FD_C_CheckAndConvertPPYOLOEWrapper(FD_C_PPYOLOEWrapper* fd_ppyoloe_wrapper);
// results:
// ClassifyResult
DECLARE_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(ClassifyResult,
fd_classify_result_wrapper);
// DetectionResult
DECLARE_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(DetectionResult,
fd_detection_result_wrapper);
// Models:
// Classification
// PaddleClasModel
DECLARE_CLASSIFICATION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PaddleClasModel, fd_paddleclas_model_wrapper);
// detection models:
// PPYOLOE
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PPYOLOE,
fd_ppyoloe_wrapper);
// PicoDet
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PicoDet,
fd_picodet_wrapper);
// PPYOLO
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PPYOLO,
fd_ppyolo_wrapper);
// YOLOv3
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(YOLOv3,
fd_yolov3_wrapper);
// PaddleYOLOX
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PaddleYOLOX,
fd_paddleyolox_wrapper);
// FasterRCNN
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(FasterRCNN,
fd_fasterrcnn_wrapper);
// MaskRCNN
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(MaskRCNN,
fd_maskrcnn_wrapper);
// SSD
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(SSD,
fd_ssd_wrapper);
// PaddleYOLOv5
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PaddleYOLOv5,
fd_paddleyolov5_wrapper);
// PaddleYOLOv6
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PaddleYOLOv6,
fd_paddleyolov6_wrapper);
// PaddleYOLOv7
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PaddleYOLOv7,
fd_paddleyolov7_wrapper);
// PaddleYOLOv8
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PaddleYOLOv8,
fd_paddleyolov8_wrapper);
// RTMDet
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(RTMDet,
fd_rtmdet_wrapper);
// CascadeRCNN
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(CascadeRCNN,
fd_cascadercnn_wrapper);
// PSSDet
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PSSDet,
fd_pssdet_wrapper);
// RetinaNet
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(RetinaNet,
fd_retinanet_wrapper);
// FCOS
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(FCOS,
fd_fcos_wrapper);
// TTFNet
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(TTFNet,
fd_ttfnet_wrapper);
// TOOD
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(TOOD,
fd_tood_wrapper);
// GFL
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(GFL,
fd_gfl_wrapper);
} // namespace fastdeploy
#endif
typedef struct FD_C_RuntimeOptionWrapper {
std::unique_ptr<fastdeploy::RuntimeOption> runtime_option;
} FD_C_RuntimeOptionWrapper;
@@ -68,3 +275,27 @@ FD_C_CheckAndConvertRuntimeOptionWrapper(
#define CHECK_AND_CONVERT_FD_TYPE(TYPENAME, variable_name) \
fastdeploy::FD_C_CheckAndConvert##TYPENAME(variable_name)
#define DECL_AND_IMPLEMENT_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, var_wrapper_name, var_ptr_name) std::unique_ptr<fastdeploy::vision::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* var_wrapper_name) { \
FDASSERT(var_wrapper_name != nullptr, \
"The pointer of " #var_wrapper_name " shouldn't be nullptr."); \
return var_wrapper_name->var_ptr_name; \
}
#define DECL_AND_IMPLEMENT_CLASSIFICATION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, var_wrapper_name, var_ptr_name) std::unique_ptr<fastdeploy::vision::classification::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* var_wrapper_name) { \
FDASSERT(var_wrapper_name != nullptr, \
"The pointer of " #var_wrapper_name " shouldn't be nullptr."); \
return var_wrapper_name->var_ptr_name; \
}
#define DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, var_wrapper_name, var_ptr_name) std::unique_ptr<fastdeploy::vision::detection::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* var_wrapper_name) { \
FDASSERT(var_wrapper_name != nullptr, \
"The pointer of " #var_wrapper_name " shouldn't be nullptr."); \
return var_wrapper_name->var_ptr_name; \
}
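To make the macro refactor concrete, here is what one struct macro and its matching converter expand to for PPYOLOE; this mirrors the hand-written code removed above:

// Expansion of DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(PPYOLOE, ppyoloe_model):
typedef struct FD_C_PPYOLOEWrapper {
  std::unique_ptr<fastdeploy::vision::detection::PPYOLOE> ppyoloe_model;
} FD_C_PPYOLOEWrapper;

// Expansion of DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
//     PPYOLOE, fd_ppyoloe_wrapper, ppyoloe_model):
std::unique_ptr<fastdeploy::vision::detection::PPYOLOE>&
FD_C_CheckAndConvertPPYOLOEWrapper(FD_C_PPYOLOEWrapper* fd_ppyoloe_wrapper) {
  FDASSERT(fd_ppyoloe_wrapper != nullptr,
           "The pointer of fd_ppyoloe_wrapper shouldn't be nullptr.");
  return fd_ppyoloe_wrapper->ppyoloe_model;
}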


@@ -38,21 +38,82 @@ FD_C_PaddleClasModelWrapper* FD_C_CreatePaddleClasModelWrapper(
}
void FD_C_DestroyPaddleClasModelWrapper(
__fd_take FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper) {
FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper) {
delete fd_c_paddleclas_model_wrapper;
}
FD_C_Bool FD_C_PaddleClasModelWrapperPredict(
__fd_take FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper,
FD_C_Mat img, FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper) {
FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper, FD_C_Mat img,
FD_C_ClassifyResult* fd_c_classify_result) {
cv::Mat* im = reinterpret_cast<cv::Mat*>(img);
auto& paddleclas_model = CHECK_AND_CONVERT_FD_TYPE(
PaddleClasModelWrapper, fd_c_paddleclas_model_wrapper);
FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper =
FD_C_CreateClassifyResultWrapper();
auto& classify_result = CHECK_AND_CONVERT_FD_TYPE(
ClassifyResultWrapper, fd_c_classify_result_wrapper);
return paddleclas_model->Predict(im, classify_result.get());
bool successful = paddleclas_model->Predict(im, classify_result.get());
if (successful) {
FD_C_ClassifyResult* res =
FD_C_ClassifyResultWrapperGetData(fd_c_classify_result_wrapper);
*fd_c_classify_result = *res;
}
return successful;
}
FD_C_Bool FD_C_PaddleClasModelWrapperInitialized(
FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper) {
auto& paddleclas_model = CHECK_AND_CONVERT_FD_TYPE(
PaddleClasModelWrapper, fd_c_paddleclas_model_wrapper);
return paddleclas_model->Initialized();
}
FD_C_ClassifyResult* FD_C_ClassifyResultToC(
fastdeploy::vision::ClassifyResult* classify_result) {
// Internal use: convert fastdeploy::vision::ClassifyResult to
// FD_C_ClassifyResult
FD_C_ClassifyResult* fd_c_classify_result_data = new FD_C_ClassifyResult();
// copy label_ids
fd_c_classify_result_data->label_ids.size = classify_result->label_ids.size();
fd_c_classify_result_data->label_ids.data =
new int32_t[fd_c_classify_result_data->label_ids.size];
memcpy(fd_c_classify_result_data->label_ids.data,
classify_result->label_ids.data(),
sizeof(int32_t) * fd_c_classify_result_data->label_ids.size);
// copy scores
fd_c_classify_result_data->scores.size = classify_result->scores.size();
fd_c_classify_result_data->scores.data =
new float[fd_c_classify_result_data->scores.size];
memcpy(fd_c_classify_result_data->scores.data, classify_result->scores.data(),
sizeof(float) * fd_c_classify_result_data->scores.size);
fd_c_classify_result_data->type =
static_cast<FD_C_ResultType>(classify_result->type);
return fd_c_classify_result_data;
}
FD_C_Bool FD_C_PaddleClasModelWrapperBatchPredict(
FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper,
FD_C_OneDimMat imgs, FD_C_OneDimClassifyResult* results) {
std::vector<cv::Mat> imgs_vec;
std::vector<fastdeploy::vision::ClassifyResult> results_out;
for (int i = 0; i < imgs.size; i++) {
imgs_vec.push_back(*(reinterpret_cast<cv::Mat*>(imgs.data[i])));
}
auto& paddleclas_model = CHECK_AND_CONVERT_FD_TYPE(
PaddleClasModelWrapper, fd_c_paddleclas_model_wrapper);
bool successful = paddleclas_model->BatchPredict(imgs_vec, &results_out);
if (successful) {
// copy results back to FD_C_OneDimClassifyResult
results->size = results_out.size();
results->data = new FD_C_ClassifyResult[results->size];
for (int i = 0; i < results_out.size(); i++) {
results->data[i] = *FD_C_ClassifyResultToC(&results_out[i]);
}
}
return successful;
}
#ifdef __cplusplus
}
#endif
#endif
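With this rework, single-image prediction fills a plain FD_C_ClassifyResult instead of a wrapper. A usage sketch (callable from C++) follows; the include path, the FD_C_CreateRuntimeOptionWrapper helper, and the FD_C_ModelFormat constant spelling are assumptions, since only the model-side API appears in this diff:

#include <cstdio>
#include <opencv2/opencv.hpp>
#include "fastdeploy_capi/vision.h"  // assumed include path

int RunClassifySketch() {
  FD_C_RuntimeOptionWrapper* opt = FD_C_CreateRuntimeOptionWrapper();  // assumed helper
  FD_C_PaddleClasModelWrapper* model = FD_C_CreatePaddleClasModelWrapper(
      "inference.pdmodel", "inference.pdiparams", "inference_cls.yaml", opt,
      FD_C_ModelFormat_PADDLE);  // enum constant name is an assumption
  if (!FD_C_PaddleClasModelWrapperInitialized(model)) return -1;
  cv::Mat image = cv::imread("test.jpg");
  FD_C_ClassifyResult result;
  // Predict now fills the plain result struct directly:
  if (FD_C_PaddleClasModelWrapperPredict(model, &image, &result)) {
    std::printf("top-1 label=%d score=%f\n", result.label_ids.data[0],
                result.scores.data[0]);
  }
  FD_C_DestroyPaddleClasModelWrapper(model);
  return 0;
}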


@@ -54,12 +54,36 @@ FASTDEPLOY_CAPI_EXPORT extern void FD_C_DestroyPaddleClasModelWrapper(
*
* \param[in] fd_c_paddleclas_model_wrapper pointer to FD_C_PaddleClasModelWrapper object
* \param[in] img pointer to cv::Mat image
* \param[in] fd_c_classify_result_wrapper pointer to FD_C_PaddleClasModelWrapper object, which stores the result.
* \param[in] fd_c_classify_result pointer to FD_C_ClassifyResult object, which stores the result.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_PaddleClasModelWrapperPredict(
__fd_take FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper,
FD_C_Mat img, FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper);
__fd_keep FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper,
FD_C_Mat img, FD_C_ClassifyResult* fd_c_classify_result);
/** \brief Check if the model is initialized successfully
*
* \param[in] fd_c_paddleclas_model_wrapper pointer to FD_C_PaddleClasModelWrapper object
*
* \return Return a bool of value true if initialized successfully
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_PaddleClasModelWrapperInitialized(
__fd_keep FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper);
/** \brief Predict the classification results for a batch of input images
*
* \param[in] fd_c_paddleclas_model_wrapper pointer to FD_C_PaddleClasModelWrapper object
* \param[in] imgs The input image list; each element comes from cv::imread()
* \param[in] results The output classification result list
* \return true if the prediction succeeded, otherwise false
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_PaddleClasModelWrapperBatchPredict(
__fd_keep FD_C_PaddleClasModelWrapper* fd_c_paddleclas_model_wrapper,
FD_C_OneDimMat imgs,
FD_C_OneDimClassifyResult* results);
#ifdef __cplusplus
} // extern "C"
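A batch-predict sketch using the new FD_C_OneDimMat and FD_C_OneDimClassifyResult containers; the layouts come from the struct definitions in this PR, and model is assumed to be created as in the earlier sketch:

#include <cstdio>
#include <opencv2/opencv.hpp>
#include "fastdeploy_capi/vision.h"  // assumed include path

void RunBatchClassifySketch(FD_C_PaddleClasModelWrapper* model) {
  cv::Mat a = cv::imread("a.jpg"), b = cv::imread("b.jpg");
  FD_C_Mat mats[2] = {&a, &b};   // FD_C_Mat is an opaque cv::Mat*
  FD_C_OneDimMat imgs = {2, mats};
  FD_C_OneDimClassifyResult results{};
  if (FD_C_PaddleClasModelWrapperBatchPredict(model, imgs, &results)) {
    for (size_t i = 0; i < results.size; ++i) {
      std::printf("image %zu: top-1 label=%d\n", i,
                  results.data[i].label_ids.data[0]);
    }
  }
}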


@@ -0,0 +1,119 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#define DECLARE_CREATE_WRAPPER_FUNCTION(model_type) FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_##model_type##Wrapper* \
FD_C_Creates##model_type##Wrapper( \
const char* model_file, const char* params_file, const char* config_file, \
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper, \
const FD_C_ModelFormat model_format)
#define DECLARE_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name) FASTDEPLOY_CAPI_EXPORT extern void \
FD_C_Destroy##model_type##Wrapper(__fd_take FD_C_##model_type##Wrapper* wrapper_var_name);
#define DECLARE_PREDICT_FUNCTION(model_type, wrapper_var_name) FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_##model_type##WrapperPredict( \
__fd_take FD_C_##model_type##Wrapper* wrapper_var_name, FD_C_Mat img, \
FD_C_DetectionResult* fd_c_detection_result)
#define DECLARE_INITIALIZED_FUNCTION(model_type, wrapper_var_name) FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_##model_type##WrapperInitialized( \
__fd_keep FD_C_##model_type##Wrapper* wrapper_var_name)
#define DECLARE_BATCH_PREDICT_FUNCTION(model_type, wrapper_var_name) FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_##model_type##WrapperBatchPredict( \
__fd_keep FD_C_##model_type##Wrapper* wrapper_var_name, \
FD_C_OneDimMat imgs, \
FD_C_OneDimDetectionResult* results)
#define IMPLEMENT_CREATE_WRAPPER_FUNCTION(model_type, var_name) \
auto& runtime_option = CHECK_AND_CONVERT_FD_TYPE(RuntimeOptionWrapper, \
fd_c_runtime_option_wrapper); \
FD_C_##model_type##Wrapper* fd_c_##model_type##_wrapper = new FD_C_##model_type##Wrapper(); \
fd_c_##model_type##_wrapper->var_name = \
std::unique_ptr<fastdeploy::vision::detection::model_type>( \
new fastdeploy::vision::detection::model_type( \
std::string(model_file), std::string(params_file), \
std::string(config_file), *runtime_option, \
static_cast<fastdeploy::ModelFormat>(model_format))); \
return fd_c_##model_type##_wrapper
#define IMPLEMENT_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name) delete wrapper_var_name
#define IMPLEMENT_PREDICT_FUNCTION(model_type, wrapper_var_name) \
cv::Mat* im = reinterpret_cast<cv::Mat*>(img); \
auto& model = \
CHECK_AND_CONVERT_FD_TYPE(model_type##Wrapper, wrapper_var_name); \
FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper = \
FD_C_CreateDetectionResultWrapper(); \
auto& detection_result = CHECK_AND_CONVERT_FD_TYPE( \
DetectionResultWrapper, fd_c_detection_result_wrapper); \
bool successful = model->Predict(im, detection_result.get()); \
if (successful) { \
FD_C_DetectionResult* res = \
FD_C_DetectionResultWrapperGetData(fd_c_detection_result_wrapper); \
*fd_c_detection_result = *res; \
} \
return successful
#define IMPLEMENT_INITIALIZED_FUNCTION(model_type, wrapper_var_name) auto& model = \
CHECK_AND_CONVERT_FD_TYPE(model_type##Wrapper, wrapper_var_name); \
return model->Initialized();
#define IMPLEMENT_BATCH_PREDICT_FUNCTION(model_type, wrapper_var_name) std::vector<cv::Mat> imgs_vec; \
std::vector<fastdeploy::vision::DetectionResult> results_out; \
for (int i = 0; i < imgs.size; i++) { \
imgs_vec.push_back(*(reinterpret_cast<cv::Mat*>(imgs.data[i]))); \
} \
auto& model = \
CHECK_AND_CONVERT_FD_TYPE(model_type##Wrapper, wrapper_var_name); \
bool successful = model->BatchPredict(imgs_vec, &results_out); \
if (successful) { \
results->size = results_out.size(); \
results->data = new FD_C_DetectionResult[results->size]; \
for (int i = 0; i < results_out.size(); i++) { \
results->data[i] = *FD_C_DetectionResultToC(&results_out[i]); \
} \
} \
return successful;
#define DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(model_type, var_name) FD_C_##model_type##Wrapper* FD_C_Creates##model_type##Wrapper(\
const char* model_file, const char* params_file, const char* config_file, \
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper, \
const FD_C_ModelFormat model_format) { \
IMPLEMENT_CREATE_WRAPPER_FUNCTION(model_type, var_name); \
}
#define DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name) void FD_C_Destroy##model_type##Wrapper( \
__fd_take FD_C_##model_type##Wrapper* wrapper_var_name) { \
IMPLEMENT_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name); \
}
#define DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(model_type, wrapper_var_name) FD_C_Bool FD_C_##model_type##WrapperPredict( \
FD_C_##model_type##Wrapper* wrapper_var_name, FD_C_Mat img, \
FD_C_DetectionResult* fd_c_detection_result) { \
IMPLEMENT_PREDICT_FUNCTION(model_type, wrapper_var_name); \
}
#define DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(model_type, wrapper_var_name) FD_C_Bool FD_C_##model_type##WrapperInitialized( \
FD_C_##model_type##Wrapper* wrapper_var_name) { \
IMPLEMENT_INITIALIZED_FUNCTION(model_type, wrapper_var_name); \
}
#define DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(model_type, wrapper_var_name) FD_C_Bool FD_C_##model_type##WrapperBatchPredict( \
FD_C_##model_type##Wrapper* wrapper_var_name, FD_C_OneDimMat imgs, \
FD_C_OneDimDetectionResult* results) { \
IMPLEMENT_BATCH_PREDICT_FUNCTION(model_type, wrapper_var_name); \
}
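As with the type-conversion macros earlier, one expansion makes the pattern clear; DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PicoDet, fd_picodet_wrapper) expands to the following (modulo whitespace):

FD_C_Bool FD_C_PicoDetWrapperInitialized(
    FD_C_PicoDetWrapper* fd_picodet_wrapper) {
  auto& model =
      CHECK_AND_CONVERT_FD_TYPE(PicoDetWrapper, fd_picodet_wrapper);
  return model->Initialized();
}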


@@ -21,38 +21,248 @@
extern "C" {
#endif
// PPYOLOE
FD_C_PPYOLOEWrapper* FD_C_CreatesPPYOLOEWrapper(
const char* model_file, const char* params_file, const char* config_file,
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
const FD_C_ModelFormat model_format) {
auto& runtime_option = CHECK_AND_CONVERT_FD_TYPE(RuntimeOptionWrapper,
fd_c_runtime_option_wrapper);
FD_C_PPYOLOEWrapper* fd_c_ppyoloe_wrapper = new FD_C_PPYOLOEWrapper();
fd_c_ppyoloe_wrapper->ppyoloe_model =
std::unique_ptr<fastdeploy::vision::detection::PPYOLOE>(
new fastdeploy::vision::detection::PPYOLOE(
std::string(model_file), std::string(params_file),
std::string(config_file), *runtime_option,
static_cast<fastdeploy::ModelFormat>(model_format)));
return fd_c_ppyoloe_wrapper;
IMPLEMENT_CREATE_WRAPPER_FUNCTION(PPYOLOE, ppyoloe_model);
}
void FD_C_DestroyPPYOLOEWrapper(
__fd_take FD_C_PPYOLOEWrapper* fd_c_ppyoloe_wrapper) {
delete fd_c_ppyoloe_wrapper;
__fd_take FD_C_PPYOLOEWrapper* fd_ppyoloe_wrapper) {
IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PPYOLOE, fd_ppyoloe_wrapper);
}
FD_C_Bool FD_C_PPYOLOEWrapperPredict(
FD_C_PPYOLOEWrapper* fd_c_ppyoloe_wrapper, FD_C_Mat img,
FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper) {
cv::Mat* im = reinterpret_cast<cv::Mat*>(img);
auto& ppyoloe_model =
CHECK_AND_CONVERT_FD_TYPE(PPYOLOEWrapper, fd_c_ppyoloe_wrapper);
auto& detection_result = CHECK_AND_CONVERT_FD_TYPE(
DetectionResultWrapper, fd_c_detection_result_wrapper);
return ppyoloe_model->Predict(im, detection_result.get());
FD_C_PPYOLOEWrapper* fd_ppyoloe_wrapper, FD_C_Mat img,
FD_C_DetectionResult* fd_c_detection_result) {
IMPLEMENT_PREDICT_FUNCTION(PPYOLOE, fd_ppyoloe_wrapper);
}
FD_C_Bool FD_C_PPYOLOEWrapperInitialized(
FD_C_PPYOLOEWrapper* fd_ppyoloe_wrapper) {
IMPLEMENT_INITIALIZED_FUNCTION(PPYOLOE, fd_ppyoloe_wrapper);
}
FD_C_DetectionResult* FD_C_DetectionResultToC(
fastdeploy::vision::DetectionResult* detection_result) {
// Internal use: convert fastdeploy::vision::DetectionResult to
// FD_C_DetectionResult
FD_C_DetectionResult* fd_c_detection_result = new FD_C_DetectionResult();
// copy boxes
const int boxes_coordinate_dim = 4;
fd_c_detection_result->boxes.size = detection_result->boxes.size();
fd_c_detection_result->boxes.data =
new FD_C_OneDimArrayFloat[fd_c_detection_result->boxes.size];
for (size_t i = 0; i < detection_result->boxes.size(); i++) {
fd_c_detection_result->boxes.data[i].size = boxes_coordinate_dim;
fd_c_detection_result->boxes.data[i].data = new float[boxes_coordinate_dim];
for (size_t j = 0; j < boxes_coordinate_dim; j++) {
fd_c_detection_result->boxes.data[i].data[j] =
detection_result->boxes[i][j];
}
}
// copy scores
fd_c_detection_result->scores.size = detection_result->scores.size();
fd_c_detection_result->scores.data =
new float[fd_c_detection_result->scores.size];
memcpy(fd_c_detection_result->scores.data, detection_result->scores.data(),
sizeof(float) * fd_c_detection_result->scores.size);
// copy label_ids
fd_c_detection_result->label_ids.size = detection_result->label_ids.size();
fd_c_detection_result->label_ids.data =
new int32_t[fd_c_detection_result->label_ids.size];
memcpy(fd_c_detection_result->label_ids.data,
detection_result->label_ids.data(),
sizeof(int32_t) * fd_c_detection_result->label_ids.size);
// copy masks
fd_c_detection_result->masks.size = detection_result->masks.size();
fd_c_detection_result->masks.data =
new FD_C_Mask[fd_c_detection_result->masks.size];
for (size_t i = 0; i < detection_result->masks.size(); i++) {
// copy data in mask
fd_c_detection_result->masks.data[i].data.size =
detection_result->masks[i].data.size();
fd_c_detection_result->masks.data[i].data.data =
new uint8_t[detection_result->masks[i].data.size()];
memcpy(fd_c_detection_result->masks.data[i].data.data,
detection_result->masks[i].data.data(),
sizeof(uint8_t) * detection_result->masks[i].data.size());
// copy shape in mask
fd_c_detection_result->masks.data[i].shape.size =
detection_result->masks[i].shape.size();
fd_c_detection_result->masks.data[i].shape.data =
new int64_t[detection_result->masks[i].shape.size()];
memcpy(fd_c_detection_result->masks.data[i].shape.data,
detection_result->masks[i].shape.data(),
sizeof(int64_t) * detection_result->masks[i].shape.size());
fd_c_detection_result->masks.data[i].type =
static_cast<FD_C_ResultType>(detection_result->masks[i].type);
}
fd_c_detection_result->contain_masks = detection_result->contain_masks;
fd_c_detection_result->type =
static_cast<FD_C_ResultType>(detection_result->type);
return fd_c_detection_result;
}
FD_C_Bool FD_C_PPYOLOEWrapperBatchPredict(
FD_C_PPYOLOEWrapper* fd_ppyoloe_wrapper, FD_C_OneDimMat imgs,
FD_C_OneDimDetectionResult* results) {
IMPLEMENT_BATCH_PREDICT_FUNCTION(PPYOLOE, fd_ppyoloe_wrapper);
}
// PicoDet
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(PicoDet, picodet_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PicoDet, fd_picodet_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(PicoDet, fd_picodet_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PicoDet, fd_picodet_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(PicoDet, fd_picodet_wrapper)
// PPYOLO
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(PPYOLO, ppyolo_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PPYOLO, fd_ppyolo_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(PPYOLO, fd_ppyolo_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PPYOLO, fd_ppyolo_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(PPYOLO, fd_ppyolo_wrapper)
// YOLOv3
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(YOLOv3, yolov3_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(YOLOv3, fd_yolov3_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(YOLOv3, fd_yolov3_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(YOLOv3, fd_yolov3_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(YOLOv3, fd_yolov3_wrapper)
// PaddleYOLOX
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(PaddleYOLOX, paddleyolox_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PaddleYOLOX,
fd_paddleyolox_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(PaddleYOLOX, fd_paddleyolox_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PaddleYOLOX, fd_paddleyolox_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(PaddleYOLOX,
fd_paddleyolox_wrapper)
// FasterRCNN
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(FasterRCNN, fasterrcnn_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(FasterRCNN,
fd_fasterrcnn_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(FasterRCNN, fd_fasterrcnn_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(FasterRCNN, fd_fasterrcnn_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(FasterRCNN, fd_fasterrcnn_wrapper)
// MaskRCNN
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(MaskRCNN, maskrcnn_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(MaskRCNN, fd_maskrcnn_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(MaskRCNN, fd_maskrcnn_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(MaskRCNN, fd_maskrcnn_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(MaskRCNN, fd_maskrcnn_wrapper)
// SSD
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(SSD, ssd_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(SSD, fd_ssd_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(SSD, fd_ssd_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(SSD, fd_ssd_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(SSD, fd_ssd_wrapper)
// PaddleYOLOv5
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(PaddleYOLOv5, paddleyolov5_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PaddleYOLOv5,
fd_paddleyolov5_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(PaddleYOLOv5, fd_paddleyolov5_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PaddleYOLOv5,
fd_paddleyolov5_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(PaddleYOLOv5,
fd_paddleyolov5_wrapper)
// PaddleYOLOv6
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(PaddleYOLOv6, paddleyolov6_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PaddleYOLOv6,
fd_paddleyolov6_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(PaddleYOLOv6, fd_paddleyolov6_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PaddleYOLOv6,
fd_paddleyolov6_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(PaddleYOLOv6,
fd_paddleyolov6_wrapper)
// PaddleYOLOv7
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(PaddleYOLOv7, paddleyolov7_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PaddleYOLOv7,
fd_paddleyolov7_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(PaddleYOLOv7, fd_paddleyolov7_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PaddleYOLOv7,
fd_paddleyolov7_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(PaddleYOLOv7,
fd_paddleyolov7_wrapper)
// PaddleYOLOv8
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(PaddleYOLOv8, paddleyolov8_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PaddleYOLOv8,
fd_paddleyolov8_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(PaddleYOLOv8, fd_paddleyolov8_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PaddleYOLOv8,
fd_paddleyolov8_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(PaddleYOLOv8,
fd_paddleyolov8_wrapper)
// RTMDet
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(RTMDet, rtmdet_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(RTMDet, fd_rtmdet_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(RTMDet, fd_rtmdet_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(RTMDet, fd_rtmdet_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(RTMDet, fd_rtmdet_wrapper)
// CascadeRCNN
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(CascadeRCNN, cascadercnn_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(CascadeRCNN,
fd_cascadercnn_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(CascadeRCNN, fd_cascadercnn_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(CascadeRCNN, fd_cascadercnn_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(CascadeRCNN,
fd_cascadercnn_wrapper)
// PSSDet
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(PSSDet, pssdet_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PSSDet, fd_pssdet_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(PSSDet, fd_pssdet_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PSSDet, fd_pssdet_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(PSSDet, fd_pssdet_wrapper)
// RetinaNet
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(RetinaNet, retinanet_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(RetinaNet, fd_retinanet_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(RetinaNet, fd_retinanet_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(RetinaNet, fd_retinanet_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(RetinaNet, fd_retinanet_wrapper)
// FCOS
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(FCOS, fcos_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(FCOS, fd_fcos_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(FCOS, fd_fcos_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(FCOS, fd_fcos_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(FCOS, fd_fcos_wrapper)
// TTFNet
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(TTFNet, ttfnet_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(TTFNet, fd_ttfnet_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(TTFNet, fd_ttfnet_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(TTFNet, fd_ttfnet_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(TTFNet, fd_ttfnet_wrapper)
// TOOD
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(TOOD, tood_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(TOOD, fd_tood_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(TOOD, fd_tood_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(TOOD, fd_tood_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(TOOD, fd_tood_wrapper)
// GFL
DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(GFL, gfl_model)
DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(GFL, fd_gfl_wrapper)
DECLARE_AND_IMPLEMENT_PREDICT_FUNCTION(GFL, fd_gfl_wrapper)
DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(GFL, fd_gfl_wrapper)
DECLARE_AND_IMPLEMENT_BATCH_PREDICT_FUNCTION(GFL, fd_gfl_wrapper)
#ifdef __cplusplus
}
#endif
#endif
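Every detection model listed above now exposes the same five generated entry points. A usage sketch for the PicoDet API; the creator arguments follow IMPLEMENT_CREATE_WRAPPER_FUNCTION, while the include path and the FD_C_ModelFormat constant are assumptions:

#include <cstdio>
#include <opencv2/opencv.hpp>
#include "fastdeploy_capi/vision.h"  // assumed include path

int RunPicoDetSketch(FD_C_RuntimeOptionWrapper* opt) {
  // Note: the generated creator name really is FD_C_CreatesPicoDetWrapper.
  FD_C_PicoDetWrapper* det = FD_C_CreatesPicoDetWrapper(
      "model.pdmodel", "model.pdiparams", "infer_cfg.yml", opt,
      FD_C_ModelFormat_PADDLE);  // enum constant name is an assumption
  if (!FD_C_PicoDetWrapperInitialized(det)) return -1;
  cv::Mat image = cv::imread("street.jpg");
  FD_C_DetectionResult res;
  if (FD_C_PicoDetWrapperPredict(det, &image, &res)) {
    for (size_t i = 0; i < res.scores.size; ++i) {
      std::printf("box %zu: label=%d score=%.3f\n", i,
                  res.label_ids.data[i], res.scores.data[i]);
    }
  }
  FD_C_DestroyPicoDetWrapper(det);
  return 0;
}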

File diff suppressed because it is too large


@@ -90,6 +90,16 @@ FD_C_ClassifyResultWrapper* FD_C_CreateClassifyResultWrapperFromData(
return fd_c_classify_result_wrapper;
}
char* FD_C_ClassifyResultWrapperStr(
FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper) {
auto& classify_result = CHECK_AND_CONVERT_FD_TYPE(
ClassifyResultWrapper, fd_c_classify_result_wrapper);
std::string information = classify_result->Str();
char* cstr = new char[information.length() + 1];
std::strcpy(cstr, information.c_str());
return cstr;
}
// Detection Results
FD_C_DetectionResultWrapper* FD_C_CreateDetectionResultWrapper() {
@@ -237,6 +247,17 @@ FD_C_DetectionResultWrapper* FD_C_CreateDetectionResultWrapperFromData(
return fd_c_detection_result_wrapper;
}
char* FD_C_DetectionResultWrapperStr(
FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper) {
auto& detection_result = CHECK_AND_CONVERT_FD_TYPE(
DetectionResultWrapper, fd_c_detection_result_wrapper);
std::string information = detection_result->Str();
char* cstr = new char[information.length() + 1];
std::strcpy(cstr, information.c_str());
return cstr;
}
#ifdef __cplusplus
}
#endif
#endif
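The new ...WrapperStr helpers return a heap buffer allocated with new[], so the caller owns and must release it. A minimal sketch:

#include <cstdio>

// wrapper is an FD_C_DetectionResultWrapper* obtained elsewhere.
void PrintDetectionSketch(FD_C_DetectionResultWrapper* wrapper) {
  char* info = FD_C_DetectionResultWrapperStr(wrapper);
  std::printf("%s\n", info);
  delete[] info;  // the buffer was allocated with new char[...]
}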


@@ -30,6 +30,11 @@ typedef struct FD_C_ClassifyResult {
FD_C_ResultType type;
} FD_C_ClassifyResult;
typedef struct FD_C_OneDimClassifyResult {
size_t size;
FD_C_ClassifyResult* data;
} FD_C_OneDimClassifyResult;
typedef struct FD_C_Mask {
FD_C_OneDimArrayUint8 data;
FD_C_OneDimArrayInt64 shape;
@@ -50,6 +55,11 @@ typedef struct FD_C_DetectionResult {
FD_C_ResultType type;
} FD_C_DetectionResult;
typedef struct FD_C_OneDimDetectionResult {
size_t size;
FD_C_DetectionResult* data;
} FD_C_OneDimDetectionResult;
// Classification Results
/** \brief Create a new FD_C_ClassifyResultWrapper object
@@ -95,6 +105,16 @@ FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_ClassifyResultWrapper*
FD_C_CreateClassifyResultWrapperFromData(
__fd_keep FD_C_ClassifyResult* fd_c_classify_result);
/** \brief Print ClassifyResult formatted information
*
* \param[in] fd_c_classify_result_wrapper pointer to FD_C_ClassifyResultWrapper object
* \return Return a string pointer
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give char*
FD_C_ClassifyResultWrapperStr(
__fd_keep FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper);
// Detection Results
/** \brief Create a new FD_C_DetectionResultWrapper object
@@ -140,6 +160,16 @@ FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_DetectionResultWrapper*
FD_C_CreateDetectionResultWrapperFromData(
__fd_keep FD_C_DetectionResult* fd_c_detection_result);
/** \brief Get DetectionResult formatted information as a string
*
* \param[in] fd_c_detection_result_wrapper pointer to FD_C_DetectionResultWrapper object
* \return Return a heap-allocated C string; ownership passes to the caller (see __fd_give)
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give char*
FD_C_DetectionResultWrapperStr(
__fd_keep FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper);
#ifdef __cplusplus
} // extern "C"
#endif

View File

@@ -29,11 +29,31 @@ FD_C_Mat FD_C_VisDetection(FD_C_Mat im,
FD_C_CreateDetectionResultWrapperFromData(fd_c_detection_result);
auto& detection_result = CHECK_AND_CONVERT_FD_TYPE(
DetectionResultWrapper, fd_c_detection_result_wrapper);
cv::Mat result = fastdeploy::vision::Visualize::VisDetection(
cv::Mat result = fastdeploy::vision::VisDetection(
*(reinterpret_cast<cv::Mat*>(im)), *detection_result, score_threshold,
line_size, font_size);
return new cv::Mat(result);
}
FD_C_Mat FD_C_VisDetectionWithLabel(FD_C_Mat im,
FD_C_DetectionResult* fd_c_detection_result,
FD_C_OneDimArrayCstr* labels,
float score_threshold, int line_size,
float font_size) {
std::vector<std::string> labels_in;
for (int i = 0; i < labels->size; i++) {
labels_in.emplace_back(labels->data[i].data);
}
FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper =
FD_C_CreateDetectionResultWrapperFromData(fd_c_detection_result);
auto& detection_result = CHECK_AND_CONVERT_FD_TYPE(
DetectionResultWrapper, fd_c_detection_result_wrapper);
cv::Mat result = fastdeploy::vision::VisDetection(
*(reinterpret_cast<cv::Mat*>(im)), *detection_result, labels_in,
score_threshold, line_size, font_size);
return new cv::Mat(result);
}
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -22,15 +22,37 @@
extern "C" {
#endif
/** \brief Visualize Detection
/** \brief Show the visualized results for detection models
*
* \return Return a pointer to cv::Mat object
* \param[in] im the input image data; comes from cv::imread() as a 3-D array in HWC layout, BGR format
* \param[in] fd_detection_result the result produced by the model
* \param[in] score_threshold score threshold; bounding boxes with a score below it are not drawn
* \param[in] line_size line width for bounding boxes
* \param[in] font_size font size for the label text
* \return a cv::Mat storing the visualized result
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_Mat
FD_C_VisDetection(FD_C_Mat im, FD_C_DetectionResult* fd_detection_result,
float score_threshold, int line_size, float font_size);
/** \brief Show the visualized results with custom labels for detection models
*
* \param[in] im the input image data; comes from cv::imread() as a 3-D array in HWC layout, BGR format
* \param[in] fd_detection_result the result produced by the model
* \param[in] labels custom class labels; each bounding box is annotated with the label of its class
* \param[in] score_threshold score threshold; bounding boxes with a score below it are not drawn
* \param[in] line_size line width for bounding boxes
* \param[in] font_size font size for the label text
* \return a cv::Mat storing the visualized result
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_Mat FD_C_VisDetectionWithLabel(
FD_C_Mat im,
FD_C_DetectionResult* fd_detection_result,
FD_C_OneDimArrayCstr* labels,
float score_threshold,
int line_size, float font_size);
#ifdef __cplusplus
} // extern "C"
#endif
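For orientation, a hedged C++ sketch of how the two declarations above are called; `im` and `result` are assumed inputs produced elsewhere by the C API, the `Visualize` helper is hypothetical, and the threshold and drawing parameters are arbitrary illustrative values:

```c++
// 'im' (FD_C_Mat) and 'result' (FD_C_DetectionResult*) are assumed inputs.
FD_C_Mat Visualize(FD_C_Mat im, FD_C_DetectionResult* result,
                   FD_C_OneDimArrayCstr* labels) {
  if (labels != NULL) {
    // Annotate each box with the caller-supplied class label.
    return FD_C_VisDetectionWithLabel(im, result, labels,
                                      /*score_threshold=*/0.25f,
                                      /*line_size=*/2, /*font_size=*/0.5f);
  }
  // Label-free variant.
  return FD_C_VisDetection(im, result, /*score_threshold=*/0.25f,
                           /*line_size=*/2, /*font_size=*/0.5f);
}
```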

View File

@@ -12,9 +12,6 @@ if(WIN32)
if(ENABLE_POROS_BACKEND)
message(FATAL_ERROR "-DENABLE_POROS_BACKEND=ON doesn't support on non 64-bit system now.")
endif()
if(ENABLE_VISION)
message(FATAL_ERROR "-DENABLE_VISION=ON doesn't support on non 64-bit system now.")
endif()
endif()
endif()

View File

@@ -44,14 +44,20 @@ set(CMAKE_BUILD_RPATH "${CMAKE_BUILD_RPATH}" "${ONNXRUNTIME_LIB_DIR}")
set(ONNXRUNTIME_VERSION "1.12.0")
set(ONNXRUNTIME_URL_PREFIX "https://bj.bcebos.com/paddle2onnx/libs/")
if(WIN32)
if(WITH_GPU)
set(ONNXRUNTIME_FILENAME "onnxruntime-win-x64-gpu-${ONNXRUNTIME_VERSION}.zip")
elseif(WITH_DIRECTML)
set(ONNXRUNTIME_FILENAME "onnxruntime-directml-win-x64.zip")
else()
set(ONNXRUNTIME_FILENAME "onnxruntime-win-x64-${ONNXRUNTIME_VERSION}.zip")
endif()
if(NOT CMAKE_CL_64)
set(ONNXRUNTIME_FILENAME "onnxruntime-win-x86-${ONNXRUNTIME_VERSION}.zip")
if(WITH_DIRECTML)
set(ONNXRUNTIME_FILENAME "onnxruntime-directml-win-x86.zip")
else()
set(ONNXRUNTIME_FILENAME "onnxruntime-win-x86-${ONNXRUNTIME_VERSION}.zip")
endif()
endif()
elseif(APPLE)
if(CURRENT_OSX_ARCH MATCHES "arm64")

View File

@@ -15,7 +15,11 @@
set(COMPRESSED_SUFFIX ".tgz")
if(WIN32)
set(OPENCV_FILENAME "opencv-win-x64-3.4.16")
if(NOT CMAKE_CL_64)
set(OPENCV_FILENAME "opencv-win-x86-3.4.16")
else()
set(OPENCV_FILENAME "opencv-win-x64-3.4.16")
endif()
set(COMPRESSED_SUFFIX ".zip")
elseif(APPLE)
if(CURRENT_OSX_ARCH MATCHES "arm64")
@@ -51,6 +55,12 @@ endif()
set(OPENCV_INSTALL_DIR ${THIRD_PARTY_PATH}/install/)
if(ANDROID)
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/fastdeploy/third_libs")
elseif(WIN32)
if(NOT CMAKE_CL_64)
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/fastdeploy/third_libs")
else()
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/paddle2onnx/libs")
endif()
else() # TODO: use fastdeploy/third_libs instead.
set(OPENCV_URL_PREFIX "https://bj.bcebos.com/paddle2onnx/libs")
endif()

View File

@@ -41,7 +41,9 @@ function(fastdeploy_summary)
message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")
message(STATUS " ENABLE_BENCHMARK : ${ENABLE_BENCHMARK}")
message(STATUS " WITH_GPU : ${WITH_GPU}")
message(STATUS " WITH_TESTING : ${WITH_TESTING}")
message(STATUS " WITH_ASCEND : ${WITH_ASCEND}")
message(STATUS " WITH_DIRECTML : ${WITH_DIRECTML}")
message(STATUS " WITH_TIMVX : ${WITH_TIMVX}")
message(STATUS " WITH_KUNLUNXIN : ${WITH_KUNLUNXIN}")
message(STATUS " WITH_CAPI : ${WITH_CAPI}")

csharp/CMakeLists.txt Normal file
View File

@@ -0,0 +1,58 @@
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##################################### Building: FastDeploy C# API #######################################
PROJECT(fastdeploy_csharp CSharp)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
option(ENABLE_VISION "Whether to enable vision models usage." OFF)
message("fastdeploy_csharp_SOURCE_DIR: ${fastdeploy_csharp_SOURCE_DIR}")
file(GLOB_RECURSE DEPLOY_CSHARPAPI_SRCS ${fastdeploy_csharp_SOURCE_DIR}/fastdeploy/*.cs)
if(NOT ENABLE_VISION)
file(GLOB_RECURSE DEPLOY_VISION_CSHARPAPI_SRCS ${fastdeploy_csharp_SOURCE_DIR}/fastdeploy/vision/*.cs)
list(REMOVE_ITEM DEPLOY_CSHARPAPI_SRCS ${DEPLOY_VISION_CSHARPAPI_SRCS})
endif()
# Define the DLL target, including all relevant project files.
add_library(${PROJECT_NAME} SHARED ${DEPLOY_CSHARPAPI_SRCS})
# Set the C# language version (defaults to 3.0 if not set).
set(CMAKE_CSharp_FLAGS "/langversion:10")
# Add in some .NET reference libraries.
set_property(TARGET ${PROJECT_NAME} PROPERTY VS_DOTNET_REFERENCES
"Microsoft.CSharp"
"System"
"System.Core"
"System.Data"
"System.Deployment"
"System.Drawing"
"System.Net.Http"
"System.Xml"
)
set_property(TARGET ${PROJECT_NAME}
PROPERTY VS_PACKAGE_REFERENCES "OpenCvSharp4_4.7.0.20230115"
)
##################################### Installing: FastDeploy C# API #######################################
install(
TARGETS ${PROJECT_NAME}
LIBRARY DESTINATION csharp_lib
ARCHIVE DESTINATION csharp_lib
RUNTIME DESTINATION csharp_lib
)

View File

@@ -0,0 +1,53 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
namespace fastdeploy {
public enum ModelFormat {
AUTOREC, ///< Auto recognize the model format by model file name
PADDLE, ///< Model with paddlepaddle format
ONNX, ///< Model with ONNX format
RKNN, ///< Model with RKNN format
TORCHSCRIPT, ///< Model with TorchScript format
SOPHGO, ///< Model with SOPHGO format
}
public enum rknpu2_CpuName {
RK356X = 0, /* run on RK356X. */
RK3588 = 1, /* default, run on RK3588. */
UNDEFINED,
}
public enum rknpu2_CoreMask {
RKNN_NPU_CORE_AUTO = 0, //< default, run on NPU core randomly.
RKNN_NPU_CORE_0 = 1, //< run on NPU core 0.
RKNN_NPU_CORE_1 = 2, //< run on NPU core 1.
RKNN_NPU_CORE_2 = 4, //< run on NPU core 2.
RKNN_NPU_CORE_0_1 =
RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, //< run on NPU core 0 and core 1.
RKNN_NPU_CORE_0_1_2 =
RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, //< run on NPU core 0, core 1 and core 2.
RKNN_NPU_CORE_UNDEFINED,
}
public enum LitePowerMode {
LITE_POWER_HIGH = 0, ///< Use Lite Backend with high power mode
LITE_POWER_LOW = 1, ///< Use Lite Backend with low power mode
LITE_POWER_FULL = 2, ///< Use Lite Backend with full power mode
LITE_POWER_NO_BIND = 3, ///< Use Lite Backend with no bind power mode
LITE_POWER_RAND_HIGH = 4, ///< Use Lite Backend with rand high mode
LITE_POWER_RAND_LOW = 5 ///< Use Lite Backend with rand low power mode
}
}

View File

@@ -0,0 +1,541 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
namespace fastdeploy {
public class RuntimeOption {
public RuntimeOption() {
fd_runtime_option_wrapper = FD_C_CreateRuntimeOptionWrapper();
}
~RuntimeOption() {
FD_C_DestroyRuntimeOptionWrapper(fd_runtime_option_wrapper);
}
public void SetModelPath(string model_path, string params_path = "",
ModelFormat format = ModelFormat.PADDLE) {
FD_C_RuntimeOptionWrapperSetModelPath(fd_runtime_option_wrapper, model_path,
params_path, format);
}
public void SetModelBuffer(string model_buffer, string params_buffer = "",
ModelFormat format = ModelFormat.PADDLE) {
FD_C_RuntimeOptionWrapperSetModelBuffer(
fd_runtime_option_wrapper, model_buffer, params_buffer, format);
}
public void UseCpu() {
FD_C_RuntimeOptionWrapperUseCpu(fd_runtime_option_wrapper);
}
public void UseGpu(int gpu_id = 0) {
FD_C_RuntimeOptionWrapperUseGpu(fd_runtime_option_wrapper, gpu_id);
}
public void
UseRKNPU2(rknpu2_CpuName rknpu2_name = rknpu2_CpuName.RK3588,
rknpu2_CoreMask rknpu2_core = rknpu2_CoreMask.RKNN_NPU_CORE_0) {
FD_C_RuntimeOptionWrapperUseRKNPU2(fd_runtime_option_wrapper, rknpu2_name,
rknpu2_core);
}
public void UseTimVX() {
FD_C_RuntimeOptionWrapperUseTimVX(fd_runtime_option_wrapper);
}
public void UseAscend() {
FD_C_RuntimeOptionWrapperUseAscend(fd_runtime_option_wrapper);
}
public void
UseKunlunXin(int kunlunxin_id = 0, int l3_workspace_size = 0xfffc00,
bool locked = false, bool autotune = true,
string autotune_file = "", string precision = "int16",
bool adaptive_seqlen = false, bool enable_multi_stream = false) {
FD_C_RuntimeOptionWrapperUseKunlunXin(
fd_runtime_option_wrapper, kunlunxin_id, l3_workspace_size, locked,
autotune, autotune_file, precision, adaptive_seqlen,
enable_multi_stream);
}
public void UseSophgo() {
FD_C_RuntimeOptionWrapperUseSophgo(fd_runtime_option_wrapper);
}
public void SetExternalStream(IntPtr external_stream) {
FD_C_RuntimeOptionWrapperSetExternalStream(fd_runtime_option_wrapper,
external_stream);
}
public void SetCpuThreadNum(int thread_num) {
FD_C_RuntimeOptionWrapperSetCpuThreadNum(fd_runtime_option_wrapper,
thread_num);
}
public void SetOrtGraphOptLevel(int level = -1) {
FD_C_RuntimeOptionWrapperSetOrtGraphOptLevel(fd_runtime_option_wrapper,
level);
}
public void UsePaddleBackend() {
FD_C_RuntimeOptionWrapperUsePaddleBackend(fd_runtime_option_wrapper);
}
public void UsePaddleInferBackend() {
FD_C_RuntimeOptionWrapperUsePaddleInferBackend(fd_runtime_option_wrapper);
}
public void UseOrtBackend() {
FD_C_RuntimeOptionWrapperUseOrtBackend(fd_runtime_option_wrapper);
}
public void UseSophgoBackend() {
FD_C_RuntimeOptionWrapperUseSophgoBackend(fd_runtime_option_wrapper);
}
public void UseTrtBackend() {
FD_C_RuntimeOptionWrapperUseTrtBackend(fd_runtime_option_wrapper);
}
public void UsePorosBackend() {
FD_C_RuntimeOptionWrapperUsePorosBackend(fd_runtime_option_wrapper);
}
public void UseOpenVINOBackend() {
FD_C_RuntimeOptionWrapperUseOpenVINOBackend(fd_runtime_option_wrapper);
}
public void UseLiteBackend() {
FD_C_RuntimeOptionWrapperUseLiteBackend(fd_runtime_option_wrapper);
}
public void UsePaddleLiteBackend() {
FD_C_RuntimeOptionWrapperUsePaddleLiteBackend(fd_runtime_option_wrapper);
}
public void SetPaddleMKLDNN(bool pd_mkldnn = true) {
FD_C_RuntimeOptionWrapperSetPaddleMKLDNN(fd_runtime_option_wrapper,
pd_mkldnn);
}
public void EnablePaddleToTrt() {
FD_C_RuntimeOptionWrapperEnablePaddleToTrt(fd_runtime_option_wrapper);
}
public void DeletePaddleBackendPass(string delete_pass_name) {
FD_C_RuntimeOptionWrapperDeletePaddleBackendPass(fd_runtime_option_wrapper,
delete_pass_name);
}
public void EnablePaddleLogInfo() {
FD_C_RuntimeOptionWrapperEnablePaddleLogInfo(fd_runtime_option_wrapper);
}
public void DisablePaddleLogInfo() {
FD_C_RuntimeOptionWrapperDisablePaddleLogInfo(fd_runtime_option_wrapper);
}
public void SetPaddleMKLDNNCacheSize(int size) {
FD_C_RuntimeOptionWrapperSetPaddleMKLDNNCacheSize(fd_runtime_option_wrapper,
size);
}
public void SetOpenVINODevice(string name = "CPU") {
FD_C_RuntimeOptionWrapperSetOpenVINODevice(fd_runtime_option_wrapper, name);
}
public void SetLiteOptimizedModelDir(string optimized_model_dir) {
FD_C_RuntimeOptionWrapperSetLiteOptimizedModelDir(fd_runtime_option_wrapper,
optimized_model_dir);
}
public void SetLiteSubgraphPartitionPath(
string nnadapter_subgraph_partition_config_path) {
FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionPath(
fd_runtime_option_wrapper, nnadapter_subgraph_partition_config_path);
}
public void SetLiteSubgraphPartitionConfigBuffer(
string nnadapter_subgraph_partition_config_buffer) {
FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionConfigBuffer(
fd_runtime_option_wrapper, nnadapter_subgraph_partition_config_buffer);
}
public void SetLiteContextProperties(string nnadapter_context_properties) {
FD_C_RuntimeOptionWrapperSetLiteContextProperties(
fd_runtime_option_wrapper, nnadapter_context_properties);
}
public void SetLiteModelCacheDir(string nnadapter_model_cache_dir) {
FD_C_RuntimeOptionWrapperSetLiteModelCacheDir(fd_runtime_option_wrapper,
nnadapter_model_cache_dir);
}
public void SetLiteMixedPrecisionQuantizationConfigPath(
string nnadapter_mixed_precision_quantization_config_path) {
FD_C_RuntimeOptionWrapperSetLiteMixedPrecisionQuantizationConfigPath(
fd_runtime_option_wrapper,
nnadapter_mixed_precision_quantization_config_path);
}
public void EnableLiteFP16() {
FD_C_RuntimeOptionWrapperEnableLiteFP16(fd_runtime_option_wrapper);
}
public void DisableLiteFP16() {
FD_C_RuntimeOptionWrapperDisableLiteFP16(fd_runtime_option_wrapper);
}
public void EnableLiteInt8() {
FD_C_RuntimeOptionWrapperEnableLiteInt8(fd_runtime_option_wrapper);
}
public void DisableLiteInt8() {
FD_C_RuntimeOptionWrapperDisableLiteInt8(fd_runtime_option_wrapper);
}
public void SetLitePowerMode(LitePowerMode mode) {
FD_C_RuntimeOptionWrapperSetLitePowerMode(fd_runtime_option_wrapper, mode);
}
public void EnableTrtFP16() {
FD_C_RuntimeOptionWrapperEnableTrtFP16(fd_runtime_option_wrapper);
}
public void DisableTrtFP16() {
FD_C_RuntimeOptionWrapperDisableTrtFP16(fd_runtime_option_wrapper);
}
public void SetTrtCacheFile(string cache_file_path) {
FD_C_RuntimeOptionWrapperSetTrtCacheFile(fd_runtime_option_wrapper,
cache_file_path);
}
public void EnablePinnedMemory() {
FD_C_RuntimeOptionWrapperEnablePinnedMemory(fd_runtime_option_wrapper);
}
public void DisablePinnedMemory() {
FD_C_RuntimeOptionWrapperDisablePinnedMemory(fd_runtime_option_wrapper);
}
public void EnablePaddleTrtCollectShape() {
FD_C_RuntimeOptionWrapperEnablePaddleTrtCollectShape(
fd_runtime_option_wrapper);
}
public void DisablePaddleTrtCollectShape() {
FD_C_RuntimeOptionWrapperDisablePaddleTrtCollectShape(
fd_runtime_option_wrapper);
}
public void SetOpenVINOStreams(int num_streams) {
FD_C_RuntimeOptionWrapperSetOpenVINOStreams(fd_runtime_option_wrapper,
num_streams);
}
public void UseIpu(int device_num = 1, int micro_batch_size = 1,
bool enable_pipelining = false, int batches_per_step = 1) {
FD_C_RuntimeOptionWrapperUseIpu(fd_runtime_option_wrapper, device_num,
micro_batch_size, enable_pipelining,
batches_per_step);
}
public IntPtr GetWrapperPtr() { return fd_runtime_option_wrapper; }
// Below are underlying C api
private IntPtr fd_runtime_option_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateRuntimeOptionWrapper")]
private static extern IntPtr FD_C_CreateRuntimeOptionWrapper();
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyRuntimeOptionWrapper")]
private static extern void
FD_C_DestroyRuntimeOptionWrapper(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetModelPath")]
private static extern void
FD_C_RuntimeOptionWrapperSetModelPath(IntPtr fd_runtime_option_wrapper,
string model_path, string params_path,
ModelFormat format);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetModelBuffer")]
private static extern void FD_C_RuntimeOptionWrapperSetModelBuffer(
IntPtr fd_runtime_option_wrapper, string model_buffer,
string params_buffer, ModelFormat format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_RuntimeOptionWrapperUseCpu")]
private static extern void
FD_C_RuntimeOptionWrapperUseCpu(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_RuntimeOptionWrapperUseGpu")]
private static extern void
FD_C_RuntimeOptionWrapperUseGpu(IntPtr fd_runtime_option_wrapper, int gpu_id);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseRKNPU2")]
private static extern void
FD_C_RuntimeOptionWrapperUseRKNPU2(IntPtr fd_runtime_option_wrapper,
rknpu2_CpuName rknpu2_name,
rknpu2_CoreMask rknpu2_core);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseTimVX")]
private static extern void
FD_C_RuntimeOptionWrapperUseTimVX(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseAscend")]
private static extern void
FD_C_RuntimeOptionWrapperUseAscend(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseKunlunXin")]
private static extern void FD_C_RuntimeOptionWrapperUseKunlunXin(
IntPtr fd_runtime_option_wrapper, int kunlunxin_id, int l3_workspace_size,
bool locked, bool autotune, string autotune_file, string precision,
bool adaptive_seqlen, bool enable_multi_stream);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseSophgo")]
private static extern void
FD_C_RuntimeOptionWrapperUseSophgo(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetExternalStream")]
private static extern void
FD_C_RuntimeOptionWrapperSetExternalStream(IntPtr fd_runtime_option_wrapper,
IntPtr external_stream);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetCpuThreadNum")]
private static extern void
FD_C_RuntimeOptionWrapperSetCpuThreadNum(IntPtr fd_runtime_option_wrapper,
int thread_num);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetOrtGraphOptLevel")]
private static extern void
FD_C_RuntimeOptionWrapperSetOrtGraphOptLevel(IntPtr fd_runtime_option_wrapper,
int level);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUsePaddleBackend")]
private static extern void
FD_C_RuntimeOptionWrapperUsePaddleBackend(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUsePaddleInferBackend")]
private static extern void FD_C_RuntimeOptionWrapperUsePaddleInferBackend(
IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseOrtBackend")]
private static extern void
FD_C_RuntimeOptionWrapperUseOrtBackend(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseSophgoBackend")]
private static extern void
FD_C_RuntimeOptionWrapperUseSophgoBackend(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseTrtBackend")]
private static extern void
FD_C_RuntimeOptionWrapperUseTrtBackend(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUsePorosBackend")]
private static extern void
FD_C_RuntimeOptionWrapperUsePorosBackend(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseOpenVINOBackend")]
private static extern void
FD_C_RuntimeOptionWrapperUseOpenVINOBackend(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUseLiteBackend")]
private static extern void
FD_C_RuntimeOptionWrapperUseLiteBackend(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperUsePaddleLiteBackend")]
private static extern void FD_C_RuntimeOptionWrapperUsePaddleLiteBackend(
IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetPaddleMKLDNN")]
private static extern void
FD_C_RuntimeOptionWrapperSetPaddleMKLDNN(IntPtr fd_runtime_option_wrapper,
bool pd_mkldnn);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperEnablePaddleToTrt")]
private static extern void
FD_C_RuntimeOptionWrapperEnablePaddleToTrt(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperDeletePaddleBackendPass")]
private static extern void FD_C_RuntimeOptionWrapperDeletePaddleBackendPass(
IntPtr fd_runtime_option_wrapper, string delete_pass_name);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperEnablePaddleLogInfo")]
private static extern void FD_C_RuntimeOptionWrapperEnablePaddleLogInfo(
IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperDisablePaddleLogInfo")]
private static extern void FD_C_RuntimeOptionWrapperDisablePaddleLogInfo(
IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetPaddleMKLDNNCacheSize")]
private static extern void FD_C_RuntimeOptionWrapperSetPaddleMKLDNNCacheSize(
IntPtr fd_runtime_option_wrapper, int size);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetOpenVINODevice")]
private static extern void
FD_C_RuntimeOptionWrapperSetOpenVINODevice(IntPtr fd_runtime_option_wrapper,
string name);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetLiteOptimizedModelDir")]
private static extern void FD_C_RuntimeOptionWrapperSetLiteOptimizedModelDir(
IntPtr fd_runtime_option_wrapper, string optimized_model_dir);
[DllImport("fastdeploy.dll",
EntryPoint =
"FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionPath")]
private static extern void
FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionPath(
IntPtr fd_runtime_option_wrapper,
string nnadapter_subgraph_partition_config_path);
[DllImport(
"fastdeploy.dll",
EntryPoint =
"FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionConfigBuffer")]
private static extern void
FD_C_RuntimeOptionWrapperSetLiteSubgraphPartitionConfigBuffer(
IntPtr fd_runtime_option_wrapper,
string nnadapter_subgraph_partition_config_buffer);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetLiteContextProperties")]
private static extern void FD_C_RuntimeOptionWrapperSetLiteContextProperties(
IntPtr fd_runtime_option_wrapper, string nnadapter_context_properties);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetLiteModelCacheDir")]
private static extern void FD_C_RuntimeOptionWrapperSetLiteModelCacheDir(
IntPtr fd_runtime_option_wrapper, string nnadapter_model_cache_dir);
[DllImport(
"fastdeploy.dll",
EntryPoint =
"FD_C_RuntimeOptionWrapperSetLiteMixedPrecisionQuantizationConfigPath")]
private static extern void
FD_C_RuntimeOptionWrapperSetLiteMixedPrecisionQuantizationConfigPath(
IntPtr fd_runtime_option_wrapper,
string nnadapter_mixed_precision_quantization_config_path);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperEnableLiteFP16")]
private static extern void
FD_C_RuntimeOptionWrapperEnableLiteFP16(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperDisableLiteFP16")]
private static extern void
FD_C_RuntimeOptionWrapperDisableLiteFP16(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperEnableLiteInt8")]
private static extern void
FD_C_RuntimeOptionWrapperEnableLiteInt8(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperDisableLiteInt8")]
private static extern void
FD_C_RuntimeOptionWrapperDisableLiteInt8(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetLitePowerMode")]
private static extern void
FD_C_RuntimeOptionWrapperSetLitePowerMode(IntPtr fd_runtime_option_wrapper,
LitePowerMode mode);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperEnableTrtFP16")]
private static extern void
FD_C_RuntimeOptionWrapperEnableTrtFP16(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperDisableTrtFP16")]
private static extern void
FD_C_RuntimeOptionWrapperDisableTrtFP16(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetTrtCacheFile")]
private static extern void
FD_C_RuntimeOptionWrapperSetTrtCacheFile(IntPtr fd_runtime_option_wrapper,
string cache_file_path);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperEnablePinnedMemory")]
private static extern void
FD_C_RuntimeOptionWrapperEnablePinnedMemory(IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperDisablePinnedMemory")]
private static extern void FD_C_RuntimeOptionWrapperDisablePinnedMemory(
IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint =
"FD_C_RuntimeOptionWrapperEnablePaddleTrtCollectShape")]
private static extern void
FD_C_RuntimeOptionWrapperEnablePaddleTrtCollectShape(
IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint =
"FD_C_RuntimeOptionWrapperDisablePaddleTrtCollectShape")]
private static extern void
FD_C_RuntimeOptionWrapperDisablePaddleTrtCollectShape(
IntPtr fd_runtime_option_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_RuntimeOptionWrapperSetOpenVINOStreams")]
private static extern void
FD_C_RuntimeOptionWrapperSetOpenVINOStreams(IntPtr fd_runtime_option_wrapper,
int num_streams);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_RuntimeOptionWrapperUseIpu")]
private static extern void
FD_C_RuntimeOptionWrapperUseIpu(IntPtr fd_runtime_option_wrapper,
int device_num, int micro_batch_size,
bool enable_pipelining, int batches_per_step);
}
}

View File

@@ -0,0 +1,145 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using fastdeploy.vision;
namespace fastdeploy {
namespace types_internal_c {
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayUint8 {
public nuint size;
public IntPtr data; // byte[]
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayInt32 {
public nuint size;
public IntPtr data; // int[]
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArraySize {
public nuint size;
public IntPtr data; // nuint[]
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayInt64 {
public nuint size;
public IntPtr data; // long[]
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayFloat {
public nuint size;
public IntPtr data; // float[]
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_Cstr {
public nuint size;
public string data;
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimArrayCstr {
public nuint size;
public IntPtr data; // FD_Cstr[]
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_TwoDimArraySize {
public nuint size;
public IntPtr data; // FD_OneDimArraySize[]
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_TwoDimArrayFloat {
public nuint size;
public IntPtr data; // FD_OneDimArrayFloat[]
}
public enum FD_ResultType {
UNKNOWN_RESULT,
CLASSIFY,
DETECTION,
SEGMENTATION,
OCR,
MOT,
FACE_DETECTION,
FACE_ALIGNMENT,
FACE_RECOGNITION,
MATTING,
MASK,
KEYPOINT_DETECTION,
HEADPOSE
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_ClassifyResult {
public FD_OneDimArrayInt32 label_ids;
public FD_OneDimArrayFloat scores;
public FD_ResultType type;
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimClassifyResult {
public nuint size;
public IntPtr data; // FD_ClassifyResult[]
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_Mask {
public FD_OneDimArrayUint8 data;
public FD_OneDimArrayInt64 shape;
public FD_ResultType type;
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimMask {
public nint size;
public IntPtr data; // FD_Mask*
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_DetectionResult {
public FD_TwoDimArrayFloat boxes;
public FD_OneDimArrayFloat scores;
public FD_OneDimArrayInt32 label_ids;
public FD_OneDimMask masks;
[MarshalAs(UnmanagedType.U1)]
public bool contain_masks;
public FD_ResultType type;
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimDetectionResult {
public nuint size;
public IntPtr data; // FD_DetectionResult[]
}
[StructLayout(LayoutKind.Sequential)]
public struct FD_OneDimMat {
public nuint size;
public IntPtr data; // Mat[]
}
}
}

View File

@@ -0,0 +1,141 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using System.Collections.Generic;
using OpenCvSharp;
using fastdeploy.types_internal_c;
namespace fastdeploy {
namespace vision {
namespace classification {
public class PaddleClasModel {
public PaddleClasModel(string model_file, string params_file,
string config_file, RuntimeOption custom_option = null,
ModelFormat model_format = ModelFormat.PADDLE) {
if (custom_option == null) {
custom_option = new RuntimeOption();
}
fd_paddleclas_model_wrapper = FD_C_CreatePaddleClasModelWrapper(
model_file, params_file, config_file, custom_option.GetWrapperPtr(),
model_format);
}
~PaddleClasModel() {
FD_C_DestroyPaddleClasModelWrapper(fd_paddleclas_model_wrapper);
}
public string ModelName() {
return "PaddleClas/Model";
}
public ClassifyResult Predict(Mat img) {
FD_ClassifyResult fd_classify_result = new FD_ClassifyResult();
if (!FD_C_PaddleClasModelWrapperPredict(
fd_paddleclas_model_wrapper, img.CvPtr,
ref fd_classify_result)) {
return null;
}
ClassifyResult classify_result =
ConvertResult.ConvertCResultToClassifyResult(fd_classify_result);
return classify_result;
}
public List<ClassifyResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
// Copy data to unmanaged memory
IntPtr[] mat_ptrs = new IntPtr[imgs_in.size];
for(int i=0;i < (int)imgs.Count; i++){
mat_ptrs[i] = imgs[i].CvPtr;
}
int size = Marshal.SizeOf(new IntPtr()) * (int)imgs_in.size;
imgs_in.data = Marshal.AllocHGlobal(size);
Marshal.Copy(mat_ptrs, 0, imgs_in.data,
mat_ptrs.Length);
FD_OneDimClassifyResult fd_classify_result_array = new FD_OneDimClassifyResult();
if (!FD_C_PaddleClasModelWrapperBatchPredict(fd_paddleclas_model_wrapper, ref imgs_in, ref fd_classify_result_array)){
return null;
}
List<ClassifyResult> results_out = new List<ClassifyResult>();
for(int i=0;i < (int)imgs.Count; i++){
FD_ClassifyResult fd_classify_result = (FD_ClassifyResult)Marshal.PtrToStructure(
fd_classify_result_array.data + i * Marshal.SizeOf(new FD_ClassifyResult()),
typeof(FD_ClassifyResult));
results_out.Add(ConvertResult.ConvertCResultToClassifyResult(fd_classify_result));
}
return results_out;
}
public bool Initialized() {
return FD_C_PaddleClasModelWrapperInitialized(fd_paddleclas_model_wrapper);
}
// below are underlying C api
private IntPtr fd_paddleclas_model_wrapper;
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_CreatePaddleClasModelWrapper")]
private static extern IntPtr FD_C_CreatePaddleClasModelWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_DestroyPaddleClasModelWrapper")]
private static extern void
FD_C_DestroyPaddleClasModelWrapper(IntPtr fd_paddleclas_model_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_PaddleClasModelWrapperPredict")]
private static extern bool
FD_C_PaddleClasModelWrapperPredict(IntPtr fd_paddleclas_model_wrapper,
IntPtr img,
ref FD_ClassifyResult fd_classify_result);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateClassifyResultWrapper")]
private static extern IntPtr FD_C_CreateClassifyResultWrapper();
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_DestroyClassifyResultWrapper")]
private static extern void
FD_C_DestroyClassifyResultWrapper(IntPtr fd_classify_result_wrapper);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyClassifyResult")]
private static extern void
FD_C_DestroyClassifyResult(IntPtr fd_classify_result);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_ClassifyResultWrapperGetData")]
private static extern IntPtr
FD_C_ClassifyResultWrapperGetData(IntPtr fd_classify_result_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_CreateClassifyResultWrapperFromData")]
private static extern IntPtr
FD_C_CreateClassifyResultWrapperFromData(IntPtr fd_classify_result);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_PaddleClasModelWrapperInitialized")]
private static extern bool
FD_C_PaddleClasModelWrapperInitialized(IntPtr fd_paddleclas_model_wrapper);
[DllImport("fastdeploy.dll",
EntryPoint = "FD_C_PaddleClasModelWrapperBatchPredict")]
private static extern bool
FD_C_PaddleClasModelWrapperBatchPredict(IntPtr fd_paddleclas_model_wrapper,
ref FD_OneDimMat imgs,
ref FD_OneDimClassifyResult results);
}
}
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,359 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using System.Collections.Generic;
using fastdeploy.types_internal_c;
namespace fastdeploy {
namespace vision {
public enum ResultType {
UNKNOWN_RESULT,
CLASSIFY,
DETECTION,
SEGMENTATION,
OCR,
MOT,
FACE_DETECTION,
FACE_ALIGNMENT,
FACE_RECOGNITION,
MATTING,
MASK,
KEYPOINT_DETECTION,
HEADPOSE
}
public class Mask {
public List<byte> data;
public List<long> shape;
public ResultType type;
public Mask() {
this.data = new List<byte>();
this.shape = new List<long>();
this.type = ResultType.MASK;
}
public override string ToString() {
string information = "Mask(";
int ndim = this.shape.Count;
for (int i = 0; i < ndim; i++) {
if (i < ndim - 1) {
information += this.shape[i].ToString() + ",";
} else {
information += this.shape[i].ToString();
}
}
information += ")\n";
return information;
}
}
public class ClassifyResult {
public List<int> label_ids;
public List<float> scores;
public ResultType type;
public ClassifyResult() {
this.label_ids = new List<int>();
this.scores = new List<float>();
this.type = ResultType.CLASSIFY;
}
public override string ToString() {
string information;
information = "ClassifyResult(\nlabel_ids: ";
for (int i = 0; i < label_ids.Count; i++) {
information = information + label_ids[i].ToString() + ", ";
}
information += "\nscores: ";
for (int i = 0; i < scores.Count; i++) {
information = information + scores[i].ToString() + ", ";
}
information += "\n)";
return information;
}
}
public class DetectionResult {
public List<float[]> boxes;
public List<float> scores;
public List<int> label_ids;
public List<Mask> masks;
public bool contain_masks;
public ResultType type;
public DetectionResult() {
this.boxes = new List<float[]>();
this.scores = new List<float>();
this.label_ids = new List<int>();
this.masks = new List<Mask>();
this.contain_masks = false;
this.type = ResultType.DETECTION;
}
public override string ToString() {
string information;
if (!contain_masks) {
information = "DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]\n";
} else {
information =
"DetectionResult: [xmin, ymin, xmax, ymax, score, label_id, mask_shape]\n";
}
for (int i = 0; i < boxes.Count; i++) {
information = information + boxes[i][0].ToString() + "," +
boxes[i][1].ToString() + ", " + boxes[i][2].ToString() +
", " + boxes[i][3].ToString() + ", " +
scores[i].ToString() + ", " + label_ids[i].ToString();
if (!contain_masks) {
information += "\n";
} else {
information += ", " + masks[i].ToString();
}
}
return information;
}
}
public class ConvertResult {
public static FD_ClassifyResult
ConvertClassifyResultToCResult(ClassifyResult classify_result) {
FD_ClassifyResult fd_classify_result = new FD_ClassifyResult();
// copy label_ids
// Create a managed array
fd_classify_result.label_ids.size = (uint)classify_result.label_ids.Count;
int[] label_ids = new int[fd_classify_result.label_ids.size];
// Copy data from Link to Array
classify_result.label_ids.CopyTo(label_ids);
// Copy data to unmanaged memory
int size = Marshal.SizeOf(label_ids[0]) * label_ids.Length;
fd_classify_result.label_ids.data = Marshal.AllocHGlobal(size);
Marshal.Copy(label_ids, 0, fd_classify_result.label_ids.data,
label_ids.Length);
// copy scores
// Create a managed array
fd_classify_result.scores.size = (uint)classify_result.scores.Count;
float[] scores = new float[fd_classify_result.scores.size];
// Copy data from Link to Array
classify_result.scores.CopyTo(scores);
// Copy data to unmanaged memory
size = Marshal.SizeOf(scores[0]) * scores.Length;
fd_classify_result.scores.data = Marshal.AllocHGlobal(size);
Marshal.Copy(scores, 0, fd_classify_result.scores.data, scores.Length);
fd_classify_result.type = (FD_ResultType)classify_result.type;
return fd_classify_result;
}
public static ClassifyResult
ConvertCResultToClassifyResult(FD_ClassifyResult fd_classify_result) {
ClassifyResult classify_result = new ClassifyResult();
// copy label_ids
int[] label_ids = new int[fd_classify_result.label_ids.size];
Marshal.Copy(fd_classify_result.label_ids.data, label_ids, 0,
label_ids.Length);
classify_result.label_ids = new List<int>(label_ids);
// copy scores
float[] scores = new float[fd_classify_result.scores.size];
Marshal.Copy(fd_classify_result.scores.data, scores, 0, scores.Length);
classify_result.scores = new List<float>(scores);
classify_result.type = (ResultType)fd_classify_result.type;
return classify_result;
}
public static FD_DetectionResult
ConvertDetectionResultToCResult(DetectionResult detection_result) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
// copy boxes
int boxes_coordinate_dim = 4;
int size;
fd_detection_result.boxes.size = (uint)detection_result.boxes.Count;
FD_OneDimArraySize[] boxes =
new FD_OneDimArraySize[fd_detection_result.boxes.size];
// Copy each box
for (int i = 0; i < (int)fd_detection_result.boxes.size; i++) {
boxes[i].size = (uint)detection_result.boxes[i].Length;
float[] boxes_i = new float[boxes_coordinate_dim];
detection_result.boxes[i].CopyTo(boxes_i, 0);
size = Marshal.SizeOf(boxes_i[0]) * boxes_i.Length;
boxes[i].data = Marshal.AllocHGlobal(size);
Marshal.Copy(boxes_i, 0, boxes[i].data, boxes_i.Length);
}
// Copy data to unmanaged memory
size = Marshal.SizeOf(boxes[0]) * boxes.Length;
fd_detection_result.boxes.data = Marshal.AllocHGlobal(size);
for (int i = 0; i < boxes.Length; i++) {
Marshal.StructureToPtr(
boxes[i],
fd_detection_result.boxes.data + i * Marshal.SizeOf(boxes[0]), true);
}
// copy scores
fd_detection_result.scores.size = (uint)detection_result.scores.Count;
float[] scores = new float[fd_detection_result.scores.size];
// Copy data from Link to Array
detection_result.scores.CopyTo(scores);
// Copy data to unmanaged memory
size = Marshal.SizeOf(scores[0]) * scores.Length;
fd_detection_result.scores.data = Marshal.AllocHGlobal(size);
Marshal.Copy(scores, 0, fd_detection_result.scores.data, scores.Length);
// copy label_ids
fd_detection_result.label_ids.size = (uint)detection_result.label_ids.Count;
int[] label_ids = new int[fd_detection_result.label_ids.size];
// Copy data from Link to Array
detection_result.label_ids.CopyTo(label_ids);
// Copy data to unmanaged memory
size = Marshal.SizeOf(label_ids[0]) * label_ids.Length;
fd_detection_result.label_ids.data = Marshal.AllocHGlobal(size);
Marshal.Copy(label_ids, 0, fd_detection_result.label_ids.data,
label_ids.Length);
// copy masks
fd_detection_result.masks.size = detection_result.masks.Count;
FD_Mask[] masks = new FD_Mask[fd_detection_result.masks.size];
// copy each mask
for (int i = 0; i < (int)fd_detection_result.masks.size; i++) {
// copy data in mask
masks[i].data.size = (uint)detection_result.masks[i].data.Count;
byte[] masks_data_i = new byte[masks[i].data.size];
detection_result.masks[i].data.CopyTo(masks_data_i);
size = Marshal.SizeOf(masks_data_i[0]) * masks_data_i.Length;
masks[i].data.data = Marshal.AllocHGlobal(size);
Marshal.Copy(masks_data_i, 0, masks[i].data.data, masks_data_i.Length);
// copy shape in mask
masks[i].shape.size = (uint)detection_result.masks[i].shape.Count;
long[] masks_shape_i = new long[masks[i].shape.size];
detection_result.masks[i].shape.CopyTo(masks_shape_i);
size = Marshal.SizeOf(masks_shape_i[0]) * masks_shape_i.Length;
masks[i].shape.data = Marshal.AllocHGlobal(size);
Marshal.Copy(masks_shape_i, 0, masks[i].shape.data, masks_shape_i.Length);
// copy type
masks[i].type = (FD_ResultType)detection_result.masks[i].type;
}
if (fd_detection_result.masks.size != 0) {
size = Marshal.SizeOf(masks[0]) * masks.Length;
fd_detection_result.masks.data = Marshal.AllocHGlobal(size);
for (int i = 0; i < masks.Length; i++) {
Marshal.StructureToPtr(masks[i],
fd_detection_result.masks.data +
i * Marshal.SizeOf(masks[0]),
true);
}
}
fd_detection_result.contain_masks = detection_result.contain_masks;
fd_detection_result.type = (FD_ResultType)detection_result.type;
return fd_detection_result;
}
public static DetectionResult
ConvertCResultToDetectionResult(FD_DetectionResult fd_detection_result) {
DetectionResult detection_result = new DetectionResult();
// copy boxes
detection_result.boxes = new List<float[]>();
FD_OneDimArraySize[] boxes =
new FD_OneDimArraySize[fd_detection_result.boxes.size];
for (int i = 0; i < (int)fd_detection_result.boxes.size; i++) {
boxes[i] = (FD_OneDimArraySize)Marshal.PtrToStructure(
fd_detection_result.boxes.data + i * Marshal.SizeOf(boxes[0]),
typeof(FD_OneDimArraySize));
float[] box_i = new float[boxes[i].size];
Marshal.Copy(boxes[i].data, box_i, 0, box_i.Length);
detection_result.boxes.Add(box_i);
}
// copy scores
float[] scores = new float[fd_detection_result.scores.size];
Marshal.Copy(fd_detection_result.scores.data, scores, 0, scores.Length);
detection_result.scores = new List<float>(scores);
// copy label_ids
int[] label_ids = new int[fd_detection_result.label_ids.size];
Marshal.Copy(fd_detection_result.label_ids.data, label_ids, 0,
label_ids.Length);
detection_result.label_ids = new List<int>(label_ids);
// copy masks
detection_result.masks = new List<Mask>();
FD_Mask[] fd_masks = new FD_Mask[fd_detection_result.masks.size];
for (int i = 0; i < (int)fd_detection_result.masks.size; i++) {
fd_masks[i] = (FD_Mask)Marshal.PtrToStructure(
fd_detection_result.masks.data + i * Marshal.SizeOf(fd_masks[0]),
typeof(FD_Mask));
Mask mask_i = new Mask();
byte[] mask_i_data = new byte[fd_masks[i].data.size];
Marshal.Copy(fd_masks[i].data.data, mask_i_data, 0, mask_i_data.Length);
long[] mask_i_shape = new long[fd_masks[i].shape.size];
Marshal.Copy(fd_masks[i].shape.data, mask_i_shape, 0,
mask_i_shape.Length);
// Attach the copied mask payload and shape to the managed result.
mask_i.data = new List<byte>(mask_i_data);
mask_i.shape = new List<long>(mask_i_shape);
mask_i.type = (ResultType)fd_masks[i].type;
detection_result.masks.Add(mask_i);
}
detection_result.contain_masks = fd_detection_result.contain_masks;
detection_result.type = (ResultType)fd_detection_result.type;
return detection_result;
}
public static FD_OneDimArrayCstr
ConvertStringArrayToCOneDimArrayCstr(string[] strs){
FD_OneDimArrayCstr fd_one_dim_cstr = new FD_OneDimArrayCstr();
fd_one_dim_cstr.size = (nuint)strs.Length;
// Copy data to unmanaged memory
FD_Cstr[] c_strs = new FD_Cstr[strs.Length];
int size = Marshal.SizeOf(c_strs[0]) * c_strs.Length;
fd_one_dim_cstr.data = Marshal.AllocHGlobal(size);
for (int i = 0; i < strs.Length; i++) {
c_strs[i].size = (nuint)strs[i].Length;
c_strs[i].data = strs[i];
Marshal.StructureToPtr(
c_strs[i],
fd_one_dim_cstr.data + i * Marshal.SizeOf(c_strs[0]), true);
}
return fd_one_dim_cstr;
}
public static string[]
ConvertCOneDimArrayCstrToStringArray(FD_OneDimArrayCstr c_strs){
string[] strs = new string[c_strs.size];
for(int i=0; i<(int)c_strs.size; i++){
FD_Cstr cstr = (FD_Cstr)Marshal.PtrToStructure(
c_strs.data + i * Marshal.SizeOf(new FD_Cstr()),
typeof(FD_Cstr));
strs[i] = cstr.data;
}
return strs;
}
}
}
}

View File

@@ -0,0 +1,69 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using System.Collections.Generic;
using OpenCvSharp;
using fastdeploy.types_internal_c;
namespace fastdeploy {
namespace vision {
public class Visualize {
public static Mat VisDetection(Mat im, DetectionResult detection_result,
float score_threshold = 0.0f,
int line_size = 1, float font_size = 0.5f) {
FD_DetectionResult fd_detection_result =
ConvertResult.ConvertDetectionResultToCResult(detection_result);
IntPtr result_ptr =
FD_C_VisDetection(im.CvPtr, ref fd_detection_result, score_threshold,
line_size, font_size);
return new Mat(result_ptr);
}
public static Mat VisDetection(Mat im, DetectionResult detection_result,
string[] labels,
float score_threshold = 0.0f,
int line_size = 1, float font_size = 0.5f) {
FD_DetectionResult fd_detection_result =
ConvertResult.ConvertDetectionResultToCResult(detection_result);
FD_OneDimArrayCstr labels_in = ConvertResult.ConvertStringArrayToCOneDimArrayCstr(labels);
IntPtr result_ptr =
FD_C_VisDetectionWithLabel(im.CvPtr, ref fd_detection_result,
ref labels_in, score_threshold,
line_size, font_size);
return new Mat(result_ptr);
}
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_VisDetection")]
private static extern IntPtr
FD_C_VisDetection(IntPtr im, ref FD_DetectionResult fd_detection_result,
float score_threshold, int line_size, float font_size);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_VisDetectionWithLabel")]
private static extern IntPtr
FD_C_VisDetectionWithLabel(IntPtr im, ref FD_DetectionResult fd_detection_result,
ref FD_OneDimArrayCstr labels,
float score_threshold, int line_size, float font_size);
}
}
}

View File

@@ -13,6 +13,7 @@
- [Build and install the deployment environment for Rockchip RK3588/RK356X](cn/build_and_install/rknpu2.md)
- [Build and install the deployment environment for Amlogic A311D](cn/build_and_install/a311d.md)
- [Build and install the deployment environment for Huawei Ascend](cn/build_and_install/huawei_ascend.md)
- [Build and install the deployment environment for Sophgo TPU](cn/build_and_install/sophgo.md)
- [Build and install the deployment environment for Jetson](cn/build_and_install/jetson.md)
- [Build and install the deployment environment for Android](cn/build_and_install/android.md)
- [Build and install the serving deployment docker image](../serving/docs/zh_CN/compile.md)

View File

@@ -1,35 +1,27 @@
Simplified Chinese | [English](face_detection_result.md)
# FaceDetectionResult (face detection result)
[English](face_alignment_result.md) | Simplified Chinese
FaceDetectionResult is defined in `fastdeploy/vision/common/result.h` and describes the detected face boxes, the face landmarks, the confidence of each target, and the number of landmarks per face.
# FaceAlignmentResult (face alignment / facial landmark detection result)
FaceAlignmentResult is defined in `fastdeploy/vision/common/result.h` and describes the detected face landmarks.
## C++ definition
`fastdeploy::vision::FaceDetectionResult`
`fastdeploy::vision::FaceAlignmentResult`
```c++
struct FaceDetectionResult {
std::vector<std::array<float, 4>> boxes;
struct FaceAlignmentResult {
std::vector<std::array<float, 2>> landmarks;
std::vector<float> scores;
int landmarks_per_face;
void Clear();
std::string Str();
};
```
- **boxes**: member variable holding the coordinates of all detected boxes in a single image; `boxes.size()` is the number of boxes, and each box is four floats (xmin, ymin, xmax, ymax), i.e. the top-left and bottom-right corners
- **scores**: member variable holding the confidence of every detected target in a single image; its element count matches `boxes.size()`
- **landmarks**: member variable holding the key points of all faces detected in a single image; its element count matches `boxes.size()`
- **landmarks_per_face**: member variable holding the number of key points in each face box.
- **landmarks**: member variable holding all key points detected in a single face image
- **Clear()**: member function that clears the results stored in the struct
- **Str()**: member function that dumps the information in the struct as a string (for debugging)
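For illustration, a minimal C++ sketch of reading the struct above; `res` and the `PrintLandmarks` helper are assumptions, with `res` filled in beforehand by a face alignment model:

```c++
#include <iostream>
#include "fastdeploy/vision/common/result.h"

// 'res' is an assumed, already-populated result.
void PrintLandmarks(const fastdeploy::vision::FaceAlignmentResult& res) {
  for (const auto& pt : res.landmarks) {
    std::cout << "landmark: (" << pt[0] << ", " << pt[1] << ")\n";  // (x, y) pair
  }
}
```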
## Python definition
`fastdeploy.vision.FaceDetectionResult`
`fastdeploy.vision.FaceAlignmentResult`
- **boxes**(list of list(float)): member variable holding the coordinates of all detected boxes in a single image; boxes is a list whose elements are lists of four floats (xmin, ymin, xmax, ymax), i.e. the top-left and bottom-right corners
- **scores**(list of float): member variable holding the confidence of every detected target in a single image
- **landmarks**(list of list(float)): member variable holding the key points of all faces detected in a single image
- **landmarks_per_face**(int): member variable holding the number of key points in each face box.
- **landmarks**(list of list(float)): member variable holding all key points detected in a single face image

View File

@@ -19,7 +19,7 @@ struct SegmentationResult {
```
- **label_map**: Member variable which indicates the segmentation category of each pixel in a single image. `label_map.size()` indicates the number of pixel points of a image.
- **score_map**: Member variable which indicates the predicted probability for each entry of label_map (when the model is exported with `--output_op argmax`), or the softmax-normalized probability (when exported with `--output_op softmax`, or with `--output_op none` while setting the [class member attribute](../../../examples/vision/segmentation/paddleseg/cpp/) `apply_softmax=True` at model initialization).
- **score_map**: Member variable holding the predicted probability for each entry of label_map, one-to-one; it is non-empty only when the PaddleSeg model is exported with `--output_op none`, and empty otherwise.
- **shape**: Member variable which indicates the shape of the output image as H\*W.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
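For illustration, a minimal C++ sketch of how the two maps line up element-wise; `res` and the `CountLabel` helper are assumptions, with `res` taken from a PaddleSeg model exported with `--output_op none`:

```c++
#include <cstdint>
#include <iostream>
#include "fastdeploy/vision/common/result.h"

// Counts the pixels assigned to 'target_label'; 'res' is an assumed, populated result.
void CountLabel(const fastdeploy::vision::SegmentationResult& res,
                uint8_t target_label) {
  std::size_t hits = 0;
  for (std::size_t i = 0; i < res.label_map.size(); ++i) {
    // When contain_score_map is true, score_map[i] is the probability for pixel i.
    if (res.label_map[i] == target_label) ++hits;
  }
  std::cout << hits << " of " << res.label_map.size() << " pixels\n";
}
```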
@@ -29,5 +29,5 @@ struct SegmentationResult {
`fastdeploy.vision.SegmentationResult`
- **label_map**(list of int): Member variable which indicates the segmentation category of each pixel in a single image.
- **score_map**(list of float): Member variable which indicates the predicted probability for each entry of label_map (when the model is exported with `--output_op argmax`), or the softmax-normalized probability (when exported with `--output_op softmax`, or with `--output_op none` while setting the [class member attribute](../../../examples/vision/segmentation/paddleseg/cpp/) `apply_softmax=True` at model initialization).
- **score_map**(list of float): Member variable holding the predicted probability for each entry of label_map, one-to-one; it is non-empty only when the PaddleSeg model is exported with `--output_op none`, and empty otherwise.
- **shape**(list of int): Member variable which indicates the shape of the output image as H\*W.

View File

@@ -14,14 +14,16 @@ struct SegmentationResult {
std::vector<int64_t> shape;
bool contain_score_map = false;
void Clear();
void Free();
std::string Str();
};
```
- **label_map**: member variable holding the segmentation category of every pixel in a single image; `label_map.size()` is the number of pixels
- **score_map**: member variable holding, one-to-one with label_map, the predicted probability of each segmentation category (when the model is exported with `--output_op argmax`) or the softmax-normalized probability (when exported with `--output_op softmax`, or with `--output_op none` while setting the model's [class member attribute](../../../examples/vision/segmentation/paddleseg/cpp/) `apply_softmax=True` at initialization)
- **score_map**: member variable holding, one-to-one with label_map, the predicted probability of each segmentation category; it is non-empty only when the PaddleSeg model is exported with `--output_op none`, and empty otherwise
- **shape**: member variable indicating the shape of the output image, H\*W
- **Clear()**: member function that clears the results stored in the struct
- **Free()**: member function that clears the stored results and releases the memory
- **Str()**: member function that dumps the information in the struct as a string (for debugging)
## Python definition
@@ -29,5 +31,5 @@ struct SegmentationResult {
`fastdeploy.vision.SegmentationResult`
- **label_map**(list of int): member variable holding the segmentation category of every pixel in a single image
- **score_map**(list of float): member variable holding, one-to-one with label_map, the predicted probability of each segmentation category (when the model is exported with `--output_op argmax`) or the softmax-normalized probability (when exported with `--output_op softmax`, or with `--output_op none` while setting the model's [class member attribute](../../../examples/vision/segmentation/paddleseg/python/) `apply_softmax=true` at initialization)
- **score_map**(list of float): member variable holding, one-to-one with label_map, the predicted probability of each segmentation category; it is non-empty only when the PaddleSeg model is exported with `--output_op none`, and empty otherwise
- **shape**(list of int): member variable indicating the shape of the output image, H\*W

View File

@@ -0,0 +1,324 @@
[English](./vision_results_en.md) | Simplified Chinese
# Description of Vision Model Prediction Results
## ClassifyResult (image classification result)
ClassifyResult is defined in `fastdeploy/vision/common/result.h` and describes the classification result and confidence of an image.
### C++ definition
```c++
fastdeploy::vision::ClassifyResult
```
```c++
struct ClassifyResult {
std::vector<int32_t> label_ids;
std::vector<float> scores;
void Clear();
std::string Str();
};
```
- **label_ids**: member variable holding the classification results of a single image; its element count is determined by the `topk` passed to the classification model, e.g. the top 5 categories can be returned
- **scores**: member variable holding the confidence of the corresponding classification results; its element count is likewise determined by `topk`
- **Clear()**: member function that clears the results stored in the struct
- **Str()**: member function that dumps the information in the struct as a string (for debugging)
## SegmentationResult (Image Segmentation Result)
The SegmentationResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the segmentation category predicted for each pixel in the image and the probability of that category.
### C++ Definition
```c++
fastdeploy::vision::SegmentationResult
```
```c++
struct SegmentationResult {
std::vector<uint8_t> label_map;
std::vector<float> score_map;
std::vector<int64_t> shape;
bool contain_score_map = false;
void Clear();
void Free();
std::string Str();
};
```
- **label_map**: Member variable which indicates the segmentation category of each pixel in a single image; `label_map.size()` is the number of pixels in the image
- **score_map**: Member variable which indicates the predicted segmentation category probability values corresponding one-to-one with label_map; this member variable is non-empty only when `--output_op none` is specified when exporting the PaddleSeg model, otherwise it is empty
- **shape**: Member variable which indicates the shape of the output image as H\*W
- **Clear()**: Member function used to clear the results stored in the structure
- **Free()**: Member function used to clear the results stored in the structure and release the memory
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
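As a minimal illustration (a hypothetical helper, not part of FastDeploy), the sketch below reads the predicted category and, when available, the probability of a single pixel from a populated result; the row-major index `h * W + w` follows from the shape being H\*W:
```c++
#include <iostream>
#include "fastdeploy/vision.h"

// Hypothetical helper: print the predicted class of pixel (h, w) and, if
// score_map was filled (model exported with --output_op none), its probability.
void PrintPixel(const fastdeploy::vision::SegmentationResult& res,
                int64_t h, int64_t w) {
  int64_t W = res.shape[1];  // shape is {H, W}
  int64_t idx = h * W + w;   // row-major index into the flat maps
  std::cout << "class=" << static_cast<int>(res.label_map[idx]);
  if (res.contain_score_map) {
    std::cout << " score=" << res.score_map[idx];
  }
  std::cout << std::endl;
}
```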
## DetectionResult (Object Detection Result)
The DetectionResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the target boxes, target categories and target confidences detected in an image.
### C++ Definition
```c++
fastdeploy::vision::DetectionResult
```
```c++
struct DetectionResult {
std::vector<std::array<float, 4>> boxes;
std::vector<float> scores;
std::vector<int32_t> label_ids;
std::vector<Mask> masks;
bool contain_masks = false;
void Clear();
std::string Str();
};
```
- **boxes**: Member variable which indicates the coordinates of all target boxes detected in a single image; `boxes.size()` is the number of boxes, and each box is represented by 4 float values in the order xmin, ymin, xmax, ymax, i.e. the top-left and bottom-right coordinates
- **scores**: Member variable which indicates the confidences of all targets detected in a single image; its element count is the same as `boxes.size()`
- **label_ids**: Member variable which indicates all target categories detected in a single image; its element count is the same as `boxes.size()`
- **masks**: Member variable which indicates all instance masks detected in a single image; its element count and shape are the same as `boxes`
- **contain_masks**: Member variable which indicates whether the detection result contains instance masks; this is generally true for instance segmentation models
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
```c++
fastdeploy::vision::Mask
```
```c++
struct Mask {
std::vector<int32_t> data;
std::vector<int64_t> shape; // (H,W) ...
void Clear();
std::string Str();
};
```
- **data**: Member variable which indicates one detected mask
- **shape**: Member variable which indicates the shape of the mask, e.g. (h, w)
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
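For illustration only, a small sketch (a hypothetical helper, not a FastDeploy API) that walks a populated DetectionResult and prints every box above a score threshold, relying solely on the members documented above:
```c++
#include <iostream>
#include "fastdeploy/vision.h"

// Hypothetical helper: print all detections above a score threshold.
// Boxes are stored as [xmin, ymin, xmax, ymax].
void PrintDetections(const fastdeploy::vision::DetectionResult& res,
                     float score_threshold = 0.5f) {
  for (size_t i = 0; i < res.boxes.size(); ++i) {
    if (res.scores[i] < score_threshold) continue;
    const auto& box = res.boxes[i];
    std::cout << "label=" << res.label_ids[i] << " score=" << res.scores[i]
              << " box=[" << box[0] << ", " << box[1] << ", " << box[2]
              << ", " << box[3] << "]" << std::endl;
  }
}
```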
## FaceAlignmentResult (Face Alignment / Facial Landmark Detection Result)
The FaceAlignmentResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate face landmarks.
### C++ Definition
```c++
fastdeploy::vision::FaceAlignmentResult
```
```c++
struct FaceAlignmentResult {
std::vector<std::array<float, 2>> landmarks;
void Clear();
std::string Str();
};
```
- **landmarks**: Member variable which indicates all the keypoints detected in a single face image
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
## KeyPointDetectionResult (Keypoint Detection Result)
The KeyPointDetectionResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the coordinates and confidence of each keypoint of the targets in an image.
### C++ Definition
```c++
fastdeploy::vision::KeyPointDetectionResult
```
```c++
struct KeyPointDetectionResult {
std::vector<std::array<float, 2>> keypoints;
std::vector<float> scores;
int num_joints = -1;
void Clear();
std::string Str();
};
```
- **keypoints**: Member variable which indicates the coordinates of the detected keypoints.
`keypoints.size() = N * J`
- `N`: number of targets in the image
- `J`: num_joints (number of keypoints per target)
- **scores**: Member variable which indicates the confidences of the detected keypoint coordinates.
`scores.size() = N * J`
- `N`: number of targets in the image
- `J`: num_joints (number of keypoints per target)
- **num_joints**: Member variable which indicates the number of keypoints per target
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
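Since keypoints and scores are stored flat with `keypoints.size() = N * J`, keypoint `j` of target `n` lives at index `n * num_joints + j`. A minimal sketch (hypothetical helper, not part of FastDeploy):
```c++
#include <iostream>
#include "fastdeploy/vision.h"

// Hypothetical helper: print keypoint j of target n from a populated result.
void PrintKeyPoint(const fastdeploy::vision::KeyPointDetectionResult& res,
                   int n, int j) {
  int idx = n * res.num_joints + j;     // flat index into keypoints/scores
  const auto& kp = res.keypoints[idx];  // {x, y}
  std::cout << "target " << n << ", joint " << j << ": (" << kp[0] << ", "
            << kp[1] << ") score=" << res.scores[idx] << std::endl;
}
```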
## FaceRecognitionResult (Face Recognition Result)
The FaceRecognitionResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the image feature embedding produced by a face recognition model.
### C++ Definition
```c++
fastdeploy::vision::FaceRecognitionResult
```
```c++
struct FaceRecognitionResult {
std::vector<float> embedding;
void Clear();
std::string Str();
};
```
- **embedding**: Member variable which indicates the final feature embedding extracted by the face recognition model, which can be used to compute the feature similarity between faces
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
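A common use of the embedding is to compute the cosine similarity between two faces. The helper below is an illustrative sketch, not a FastDeploy API; values closer to 1 indicate the two faces are more likely the same person:
```c++
#include <cmath>
#include <vector>

// Hypothetical helper: cosine similarity between two embeddings, e.g. the
// `embedding` members of two FaceRecognitionResult objects of equal length.
float CosineSimilarity(const std::vector<float>& a,
                       const std::vector<float>& b) {
  float dot = 0.f, norm_a = 0.f, norm_b = 0.f;
  for (size_t i = 0; i < a.size(); ++i) {
    dot += a[i] * b[i];
    norm_a += a[i] * a[i];
    norm_b += b[i] * b[i];
  }
  return dot / (std::sqrt(norm_a) * std::sqrt(norm_b) + 1e-12f);
}
```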
## MattingResult (Matting Result)
The MattingResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the predicted alpha transparency values, the predicted foreground, etc.
### C++ Definition
```c++
fastdeploy::vision::MattingResult
```
```c++
struct MattingResult {
std::vector<float> alpha;
std::vector<float> foreground;
std::vector<int64_t> shape;
bool contain_foreground = false;
void Clear();
std::string Str();
};
```
- **alpha**: One-dimensional vector of predicted alpha transparency values in the range [0., 1.], with length h×w, where h and w are the height and width of the input image
- **foreground**: One-dimensional vector of the predicted foreground in the range [0., 255.], with length h×w×c, where h and w are the height and width of the input image and c is generally 3; the foreground is not always present, and is valid only when the model itself predicts a foreground
- **contain_foreground**: Indicates whether the predicted result contains a foreground
- **shape**: Indicates the shape of the output; when contain_foreground is false, the shape contains only (h, w), and when contain_foreground is true, the shape contains (h, w, c), where c is generally 3
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
## OCRResult (OCR Prediction Result)
The OCRResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the text boxes detected and recognized in an image, the orientation classification of the text boxes, and the text content inside them.
### C++ Definition
```c++
fastdeploy::vision::OCRResult
```
```c++
struct OCRResult {
std::vector<std::array<int, 8>> boxes;
std::vector<std::string> text;
std::vector<float> rec_scores;
std::vector<float> cls_scores;
std::vector<int32_t> cls_labels;
ResultType type = ResultType::OCR;
void Clear();
std::string Str();
};
```
- **boxes**: Member variable which indicates the coordinates of all text boxes detected in a single image; `boxes.size()` is the number of boxes detected in the image, and each box is represented by 8 int values indicating its 4 corner points, in the order lower-left, lower-right, upper-right, upper-left
- **text**: Member variable which indicates the text content recognized in the text boxes; its element count is the same as `boxes.size()`
- **rec_scores**: Member variable which indicates the confidence of the text recognized in the text boxes; its element count is the same as `boxes.size()`
- **cls_scores**: Member variable which indicates the confidence of the classification results of the text boxes; its element count is the same as `boxes.size()`
- **cls_labels**: Member variable which indicates the orientation category of the text boxes; its element count is the same as `boxes.size()`
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
## FaceDetectionResult (Face Detection Result)
The FaceDetectionResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the detected target boxes, face landmarks, target confidences, and the number of landmarks per face.
### C++ Definition
```c++
fastdeploy::vision::FaceDetectionResult
```
```c++
struct FaceDetectionResult {
std::vector<std::array<float, 4>> boxes;
std::vector<std::array<float, 2>> landmarks;
std::vector<float> scores;
int landmarks_per_face;
void Clear();
std::string Str();
};
```
- **boxes**: Member variable which indicates the coordinates of all target boxes detected in a single image; `boxes.size()` is the number of boxes, and each box is represented by 4 float values in the order xmin, ymin, xmax, ymax, i.e. the top-left and bottom-right coordinates
- **scores**: Member variable which indicates the confidences of all targets detected in a single image; its element count is the same as `boxes.size()`
- **landmarks**: Member variable which indicates the keypoints of all faces detected in a single image; its element count is the same as `boxes.size()`
- **landmarks_per_face**: Member variable which indicates the number of keypoints in each face box
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
## HeadPoseResult (Head Pose Result)
The HeadPoseResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the head pose result.
### C++ Definition
```c++
fastdeploy::vision::HeadPoseResult
```
```c++
struct HeadPoseResult {
std::vector<float> euler_angles;
void Clear();
std::string Str();
};
```
- **euler_angles**: Member variable which indicates the Euler angles predicted for a single face image, stored in the order (yaw, pitch, roll); yaw is the horizontal rotation angle, pitch is the vertical angle, and roll is the roll angle, each in the range [-90, +90] degrees
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
API: `fastdeploy.vision.HeadPoseResult`. This result returns:
- **euler_angles**(list of float): Member variable which indicates the Euler angles predicted for a single face image, stored in the order (yaw, pitch, roll); yaw is the horizontal rotation angle, pitch is the vertical angle, and roll is the roll angle, each in the range [-90, +90] degrees
## MOTResult (Multi-Object Tracking Result)
The MOTResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the detected target boxes, target tracking ids, target categories and target confidences in multi-object tracking.
### C++ Definition
```c++
fastdeploy::vision::MOTResult
```
```c++
struct MOTResult{
// left top right bottom
std::vector<std::array<int, 4>> boxes;
std::vector<int> ids;
std::vector<float> scores;
std::vector<int> class_ids;
void Clear();
std::string Str();
};
```
- **boxes**: Member variable which indicates the coordinates of all target boxes detected in a single frame; `boxes.size()` is the number of boxes, and each box is represented by 4 float values in the order xmin, ymin, xmax, ymax, i.e. the top-left and bottom-right coordinates
- **ids**: Member variable which indicates the ids of all targets in a single frame; its element count is the same as `boxes.size()`
- **scores**: Member variable which indicates the confidences of all targets detected in a single frame; its element count is the same as `boxes.size()`
- **class_ids**: Member variable which indicates all target categories detected in a single frame; its element count is the same as `boxes.size()`
- **Clear()**: Member function used to clear the results stored in the structure
- **Str()**: Member function used to output the information in the structure as a string (for debugging)
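For illustration, a hypothetical helper (not a FastDeploy API) that prints every tracked object in one frame; ids, scores and class_ids all align with boxes index-by-index:
```c++
#include <iostream>
#include "fastdeploy/vision.h"

// Hypothetical helper: print each tracked object of a single frame.
void PrintTracks(const fastdeploy::vision::MOTResult& res) {
  for (size_t i = 0; i < res.boxes.size(); ++i) {
    const auto& box = res.boxes[i];
    std::cout << "track_id=" << res.ids[i] << " class=" << res.class_ids[i]
              << " score=" << res.scores[i] << " box=[" << box[0] << ", "
              << box[1] << ", " << box[2] << ", " << box[3] << "]"
              << std::endl;
  }
}
```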

View File

@@ -0,0 +1,290 @@
English | [简体中文](./vision_results_cn.md)
# Description of Vision Results
## Image Classification Result
The ClassifyResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the classification result and confidence level of the image.
### C++ Definition
```c++
fastdeploy::vision::ClassifyResult
```
```c++
struct ClassifyResult {
std::vector<int32_t> label_ids;
std::vector<float> scores;
void Clear();
std::string Str();
};
```
- **label_ids**: Member variable which indicates the classification results of a single image. Its number is determined by the topk passed in when using the classification model, e.g. it can return the top 5 classification results.
- **scores**: Member variable which indicates the confidence level of a single image on the corresponding classification result. Its number is determined by the topk passed in when using the classification model, e.g. it can return the top 5 classification confidence level.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
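As a minimal sketch (a hypothetical helper, not part of the FastDeploy API), the top-k labels and confidence levels can be read pairwise, since label_ids and scores align index-by-index:
```c++
#include <iostream>
#include "fastdeploy/vision.h"

// Hypothetical helper: print the top-k (label, score) pairs of a populated
// ClassifyResult; the vector length equals the topk used at prediction time.
void PrintTopK(const fastdeploy::vision::ClassifyResult& res) {
  for (size_t i = 0; i < res.label_ids.size(); ++i) {
    std::cout << "top-" << (i + 1) << ": label=" << res.label_ids[i]
              << " score=" << res.scores[i] << std::endl;
  }
}
```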
## Segmentation Result
The SegmentationResult code is defined in `fastdeploy/vision/common/result.h`, indicating the segmentation category and the corresponding probability predicted for each pixel in the image.
### C++ Definition
```c++
fastdeploy::vision::SegmentationResult
```
```c++
struct SegmentationResult {
std::vector<uint8_t> label_map;
std::vector<float> score_map;
std::vector<int64_t> shape;
bool contain_score_map = false;
void Clear();
std::string Str();
};
```
- **label_map**: Member variable which indicates the segmentation category of each pixel in a single image. `label_map.size()` indicates the number of pixels in the image.
- **score_map**: Member variable which indicates the predicted segmentation category probability values, corresponding one-to-one with label_map. This member variable is non-empty only when `--output_op none` is specified when exporting the PaddleSeg model; otherwise it is empty.
- **shape**: Member variable which indicates the shape of the output image as H\*W.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
## Target Detection Result
The DetectionResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the target frame, target class and target confidence level detected in the image.
### C++ Definition
```c++
fastdeploy::vision::DetectionResult
```
```c++
struct DetectionResult {
std::vector<std::array<float, 4>> boxes;
std::vector<float> scores;
std::vector<int32_t> label_ids;
std::vector<Mask> masks;
bool contain_masks = false;
void Clear();
std::string Str();
};
```
- **boxes**: Member variable which indicates the coordinates of all detected target boxes in a single image. `boxes.size()` indicates the number of boxes, each box is represented by 4 float values in order of xmin, ymin, xmax, ymax, i.e. the coordinates of the top left and bottom right corner.
- **scores**: Member variable which indicates the confidence level of all targets detected in a single image, where the number of elements is the same as `boxes.size()`.
- **label_ids**: Member variable which indicates all target categories detected in a single image, where the number of elements is the same as `boxes.size()`.
- **masks**: Member variable which indicates all detected instance masks of a single image, where the number of elements and the shape size are the same as `boxes`.
- **contain_masks**: Member variable which indicates whether the detected result contains instance masks, which is generally true for the instance segmentation model.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
```c++
fastdeploy::vision::Mask
```
```c++
struct Mask {
std::vector<int32_t> data;
std::vector<int64_t> shape; // (H,W) ...
void Clear();
std::string Str();
};
```
- **data**: Member variable which indicates a detected mask.
- **shape**: Member variable which indicates the shape of the mask, e.g. (h,w).
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
## Face Detection Result
The FaceDetectionResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the target frames, face landmarks, target confidence and the number of landmark per face.
### C++ Definition
```c++
fastdeploy::vision::FaceDetectionResult
```
```c++
struct FaceDetectionResult {
std::vector<std::array<float, 4>> boxes;
std::vector<std::array<float, 2>> landmarks;
std::vector<float> scores;
int landmarks_per_face;
void Clear();
std::string Str();
};
```
- **boxes**: Member variable which indicates the coordinates of all detected target boxes in a single image. `boxes.size()` indicates the number of boxes, each box is represented by 4 float values in order of xmin, ymin, xmax, ymax, i.e. the coordinates of the top left and bottom right corner.
- **scores**: Member variable which indicates the confidence level of all targets detected in a single image, where the number of elements is the same as `boxes.size()`.
- **landmarks**: Member variable which indicates the keypoints of all faces detected in a single image, where the number of elements is the same as `boxes.size()`.
- **landmarks_per_face**: Member variable which indicates the number of keypoints in each face box.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
## Keypoint Detection Result
The KeyPointDetectionResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the coordinates and confidence level of each keypoint of the target's behavior in the image.
### C++ Definition
```c++
fastdeploy::vision::KeyPointDetectionResult
```
```c++
struct KeyPointDetectionResult {
std::vector<std::array<float, 2>> keypoints;
std::vector<float> scores;
int num_joints = -1;
void Clear();
std::string Str();
};
```
- **keypoints**: Member variable which indicates the coordinates of the identified target behavior keypoint.
`keypoints.size() = N * J`:
- `N`: the number of targets in the image
- `J`: num_joints (the number of keypoints of a target)
- **scores**: Member variable which indicates the confidence level of the keypoint coordinates of the identified target behavior.
`scores.size() = N * J`:
- `N`: the number of targets in the image
- `J`: num_joints (the number of keypoints of a target)
- **num_joints**: Member variable which indicates the number of keypoints of a target.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
## Face Recognition Result
The FaceRecognitionResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the image features embedding in the face recognition model.
### C++ Definition
```c++
fastdeploy::vision::FaceRecognitionResult
```
```c++
struct FaceRecognitionResult {
std::vector<float> embedding;
void Clear();
std::string Str();
};
```
- **embedding**: Member variable which indicates the final extracted feature embedding of the face recognition model, and can be used to calculate the facial feature similarity.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
## Matting Result
The MattingResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the predicted value of alpha transparency predicted and the predicted foreground, etc.
### C++ Definition
```c++
fastdeploy::vision::MattingResult
```
```c++
struct MattingResult {
std::vector<float> alpha;
std::vector<float> foreground;
std::vector<int64_t> shape;
bool contain_foreground = false;
void Clear();
std::string Str();
};
```
- **alpha**: It is a one-dimensional vector, indicating the predicted value of alpha transparency. The value range is [0., 1.], and the length is h×w, in which h and w represent the height and the width of the input image respectively.
- **foreground**: It is a one-dimensional vector, indicating the predicted foreground. The value range is [0.,255.], and the length is hxwxc, in which h,w represent the height and the width of the input image, and c is generally 3. This vector is valid only when the model itself predicts the foreground.
- **contain_foreground**: Used to indicate whether the result contains foreground.
- **shape**: Used to indicate the shape of the output. When contain_foreground is false, the shape only contains (h,w), while when contain_foreground is true, the shape contains (h,w,c), in which c is generally 3.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
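As an illustrative sketch (assuming contain_foreground is true and c equals 3), the alpha and foreground vectors can be composited over a solid background color with the standard blending formula `out = alpha * fg + (1 - alpha) * bg`:
```c++
#include <vector>

// Hypothetical helper: composite a matting result over a solid background.
// alpha has length h*w; foreground has length h*w*3 (contain_foreground true).
std::vector<float> Composite(const std::vector<float>& alpha,
                             const std::vector<float>& foreground,
                             int h, int w, float bg_value = 255.f) {
  std::vector<float> out(static_cast<size_t>(h) * w * 3);
  for (int i = 0; i < h * w; ++i) {
    for (int c = 0; c < 3; ++c) {
      // Standard alpha blending: out = a * fg + (1 - a) * bg
      out[i * 3 + c] = alpha[i] * foreground[i * 3 + c] +
                       (1.f - alpha[i]) * bg_value;
    }
  }
  return out;
}
```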
## OCR prediction result
The OCRResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the text box detected in the image, text box orientation classification, and the text content.
### C++ Definition
```c++
fastdeploy::vision::OCRResult
```
```c++
struct OCRResult {
std::vector<std::array<int, 8>> boxes;
std::vector<std::string> text;
std::vector<float> rec_scores;
std::vector<float> cls_scores;
std::vector<int32_t> cls_labels;
ResultType type = ResultType::OCR;
void Clear();
std::string Str();
};
```
- **boxes**: Member variable which indicates the coordinates of all detected target boxes in a single image. `boxes.size()` indicates the number of detected boxes. Each box is represented by 8 int values indicating its 4 corner points, in the order of lower left, lower right, upper right, upper left.
- **text**: Member variable which indicates the content of the recognized text in multiple text boxes, where the element number is the same as `boxes.size()`.
- **rec_scores**: Member variable which indicates the confidence level of the recognized text, where the element number is the same as `boxes.size()`.
- **cls_scores**: Member variable which indicates the confidence level of the classification result of the text box, where the element number is the same as `boxes.size()`.
- **cls_labels**: Member variable which indicates the directional category of the textbox, where the element number is the same as `boxes.size()`.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
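For illustration, a hypothetical helper (not a FastDeploy API) that prints each recognized text line together with its recognition confidence; text and rec_scores align with boxes index-by-index:
```c++
#include <iostream>
#include "fastdeploy/vision.h"

// Hypothetical helper: print each recognized text line and its score.
void PrintOCR(const fastdeploy::vision::OCRResult& res) {
  for (size_t i = 0; i < res.text.size(); ++i) {
    std::cout << "text[" << i << "]=\"" << res.text[i]
              << "\" rec_score=" << res.rec_scores[i] << std::endl;
  }
}
```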
## Face Alignment Result
The FaceAlignmentResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate face landmarks.
### C++ Definition
```c++
fastdeploy::vision::FaceAlignmentResult
```
```c++
struct FaceAlignmentResult {
std::vector<std::array<float, 2>> landmarks;
void Clear();
std::string Str();
};
```
- **landmarks**: Member variable which indicates all the key points detected in a single face image.
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).
## Head Pose Result
The HeadPoseResult code is defined in `fastdeploy/vision/common/result.h`, and is used to indicate the head pose result.
### C++ Definition
```c++
fastdeploy::vision::HeadPoseResult
```
```c++
struct HeadPoseResult {
std::vector<float> euler_angles;
void Clear();
std::string Str();
};
```
- **euler_angles**: Member variable which indicates the Euler angles predicted for a single face image, stored in the order (yaw, pitch, roll), with yaw representing the horizontal turn angle, pitch representing the vertical angle, and roll representing the roll angle, all with a value range of [-90,+90].
- **Clear()**: Member function used to clear the results stored in the structure.
- **Str()**: Member function used to output the information in the structure as string (for Debug).

View File

@@ -4,6 +4,7 @@
## Install FastDeploy Prebuilt Libraries
- [Download and install FastDeploy prebuilt libraries](download_prebuilt_libraries.md)
>> **Note**: FastDeploy currently provides prebuilt libraries only for some environments; for other environments, please build from source by referring to the documents below
## Build and Install from Source
- [NVIDIA GPU deployment environment](gpu.md)

View File

@@ -2,6 +2,16 @@
# Build and Install the Deployment Environment for Amlogic A311D
## Contents
* [Introduction and compilation options](#简介以及编译选项)
* [Set up the cross-compilation environment](#交叉编译环境搭建)
* [Build the FastDeploy cross-compilation library based on Paddle Lite](#基于-paddle-lite-的-fastdeploy-交叉编译库编译)
* [Prepare the device runtime environment](#准备设备运行环境)
* [Deployment example on the A311D based on FastDeploy](#基于-fastdeploy-在-a311d-上的部署示例)
## Introduction and Compilation Options
Based on the Paddle Lite backend, FastDeploy supports deployment and inference on Amlogic NPUs.
For more details, please refer to: [Paddle Lite deployment examples](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html).

View File

@@ -0,0 +1,59 @@
[English](../../en/build_and_install/directml.md) | 简体中文
# How to Build the DirectML Deployment Library
Direct Machine Learning (DirectML) is a high-performance, hardware-accelerated DirectX 12 library for machine learning on Windows.
Currently, FastDeploy's ONNX Runtime backend has DirectML integrated, allowing users to deploy models on AMD/Intel/Nvidia/Qualcomm GPUs with DirectX 12 support.
More details:
- [ONNX Runtime DirectML Execution Provider](https://onnxruntime.ai/docs/execution-providers/DirectML-ExecutionProvider.html)
# DirectML Requirements
- Compilation requirements: Visual Studio 2017 toolchain or above.
- Operating system: Windows 10, version 1903, or newer. (DirectML is part of the operating system and does not need to be installed separately.)
- Hardware requirements: a graphics card supporting DirectX 12, e.g., AMD GCN 1st generation or above / Intel Haswell HD integrated graphics or above / Nvidia Kepler architecture or above / Qualcomm Adreno 600 or above.
# Build the DirectML Deployment Library
DirectML is integrated via the ONNX Runtime backend, so to use DirectML, users need to enable the option that compiles ONNX Runtime. FastDeploy's DirectML also supports building programs for the x64 and x86 (Win32) architectures.
For the x64 example, in the Windows menu, find `x64 Native Tools Command Prompt for VS 2019`, open it, and execute the following commands
```bat
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64 ^
-DWITH_DIRECTML=ON ^
-DENABLE_ORT_BACKEND=ON ^
-DENABLE_VISION=ON ^
-DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy"
msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=x64
msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64
```
After compilation, the C++ inference library is generated in the directory specified by `CMAKE_INSTALL_PREFIX`.
If you use the CMake GUI, refer to [Build with CMake GUI + Visual Studio 2019 IDE on Windows](../faq/build_on_win_with_gui.md)
For the x86 (Win32) example, in the Windows menu, find `x86 Native Tools Command Prompt for VS 2019`, open it, and execute the following commands
```bat
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A Win32 ^
-DWITH_DIRECTML=ON ^
-DENABLE_ORT_BACKEND=ON ^
-DENABLE_VISION=ON ^
-DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy"
msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=Win32
msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=Win32
```
After compilation, the C++ inference library is generated in the directory specified by `CMAKE_INSTALL_PREFIX`.
If you use the CMake GUI, refer to [Build with CMake GUI + Visual Studio 2019 IDE on Windows](../faq/build_on_win_with_gui.md)
# Use the DirectML Library
The compiled DirectML library is used in the same way as any other hardware on Windows; see the following links.
- [Multiple ways to use the FastDeploy C++ library on Windows](../faq/use_sdk_on_windows_build.md)
- [Using the FastDeploy C++ SDK on Windows](../faq/use_sdk_on_windows.md)

View File

@@ -1,5 +1,17 @@
# Preparation for Building the Deployment Environment for Huawei Ascend NPU
## Contents
* [Introduction and compilation options](#简介以及编译选项)
* [Prepare the Huawei Ascend environment](#一华为昇腾环境准备)
* [Set up the compilation environment](#二编译环境搭建)
* [Build the C++ FastDeploy library based on Paddle Lite](#三基于-paddle-lite-的-c-fastdeploy-库编译)
* [Build the Python FastDeploy library based on Paddle Lite](#四基于-paddle-lite-的-python-fastdeploy-库编译)
* [Enable FlyCV for Ascend deployment](#五昇腾部署时开启flycv)
* [Ascend deployment demo reference](#六昇腾部署demo参考)
## Introduction and Compilation Options
Based on the Paddle Lite backend, FastDeploy supports deployment and inference on Huawei Ascend NPUs.
For more details, please refer to: [Paddle Lite deployment examples](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/demo_guides/huawei_ascend_npu.md).
@@ -114,7 +126,7 @@ python setup.py bdist_wheel
## 5. Enable FlyCV for Ascend Deployment
[FlyCV](https://github.com/PaddlePaddle/FlyCV) is a high-performance computer vision library with extensive optimizations for the ARM architecture, outperforming other image processing libraries.
FastDeploy has integrated FlyCV, so users can use FlyCV on supported hardware platforms to accelerate end-to-end model inference.
In end-to-end model inference, the pre- and post-processing stages run on the CPU. When using an ARM CPU + Ascend hardware platform, we recommend FlyCV to accelerate end-to-end inference; see the [FLyCV usage document](./boost_cv_by_flycv.md).
In end-to-end model inference, the pre- and post-processing stages run on the CPU. When using an ARM CPU + Ascend hardware platform, we recommend FlyCV to accelerate end-to-end inference; see the [FlyCV usage document](../faq/boost_cv_by_flycv.md).
## 6. Ascend Deployment Demo Reference

View File

@@ -2,6 +2,16 @@
# Build and Install the Deployment Environment for Rockchip RV1126
## Contents
* [Introduction and compilation options](#简介以及编译选项)
* [Set up the cross-compilation environment](#交叉编译环境搭建)
* [Build the FastDeploy cross-compilation library based on Paddle Lite](#基于-paddle-lite-的-fastdeploy-交叉编译库编译)
* [Prepare the device runtime environment](#准备设备运行环境)
* [Deployment example on the RV1126 based on FastDeploy](#基于-fastdeploy-在-rv1126-上的部署示例)
## Introduction and Compilation Options
Based on the Paddle Lite backend, FastDeploy supports deployment and inference on Rockchip SoCs.
For more details, please refer to: [Paddle Lite deployment examples](https://www.paddlepaddle.org.cn/lite/develop/demo_guides/verisilicon_timvx.html).

View File

@@ -8,25 +8,28 @@ RKNPU2 model export is only supported on x86 Linux; for the installation process, please refer
ONNX models cannot directly call the NPU in RK chips for computation; an ONNX model must be converted to an RKNN model. For the detailed process, see the [RKNPU2 conversion document](./export.md)
## Models Already Supported by RKNPU2
The speeds tested below are all end-to-end; the test environment is as follows:
* Device model: RK3588
* The ARM CPU is tested with the ONNX framework
FastDeploy has been tested on the RK3588s; the test environment is as follows:
* Device model: RK3588-s
* All NPU tests use a single core
| Task | Model | Model version (tested versions) | ARM CPU/RKNN latency (ms) |
|----------------------|--------------------------------------------------------------------------------------------------|--------------------------|--------------------|
| Detection | [Picodet](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | Picodet-s | 162/112 |
| Detection | [PaddleDetection Yolov8](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | yolov8-n | -/100 |
| Detection | [PPYOLOE](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | ppyoloe-s(int8) | -/77 |
| Detection | [RKYOLOV5](../../../../examples/vision/detection/rkyolo/README.md) | YOLOV5-S-Relu(int8) | -/57 |
| Detection | [RKYOLOX](../../../../examples/vision/detection/rkyolo/README.md) | - | -/- |
| Detection | [RKYOLOV7](../../../../examples/vision/detection/rkyolo/README.md) | - | -/- |
| Segmentation | [Unet](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | Unet-cityscapes | -/- |
| Segmentation | [PP-HumanSegV2Lite](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | portrait(int8) | 133/43 |
| Segmentation | [PP-HumanSegV2Lite](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | human(int8) | 133/43 |
| Face Detection | [SCRFD](../../../../examples/vision/facedet/scrfd/rknpu2/README.md) | SCRFD-2.5G-kps-640(int8) | 108/42 |
| Face FaceRecognition | [InsightFace](../../../../examples/vision/faceid/insightface/rknpu2/README_CN.md) | ms1mv3_arcface_r18(int8) | 81/12 |
| Classification | [ResNet](../../../../examples/vision/classification/paddleclas/rknpu2/README.md) | ResNet50_vd | -/33 |
The speeds below are all end-to-end and fluctuate with the quality of the individual chip; they are for reference only.
| Task | Model and example | Model version | Quantized | RKNN latency (ms) |
|----------------------|--------------------------------------------------------------------------------------------------|--------------------------|-----------|------------|
| Classification | [ResNet](../../../../examples/vision/classification/paddleclas/rknpu2/README.md) | ResNet50_vd | No | 33 |
| Detection | [Picodet](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | Picodet-s | No | 112 |
| Detection | [PaddleDetection Yolov8](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | yolov8-n | | 100 |
| Detection | [PPYOLOE](../../../../examples/vision/detection/paddledetection/rknpu2/README.md) | ppyoloe-s(int8) | Yes | 141 |
| Detection | [RKYOLOV5](../../../../examples/vision/detection/rkyolo/README.md) | YOLOV5-S-Relu(int8) | Yes | 57 |
| Detection | [RKYOLOX](../../../../examples/vision/detection/rkyolo/README.md) | yolox-s | Yes | 130 |
| Detection | [RKYOLOV7](../../../../examples/vision/detection/rkyolo/README.md) | yolov7-tiny | Yes | 58 |
| Segmentation | [Unet](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | Unet-cityscapes | No | - |
| Segmentation | [PP-HumanSegV2Lite](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | portrait(int8) | Yes | 43 |
| Segmentation | [PP-HumanSegV2Lite](../../../../examples/vision/segmentation/paddleseg/rknpu2/README.md) | human(int8) | Yes | 43 |
| Face Detection | [SCRFD](../../../../examples/vision/facedet/scrfd/rknpu2/README.md) | SCRFD-2.5G-kps-640(int8) | Yes | 42 |
| Face FaceRecognition | [InsightFace](../../../../examples/vision/faceid/insightface/rknpu2/README_CN.md) | ms1mv3_arcface_r18(int8) | Yes | 12 |
## Download Prebuilt Libraries

View File

@@ -0,0 +1,25 @@
[English](faq.md)| 简体中文
# FAQ on Vision Model Prediction Results
## Convert vision model prediction results to numpy format
Taking [SegmentationResult](./segmentation_result_CN.md) as an example, this shows how to extract the label_map or score_map from a SegmentationResult and convert it to numpy format, and also how to construct a new SegmentationResult struct from existing data
``` python
import fastdeploy as fd
import cv2
import numpy as np
model = fd.vision.segmentation.PaddleSegModel(
model_file, params_file, config_file)
im = cv2.imread(image)
result = model.predict(im)
# convert label_map and score_map to numpy format
numpy_label_map = np.array(result.label_map)
numpy_score_map = np.array(result.score_map)
# create SegmentationResult object
result = fd.C.vision.SegmentationResult()
result.label_map = numpy_label_map.tolist()
result.score_map = numpy_score_map.tolist()
```
>> **Note**: The above is sample code; for details, please refer to the [PaddleSeg example](../../../examples/vision/segmentation/paddleseg/)

View File

@@ -0,0 +1,57 @@
English | [中文](../../cn/build_and_install/directml.md)
# How to Build DirectML Deployment Environment
Direct Machine Learning (DirectML) is a high-performance, hardware-accelerated DirectX 12 library for machine learning on Windows systems.
Currently, FastDeploy's ONNX Runtime backend has DirectML integrated, allowing users to deploy models on AMD/Intel/Nvidia/Qualcomm GPUs with DirectX 12 support.
More details:
- [ONNX Runtime DirectML Execution Provider](https://onnxruntime.ai/docs/execution-providers/DirectML-ExecutionProvider.html)
# DirectML requirements
- Compilation requirements: Visual Studio 2017 toolchain and above.
- Operating system: Windows 10, version 1903, and newer. (DirectML is part of the operating system and does not need to be installed separately)
- Hardware requirements: DirectX 12 supported graphics cards, e.g., AMD GCN 1st generation and above/ Intel Haswell HD integrated graphics and above/ Nvidia Kepler architecture and above/ Qualcomm Adreno 600 and above.
# How to Build and Install DirectML C++ SDK
The DirectML is integrated with the ONNX Runtime backend, so to use DirectML, users need to turn on the option to compile ONNX Runtime. Also, FastDeploy's DirectML supports building programs for x64/x86 (Win32) architectures.
For the x64 example, in the Windows menu, find `x64 Native Tools Command Prompt for VS 2019`, open it, and execute the following commands
```bat
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64 ^
-DWITH_DIRECTML=ON ^
-DENABLE_ORT_BACKEND=ON ^
-DENABLE_VISION=ON ^
-DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy"
msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=x64
msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64
```
Once compiled, the C++ inference library is generated in the directory specified by `CMAKE_INSTALL_PREFIX`.
If you use CMake GUI, please refer to [How to Compile with CMakeGUI + Visual Studio 2019 IDE on Windows](../faq/build_on_win_with_gui.md)
For the x86 (Win32) example, in the Windows menu, find `x86 Native Tools Command Prompt for VS 2019`, open it, and execute the following commands
```bat
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A Win32 ^
-DWITH_DIRECTML=ON ^
-DENABLE_ORT_BACKEND=ON ^
-DENABLE_VISION=ON ^
-DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy"
msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=Win32
msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=Win32
```
Once compiled, the C++ inference library is generated in the directory specified by `CMAKE_INSTALL_PREFIX`.
If you use CMake GUI, please refer to [How to Compile with CMakeGUI + Visual Studio 2019 IDE on Windows](../faq/build_on_win_with_gui.md)
# How to Use the Compiled DirectML SDK
The compiled DirectML library can be used in the same way as any other hardware on Windows; see the following link.
- [Using the FastDeploy C++ SDK on Windows Platform](../faq/use_sdk_on_windows.md)

View File

@@ -0,0 +1,77 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/runtime.h"
namespace fd = fastdeploy;
int main(int argc, char* argv[]) {
// create option
fd::RuntimeOption runtime_option;
// model and param files
std::string model_file = "mobilenetv2/inference.pdmodel";
std::string params_file = "mobilenetv2/inference.pdiparams";
// read model From disk.
// runtime_option.SetModelPath(model_file, params_file,
// fd::ModelFormat::PADDLE);
// read model from buffer
std::string model_buffer, params_buffer;
fd::ReadBinaryFromFile(model_file, &model_buffer);
fd::ReadBinaryFromFile(params_file, &params_buffer);
runtime_option.SetModelBuffer(model_buffer, params_buffer,
fd::ModelFormat::PADDLE);
// setup other option
runtime_option.SetCpuThreadNum(12);
// use ONNX Runtime DirectML
runtime_option.UseOrtBackend();
runtime_option.UseDirectML();
// init runtime
std::unique_ptr<fd::Runtime> runtime =
std::unique_ptr<fd::Runtime>(new fd::Runtime());
if (!runtime->Init(runtime_option)) {
std::cerr << "--- Init FastDeploy Runtime Failed! "
<< "\n--- Model: " << model_file << std::endl;
return -1;
} else {
std::cout << "--- Init FastDeploy Runtime Done! "
<< "\n--- Model: " << model_file << std::endl;
}
// init input tensor shape
fd::TensorInfo info = runtime->GetInputInfo(0);
info.shape = {1, 3, 224, 224};
std::vector<fd::FDTensor> input_tensors(1);
std::vector<fd::FDTensor> output_tensors(1);
std::vector<float> inputs_data;
inputs_data.resize(1 * 3 * 224 * 224);
for (size_t i = 0; i < inputs_data.size(); ++i) {
inputs_data[i] = std::rand() % 1000 / 1000.0f;
}
input_tensors[0].SetExternalData({1, 3, 224, 224}, fd::FDDataType::FP32,
inputs_data.data());
// get input name
input_tensors[0].name = info.name;
runtime->Infer(input_tensors, &output_tensors);
output_tensors[0].PrintInfo();
return 0;
}

View File

@@ -84,11 +84,13 @@ int main(int argc, char* argv[]) {
runtime_option.SetModelPath(model_file, "", fd::ModelFormat::TORCHSCRIPT);
runtime_option.UsePorosBackend();
runtime_option.UseGpu(0);
runtime_option.is_dynamic = true;
// Compile runtime
std::unique_ptr<fd::Runtime> runtime =
std::unique_ptr<fd::Runtime>(new fd::Runtime());
runtime->Init(runtime_option);
if (!runtime->Compile(prewarm_datas, runtime_option)) {
std::cerr << "--- Init FastDeploy Runtime Failed! "
<< "\n--- Model: " << model_file << std::endl;
@@ -114,4 +116,4 @@ int main(int argc, char* argv[]) {
output_tensors[0].PrintInfo();
return 0;
}
}

View File

@@ -1,7 +1,7 @@
English | [简体中文](README_CN.md)
# PaddleClas C++ Deployment Example
This directory provides examples that `infer.cc` fast finishes the deployment of PaddleClas models on CPU/GPU and GPU accelerated by TensorRT.
Before deployment, two steps require confirmation.
@@ -13,13 +13,13 @@ Taking ResNet50_vd inference on Linux as an example, the compilation test can be
```bash
mkdir build
cd build
# Download FastDeploy precompiled library. Users can choose your appropriate version in the`FastDeploy Precompiled Library` mentioned above
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
# Download ResNet50_vd model file and test images
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
tar -xvf ResNet50_vd_infer.tgz
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
@@ -35,12 +35,14 @@ wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/Ima
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 3
# KunlunXin XPU inference
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 4
# Ascend inference
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 5
```
The above command works for Linux or MacOS. Refer to
- [How to use FastDeploy C++ SDK in Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md) for SDK use-pattern in Windows
## PaddleClas C++ Interface
### PaddleClas Class
@@ -57,8 +59,8 @@ PaddleClas model loading and initialization, where model_file and params_file ar
**Parameter**
> * **model_file**(str): Model file path
> * **params_file**(str): Parameter file path
> * **config_file**(str): Inference deployment configuration file
> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default. (use the default configuration)
> * **model_format**(ModelFormat): Model format. Paddle format by default
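As a minimal usage sketch of the parameters above, assuming the C++ class is `fastdeploy::vision::classification::PaddleClasModel` as in the FastDeploy examples, with placeholder file names taken from the ResNet50_vd package:
```c++
#include <iostream>
#include "fastdeploy/vision.h"

int main() {
  // Placeholder paths; adjust to your exported model directory.
  std::string dir = "ResNet50_vd_infer";
  auto model = fastdeploy::vision::classification::PaddleClasModel(
      dir + "/inference.pdmodel", dir + "/inference.pdiparams",
      dir + "/inference_cls.yaml");  // default RuntimeOption, Paddle format
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize model." << std::endl;
    return -1;
  }
  cv::Mat im = cv::imread("ILSVRC2012_val_00000010.jpeg");
  fastdeploy::vision::ClassifyResult res;
  if (!model.Predict(im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return -1;
  }
  std::cout << res.Str() << std::endl;
  return 0;
}
```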

View File

@@ -96,7 +96,8 @@ void IpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << res.Str() << std::endl;
}
void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
void KunlunXinInfer(const std::string& model_dir,
const std::string& image_file) {
auto model_file = model_dir + sep + "inference.pdmodel";
auto params_file = model_dir + sep + "inference.pdiparams";
auto config_file = model_dir + sep + "inference_cls.yaml";
@@ -152,7 +153,7 @@ void AscendInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "inference.pdmodel";
auto params_file = model_dir + sep + "inference.pdiparams";
auto config_file = model_dir + sep + "inference_cls.yaml";
auto option = fastdeploy::RuntimeOption();
option.UseAscend();
@@ -172,14 +173,14 @@ void AscendInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << res.Str() << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
"e.g ./infer_demo ./ResNet50_vd ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend; 3: run with ipu; 4: run with kunlunxin."
"with gpu; 2: run with gpu and use tensorrt backend; 3: run "
"with ipu; 4: run with kunlunxin."
<< std::endl;
return -1;
}

View File

@@ -0,0 +1,23 @@
PROJECT(infer_demo CSharp)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Set the C# language version (defaults to 3.0 if not set).
set(CMAKE_CSharp_FLAGS "/langversion:10")
set(CMAKE_DOTNET_TARGET_FRAMEWORK "net6.0")
set(CMAKE_DOTNET_SDK "Microsoft.NET.Sdk")
# Path of the downloaded and extracted FastDeploy library
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeployCSharp.cmake)
add_executable(infer_ppyoloe_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe.cs)
set_property(TARGET infer_ppyoloe_demo PROPERTY VS_DOTNET_REFERENCES
${FASTDEPLOY_DOTNET_REFERENCES}
)
set_property(TARGET infer_ppyoloe_demo
PROPERTY VS_PACKAGE_REFERENCES ${FASTDEPLOY_PACKAGE_REFERENCES}
)

View File

@@ -0,0 +1,97 @@
English | [简体中文](README_CN.md)
# PaddleDetection C# Deployment Example
This directory provides examples in which `infer_xxx.cs` quickly finishes the deployment of PaddleDetection models (including PPYOLOE) on CPU/GPU.
Before deployment, two steps require confirmation
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
Please follow the instructions below to compile and test in Windows. FastDeploy version 1.0.4 or above (x.x.x>=1.0.4) is required to support this model.
## 1. Download C# package management tool nuget client
> https://dist.nuget.org/win-x86-commandline/v6.4.0/nuget.exe
Add nuget program into system variable **PATH**
## 2. Download model and image for test
> https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz # (unzip it after download)
> https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
## 3. Compile example code
Open `x64 Native Tools Command Prompt for VS 2019` command tool on Windows, cd to the demo path of ppyoloe and execute commands
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\detection\paddledetection\csharp
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=D:\Download\fastdeploy-win-x64-gpu-x.x.x -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2"
nuget restore
msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
```
For more information about how to use the FastDeploy SDK to compile a project with Visual Studio 2019, please refer to
- [Using the FastDeploy C++ SDK on Windows Platform](../../../../../docs/en/faq/use_sdk_on_windows.md)
## 4. Execute compiled program
fastdeploy.dll and related dynamic libraries are required by the program. FastDeploy provides a script to copy all the required dlls to your program path.
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x
fastdeploy_init.bat install %cd% D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\detection\paddledetection\csharp\build\Release
```
Then you can run your program and test the model with image
```shell
cd Release
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 # CPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1 # GPU
```
## PaddleDetection C# Interface
### Model Class
```c#
fastdeploy.vision.detection.PPYOLOE(
string model_file,
string params_file,
string config_file,
fastdeploy.RuntimeOption runtime_option = null,
fastdeploy.ModelFormat model_format = ModelFormat.PADDLE)
```
> PaddleDetection PPYOLOE initialization.
> **Params**
>> * **model_file**(str): Model file path
>> * **params_file**(str): Parameter file path
>> * **config_file**(str): Configuration file path, which is the deployment yaml file exported by PaddleDetection
>> * **runtime_option**(RuntimeOption): Backend inference configuration. null by default, which is the default configuration
>> * **model_format**(ModelFormat): Model format. Paddle format by default
#### Predict Function
```c#
fastdeploy.DetectionResult Predict(OpenCvSharp.Mat im)
```
> Model prediction interface. Input images and output results directly.
>
> **Params**
>
>> * **im**(Mat): Input image in HWC/BGR format
>
> **Return**
>
>> * **result**(DetectionResult): Detection result, including detection box and confidence of each box. Refer to [Vision Model Prediction Result](../../../../../docs/api/vision_results/) for DetectionResult
- [Model Description](../../)
- [Python Deployment](../python)
- [Vision Model prediction results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../docs/en/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,99 @@
[English](README.md) | 简体中文
# PaddleDetection C# Deployment Example
This directory provides examples in which `infer_xxx.cs` calls the C# API to quickly deploy PaddleDetection models (e.g. PPYOLOE) on CPU/GPU.
Before deployment, two steps require confirmation
- 1. The software and hardware environment meets the requirements; refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the prebuilt deployment library and samples code according to your development environment; refer to [FastDeploy Prebuilt Libraries](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
Perform the following steps on Windows to compile and test. FastDeploy version 1.0.4 or above (x.x.x>=1.0.4) is required to support this model.
## 1. Download the C# package manager nuget client
> https://dist.nuget.org/win-x86-commandline/v6.4.0/nuget.exe
After downloading, add the program to the system variable **PATH**
## 2. Download the model file and test images
> https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz # (unzip after download)
> https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
## 3. Compile the example code
The example code compiled in this document can be found in the extracted library; compilation depends on VS 2019. **Open the `x64 Native Tools Command Prompt for VS 2019` command tool on Windows** and start compiling with the following commands
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\detection\paddledetection\csharp
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=D:\Download\fastdeploy-win-x64-gpu-x.x.x -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2"
nuget restore
msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
```
For more details on building with Visual Studio 2019 (creating an sln project or a CMake project), refer to the following documents
- [Using the FastDeploy C++ SDK on Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)
- [Multiple ways to use the FastDeploy C++ library on Windows](../../../../../docs/cn/faq/use_sdk_on_windows_build.md)
## 4. Run the executable
Note: when running on Windows, the libraries that FastDeploy depends on must be copied to the directory of the executable, or the environment variables must be configured. FastDeploy provides a tool to quickly copy all dependent dll files to the directory of the executable with the following commands
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x
fastdeploy_init.bat install %cd% D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\detection\paddledetection\csharp\build\Release
```
After copying the dlls to the current path, prepare the model and image, and run the executable with the following commands
```shell
cd Release
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 0 # CPU
infer_ppyoloe_demo.exe ppyoloe_crn_l_300e_coco 000000014439.jpg 1 # GPU
```
## PaddleDetection C# Interface
### Model Class
```c#
fastdeploy.vision.detection.PPYOLOE(
string model_file,
string params_file,
string config_file,
fastdeploy.RuntimeOption runtime_option = null,
fastdeploy.ModelFormat model_format = ModelFormat.PADDLE)
```
> PaddleDetection PPYOLOE model loading and initialization.
> **Parameters**
>> * **model_file**(str): Model file path
>> * **params_file**(str): Parameter file path
>> * **config_file**(str): Configuration file path, i.e. the deployment yaml file exported by PaddleDetection
>> * **runtime_option**(RuntimeOption): Backend inference configuration, null by default, i.e. the default configuration is used
>> * **model_format**(ModelFormat): Model format, PADDLE format by default
#### Predict Function
```c#
fastdeploy.DetectionResult Predict(OpenCvSharp.Mat im)
```
> Model prediction interface. Input an image and output the detection result directly.
>
> **Parameters**
>
>> * **im**(Mat): Input image in HWC/BGR format
>
> **Return**
>
>> * **result**(DetectionResult): Detection result, including the detection boxes and the confidence of each box; refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for a description of DetectionResult
- [Model Description](../../)
- [Python Deployment](../python)
- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend](../../../../../docs/cn/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,57 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using OpenCvSharp;
using fastdeploy;
namespace Test
{
public class TestPPYOLOE
{
public static void Main(string[] args)
{
if (args.Length < 3) {
Console.WriteLine(
"Usage: infer_demo path/to/model_dir path/to/image run_option, " +
"e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
);
Console.WriteLine( "The data type of run_option is int, 0: run with cpu; 1: run with gpu");
return;
}
string model_dir = args[0];
string image_path = args[1];
string model_file = model_dir + "\\" + "model.pdmodel";
string params_file = model_dir + "\\" + "model.pdiparams";
string config_file = model_dir + "\\" + "infer_cfg.yml";
RuntimeOption runtimeoption = new RuntimeOption();
int device_option = Int32.Parse(args[2]);
if(device_option==0){
runtimeoption.UseCpu();
}else{
runtimeoption.UseGpu();
}
vision.detection.PPYOLOE model = new vision.detection.PPYOLOE(model_file, params_file, config_file, runtimeoption, ModelFormat.PADDLE);
Mat image = Cv2.ImRead(image_path);
vision.DetectionResult res = model.Predict(image);
Mat res_img = vision.Visualize.VisDetection(image, res, 0, 1, 0.5f);
Cv2.ImShow("result.png", res_img);
Cv2.WaitKey(0);
}
}
}

View File

@@ -50,7 +50,7 @@ paddle2onnx --model_dir picodet_s_416_coco_lcnet \
# Fix the shape
python -m paddle2onnx.optimize --input_model picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet.onnx \
--output_model picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet.onnx \
--input_shape_dict "{'image':[1,3,416,416]}"
--input_shape_dict "{'image':[1,3,416,416], 'scale_factor':[1,2]}"
```
### Write the yaml file
@@ -73,11 +73,12 @@ std:
```
**Modify the outputs parameter**
The output node names of the converted model vary with the Paddle2ONNX version. Please use [Netron](https://netron.app) to visualize the model and find the NonMaxSuppression node marked by the blue box below; the node names marked by the red boxes are the target names.
For example, the following image is obtained after visualizing with Netron:
![](https://user-images.githubusercontent.com/58363586/212599781-e1952da7-6eae-4951-8ca7-bab7e6940692.png)
![](https://ai-studio-static-online.cdn.bcebos.com/8bce6b904a6b479e8b30da9f7c719fad57517ffb2f234aeca3b8ace0761754d5)
Find the NonMaxSuppression node marked by the blue box; the two node names marked by the red boxes are p2o.Div.79 and p2o.Concat.9, so the outputs parameter needs to be modified as follows:
@@ -96,6 +97,16 @@ python tools/rknpu2/export.py --config_path tools/rknpu2/config/picodet_s_416_co
--target_platform rk3588
```
## RKNN Model List
For convenience, we provide two converted models, picodet and ppyoloe, which are ready to use after unzipping:
| Model | Download link |
|-----------------------------|-----------------------------------------------------------------------------------|
| picodet_s_416_coco_lcnet | https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/picodet_s_416_coco_lcnet.zip |
| ppyoloe_plus_crn_s_80e_coco | https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/ppyoloe_plus_crn_s_80e_coco.zip |
## Other Links

View File

@@ -14,3 +14,6 @@ target_link_libraries(infer_picodet_demo ${FASTDEPLOY_LIBS})
add_executable(infer_yolov8_demo ${PROJECT_SOURCE_DIR}/infer_yolov8_demo.cc)
target_link_libraries(infer_yolov8_demo ${FASTDEPLOY_LIBS})
add_executable(infer_ppyoloe_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe_demo.cc)
target_link_libraries(infer_ppyoloe_demo ${FASTDEPLOY_LIBS})

View File

@@ -12,7 +12,7 @@
For the steps above, refer to [Build the RKNPU2 deployment library](../../../../../../docs/cn/build_and_install/rknpu2.md)
```bash
Take picodet as an example for inference deployment
# Take picodet as an example for inference deployment
mkdir build
cd build
@@ -23,6 +23,8 @@ cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
# Download the PPYOLOE model files and test images
wget https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/picodet_s_416_coco_lcnet.zip
unzip picodet_s_416_coco_lcnet.zip
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# CPU inference
@@ -31,13 +33,6 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000
./infer_picodet_demo ./picodet_s_416_coco_lcnet 000000014439.jpg 1
```
## Run the routine
```bash
cd ./build/install
./infer_picodet model/picodet_s_416_coco_lcnet images/000000014439.jpg
```
## Document Navigation
- [Model description](../../)

View File

@@ -0,0 +1,95 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
void ONNXInfer(const std::string& model_dir, const std::string& image_file) {
std::string model_file = model_dir + "/yolov8_n_500e_coco.onnx";
std::string params_file;
std::string config_file = model_dir + "/infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseCpu();
auto format = fastdeploy::ModelFormat::ONNX;
auto model = fastdeploy::vision::detection::PPYOLOE(
model_file, params_file, config_file, option, format);
fastdeploy::TimeCounter tc;
tc.Start();
auto im = cv::imread(image_file);
fastdeploy::vision::DetectionResult res;
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
tc.End();
tc.PrintInfo("PPDet in ONNX");
std::cout << res.Str() << std::endl;
cv::imwrite("infer_onnx.jpg", vis_im);
std::cout << "Visualized result saved in ./infer_onnx.jpg" << std::endl;
}
void RKNPU2Infer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + "/ppyoloe_plus_crn_s_80e_coco_rk3588_quantized.rknn";
auto params_file = "";
auto config_file = model_dir + "/infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseRKNPU2();
auto format = fastdeploy::ModelFormat::RKNN;
auto model = fastdeploy::vision::detection::PPYOLOE(
model_file, params_file, config_file, option, format);
model.GetPreprocessor().DisablePermute();
model.GetPreprocessor().DisableNormalize();
model.GetPostprocessor().ApplyDecodeAndNMS();
auto im = cv::imread(image_file);
fastdeploy::vision::DetectionResult res;
fastdeploy::TimeCounter tc;
tc.Start();
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
tc.End();
tc.PrintInfo("PPDet in RKNPU2");
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
cv::imwrite("infer_rknpu2.jpg", vis_im);
std::cout << "Visualized result saved in ./infer_rknpu2.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./picodet_model_dir ./test.jpeg"
<< std::endl;
return -1;
}
if (std::atoi(argv[3]) == 0) {
ONNXInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 1) {
RKNPU2Infer(argv[1], argv[2]);
}
return 0;
}

View File

@@ -22,11 +22,11 @@ def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_file",
default="./picodet_s_416_coco_lcnet_non_postprocess/picodet_xs_416_coco_lcnet.onnx",
default="./picodet_s_416_coco_lcnet/picodet_s_416_coco_lcnet_rk3588_unquantized.rknn",
help="Path of rknn model.")
parser.add_argument(
"--config_file",
default="./picodet_s_416_coco_lcnet_non_postprocess/infer_cfg.yml",
default="./picodet_s_416_coco_lcnet/infer_cfg.yml",
help="Path of config.")
parser.add_argument(
"--image",

View File

@@ -6,11 +6,21 @@ RKYOLO refers to the code of [rknn_model_zoo](https://github.com/airockchip/rknn_model_zoo/tree/m
## Supported Model List
FastDeploy currently supports the deployment of the following three models:
* RKYOLOV5
* RKYOLOX
* RKYOLOv7
## Model Conversion Example
For convenience, we provide three converted models that can be downloaded and used directly.
If you need to convert a model yourself, please refer to [RKNN_model_convert](https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo/RKNN_model_convert)
| Model | Download link |
|--------------------|---------------------------------------------------------------------|
| yolov5-s-relu-int8 | https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/yolov5-s-relu.zip |
| yolov7-tiny-int8 | https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/yolov7-tiny.zip |
| yolox-s-int8 | https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/yolox-s.zip |
Please refer to [RKNN_model_convert](https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo/RKNN_model_convert)
## Other Links

View File

@@ -0,0 +1,55 @@
[English](README.md) | 简体中文
# PP-TinyPose RKNPU2 Deployment Example
## Model Version
- [PaddleDetection release/2.5](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.5)
FastDeploy currently supports the deployment of the following models
- [PP-TinyPose series models](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.5/configs/keypoint/tiny_pose/README.md)
## Prepare PP-TinyPose Deployment Models
For PP-TinyPose model export, refer to its documentation: [Model Export](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/deploy/EXPORT_MODEL.md)
**Note**: The exported PP-TinyPose model contains three files: `model.pdmodel`, `model.pdiparams`, and `infer_cfg.yml`; FastDeploy obtains the preprocessing information required for inference from the yaml file.
## Model Conversion Example
### Convert the Paddle Model to an ONNX Model
Because the rknn-toolkit2 tool provided by Rockchip does not yet support exporting Paddle models directly to RKNN, the Paddle model must first be exported to ONNX and then converted to an RKNN model.
```bash
# Download the Paddle static graph model and extract it
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_TinyPose_256x192_infer.tgz
tar -xvf PP_TinyPose_256x192_infer.tgz
# Convert the static graph to an ONNX model; note that save_file should match the archive name
paddle2onnx --model_dir PP_TinyPose_256x192_infer \
--model_filename model.pdmodel \
--params_filename model.pdiparams \
--save_file PP_TinyPose_256x192_infer/PP_TinyPose_256x192_infer.onnx \
--enable_dev_version True
# Fix the shape
python -m paddle2onnx.optimize --input_model PP_TinyPose_256x192_infer/PP_TinyPose_256x192_infer.onnx \
--output_model PP_TinyPose_256x192_infer/PP_TinyPose_256x192_infer.onnx \
--input_shape_dict "{'image':[1,3,256,192]}"
```
### Convert the ONNX Model to an RKNN Model
For convenience, we provide a Python script; with our preconfigured config file, you can quickly convert an ONNX model to an RKNN model
```bash
python tools/rknpu2/export.py --config_path tools/rknpu2/config/PP_TinyPose_256x192_unquantized.yaml \
--target_platform rk3588
```
## 详细部署文档
- [模型详细介绍](../README_CN.md)
- [Python部署](./python)
- [C++部署](./cpp)

View File

@@ -0,0 +1,13 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.12)
# Specify the path of the downloaded and extracted fastdeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# Add the FastDeploy dependency header directories
include_directories(${FASTDEPLOY_INCS})
add_executable(infer_tinypose_demo ${PROJECT_SOURCE_DIR}/pptinypose_infer.cc)
target_link_libraries(infer_tinypose_demo ${FASTDEPLOY_LIBS})

View File

@@ -0,0 +1,83 @@
[English](README.md) | 简体中文
# PP-TinyPose C++ Deployment Example
This directory provides `pptinypose_infer.cc`, which quickly completes a `single-image, single-person keypoint detection` example of PP-TinyPose deployment accelerated on the NPU
>> **Note**: The standalone PP-TinyPose model currently only supports single-person keypoint detection on a single image, so the input image should contain only one person or be cropped accordingly. For multi-person keypoint detection, refer to [PP-TinyPose Pipeline](../../../det_keypoint_unite/cpp/README.md)
Two steps need to be confirmed before deployment
- 1. The software and hardware environment meets the requirements; refer to [FastDeploy Environment Requirements](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the precompiled deployment library and samples code according to your development environment; refer to [FastDeploy Precompiled Library](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
Taking inference on Linux as an example, run the following commands in this directory to complete the compilation test. FastDeploy version 1.0.3 or above (x.x.x>=1.0.3) is required to support this model.
```bash
mkdir build
cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
# Download the PP-TinyPose model files and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_TinyPose_256x192_infer.tgz
tar -xvf PP_TinyPose_256x192_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg
# NPU inference
sudo ./infer_tinypose_demo ./PP_TinyPose_256x192_infer ./hrnet_demo.jpg
```
The visualized result after running is as follows
<div align="center">
<img src="https://user-images.githubusercontent.com/16222477/196386764-dd51ad56-c410-4c54-9580-643f282f5a83.jpeg", width=359px, height=423px />
</div>
The above commands only work on Linux or MacOS. For how to use the SDK on Windows, refer to:
- [How to use FastDeploy C++ SDK in Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)
## PP-TinyPose C++ Interface
### PP-TinyPose Class
```c++
fastdeploy::vision::keypointdetection::PPTinyPose(
        const string& model_file,
        const string& params_file = "",
        const string& config_file,
        const RuntimeOption& runtime_option = RuntimeOption(),
        const ModelFormat& model_format = ModelFormat::PADDLE)
```
PPTinyPose model loading and initialization, where model_file is the exported Paddle model format.
**Parameters**
> * **model_file**(str): Model file path
> * **params_file**(str): Parameter file path
> * **config_file**(str): Inference deployment configuration file
> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, i.e. the default configuration is used
> * **model_format**(ModelFormat): Model format. Paddle format by default
#### Predict Function
> ```c++
> PPTinyPose::Predict(cv::Mat* im, KeyPointDetectionResult* result)
> ```
>
> Model prediction interface. Takes an input image and directly outputs the keypoint detection result.
>
> **Parameters**
>
> > * **im**: Input image; note it must be in HWC layout, BGR format
> > * **result**: Keypoint detection result, including keypoint coordinates and the corresponding confidence values. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the description of KeyPointDetectionResult
### Class Member Attributes
#### Post-processing Parameters
> > * **use_dark**(bool): Whether to use DARK for post-processing. [Reference paper](https://arxiv.org/abs/1910.06278)
- [Detailed model introduction](../../../)
- [Python deployment](../../python)
- [Vision Model Prediction Results](../../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../../docs/cn/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,66 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy/vision.h"
void RKNPU2Infer(const std::string& tinypose_model_dir,
const std::string& image_file) {
auto tinypose_model_file =
tinypose_model_dir + "/PP_TinyPose_256x192_infer_rk3588_unquantized.rknn";
std::cout << tinypose_model_file << std::endl;
auto tinypose_params_file = "";
auto tinypose_config_file = tinypose_model_dir + "/infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseRKNPU2();
auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose(
tinypose_model_file, tinypose_params_file, tinypose_config_file, option,
fastdeploy::RKNN);
if (!tinypose_model.Initialized()) {
std::cerr << "TinyPose Model Failed to initialize." << std::endl;
return;
}
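  // Assumption based on the RKNPU2 workflow: normalize and HWC->CHW permute
  // are folded into the RKNN model at conversion time, so the host-side
  // preprocessing steps are disabled below.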
tinypose_model.DisablePermute();
tinypose_model.DisableNormalize();
auto im = cv::imread(image_file);
fastdeploy::vision::KeyPointDetectionResult res;
if (!tinypose_model.Predict(&im, &res)) {
std::cerr << "TinyPose Prediction Failed." << std::endl;
return;
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
std::cout << res.Str() << std::endl;
auto tinypose_vis_im = fastdeploy::vision::VisKeypointDetection(im, res, 0.5);
cv::imwrite("tinypose_vis_result.jpg", tinypose_vis_im);
std::cout << "TinyPose visualized result saved in ./tinypose_vis_result.jpg"
<< std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 3) {
std::cout << "Usage: infer_demo path/to/pptinypose_model_dir path/to/image "
"run_option, "
"e.g ./infer_model ./pptinypose_model_dir ./test.jpeg"
<< std::endl;
return -1;
}
RKNPU2Infer(argv[1], argv[2]);
return 0;
}

View File

@@ -0,0 +1,70 @@
[English](README.md) | 简体中文
# PP-TinyPose Python Deployment Example
Two steps need to be confirmed before deployment
- 1. The software and hardware environment meets the requirements; refer to [FastDeploy Environment Requirements](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the precompiled deployment library and samples code according to your development environment; refer to [FastDeploy Precompiled Library](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
This directory provides `pptinypose_infer.py`, which quickly completes a `single-image, single-person keypoint detection` example of PP-TinyPose deployment accelerated on the NPU. Run the following script to complete it
>> **Note**: The standalone PP-TinyPose model currently only supports single-person keypoint detection on a single image, so the input image should contain only one person or be cropped accordingly. For multi-person keypoint detection, refer to [PP-TinyPose Pipeline](../../../det_keypoint_unite/python/README.md)
```bash
# Download the PP-TinyPose model files and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_TinyPose_256x192_infer.tgz
tar -xvf PP_TinyPose_256x192_infer.tgz
# (convert the model to RKNN format first; see the conversion example in the parent README)
wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg
# NPU inference
python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg
```
The visualized result after running is as follows
<div align="center">
<img src="https://user-images.githubusercontent.com/16222477/196386764-dd51ad56-c410-4c54-9580-643f282f5a83.jpeg", width=359px, height=423px />
</div>
## PP-TinyPose Python Interface
```python
fd.vision.keypointdetection.PPTinyPose(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
```
PP-TinyPose model loading and initialization, where model_file, params_file and config_file are the Paddle inference files exported from the training model. For details refer to the documentation: [Model Export](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/deploy/EXPORT_MODEL.md)
**Parameters**
> * **model_file**(str): Model file path
> * **params_file**(str): Parameter file path
> * **config_file**(str): Inference deployment configuration file
> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, i.e. the default configuration is used
> * **model_format**(ModelFormat): Model format. Paddle format by default
### predict Function
> ```python
> PPTinyPose.predict(input_image)
> ```
>
> Model prediction interface. Takes an input image and directly outputs the detection result.
>
> **Parameters**
>
> > * **input_image**(np.ndarray): Input data; note it must be in HWC layout, BGR format
> **Returns**
>
> > Returns a `fastdeploy.vision.KeyPointDetectionResult` structure. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for its description
### Class Member Attributes
#### Post-processing Parameters
Users can modify the following post-processing parameters according to their needs, which affects the final inference and deployment results
> > * **use_dark**(bool): Whether to use DARK for post-processing. [Reference paper](https://arxiv.org/abs/1910.06278)
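A minimal sketch of toggling it, reusing the `tinypose_model` and `im` objects from `pptinypose_infer.py` (assuming the documented member is exposed as an attribute on the Python object):
```python
# Hypothetical usage of the documented use_dark member: disable DARK
# post-processing before running prediction.
tinypose_model.use_dark = False
tinypose_result = tinypose_model.predict(im)
```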
## Other Documents
- [PP-TinyPose model introduction](..)
- [PP-TinyPose C++ deployment](../cpp)
- [Description of model prediction results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../docs/cn/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,50 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--tinypose_model_dir",
required=True,
help="path of paddletinypose model directory")
parser.add_argument(
"--image", required=True, help="path of test image file.")
return parser.parse_args()
def build_tinypose_option(args):
option = fd.RuntimeOption()
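    # Select the Rockchip NPU (RKNPU2) backend for inference.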
option.use_rknpu()
return option
args = parse_arguments()
tinypose_model_file = os.path.join(args.tinypose_model_dir, "PP_TinyPose_256x192_infer_rk3588_unquantized.rknn")
tinypose_params_file = os.path.join(args.tinypose_model_dir, "")
tinypose_config_file = os.path.join(args.tinypose_model_dir, "infer_cfg.yml")
# Configure the runtime and load the model
runtime_option = build_tinypose_option(args)
tinypose_model = fd.vision.keypointdetection.PPTinyPose(
tinypose_model_file,
tinypose_params_file,
tinypose_config_file,
runtime_option=runtime_option,
model_format=fd.ModelFormat.RKNN)
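# Normalize and HWC->CHW permute are typically folded into the RKNN model
# during conversion, so host-side preprocessing is disabled here.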
tinypose_model.disable_normalize()
tinypose_model.disable_permute()
# Run prediction on the image
im = cv2.imread(args.image)
tinypose_result = tinypose_model.predict(im)
print("Paddle TinyPose Result:\n", tinypose_result)
# Visualize the prediction result
vis_im = fd.vision.vis_keypoint_detection(
im, tinypose_result, conf_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("TinyPose visualized result save in ./visualized_result.jpg")

View File

@@ -35,7 +35,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file,
auto vis_im_with_bg =
fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
cv::imwrite("visualized_result_fg.png", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
"and ./visualized_result_fg.jpg"
<< std::endl;
@@ -65,7 +65,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file,
auto vis_im_with_bg =
fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
cv::imwrite("visualized_result_fg.png", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
"and ./visualized_result_fg.jpg"
<< std::endl;
@@ -96,7 +96,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file,
auto vis_im_with_bg =
fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
cv::imwrite("visualized_result_fg.png", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
"and ./visualized_result_fg.jpg"
<< std::endl;

View File

@@ -57,7 +57,7 @@ print(result)
# Visualize the results
vis_im = fd.vision.vis_matting_alpha(im, result)
vis_im_with_bg = fd.vision.swap_background(im, bg, result)
cv2.imwrite("visualized_result_fg.jpg", vis_im)
cv2.imwrite("visualized_result_fg.png", vis_im)
cv2.imwrite("visualized_result_replaced_bg.jpg", vis_im_with_bg)
print(
"Visualized result save in ./visualized_result_replaced_bg.jpg and ./visualized_result_fg.jpg"

View File

@@ -1,42 +1,3 @@
English | [简体中文](README_CN.md)
# PP-Matting Model Deployment
PaddleSeg Matting deployment examples, please refer to [document](../../segmentation/ppmatting/README_CN.md).
## Model Description
- [PP-Matting Release/2.6](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.6/Matting)
## List of Supported Models
Now FastDeploy supports the deployment of the following models
- [PP-Matting models](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.6/Matting)
- [PP-HumanMatting models](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.6/Matting)
- [ModNet models](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.6/Matting)
## Export Deployment Model
Before deployment, PP-Matting needs to be exported into the deployment model. Refer to [Export Model](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.6/Matting) for more information. (Tips: You need to set the `--input_shape` parameter of the export script when exporting PP-Matting and PP-HumanMatting models)
## Download Pre-trained Models
For developers' testing, models exported by PP-Matting are provided below. Developers can download and use them directly.
The accuracy metrics are sourced from the model descriptions in PP-Matting (accuracy data are not provided here); refer to the introduction in PP-Matting for more details.
| Model | Parameter Size | Accuracy | Note |
|:---------------------------------------------------------------- |:----- |:----- | :------ |
| [PP-Matting-512](https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz) | 106MB | - | - |
| [PP-Matting-1024](https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-1024.tgz) | 106MB | - | - |
| [PP-HumanMatting](https://bj.bcebos.com/paddlehub/fastdeploy/PPHumanMatting.tgz) | 247MB | - | - |
| [Modnet-ResNet50_vd](https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_ResNet50_vd.tgz) | 355MB | - | - |
| [Modnet-MobileNetV2](https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_MobileNetV2.tgz) | 28MB | - | - |
| [Modnet-HRNet_w18](https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_HRNet_w18.tgz) | 51MB | - | - |
## Detailed Deployment Tutorials
- [Python Deployment](python)
- [C++ Deployment](cpp)
For PaddleSeg Matting deployment examples, please refer to the [document](../../segmentation/ppmatting/README_CN.md).

View File

@@ -1,93 +0,0 @@
English | [简体中文](README_CN.md)
# PP-Matting C++ Deployment Example
This directory provides `infer.cc`, an example that quickly completes the deployment of PP-Matting on CPU/GPU, as well as on GPU with TensorRT acceleration.
Before deployment, two steps require confirmation
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
Taking the PP-Matting inference on Linux as an example, the compilation test can be completed by executing the following command in this directory. FastDeploy version 0.7.0 or above (x.x.x>=0.7.0) is required to support this model.
```bash
mkdir build
cd build
# Download the FastDeploy precompiled library. Users can choose your appropriate version in the `FastDeploy Precompiled Library` mentioned above
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
# Download PP-Matting model files and test images
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz
tar -xvf PP-Matting-512.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg
wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg
# CPU inference
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 0
# GPU inference
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 1
# TensorRT inference on GPU
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 2
# kunlunxin XPU inference
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 3
```
The visualized result after running is as follows
<div width="840">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852040-759da522-fca4-4786-9205-88c622cd4a39.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852587-48895efc-d24a-43c9-aeec-d7b0362ab2b9.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852116-cf91445b-3a67-45d9-a675-c69fe77c383a.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852554-6960659f-4fd7-4506-b33b-54e1a9dd89bf.jpg">
</div>
The above commands work on Linux or MacOS. For how to use the SDK on Windows, refer to:
- [How to use FastDeploy C++ SDK in Windows](../../../../../docs/en/faq/use_sdk_on_windows.md)
## PP-Matting C++ Interface
### PPMatting Class
```c++
fastdeploy::vision::matting::PPMatting(
const string& model_file,
const string& params_file = "",
const string& config_file,
const RuntimeOption& runtime_option = RuntimeOption(),
const ModelFormat& model_format = ModelFormat::PADDLE)
```
PP-Matting model loading and initialization, among which model_file is the exported Paddle model format.
**Parameter**
> * **model_file**(str): Model file path
> * **params_file**(str): Parameter file path
> * **config_file**(str): Inference deployment configuration file
> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, which is the default configuration
> * **model_format**(ModelFormat): Model format. Paddle format by default
#### Predict Function
> ```c++
> PPMatting::Predict(cv::Mat* im, MattingResult* result)
> ```
>
> Model prediction interface. Takes an input image and directly outputs the matting result.
>
> **Parameter**
>
> > * **im**: Input image; note it must be in HWC layout, BGR format
> > * **result**: The matting result, i.e. the predicted alpha values. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the description of MattingResult
### Class Member Variable
#### Pre-processing Parameter
Users can modify the following pre-processing parameters according to their needs, which affects the final inference and deployment results
- [Model Description](../../)
- [Python Deployment](../python)
- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../docs/en/faq/how_to_change_backend.md)

View File

@@ -1,94 +0,0 @@
[English](README.md) | 简体中文
# PP-Matting C++ Deployment Example
This directory provides `infer.cc`, an example that quickly completes the deployment of PP-Matting on CPU/GPU, as well as on GPU with TensorRT acceleration.
Two steps need to be confirmed before deployment
- 1. The software and hardware environment meets the requirements; refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Download the precompiled deployment library and samples code according to your development environment; refer to [FastDeploy Precompiled Library](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
Taking PP-Matting inference on Linux as an example, run the following commands in this directory to complete the compilation test. FastDeploy version 0.7.0 or above (x.x.x>=0.7.0) is required to support this model.
```bash
mkdir build
cd build
# Download the FastDeploy precompiled library; users can choose an appropriate version from the `FastDeploy Precompiled Library` mentioned above
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
# Download the PP-Matting model files and test images
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz
tar -xvf PP-Matting-512.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg
wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg
# CPU inference
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 0
# GPU inference
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 1
# TensorRT inference on GPU
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 2
# KunlunXin XPU inference
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 3
```
The visualized result after running is as follows
<div width="840">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852040-759da522-fca4-4786-9205-88c622cd4a39.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852587-48895efc-d24a-43c9-aeec-d7b0362ab2b9.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852116-cf91445b-3a67-45d9-a675-c69fe77c383a.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852554-6960659f-4fd7-4506-b33b-54e1a9dd89bf.jpg">
</div>
The above commands only work on Linux or MacOS. For how to use the SDK on Windows, refer to:
- [How to use FastDeploy C++ SDK in Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)
## PP-Matting C++ Interface
### PPMatting Class
```c++
fastdeploy::vision::matting::PPMatting(
        const string& model_file,
        const string& params_file = "",
        const string& config_file,
        const RuntimeOption& runtime_option = RuntimeOption(),
        const ModelFormat& model_format = ModelFormat::PADDLE)
```
PP-Matting model loading and initialization, where model_file is the exported Paddle model format.
**Parameters**
> * **model_file**(str): Model file path
> * **params_file**(str): Parameter file path
> * **config_file**(str): Inference deployment configuration file
> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, i.e. the default configuration is used
> * **model_format**(ModelFormat): Model format. Paddle format by default
#### Predict Function
> ```c++
> PPMatting::Predict(cv::Mat* im, MattingResult* result)
> ```
>
> Model prediction interface. Takes an input image and directly outputs the matting result.
>
> **Parameters**
>
> > * **im**: Input image; note it must be in HWC layout, BGR format
> > * **result**: The matting result, i.e. the predicted alpha values. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the description of MattingResult
### Class Member Attributes
#### Pre-processing Parameters
Users can modify the following pre-processing parameters according to their needs, which affects the final inference and deployment results
- [Model introduction](../../)
- [Python deployment](../python)
- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../docs/cn/faq/how_to_change_backend.md)

View File

@@ -1,81 +0,0 @@
English | [简体中文](README_CN.md)
# PP-Matting Python Deployment Example
Before deployment, two steps require confirmation
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
- 2. Install FastDeploy Python whl package. Refer to [FastDeploy Python Installation](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
This directory provides `infer.py`, an example that quickly completes the deployment of PP-Matting on CPU/GPU, as well as on GPU with TensorRT acceleration. The script is as follows
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/matting/ppmatting/python
# Download PP-Matting model files and test images
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz
tar -xvf PP-Matting-512.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg
wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg
# CPU inference
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device cpu
# GPU inference
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu
# TensorRT inference on GPU (note: the first TensorRT run serializes the model, which takes some time; please be patient)
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu --use_trt True
# kunlunxin XPU inference
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device kunlunxin
```
The visualized result after running is as follows
<div width="840">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852040-759da522-fca4-4786-9205-88c622cd4a39.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852587-48895efc-d24a-43c9-aeec-d7b0362ab2b9.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852116-cf91445b-3a67-45d9-a675-c69fe77c383a.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852554-6960659f-4fd7-4506-b33b-54e1a9dd89bf.jpg">
</div>
## PP-Matting Python Interface
```python
fd.vision.matting.PPMatting(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
```
PP-Matting model loading and initialization, among which model_file, params_file, and config_file are the Paddle inference files exported from the training model. Refer to [Model Export](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.6/Matting) for more information
**Parameter**
> * **model_file**(str): Model file path
> * **params_file**(str): Parameter file path
> * **config_file**(str): Inference deployment configuration file
> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, which is the default configuration
> * **model_format**(ModelFormat): Model format. Paddle format by default
### predict function
> ```python
> PPMatting.predict(input_image)
> ```
>
> Model prediction interface. Takes an input image and directly outputs the matting result.
>
> **Parameter**
>
> > * **input_image**(np.ndarray): Input data; note it must be in HWC layout, BGR format
> **Return**
>
> > Return `fastdeploy.vision.MattingResult` structure. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for the description of the structure.
### Class Member Variable
#### Pre-processing Parameter
Users can modify the following pre-processing parameters according to their needs, which affects the final inference and deployment results
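As a minimal sketch of consuming the prediction, reusing `model` (a PPMatting instance) and `im` from the example above, and assuming `MattingResult` stores a flat `alpha` list plus its `shape` as described in the vision results docs:
```python
import numpy as np

result = model.predict(im)
# Assumed layout: alpha is a flat list of per-pixel foreground weights in
# [0, 1], and shape holds its (height, width).
h, w = result.shape[0], result.shape[1]
alpha = np.asarray(result.alpha, dtype=np.float32).reshape(h, w)
foreground = (im.astype(np.float32) * alpha[..., None]).astype(np.uint8)
```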
## Other Documents
- [PP-Matting Model Description](..)
- [PP-Matting C++ Deployment](../cpp)
- [Model Prediction Results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../docs/en/faq/how_to_change_backend.md)

View File

@@ -1,81 +0,0 @@
[English](README.md) | 简体中文
# PP-Matting Python Deployment Example
Two steps need to be confirmed before deployment
- 1. The software and hardware environment meets the requirements; refer to [FastDeploy Environment Requirements](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. Install the FastDeploy Python whl package; refer to [FastDeploy Python Installation](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
This directory provides `infer.py`, an example that quickly completes the deployment of PP-Matting on CPU/GPU, as well as on GPU with TensorRT acceleration. Run the following script to complete it
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/matting/ppmatting/python
# Download the PP-Matting model files and test images
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz
tar -xvf PP-Matting-512.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg
wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg
# CPU inference
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device cpu
# GPU inference
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu
# TensorRT inference on GPU (note: the first TensorRT run serializes the model, which takes some time; please be patient)
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu --use_trt True
# KunlunXin XPU inference
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device kunlunxin
```
The visualized result after running is as follows
<div width="840">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852040-759da522-fca4-4786-9205-88c622cd4a39.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852587-48895efc-d24a-43c9-aeec-d7b0362ab2b9.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852116-cf91445b-3a67-45d9-a675-c69fe77c383a.jpg">
<img width="200" height="200" float="left" src="https://user-images.githubusercontent.com/67993288/186852554-6960659f-4fd7-4506-b33b-54e1a9dd89bf.jpg">
</div>
## PP-Matting Python Interface
```python
fd.vision.matting.PPMatting(model_file, params_file, config_file, runtime_option=None, model_format=ModelFormat.PADDLE)
```
PP-Matting model loading and initialization, where model_file, params_file and config_file are the Paddle inference files exported from the training model. For details refer to the documentation: [Model Export](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.6/Matting)
**Parameters**
> * **model_file**(str): Model file path
> * **params_file**(str): Parameter file path
> * **config_file**(str): Inference deployment configuration file
> * **runtime_option**(RuntimeOption): Backend inference configuration. None by default, i.e. the default configuration is used
> * **model_format**(ModelFormat): Model format. Paddle format by default
### predict Function
> ```python
> PPMatting.predict(input_image)
> ```
>
> Model prediction interface. Takes an input image and directly outputs the matting result.
>
> **Parameters**
>
> > * **input_image**(np.ndarray): Input data; note it must be in HWC layout, BGR format
> **Returns**
>
> > Returns a `fastdeploy.vision.MattingResult` structure. Refer to [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for its description
### Class Member Attributes
#### Pre-processing Parameters
Users can modify the following pre-processing parameters according to their needs, which affects the final inference and deployment results
## Other Documents
- [PP-Matting model introduction](..)
- [PP-Matting C++ deployment](../cpp)
- [Description of model prediction results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../docs/cn/faq/how_to_change_backend.md)

View File

@@ -41,7 +41,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file,
auto vis_im_with_bg =
fastdeploy::vision::SwapBackground(im_bak, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
cv::imwrite("visualized_result_fg.png", vis_im);
std::cout << "Visualized result save in ./visualized_result.jpg "
"and ./visualized_result_fg.jpg"
<< std::endl;
@@ -69,7 +69,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file,
auto vis_im_with_bg =
fastdeploy::vision::SwapBackground(im_bak, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
cv::imwrite("visualized_result_fg.png", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
"and ./visualized_result_fg.jpg"
<< std::endl;
@@ -103,7 +103,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file,
auto vis_im_with_bg =
fastdeploy::vision::SwapBackground(im_bak, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
cv::imwrite("visualized_result_fg.png", vis_im);
std::cout << "Visualized result save in ./visualized_result.jpg "
"and ./visualized_result_fg.jpg"
<< std::endl;

View File

@@ -105,7 +105,7 @@ if args.image is not None:
# Visualize the results
vis_im = fd.vision.vis_matting(im, result)
vis_im_with_bg = fd.vision.swap_background(im, bg, result)
cv2.imwrite("visualized_result_fg.jpg", vis_im)
cv2.imwrite("visualized_result_fg.png", vis_im)
cv2.imwrite("visualized_result_replaced_bg.jpg", vis_im_with_bg)
print(
"Visualized result save in ./visualized_result_replaced_bg.jpg and ./visualized_result_fg.jpg"

View File

@@ -1,49 +1,32 @@
English | [简体中文](README_CN.md)
# PaddleSeg Model Deployment
# PaddleSeg High-Performance All-Scenario Model Deployment with FastDeploy
## Model Description
## Introduction to FastDeploy
- [PaddleSeg develop](https://github.com/PaddlePaddle/PaddleSeg/tree/develop)
[FastDeploy](https://github.com/PaddlePaddle/FastDeploy) is an all-scenario, easy-to-use, flexible and highly efficient AI inference deployment tool. With FastDeploy, PaddleSeg models can be deployed quickly and efficiently on more than 10 kinds of hardware
FastDeploy currently supports the deployment of the following models
## Supported Deployment Hardware
- [U-Net models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/unet/README.md)
- [PP-LiteSeg models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/pp_liteseg/README.md)
- [PP-HumanSeg models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/contrib/PP-HumanSeg/README.md)
- [FCN models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/fcn/README.md)
- [DeepLabV3 models](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.6/configs/deeplabv3/README.md)
[Attention] For **PP-Matting**, **PP-HumanMatting** and **ModNet** deployment, please refer to [Matting Model Deployment](../../matting)
## Prepare PaddleSeg Deployment Model
For the export of the PaddleSeg model, refer to [Model Export](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/docs/model_export_cn.md) for more information
**Attention**
- The exported PaddleSeg model contains three files: `model.pdmodel`, `model.pdiparams` and `deploy.yaml`. FastDeploy reads the preprocessing information required for inference from the yaml file.
## Download Pre-trained Model
For developers' testing, part of the PaddleSeg exported models are provided below.
- without-argmax export mode: `--input_shape` **not specified**, `--output_op none` **specified**
- with-argmax export mode: `--input_shape` **not specified**, `--output_op argmax` **specified**
Developers can download them directly.
| Supported Hardware | | | |
|:----- | :-- | :-- | :-- |
| [NVIDIA GPU](cpu-gpu) | [X86 CPU](cpu-gpu)| [Phytium CPU](cpu-gpu) | [ARM CPU](cpu-gpu) |
| [Intel GPU (discrete/integrated)](cpu-gpu) | [KunlunXin](kunlun) | [Ascend](ascend) | [Rockchip](rockchip) |
| [Amlogic](amlogic) | [Sophgo](sophgo) | | |
| Model | Parameter Size | Input Shape | mIoU | mIoU (flip) | mIoU (ms+flip) |
|:---------------------------------------------------------------- |:----- |:----- | :----- | :----- | :----- |
| [Unet-cityscapes-with-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/Unet_cityscapes_with_argmax_infer.tgz) \| [Unet-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/Unet_cityscapes_without_argmax_infer.tgz) | 52MB | 1024x512 | 65.00% | 66.02% | 66.89% |
| [PP-LiteSeg-B(STDC2)-cityscapes-with-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz) \| [PP-LiteSeg-B(STDC2)-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz) | 31MB | 1024x512 | 79.04% | 79.52% | 79.85% |
|[PP-HumanSegV1-Lite-with-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/Portrait_PP_HumanSegV1_Lite_with_argmax_infer.tgz) \| [PP-HumanSegV1-Lite-without-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Lite_infer.tgz) | 543KB | 192x192 | 86.2% | - | - |
|[PP-HumanSegV2-Lite-with-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Lite_192x192_with_argmax_infer.tgz) \| [PP-HumanSegV2-Lite-without-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Lite_192x192_infer.tgz) | 12MB | 192x192 | 92.52% | - | - |
| [PP-HumanSegV2-Mobile-with-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Mobile_192x192_with_argmax_infer.tgz) \| [PP-HumanSegV2-Mobile-without-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Mobile_192x192_infer.tgz) | 29MB | 192x192 | 93.13% | - | - |
|[PP-HumanSegV1-Server-with-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Server_with_argmax_infer.tgz) \| [PP-HumanSegV1-Server-without-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Server_infer.tgz) | 103MB | 512x512 | 96.47% | - | - |
| [Portait-PP-HumanSegV2-Lite-with-argmax(Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/Portrait_PP_HumanSegV2_Lite_256x144_with_argmax_infer.tgz) \| [Portait-PP-HumanSegV2-Lite-without-argmax(Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/Portrait_PP_HumanSegV2_Lite_256x144_infer.tgz) | 3.6M | 256x144 | 96.63% | - | - |
| [FCN-HRNet-W18-cityscapes-with-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/FCN_HRNet_W18_cityscapes_with_argmax_infer.tgz) \| [FCN-HRNet-W18-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/FCN_HRNet_W18_cityscapes_without_argmax_infer.tgz)(GPU inference for ONNXRuntime is not supported now) | 37MB | 1024x512 | 78.97% | 79.49% | 79.74% |
| [Deeplabv3-ResNet101-OS8-cityscapes-with-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/Deeplabv3_ResNet101_OS8_cityscapes_with_argmax_infer.tgz) \| [Deeplabv3-ResNet101-OS8-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/Deeplabv3_ResNet101_OS8_cityscapes_without_argmax_infer.tgz) | 150MB | 1024x512 | 79.90% | 80.22% | 80.47% |
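For a quick start with one of the exported models above, a minimal Python sketch along these lines can run inference (assuming the FastDeploy wheel is installed and the archive has been extracted; see the deployment docs below for complete examples):
```python
import cv2
import fastdeploy as fd

m = "PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer"
model = fd.vision.segmentation.PaddleSegModel(
    m + "/model.pdmodel", m + "/model.pdiparams", m + "/deploy.yaml")

im = cv2.imread("cityscapes_demo.png")
result = model.predict(im)  # SegmentationResult with per-pixel labels
print(result)
```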
## More Deployment Options
## Detailed Deployment Tutorials
- [Android ARM CPU deployment](android)
- [Serving deployment](serving)
- [Web deployment](web)
- [Automated model compression tool](quantize)
- [Python Deployment](python)
- [C++ Deployment](cpp)
## FAQ
If you run into problems, check the FAQ collection or search the FastDeploy issues via the links below:
[FAQ collection](https://github.com/PaddlePaddle/FastDeploy/tree/develop/docs/cn/faq)
[FastDeploy issues](https://github.com/PaddlePaddle/FastDeploy/issues)
If none of the above resolves the problem, feel free to file a new [issue](https://github.com/PaddlePaddle/FastDeploy/issues) with FastDeploy

View File

@@ -1,12 +0,0 @@
English | [简体中文](README_CN.md)
# Deployment of the PP-LiteSeg Quantized Model on A311D
FastDeploy now supports deploying quantized PP-LiteSeg models to the A311D based on Paddle Lite.
For model quantization and download of quantized models, refer to [Model Quantization](../quantize/README.md)
## Detailed Deployment Tutorials
Only C++ deployment is supported on A311D.
- [C++ deployment](cpp)

View File

@@ -1,12 +0,0 @@
[English](README.md) | 简体中文
# Deployment of the PP-LiteSeg Quantized Model on A311D
FastDeploy now supports deploying quantized PP-LiteSeg models to the A311D based on Paddle Lite.
For model quantization and downloading quantized models, refer to: [Model Quantization](../quantize/README.md)
## Detailed Deployment Tutorials
Only C++ deployment is supported on the A311D.
- [C++ deployment](cpp)
View File

@@ -1,59 +0,0 @@
English | [简体中文](README_CN.md)
# PP-LiteSeg Quantized Model C++ Deployment Example
`infer.cc` in this directory helps you quickly complete accelerated inference deployment of the quantized PP-LiteSeg model on the A311D.
## Deployment Preparations
### FastDeploy Cross-compile Environment Preparations
1. For the software and hardware environment, and the cross-compile environment, please refer to [FastDeploy Cross-compile environment](../../../../../../docs/en/build_and_install/a311d.md#Cross-compilation-environment-construction).
### Model Preparations
1. You can directly use the quantized model provided by FastDeploy for deployment.
2. You can quantize a model yourself with the one-click automatic compression tool provided by FastDeploy, and deploy the resulting quantized model. (Note: the quantized model still needs the deploy.yaml file from the FP32 model folder for inference; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
3. The model requires heterogeneous computation. Please refer to: [Heterogeneous Computation](./../../../../../../docs/en/faq/heterogeneous_computing_on_timvx_npu.md). Since the model is already provided, you can test the heterogeneous file we provide first to verify whether the accuracy meets the requirements.
For more information, please refer to [Model Quantization](../../quantize/README.md)
## Deploying the Quantized PP-LiteSeg Segmentation model on A311D
Please follow these steps to complete the deployment of the PP-LiteSeg quantization model on A311D.
1. Cross-compile the FastDeploy library as described in [Cross-compile FastDeploy](../../../../../../docs/en/build_and_install/a311d.md#FastDeploy-cross-compilation-library-compilation-based-on-Paddle-Lite)
2. Copy the compiled library to the current directory. You can run this line:
```bash
cp -r FastDeploy/build/fastdeploy-timvx/ FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp
```
3. Download the model and example images required for deployment in current path.
```bash
cd FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp
mkdir models && mkdir images
wget https://bj.bcebos.com/fastdeploy/models/rk1/ppliteseg.tar.gz
tar -xvf ppliteseg.tar.gz
cp -r ppliteseg models
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
cp -r cityscapes_demo.png images
```
4. Compile the deployment example. You can run the following lines:
```bash
cd FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp
mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-timvx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-timvx -DTARGET_ABI=arm64 ..
make -j8
make install
# After success, an install folder will be created with a running demo and libraries required for deployment.
```
5. Deploy the PP-LiteSeg segmentation model to A311D based on adb. You can run the following lines:
```bash
# Go to the install directory.
cd FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp/build/install/
# The following line represents: bash run_with_adb.sh, demo needed to run, model path, image path, DEVICE ID.
bash run_with_adb.sh infer_demo ppliteseg cityscapes_demo.png $DEVICE_ID
```
The output is:
<img width="640" src="https://user-images.githubusercontent.com/30516196/205544166-9b2719ff-ed82-4908-b90a-095de47392e1.png">
Please note that the model deployed on A311D needs to be quantized. You can refer to [Model Quantization](../../../../../../docs/en/quantize.md).

View File

@@ -1,59 +0,0 @@
[English](README.md) | 简体中文
# PP-LiteSeg Quantized Model C++ Deployment Example
`infer.cc` in this directory helps you quickly complete accelerated inference deployment of the quantized PP-LiteSeg model on the A311D.
## Deployment Preparations
### FastDeploy Cross-compile Environment Preparations
1. For the software and hardware requirements and the preparation of the cross-compile environment, refer to: [FastDeploy Cross-compile Environment Preparations](../../../../../../docs/cn/build_and_install/a311d.md#交叉编译环境搭建)
### Model Preparations
1. You can directly deploy the quantized models provided by FastDeploy.
2. You can quantize a model yourself with the one-click automatic compression tool provided by FastDeploy, and deploy the resulting quantized model. (Note: the quantized model still needs the deploy.yaml file from the FP32 model folder for inference; a self-quantized model folder does not contain this yaml file, so copy it from the FP32 model folder into the quantized model folder.)
3. The model requires heterogeneous computing; for the heterogeneous computing file refer to: [Heterogeneous Computing](./../../../../../../docs/cn/faq/heterogeneous_computing_on_timvx_npu.md). Since FastDeploy already provides the model, you can first test the heterogeneous file we provide to verify whether the accuracy meets the requirements.
For more information on quantization, see [Model Quantization](../../quantize/README.md)
## Deploying the Quantized PP-LiteSeg Segmentation Model on A311D
Follow these steps to complete the deployment of the PP-LiteSeg quantized model on the A311D:
1. Cross-compile the FastDeploy library; for details refer to: [Cross-compile FastDeploy](../../../../../../docs/cn/build_and_install/a311d.md#基于-paddle-lite-的-fastdeploy-交叉编译库编译)
2. Copy the compiled library to the current directory with:
```bash
cp -r FastDeploy/build/fastdeploy-timvx/ FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp
```
3. Download the model and example image required for deployment into the current path:
```bash
cd FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp
mkdir models && mkdir images
wget https://bj.bcebos.com/fastdeploy/models/rk1/ppliteseg.tar.gz
tar -xvf ppliteseg.tar.gz
cp -r ppliteseg models
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
cp -r cityscapes_demo.png images
```
4. Compile the deployment example with:
```bash
cd FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp
mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-timvx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-timvx -DTARGET_ABI=arm64 ..
make -j8
make install
# After a successful build, an install folder is created containing a runnable demo and the libraries required for deployment
```
5. Deploy the PP-LiteSeg segmentation model to the Amlogic A311D via adb with:
```bash
# Enter the install directory
cd FastDeploy/examples/vision/segmentation/paddleseg/a311d/cpp/build/install/
# The following line means: bash run_with_adb.sh <demo to run> <model path> <image path> <DEVICE_ID>
bash run_with_adb.sh infer_demo ppliteseg cityscapes_demo.png $DEVICE_ID
```
After successful deployment, the output is:
<img width="640" src="https://user-images.githubusercontent.com/30516196/205544166-9b2719ff-ed82-4908-b90a-095de47392e1.png">
Note in particular that the model deployed on the A311D must be a quantized model; for model quantization refer to: [Model Quantization](../../../../../../docs/cn/quantize.md)

View File

@@ -0,0 +1,45 @@
[English](README.md) | 简体中文
# Deploying PaddleSeg Models on Amlogic NPUs with FastDeploy
## Amlogic Chips Supported by PaddleSeg
Deployment on the following chips is supported
- Amlogic A311D
- Amlogic C308X
- Amlogic S905D3
This example uses the Amlogic A311D to show how to deploy PaddleSeg models with FastDeploy
The Amlogic A311D is an advanced AI application processor. PaddleSeg supports deploying segmentation models on the A311D via FastDeploy, based on Paddle-Lite
>> **Note**: As an IP vendor, VeriSilicon does not ship physical SoC products itself; it licenses its IP to chip vendors such as Amlogic and Rockchip. This document therefore applies to chip products licensed with VeriSilicon's NPU IP. As long as a chip product has not substantially modified VeriSilicon's underlying libraries, it can use this document as a reference and tutorial for Paddle Lite inference deployment. In this document, the NPUs in Amlogic SoCs and Rockchip SoCs are collectively referred to as VeriSilicon NPUs.
## PaddleSeg Models Supported on the Amlogic A311D
- [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg)
>> **Note**: Segmentation models from PaddleSeg versions above 2.6 are supported
The PaddleSeg models currently supported on the Amlogic A311D are
- [PP-LiteSeg models](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/configs/pp_liteseg/README.md)
## Pre-exported Quantized Inference Models
For developers' testing, some quantized inference models exported from PaddleSeg are provided below for direct download and use.
| Model | Parameter File Size | Input Shape | mIoU | mIoU (flip) | mIoU (ms+flip) |
|:---------------------------------------------------------------- |:----- |:----- | :----- | :----- | :----- |
| [PP-LiteSeg-T(STDC1)-cityscapes-without-argmax](https://bj.bcebos.com/fastdeploy/models/rk1/ppliteseg.tar.gz)| 31MB | 1024x512 | 77.04% | 77.73% | 77.46% |
**Note**
- The quantized PaddleSeg model contains four files: `model.pdmodel`, `model.pdiparams`, `deploy.yaml` and `subgraph.txt`. FastDeploy reads the preprocessing information required at inference time from the yaml file; subgraph.txt is a configuration file stored for heterogeneous computing
- If no model in the list above meets your requirements, you can export a model adapted to the A311D yourself following the tutorial below
## Exporting a PaddleSeg Dynamic-graph Model as an A311D-supported INT8 Model
Model export consists of the following two steps
1. Export the dynamic-graph model trained with PaddleSeg as a static-graph inference model; refer to its documentation: [Model Export](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/docs/model_export_cn.md)
The Amlogic A311D only supports INT8
2. Quantize and compress the inference model into an INT8 model; for FastDeploy's quantization methods and the one-click automatic compression tool, see [Model Quantization](../../../quantize/README.md)
## Detailed Deployment Tutorials
Currently only C++ deployment is supported on the A311D.
- [C++ deployment](cpp)

View File

@@ -0,0 +1,59 @@
[English](README.md) | 简体中文
# PP-LiteSeg Quantized Model C++ Deployment Example
`infer.cc` in this directory helps you quickly complete accelerated inference deployment of the quantized PP-LiteSeg model on the Amlogic A311D.
## Deployment Preparations
### FastDeploy Cross-compile Environment Preparations
For the software and hardware requirements and the preparation of the cross-compile environment, refer to: [FastDeploy](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#自行编译安装)
### Model Preparations
1. You can directly deploy the [quantized models provided by FastDeploy](../README_CN.md#晶晨a311d支持的paddleseg模型).
2. If FastDeploy does not provide a quantized model that meets your requirements, you can export or train a quantized model yourself following [Exporting a PaddleSeg dynamic-graph model as an A311D-supported INT8 model](../README_CN.md#paddleseg动态图模型导出为a311d支持的int8模型)
3. If the exported or trained model shows accuracy degradation or errors, heterogeneous computing is needed so that part of the model's operators run on the A311D's ARM CPU for debugging and accuracy verification; the file required for heterogeneous computing is subgraph.txt. For details on heterogeneous computing see [Heterogeneous Computing](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/heterogeneous_computing_on_timvx_npu.md).
## Deploying the Quantized PP-LiteSeg Segmentation Model on A311D
Follow these steps to complete the deployment of the PP-LiteSeg quantized model on the A311D:
1. Copy the compiled library to the current directory with:
```bash
cp -r FastDeploy/build/fastdeploy-timvx/ path/to/paddleseg/amlogic/a311d/cpp
```
2. Download the model and example image required for deployment into the current path:
```bash
cd path/to/paddleseg/amlogic/a311d/cpp
mkdir models && mkdir images
wget https://bj.bcebos.com/fastdeploy/models/rk1/ppliteseg.tar.gz
tar -xvf ppliteseg.tar.gz
cp -r ppliteseg models
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
cp -r cityscapes_demo.png images
```
3. Compile the deployment example with:
```bash
cd path/to/paddleseg/amlogic/a311d/cpp
mkdir build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=${PWD}/../fastdeploy-timvx/toolchain.cmake -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-timvx -DTARGET_ABI=arm64 ..
make -j8
make install
# After a successful build, an install folder is created containing a runnable demo and the libraries required for deployment
```
4. Deploy the PP-LiteSeg segmentation model to the Amlogic A311D via adb with:
```bash
# Enter the install directory
cd path/to/paddleseg/amlogic/a311d/cpp/build/install/
cp ../../run_with_adb.sh .
# The following line means: bash run_with_adb.sh <demo to run> <model path> <image path> <DEVICE_ID>
bash run_with_adb.sh infer_demo ppliteseg cityscapes_demo.png $DEVICE_ID
```
After successful deployment, the output is:
<img width="640" src="https://user-images.githubusercontent.com/30516196/205544166-9b2719ff-ed82-4908-b90a-095de47392e1.png">
## Quick Links
- [PaddleSeg C++ API docs](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/namespacefastdeploy_1_1vision_1_1segmentation.html)
- [Overview of deploying PaddleSeg models with FastDeploy](../../)

View File

@@ -24,13 +24,13 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file) {
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "deploy.yaml";
auto subgraph_file = model_dir + sep + "subgraph.txt";
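  // subgraph.txt is the heterogeneous-computing config generated during
  // quantization; it pins the ops that cannot run on the NPU to the ARM CPU.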
fastdeploy::vision::EnableFlyCV();
fastdeploy::vision::EnableFlyCV();
fastdeploy::RuntimeOption option;
option.UseTimVX();
option.SetLiteSubgraphPartitionPath(subgraph_file);
auto model = fastdeploy::vision::segmentation::PaddleSegModel(
model_file, params_file, config_file,option);
model_file, params_file, config_file, option);
assert(model.Initialized());

View File

@@ -1,5 +1,5 @@
English | [简体中文](README_CN.md)
# PaddleSeg Android Demo for Target Detection
# PaddleSeg Android Demo for Image Segmentation
For real-time portrait segmentation on Android, this demo has good ease of use and openness. You can run your own trained model in the demo.
@@ -10,7 +10,7 @@ For real-time portrait segmentation on Android, this demo has good ease of use a
## Deployment Steps
1. Target detection PaddleSeg Demo is located in `fastdeploy/examples/vision/segmentation/paddleseg/android` directory.
1. Image Segmentation PaddleSeg Demo is located in `fastdeploy/examples/vision/segmentation/paddleseg/android` directory.
2. Please use Android Studio to open paddleseg/android project.
3. Connect your phone to your computer, turn on USB debugging and file transfer mode, and connect your own mobile device on Android Studio (your phone needs to be enabled to allow software installation from USB).
@@ -19,7 +19,7 @@ For real-time portrait segmentation on Android, this demo has good ease of use a
</p>
> **Notes:**
>> If you encounter an NDK configuration error during importing, compiling or running the program, please open `File > Project Structure > SDK Location` and change `Android SDK location` to your locally configured SDK path.
>> If you encounter an NDK configuration error during importing, compiling or running the program, please open `File > Project Structure > SDK Location` and change `Android SDK location` to your locally configured SDK path.
4. Click the Run button to automatically compile the APP and install it to your phone. (The process will automatically download the pre-compiled FastDeploy Android library and model files, internet connection required.)
The success interface is as follows. Figure 1: install the APP on the phone; Figure 2: the opening interface, which automatically recognizes the person in the picture and draws the mask; Figure 3: the APP settings, where clicking setting in the upper right corner lets you configure different options.
@@ -159,11 +159,11 @@ model.init(modelFile, paramFile, configFile, option);
For details, please refer to [SegmentationMainActivity](./app/src/main/java/com/baidu/paddle/fastdeploy/app/examples/segmentation/SegmentationMainActivity.java).
## Replace FastDeploy SDK and model
Steps to replace the FastDeploy prediction libraries and model are very simple. The location of the prediction library is `app/libs/fastdeploy-android-sdk-xxx.aar`, where `xxx` indicates the version of the prediction library you are currently using. The location of the model is, `app/src/main/assets/models/portrait_pp_humansegv2_lite_256x144_inference_model`.
Steps to replace the FastDeploy prediction libraries and model are very simple. The location of the prediction library is `app/libs/fastdeploy-android-sdk-xxx.aar`, where `xxx` indicates the version of the prediction library you are currently using. The location of the model is, `app/src/main/assets/models/portrait_pp_humansegv2_lite_256x144_inference_model`.
- Replace FastDeploy Android SDK: Download or compile the latest FastDeploy Android SDK, unzip it and put it in the `app/libs` directory. For details please refer to:
- [Use FastDeploy Java SDK on Android](../../../../../java/android/)
- [Use FastDeploy Java SDK on Android](https://github.com/PaddlePaddle/FastDeploy/tree/develop/java/android)
- Steps for replacing the PaddleSeg model.
- Steps for replacing the PaddleSeg model.
- Put your PaddleSeg model in `app/src/main/assets/models`;
- Modify the model path in `app/src/main/res/values/strings.xml`, such as:
```xml
@@ -173,5 +173,5 @@ For details, please refer to [SegmentationMainActivity](./app/src/main/java/com/
## Other Documents
If you are interested in more FastDeploy Java API documents and how to access the FastDeploy C++ API via JNI, you can refer to the following:
- [Use FastDeploy Java SDK on Android](../../../../../java/android/)
- [Use FastDeploy C++ SDK on Android](../../../../../docs/en/faq/use_cpp_sdk_on_android.md)
- [Use FastDeploy Java SDK on Android](https://github.com/PaddlePaddle/FastDeploy/tree/develop/java/android)
- [Use FastDeploy C++ SDK on Android](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/use_cpp_sdk_on_android.md)

View File

@@ -1,5 +1,5 @@
[English](README.md) | 简体中文
# PaddleSeg Android Demo for Object Detection (user guide)
# PaddleSeg Android Demo for Image Segmentation (user guide)
Real-time portrait segmentation on Android. This demo offers good ease of use and openness, e.g. you can run your own trained model in it.
@@ -10,7 +10,7 @@
## Deployment Steps
1. The object detection PaddleSeg Demo is located in the `fastdeploy/examples/vision/segmentation/paddleseg/android` directory
1. The image segmentation PaddleSeg Demo is located in the `path/to/paddleseg/android` directory
2. Open the paddleseg/android project with Android Studio
3. Connect your phone to the computer, enable USB debugging and file transfer mode, and connect your own mobile device in Android Studio (the phone must allow software installation from USB)
@@ -161,7 +161,7 @@ model.init(modelFile, paramFile, configFile, option);
## Replace the FastDeploy SDK and Model
Replacing the FastDeploy prediction library and model is very simple. The prediction library is located at `app/libs/fastdeploy-android-sdk-xxx.aar`, where `xxx` is the version of the prediction library you are currently using. The model is located at `app/src/main/assets/models/portrait_pp_humansegv2_lite_256x144_inference_model`.
- Replace the FastDeploy Android SDK: download or compile the latest FastDeploy Android SDK, unzip it and put it in the `app/libs` directory; for detailed configuration refer to:
- [Use FastDeploy Java SDK on Android](../../../../../java/android/)
- [Use FastDeploy Java SDK on Android](https://github.com/PaddlePaddle/FastDeploy/tree/develop/java/android)
- Steps for replacing the PaddleSeg model
- Put your PaddleSeg model in the `app/src/main/assets/models` directory;
@@ -173,5 +173,5 @@ model.init(modelFile, paramFile, configFile, option);
## More Reference Documents
For more FastDeploy Java API documentation and how to access FastDeploy C++ APIs via JNI, refer to the following:
- [Use FastDeploy Java SDK on Android](../../../../../java/android/)
- [Use FastDeploy C++ SDK on Android](../../../../../docs/cn/faq/use_cpp_sdk_on_android.md)
- [Use FastDeploy Java SDK on Android](https://github.com/PaddlePaddle/FastDeploy/tree/develop/java/android)
- [Use FastDeploy C++ SDK on Android](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/use_cpp_sdk_on_android.md)

View File

@@ -0,0 +1,51 @@
[English](README.md) | 简体中文
# Deploying PaddleSeg Models on Huawei Ascend with FastDeploy
PaddleSeg supports deploying segmentation models on Huawei Ascend via FastDeploy
## Supported PaddleSeg Models
- [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg)
>> **Note**: Segmentation models from PaddleSeg versions above 2.6 are supported
FastDeploy currently supports the deployment of the following models
- [PP-LiteSeg models](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/configs/pp_liteseg/README.md)
- [PP-HumanSeg models](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/contrib/PP-HumanSeg/README.md)
- [FCN models](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/configs/fcn/README.md)
- [DeepLabV3 models](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/configs/deeplabv3/README.md)
- [SegFormer models](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/configs/segformer/README.md)
>>**Note**: To deploy **PP-Matting** or **PP-HumanMatting** on Huawei Ascend, download the corresponding model from [Matting Model Deployment](../../ppmatting/); the deployment process is the same as in this document
## Prepare PaddleSeg Deployment Models
For exporting PaddleSeg models, refer to the documentation: [Model Export](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/docs/model_export_cn.md)
**Note**
- The exported PaddleSeg model contains three files: `model.pdmodel`, `model.pdiparams` and `deploy.yaml`. FastDeploy reads the preprocessing information required at inference time from the yaml file
## Pre-exported Inference Models
For developers' testing, some inference models exported from PaddleSeg are provided below
- without-argmax export mode: `--input_shape` **not specified**, `--output_op none` **specified**
- with-argmax export mode: `--input_shape` **not specified**, `--output_op argmax` **specified**
Developers can download and use them directly.
| Model | Parameter File Size | Input Shape | mIoU | mIoU (flip) | mIoU (ms+flip) |
|:---------------------------------------------------------------- |:----- |:----- | :----- | :----- | :----- |
| [PP-LiteSeg-B(STDC2)-cityscapes-with-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz) \| [PP-LiteSeg-B(STDC2)-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz) | 31MB | 1024x512 | 79.04% | 79.52% | 79.85% |
|[PP-HumanSegV1-Lite-with-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/Portrait_PP_HumanSegV1_Lite_with_argmax_infer.tgz) \| [PP-HumanSegV1-Lite-without-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Lite_infer.tgz) | 543KB | 192x192 | 86.2% | - | - |
|[PP-HumanSegV2-Lite-with-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Lite_192x192_with_argmax_infer.tgz) \| [PP-HumanSegV2-Lite-without-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Lite_192x192_infer.tgz) | 12MB | 192x192 | 92.52% | - | - |
| [PP-HumanSegV2-Mobile-with-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Mobile_192x192_with_argmax_infer.tgz) \| [PP-HumanSegV2-Mobile-without-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Mobile_192x192_infer.tgz) | 29MB | 192x192 | 93.13% | - | - |
|[PP-HumanSegV1-Server-with-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Server_with_argmax_infer.tgz) \| [PP-HumanSegV1-Server-without-argmax(General Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Server_infer.tgz) | 103MB | 512x512 | 96.47% | - | - |
| [Portait-PP-HumanSegV2-Lite-with-argmax(Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/Portrait_PP_HumanSegV2_Lite_256x144_with_argmax_infer.tgz) \| [Portait-PP-HumanSegV2-Lite-without-argmax(Portrait Segmentation Model)](https://bj.bcebos.com/paddlehub/fastdeploy/Portrait_PP_HumanSegV2_Lite_256x144_infer.tgz) | 3.6M | 256x144 | 96.63% | - | - |
| [FCN-HRNet-W18-cityscapes-with-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/FCN_HRNet_W18_cityscapes_with_argmax_infer.tgz) \| [FCN-HRNet-W18-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/FCN_HRNet_W18_cityscapes_without_argmax_infer.tgz)(GPU inference with ONNX Runtime is not supported yet) | 37MB | 1024x512 | 78.97% | 79.49% | 79.74% |
| [Deeplabv3-ResNet101-OS8-cityscapes-with-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/Deeplabv3_ResNet101_OS8_cityscapes_with_argmax_infer.tgz) \| [Deeplabv3-ResNet101-OS8-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/Deeplabv3_ResNet101_OS8_cityscapes_without_argmax_infer.tgz) | 150MB | 1024x512 | 79.90% | 80.22% | 80.47% |
| [SegFormer_B0-cityscapes-with-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/SegFormer_B0-cityscapes-with-argmax.tgz) \| [SegFormer_B0-cityscapes-without-argmax](https://bj.bcebos.com/paddlehub/fastdeploy/SegFormer_B0-cityscapes-without-argmax.tgz) | 15MB | 1024x1024 | 76.73% | 77.16% | - |
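As a minimal Python sketch of running one of the models above on Ascend (assuming a FastDeploy package built with Ascend support; the Python option name mirrors the C++ `UseAscend` used in the C++ example):
```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_ascend()  # select the Huawei Ascend NPU backend

m = "PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer"
model = fd.vision.segmentation.PaddleSegModel(
    m + "/model.pdmodel", m + "/model.pdiparams", m + "/deploy.yaml",
    runtime_option=option)

result = model.predict(cv2.imread("cityscapes_demo.png"))
print(result)
```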
## Detailed Deployment Tutorials
- [Python deployment](python)
- [C++ deployment](cpp)

View File

@@ -0,0 +1,38 @@
[English](README.md) | 简体中文
# PaddleSeg C++ Deployment Example
This directory provides `infer.cc`, which quickly completes an example of deploying PP-LiteSeg on Huawei Ascend.
## Preparing the FastDeploy Compilation Environment for the Huawei Ascend NPU
Before deployment, you need to compile the prediction library for the Huawei Ascend NPU yourself; refer to the document: [Compiling the Huawei Ascend NPU deployment environment](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#自行编译安装)
>>**Note**: For the **PP-Matting** and **PP-HumanMatting** models, please download them from [Matting Model Deployment](../../../ppmatting/)
```bash
# Download the deployment example code
cd path/to/paddleseg/ascend/cpp
mkdir build
cd build
# Compile infer_demo using the compiled FastDeploy library
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-ascend
make -j
# Download the PP-LiteSeg model files and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz
tar -xvf PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
# Huawei Ascend inference
./infer_demo PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer cityscapes_demo.png
```
The visualized result after running is as follows
<div align="center">
<img src="https://user-images.githubusercontent.com/16222477/191712880-91ae128d-247a-43e0-b1e3-cafae78431e0.jpg", width=512px, height=256px />
</div>
## Quick Links
- [PaddleSeg C++ API docs](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/namespacefastdeploy_1_1vision_1_1segmentation.html)
- [Overview of deploying PaddleSeg models with FastDeploy](../../)
- [Python deployment](../python)

View File

@@ -13,25 +13,28 @@
// limitations under the License.
#include "fastdeploy/vision.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
void InitAndInfer(const std::string& model_dir, const std::string& image_file,
const fastdeploy::RuntimeOption& option) {
void AscendInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "deploy.yaml";
auto option = fastdeploy::RuntimeOption();
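  // Select the Huawei Ascend NPU as the inference backend.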
option.UseAscend();
auto model = fastdeploy::vision::segmentation::PaddleSegModel(
model_file, params_file, config_file,option);
model_file, params_file, config_file, option);
assert(model.Initialized());
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
auto im_bak = im.clone();
fastdeploy::vision::SegmentationResult res;
if (!model.Predict(im, &res)) {
@@ -40,37 +43,20 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file,
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisSegmentation(im, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout << "Usage: infer_demo path/to/quant_model "
"path/to/image "
"run_option, "
"e.g ./infer_demo ./ResNet50_vd_quant ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run on cpu with ORT "
"backend; 1: run "
"on gpu with TensorRT backend. "
<< std::endl;
if (argc < 3) {
std::cout
<< "Usage: infer_demo path/to/model_dir path/to/image run_option, "
"e.g ./infer_model ./ppseg_model_dir ./test.jpeg"
<< std::endl;
return -1;
}
fastdeploy::RuntimeOption option;
int flag = std::atoi(argv[3]);
if (flag == 0) {
option.UseCpu();
option.UseOrtBackend();
} else if (flag == 1) {
option.UseCpu();
option.UsePaddleInferBackend();
}
std::string model_dir = argv[1];
std::string test_image = argv[2];
InitAndInfer(model_dir, test_image, option);
AscendInfer(argv[1], argv[2]);
return 0;
}
}

Some files were not shown because too many files have changed in this diff.