Merge branch 'develop' of https://github.com/PaddlePaddle/FastDeploy into develop

This commit is contained in:
yunyaoXYY
2023-02-20 03:00:35 +00:00
50 changed files with 3182 additions and 218 deletions

View File

@@ -1,13 +0,0 @@
list(APPEND FASTDEPLOY_DOTNET_REFERENCES
"Microsoft.CSharp"
"System"
"System.Core"
"System.Data"
"System.Deployment"
"System.Drawing"
"System.Net.Http"
"System.Xml"
"System.Reflection"
"${CMAKE_CURRENT_LIST_DIR}/csharp_lib/fastdeploy_csharp.dll")
set(FASTDEPLOY_PACKAGE_REFERENCES "OpenCvSharp4_4.7.0.20230115;OpenCvSharp4.runtime.win_4.7.0.20230115")

View File

@@ -23,7 +23,12 @@
typedef struct FD_C_OneDimArrayUint8 {
size_t size;
uint8_t* data;
} FD_C_OneDimArrayUint8; // std::vector<int32_t>
} FD_C_OneDimArrayUint8; // std::vector<uint8>
typedef struct FD_C_OneDimArrayInt8 {
size_t size;
int8_t* data;
} FD_C_OneDimArrayInt8; // std::vector<int8>
typedef struct FD_C_OneDimArrayInt32 {
size_t size;
@@ -60,6 +65,21 @@ typedef struct FD_C_TwoDimArraySize {
FD_C_OneDimArraySize* data;
} FD_C_TwoDimArraySize; // std::vector<std::vector<size_t>>
typedef struct FD_C_TwoDimArrayInt8 {
size_t size;
FD_C_OneDimArrayInt8* data;
} FD_C_TwoDimArrayInt8; // std::vector<std::vector<int8>>
typedef struct FD_C_TwoDimArrayInt32 {
size_t size;
FD_C_OneDimArrayInt32* data;
} FD_C_TwoDimArrayInt32; // std::vector<std::vector<int32_t>>
typedef struct FD_C_ThreeDimArrayInt32 {
size_t size;
FD_C_TwoDimArrayInt32* data;
} FD_C_ThreeDimArrayInt32; // std::vector<std::vector<std::vector<int32_t>>>
typedef struct FD_C_TwoDimArrayFloat {
size_t size;
FD_C_OneDimArrayFloat* data;

View File

@@ -27,6 +27,11 @@ DECL_AND_IMPLEMENT_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(
DECL_AND_IMPLEMENT_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(
DetectionResult, fd_detection_result_wrapper, detection_result)
// OCRResult
DECL_AND_IMPLEMENT_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(OCRResult,
fd_ocr_result_wrapper,
ocr_result)
// Models:
// Classification
@@ -120,6 +125,28 @@ DECL_AND_IMPLEMENT_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(GFL,
fd_gfl_wrapper,
gfl_model)
// OCR models
// Recognizer
DECL_AND_IMPLEMENT_OCR_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
Recognizer, fd_recognizer_wrapper, recognizer_model);
// DBDetector
DECL_AND_IMPLEMENT_OCR_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
DBDetector, fd_dbdetector_wrapper, dbdetector_model);
// Classifier
DECL_AND_IMPLEMENT_OCR_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
Classifier, fd_classifier_wrapper, classifier_model);
// PPOCRv2
DECL_AND_IMPLEMENT_PIPELINE_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PPOCRv2, fd_ppocrv2_wrapper, ppocrv2_model);
// PPOCRv3
DECL_AND_IMPLEMENT_PIPELINE_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(
PPOCRv3, fd_ppocrv3_wrapper, ppocrv3_model);
#endif
std::unique_ptr<fastdeploy::RuntimeOption>&

View File

@@ -22,6 +22,11 @@
#include "fastdeploy/vision/classification/ppcls/model.h"
#include "fastdeploy/vision/common/result.h"
#include "fastdeploy/vision/detection/ppdet/model.h"
#include "fastdeploy/vision/ocr/ppocr/classifier.h"
#include "fastdeploy/vision/ocr/ppocr/dbdetector.h"
#include "fastdeploy/vision/ocr/ppocr/recognizer.h"
#include "fastdeploy/vision/ocr/ppocr/ppocr_v2.h"
#include "fastdeploy/vision/ocr/ppocr/ppocr_v3.h"
#define DEFINE_RESULT_WRAPPER_STRUCT(typename, varname) typedef struct FD_C_##typename##Wrapper { \
std::unique_ptr<fastdeploy::vision::typename> varname; \
@@ -36,6 +41,14 @@
std::unique_ptr<fastdeploy::vision::detection::typename> varname; \
} FD_C_##typename##Wrapper
#define DEFINE_OCR_MODEL_WRAPPER_STRUCT(typename, varname) typedef struct FD_C_##typename##Wrapper { \
std::unique_ptr<fastdeploy::vision::ocr::typename> varname; \
} FD_C_##typename##Wrapper
#define DEFINE_PIPELINE_MODEL_WRAPPER_STRUCT(typename, varname) typedef struct FD_C_##typename##Wrapper { \
std::unique_ptr<fastdeploy::pipeline::typename> varname; \
} FD_C_##typename##Wrapper
// ------------- belows are wrapper struct define --------------------- //
// Results:
@@ -47,6 +60,10 @@ DEFINE_RESULT_WRAPPER_STRUCT(ClassifyResult, classify_result);
DEFINE_RESULT_WRAPPER_STRUCT(DetectionResult, detection_result);
// OCRResult
DEFINE_RESULT_WRAPPER_STRUCT(OCRResult, ocr_result);
// Models:
// Classification
@@ -119,6 +136,23 @@ DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(TOOD, tood_model);
// GFL
DEFINE_DETECTION_MODEL_WRAPPER_STRUCT(GFL, gfl_model);
// OCR models
// Recognizer
DEFINE_OCR_MODEL_WRAPPER_STRUCT(Recognizer, recognizer_model);
// DBDetector
DEFINE_OCR_MODEL_WRAPPER_STRUCT(DBDetector, dbdetector_model);
// Classifier
DEFINE_OCR_MODEL_WRAPPER_STRUCT(Classifier, classifier_model);
// PPOCRv2
DEFINE_PIPELINE_MODEL_WRAPPER_STRUCT(PPOCRv2, ppocrv2_model);
// PPOCRv3
DEFINE_PIPELINE_MODEL_WRAPPER_STRUCT(PPOCRv3, ppocrv3_model);
// ------------- belows are function declaration for get ptr from wrapper --------------------- //
@@ -134,6 +168,16 @@ FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* varname)
#define DECLARE_OCR_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, varname) std::unique_ptr<fastdeploy::vision::ocr::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* varname)
#define DECLARE_PIPELINE_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, varname) std::unique_ptr<fastdeploy::pipeline::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* varname)
namespace fastdeploy {
// results:
@@ -145,6 +189,12 @@ DECLARE_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(ClassifyResult,
DECLARE_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(DetectionResult,
fd_detection_result_wrapper);
// OCRResult
DECLARE_RESULT_FUNC_FOR_GET_PTR_FROM_WRAPPER(OCRResult,
fd_ocr_result_wrapper);
// Models:
// Classification
@@ -257,6 +307,23 @@ DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(TOOD,
DECLARE_DETECTION_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(GFL,
fd_gfl_wrapper);
// OCR models
// Recognizer
DECLARE_OCR_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(Recognizer, fd_recognizer_wrapper);
// DBDetector
DECLARE_OCR_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(DBDetector, fd_dbdetector_wrapper);
// Classifier
DECLARE_OCR_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(Classifier, fd_classifier_wrapper);
// PPOCRv2
DECLARE_PIPELINE_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PPOCRv2, fd_ppocrv2_wrapper);
// PPOCRv3
DECLARE_PIPELINE_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(PPOCRv3, fd_ppocrv3_wrapper);
} // namespace fastdeploy
#endif
@@ -299,3 +366,20 @@ FD_C_CheckAndConvert##typename##Wrapper( \
"The pointer of " #var_wrapper_name " shouldn't be nullptr."); \
return var_wrapper_name->var_ptr_name; \
}
#define DECL_AND_IMPLEMENT_OCR_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, var_wrapper_name, var_ptr_name) std::unique_ptr<fastdeploy::vision::ocr::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* var_wrapper_name) { \
FDASSERT(var_wrapper_name != nullptr, \
"The pointer of " #var_wrapper_name " shouldn't be nullptr."); \
return var_wrapper_name->var_ptr_name; \
}
#define DECL_AND_IMPLEMENT_PIPELINE_MODEL_FUNC_FOR_GET_PTR_FROM_WRAPPER(typename, var_wrapper_name, var_ptr_name) std::unique_ptr<fastdeploy::pipeline::typename>& \
FD_C_CheckAndConvert##typename##Wrapper( \
FD_C_##typename##Wrapper* var_wrapper_name) { \
FDASSERT(var_wrapper_name != nullptr, \
"The pointer of " #var_wrapper_name " shouldn't be nullptr."); \
return var_wrapper_name->var_ptr_name; \
}

View File

@@ -18,6 +18,7 @@
#ifdef ENABLE_VISION
#include "fastdeploy_capi/vision/classification/ppcls/model.h"
#include "fastdeploy_capi/vision/detection/ppdet/model.h"
#include "fastdeploy_capi/vision/ocr/ppocr/model.h"
#include "fastdeploy_capi/vision/result.h"
#include "fastdeploy_capi/vision/visualize.h"
#endif

View File

@@ -15,7 +15,7 @@
#pragma once
#define DECLARE_CREATE_WRAPPER_FUNCTION(model_type) FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_##model_type##Wrapper* \
FD_C_Creates##model_type##Wrapper( \
FD_C_Create##model_type##Wrapper( \
const char* model_file, const char* params_file, const char* config_file, \
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper, \
const FD_C_ModelFormat model_format)
@@ -88,7 +88,7 @@ return model->Initialized();
} \
return successful;
#define DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(model_type, var_name) FD_C_##model_type##Wrapper* FD_C_Creates##model_type##Wrapper(\
#define DECLARE_AND_IMPLEMENT_CREATE_WRAPPER_FUNCTION(model_type, var_name) FD_C_##model_type##Wrapper* FD_C_Create##model_type##Wrapper(\
const char* model_file, const char* params_file, const char* config_file, \
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper, \
const FD_C_ModelFormat model_format) { \

View File

@@ -23,7 +23,7 @@ extern "C" {
// PPYOLOE
FD_C_PPYOLOEWrapper* FD_C_CreatesPPYOLOEWrapper(
FD_C_PPYOLOEWrapper* FD_C_CreatePPYOLOEWrapper(
const char* model_file, const char* params_file, const char* config_file,
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
const FD_C_ModelFormat model_format) {

View File

@@ -0,0 +1,59 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#define OCR_DECLARE_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name) FASTDEPLOY_CAPI_EXPORT extern void \
FD_C_Destroy##model_type##Wrapper(__fd_take FD_C_##model_type##Wrapper* wrapper_var_name);
#define OCR_DECLARE_INITIALIZED_FUNCTION(model_type, wrapper_var_name) FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_##model_type##WrapperInitialized( \
__fd_keep FD_C_##model_type##Wrapper* wrapper_var_name)
#define OCR_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name) delete wrapper_var_name
#define OCR_DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name) void FD_C_Destroy##model_type##Wrapper( \
__fd_take FD_C_##model_type##Wrapper* wrapper_var_name) { \
OCR_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name); \
}
#define OCR_IMPLEMENT_INITIALIZED_FUNCTION(model_type, wrapper_var_name) auto& model = \
CHECK_AND_CONVERT_FD_TYPE(model_type##Wrapper, wrapper_var_name); \
return model->Initialized();
#define OCR_DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(model_type, wrapper_var_name) FD_C_Bool FD_C_##model_type##WrapperInitialized( \
FD_C_##model_type##Wrapper* wrapper_var_name) { \
OCR_IMPLEMENT_INITIALIZED_FUNCTION(model_type, wrapper_var_name); \
}
#define PIPELINE_DECLARE_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name) FASTDEPLOY_CAPI_EXPORT extern void \
FD_C_Destroy##model_type##Wrapper(__fd_take FD_C_##model_type##Wrapper* wrapper_var_name);
#define PIPELINE_DECLARE_INITIALIZED_FUNCTION(model_type, wrapper_var_name) FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_##model_type##WrapperInitialized( \
__fd_keep FD_C_##model_type##Wrapper* wrapper_var_name)
#define PIPELINE_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name) delete wrapper_var_name
#define PIPELINE_DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name) void FD_C_Destroy##model_type##Wrapper( \
__fd_take FD_C_##model_type##Wrapper* wrapper_var_name) { \
PIPELINE_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(model_type, wrapper_var_name); \
}
#define PIPELINE_IMPLEMENT_INITIALIZED_FUNCTION(model_type, wrapper_var_name) auto& model = \
CHECK_AND_CONVERT_FD_TYPE(model_type##Wrapper, wrapper_var_name); \
return model->Initialized();
#define PIPELINE_DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(model_type, wrapper_var_name) FD_C_Bool FD_C_##model_type##WrapperInitialized( \
FD_C_##model_type##Wrapper* wrapper_var_name) { \
PIPELINE_IMPLEMENT_INITIALIZED_FUNCTION(model_type, wrapper_var_name); \
}

View File

@@ -0,0 +1,503 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fastdeploy_capi/vision/ocr/ppocr/model.h"
#include "fastdeploy_capi/types_internal.h"
#include "fastdeploy_capi/vision/visualize.h"
#ifdef __cplusplus
extern "C" {
#endif
// Recognizer
/** Create a FD_C_RecognizerWrapper holding a fastdeploy OCR Recognizer.
 *
 * Builds the underlying model from the given model/params/label files using
 * the supplied runtime option; ownership of the returned wrapper passes to
 * the caller (release with FD_C_DestroyRecognizerWrapper).
 */
FD_C_RecognizerWrapper* FD_C_CreateRecognizerWrapper(
    const char* model_file, const char* params_file, const char* label_path,
    FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const FD_C_ModelFormat model_format) {
  auto& opt = CHECK_AND_CONVERT_FD_TYPE(RuntimeOptionWrapper,
                                        fd_c_runtime_option_wrapper);
  auto* wrapper = new FD_C_RecognizerWrapper();
  wrapper->recognizer_model.reset(new fastdeploy::vision::ocr::Recognizer(
      std::string(model_file), std::string(params_file),
      std::string(label_path), *opt,
      static_cast<fastdeploy::ModelFormat>(model_format)));
  return wrapper;
}
OCR_DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(Recognizer,
fd_c_recognizer_wrapper)
// Run OCR text recognition on a single image (cv::Mat behind FD_C_Mat, HWC
// BGR). On success the recognized string is deep-copied into `text` (the
// caller takes ownership of text->data, allocated with new[]) and the
// confidence is written to `rec_score`. Outputs are untouched on failure.
FD_C_Bool FD_C_RecognizerWrapperPredict(
FD_C_RecognizerWrapper* fd_c_recognizer_wrapper, FD_C_Mat img,
FD_C_Cstr* text, float* rec_score) {
cv::Mat* im = reinterpret_cast<cv::Mat*>(img);
auto& model =
CHECK_AND_CONVERT_FD_TYPE(RecognizerWrapper, fd_c_recognizer_wrapper);
std::string res_string;
bool successful = model->Predict(*im, &res_string, rec_score);
if (successful) {
// +1 so strcpy's trailing '\0' fits in the buffer.
text->size = res_string.size();
text->data = new char[res_string.size() + 1];
strcpy(text->data, res_string.c_str());
}
return successful;
}
OCR_DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(Recognizer,
fd_c_recognizer_wrapper)
/** Batch OCR text recognition.
 *
 * Every recognized string is deep-copied into `texts` and every confidence
 * into `rec_scores`; the caller takes ownership of all buffers allocated
 * here (new[]). Outputs are untouched when BatchPredict() fails.
 *
 * Fix: the original used strncpy(dst, src, len) into a len+1 buffer, which
 * never writes the NUL terminator and leaves the last byte uninitialized;
 * strcpy always terminates (and matches FD_C_RecognizerWrapperPredict).
 */
FD_C_Bool FD_C_RecognizerWrapperBatchPredict(
    FD_C_RecognizerWrapper* fd_c_recognizer_wrapper, FD_C_OneDimMat imgs,
    FD_C_OneDimArrayCstr* texts, FD_C_OneDimArrayFloat* rec_scores) {
  std::vector<cv::Mat> imgs_vec;
  std::vector<std::string> texts_out;
  std::vector<float> rec_scores_out;
  for (size_t i = 0; i < imgs.size; i++) {
    imgs_vec.push_back(*(reinterpret_cast<cv::Mat*>(imgs.data[i])));
  }
  auto& model =
      CHECK_AND_CONVERT_FD_TYPE(RecognizerWrapper, fd_c_recognizer_wrapper);
  bool successful = model->BatchPredict(imgs_vec, &texts_out, &rec_scores_out);
  if (successful) {
    // Deep-copy results back to FD_C_OneDimArrayCstr / FD_C_OneDimArrayFloat.
    texts->size = texts_out.size();
    texts->data = new FD_C_Cstr[texts->size];
    for (size_t i = 0; i < texts_out.size(); i++) {
      texts->data[i].size = texts_out[i].length();
      texts->data[i].data = new char[texts_out[i].length() + 1];
      strcpy(texts->data[i].data, texts_out[i].c_str());  // NUL-terminates
    }
    rec_scores->size = rec_scores_out.size();
    rec_scores->data = new float[rec_scores->size];
    memcpy(rec_scores->data, rec_scores_out.data(),
           sizeof(float) * rec_scores->size);
  }
  return successful;
}
/** Batch OCR text recognition over the sub-range [start_index, end_index)
 * using the supplied per-image indices.
 *
 * As in FD_C_RecognizerWrapperBatchPredict, results are deep-copied into
 * `texts` / `rec_scores` (caller owns the new[]-allocated buffers) and the
 * outputs are untouched on failure.
 *
 * Fix: replaced strncpy(dst, src, len) — which leaves the final byte of the
 * len+1 buffer uninitialized and un-terminated — with strcpy, which always
 * NUL-terminates.
 */
FD_C_Bool FD_C_RecognizerWrapperBatchPredictWithIndex(
    FD_C_RecognizerWrapper* fd_c_recognizer_wrapper, FD_C_OneDimMat imgs,
    FD_C_OneDimArrayCstr* texts, FD_C_OneDimArrayFloat* rec_scores,
    size_t start_index, size_t end_index, FD_C_OneDimArrayInt32 indices) {
  std::vector<cv::Mat> imgs_vec;
  std::vector<std::string> texts_out;
  std::vector<float> rec_scores_out;
  std::vector<int> indices_in;
  for (size_t i = 0; i < imgs.size; i++) {
    imgs_vec.push_back(*(reinterpret_cast<cv::Mat*>(imgs.data[i])));
  }
  for (size_t i = 0; i < indices.size; i++) {
    indices_in.push_back(indices.data[i]);
  }
  auto& model =
      CHECK_AND_CONVERT_FD_TYPE(RecognizerWrapper, fd_c_recognizer_wrapper);
  bool successful = model->BatchPredict(imgs_vec, &texts_out, &rec_scores_out,
                                        start_index, end_index, indices_in);
  if (successful) {
    // Deep-copy results back to FD_C_OneDimArrayCstr / FD_C_OneDimArrayFloat.
    texts->size = texts_out.size();
    texts->data = new FD_C_Cstr[texts->size];
    for (size_t i = 0; i < texts_out.size(); i++) {
      texts->data[i].size = texts_out[i].length();
      texts->data[i].data = new char[texts_out[i].length() + 1];
      strcpy(texts->data[i].data, texts_out[i].c_str());  // NUL-terminates
    }
    rec_scores->size = rec_scores_out.size();
    rec_scores->data = new float[rec_scores->size];
    memcpy(rec_scores->data, rec_scores_out.data(),
           sizeof(float) * rec_scores->size);
  }
  return successful;
}
// Classifier
/** Create a FD_C_ClassifierWrapper holding a fastdeploy OCR Classifier.
 *
 * Builds the underlying model from the given model/params files using the
 * supplied runtime option; the caller owns the returned wrapper (release
 * with FD_C_DestroyClassifierWrapper).
 */
FD_C_ClassifierWrapper* FD_C_CreateClassifierWrapper(
    const char* model_file, const char* params_file,
    FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const FD_C_ModelFormat model_format) {
  auto& opt = CHECK_AND_CONVERT_FD_TYPE(RuntimeOptionWrapper,
                                        fd_c_runtime_option_wrapper);
  auto* wrapper = new FD_C_ClassifierWrapper();
  wrapper->classifier_model.reset(new fastdeploy::vision::ocr::Classifier(
      std::string(model_file), std::string(params_file), *opt,
      static_cast<fastdeploy::ModelFormat>(model_format)));
  return wrapper;
}
OCR_DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(Classifier,
fd_c_classifier_wrapper)
// Run text-direction classification on one image; the predicted label and
// its score are written through `cls_label` / `cls_score`. Returns the
// underlying Predict() status.
FD_C_Bool FD_C_ClassifierWrapperPredict(
    FD_C_ClassifierWrapper* fd_c_classifier_wrapper, FD_C_Mat img,
    int32_t* cls_label, float* cls_score) {
  auto& model =
      CHECK_AND_CONVERT_FD_TYPE(ClassifierWrapper, fd_c_classifier_wrapper);
  cv::Mat* mat = reinterpret_cast<cv::Mat*>(img);
  return model->Predict(*mat, cls_label, cls_score);
}
OCR_DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(Classifier,
fd_c_classifier_wrapper)
/** Batch text-direction classification.
 *
 * Labels and scores are deep-copied into `cls_labels` / `cls_scores`
 * (caller owns the new[]-allocated buffers); outputs are untouched when
 * BatchPredict() fails.
 *
 * Fix: the scores memcpy used `sizeof(int)` for float data; it only worked
 * where sizeof(int) == sizeof(float). Use sizeof(float).
 */
FD_C_Bool FD_C_ClassifierWrapperBatchPredict(
    FD_C_ClassifierWrapper* fd_c_classifier_wrapper, FD_C_OneDimMat imgs,
    FD_C_OneDimArrayInt32* cls_labels, FD_C_OneDimArrayFloat* cls_scores) {
  std::vector<cv::Mat> imgs_vec;
  std::vector<int> cls_labels_out;
  std::vector<float> cls_scores_out;
  for (size_t i = 0; i < imgs.size; i++) {
    imgs_vec.push_back(*(reinterpret_cast<cv::Mat*>(imgs.data[i])));
  }
  auto& model =
      CHECK_AND_CONVERT_FD_TYPE(ClassifierWrapper, fd_c_classifier_wrapper);
  bool successful =
      model->BatchPredict(imgs_vec, &cls_labels_out, &cls_scores_out);
  if (successful) {
    // Deep-copy results back to FD_C_OneDimArrayInt32 / FD_C_OneDimArrayFloat.
    cls_labels->size = cls_labels_out.size();
    cls_labels->data = new int[cls_labels->size];
    memcpy(cls_labels->data, cls_labels_out.data(),
           sizeof(int) * cls_labels->size);
    cls_scores->size = cls_scores_out.size();
    cls_scores->data = new float[cls_scores->size];
    memcpy(cls_scores->data, cls_scores_out.data(),
           sizeof(float) * cls_scores->size);
  }
  return successful;
}
/** Batch text-direction classification over [start_index, end_index).
 *
 * Labels and scores are deep-copied into `cls_labels` / `cls_scores`
 * (caller owns the new[]-allocated buffers); outputs are untouched when
 * BatchPredict() fails.
 *
 * Fix: the scores memcpy used `sizeof(int)` for float data; use
 * sizeof(float) so the copy is correct regardless of platform type sizes.
 */
FD_C_Bool FD_C_ClassifierWrapperBatchPredictWithIndex(
    FD_C_ClassifierWrapper* fd_c_classifier_wrapper, FD_C_OneDimMat imgs,
    FD_C_OneDimArrayInt32* cls_labels, FD_C_OneDimArrayFloat* cls_scores,
    size_t start_index, size_t end_index) {
  std::vector<cv::Mat> imgs_vec;
  std::vector<int> cls_labels_out;
  std::vector<float> cls_scores_out;
  for (size_t i = 0; i < imgs.size; i++) {
    imgs_vec.push_back(*(reinterpret_cast<cv::Mat*>(imgs.data[i])));
  }
  auto& model =
      CHECK_AND_CONVERT_FD_TYPE(ClassifierWrapper, fd_c_classifier_wrapper);
  bool successful = model->BatchPredict(
      imgs_vec, &cls_labels_out, &cls_scores_out, start_index, end_index);
  if (successful) {
    // Deep-copy results back to FD_C_OneDimArrayInt32 / FD_C_OneDimArrayFloat.
    cls_labels->size = cls_labels_out.size();
    cls_labels->data = new int[cls_labels->size];
    memcpy(cls_labels->data, cls_labels_out.data(),
           sizeof(int) * cls_labels->size);
    cls_scores->size = cls_scores_out.size();
    cls_scores->data = new float[cls_scores->size];
    memcpy(cls_scores->data, cls_scores_out.data(),
           sizeof(float) * cls_scores->size);
  }
  return successful;
}
// DBDetector
/** Create a FD_C_DBDetectorWrapper holding a fastdeploy OCR DBDetector.
 *
 * Builds the underlying text-detection model from the given model/params
 * files using the supplied runtime option; the caller owns the returned
 * wrapper (release with FD_C_DestroyDBDetectorWrapper).
 */
FD_C_DBDetectorWrapper* FD_C_CreateDBDetectorWrapper(
    const char* model_file, const char* params_file,
    FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
    const FD_C_ModelFormat model_format) {
  auto& opt = CHECK_AND_CONVERT_FD_TYPE(RuntimeOptionWrapper,
                                        fd_c_runtime_option_wrapper);
  auto* wrapper = new FD_C_DBDetectorWrapper();
  wrapper->dbdetector_model.reset(new fastdeploy::vision::ocr::DBDetector(
      std::string(model_file), std::string(params_file), *opt,
      static_cast<fastdeploy::ModelFormat>(model_format)));
  return wrapper;
}
OCR_DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(DBDetector,
fd_c_dbdetector_wrapper)
// Detect text boxes in one image. Each detected box is 8 ints (4 corner
// coordinates) deep-copied into `boxes_result`; the caller owns the
// new[]-allocated buffers. Outputs are untouched on failure.
FD_C_Bool FD_C_DBDetectorWrapperPredict(
    FD_C_DBDetectorWrapper* fd_c_dbdetector_wrapper, FD_C_Mat img,
    FD_C_TwoDimArrayInt32* boxes_result) {
  auto& model =
      CHECK_AND_CONVERT_FD_TYPE(DBDetectorWrapper, fd_c_dbdetector_wrapper);
  cv::Mat* mat = reinterpret_cast<cv::Mat*>(img);
  std::vector<std::array<int, 8>> detected;
  if (!model->Predict(*mat, &detected)) {
    return false;
  }
  // Deep-copy each 8-coordinate box into the C two-dimensional array.
  const int boxes_coordinate_dim = 8;
  boxes_result->size = detected.size();
  boxes_result->data = new FD_C_OneDimArrayInt32[boxes_result->size];
  for (size_t i = 0; i < detected.size(); ++i) {
    boxes_result->data[i].size = boxes_coordinate_dim;
    boxes_result->data[i].data = new int[boxes_coordinate_dim];
    for (int j = 0; j < boxes_coordinate_dim; ++j) {
      boxes_result->data[i].data[j] = detected[i][j];
    }
  }
  return true;
}
OCR_DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(DBDetector,
fd_c_dbdetector_wrapper)
// Batch text detection: for every input image, deep-copies the detected
// boxes (8 ints each) into the three-level C array `det_results`
// (batch -> boxes -> coordinates). The caller owns all new[]-allocated
// buffers; outputs are untouched when BatchPredict() fails.
FD_C_Bool FD_C_DBDetectorWrapperBatchPredict(
FD_C_DBDetectorWrapper* fd_c_dbdetector_wrapper, FD_C_OneDimMat imgs,
FD_C_ThreeDimArrayInt32* det_results) {
std::vector<cv::Mat> imgs_vec;
std::vector<std::vector<std::array<int, 8>>> det_results_out;
for (int i = 0; i < imgs.size; i++) {
imgs_vec.push_back(*(reinterpret_cast<cv::Mat*>(imgs.data[i])));
}
auto& model =
CHECK_AND_CONVERT_FD_TYPE(DBDetectorWrapper, fd_c_dbdetector_wrapper);
bool successful = model->BatchPredict(imgs_vec, &det_results_out);
if (successful) {
// copy results back to FD_C_ThreeDimArrayInt32
// NOTE(review): `int batch_indx` is compared against the size_t member
// det_results->size — a signed/unsigned mix; harmless for realistic batch
// sizes but worth normalizing to size_t.
det_results->size = det_results_out.size();
det_results->data = new FD_C_TwoDimArrayInt32[det_results->size];
for (int batch_indx = 0; batch_indx < det_results->size; batch_indx++) {
const int boxes_coordinate_dim = 8;
det_results->data[batch_indx].size = det_results_out[batch_indx].size();
det_results->data[batch_indx].data =
new FD_C_OneDimArrayInt32[det_results->data[batch_indx].size];
for (size_t i = 0; i < det_results_out[batch_indx].size(); i++) {
det_results->data[batch_indx].data[i].size = boxes_coordinate_dim;
det_results->data[batch_indx].data[i].data =
new int[boxes_coordinate_dim];
for (size_t j = 0; j < boxes_coordinate_dim; j++) {
det_results->data[batch_indx].data[i].data[j] =
det_results_out[batch_indx][i][j];
}
}
}
}
return successful;
}
// PPOCRv2
/** Assemble a PPOCRv2 pipeline from detector, classifier and recognizer
 * wrappers.
 *
 * The pipeline object receives raw pointers to the three stage models, so
 * the input wrappers must stay alive for the lifetime of the returned
 * wrapper. The caller owns the returned wrapper.
 */
FD_C_PPOCRv2Wrapper* FD_C_CreatePPOCRv2Wrapper(
    FD_C_DBDetectorWrapper* fd_c_det_model_wrapper,
    FD_C_ClassifierWrapper* fd_c_cls_model_wrapper,
    FD_C_RecognizerWrapper* fd_c_rec_model_wrapper) {
  auto& det = CHECK_AND_CONVERT_FD_TYPE(DBDetectorWrapper,
                                        fd_c_det_model_wrapper);
  auto& cls = CHECK_AND_CONVERT_FD_TYPE(ClassifierWrapper,
                                        fd_c_cls_model_wrapper);
  auto& rec = CHECK_AND_CONVERT_FD_TYPE(RecognizerWrapper,
                                        fd_c_rec_model_wrapper);
  auto* wrapper = new FD_C_PPOCRv2Wrapper();
  wrapper->ppocrv2_model.reset(
      new fastdeploy::pipeline::PPOCRv2(det.get(), cls.get(), rec.get()));
  return wrapper;
}
PIPELINE_DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PPOCRv2,
fd_c_ppocrv2_wrapper)
// Run the full PPOCRv2 pipeline on one image and copy the aggregated OCR
// result into the caller-provided fd_c_ocr_result (shallow copy of the
// C result struct). Returns the underlying Predict() status.
FD_C_Bool FD_C_PPOCRv2WrapperPredict(FD_C_PPOCRv2Wrapper* fd_c_ppocrv2_wrapper,
FD_C_Mat img,
FD_C_OCRResult* fd_c_ocr_result) {
cv::Mat* im = reinterpret_cast<cv::Mat*>(img);
auto& model = CHECK_AND_CONVERT_FD_TYPE(PPOCRv2Wrapper, fd_c_ppocrv2_wrapper);
// Temporary wrapper used to receive the C++-side result.
// NOTE(review): fd_c_ocr_result_wrapper (and the FD_C_OCRResult* returned by
// FD_C_OCRResultWrapperGetData) are never released in this function — this
// looks like a leak; confirm the ownership convention of the result-wrapper
// API before changing it.
FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper =
FD_C_CreateOCRResultWrapper();
auto& ocr_result =
CHECK_AND_CONVERT_FD_TYPE(OCRResultWrapper, fd_c_ocr_result_wrapper);
bool successful = model->Predict(im, ocr_result.get());
if (successful) {
// Shallow-copy the converted result into the caller's struct.
FD_C_OCRResult* res = FD_C_OCRResultWrapperGetData(fd_c_ocr_result_wrapper);
*fd_c_ocr_result = *res;
}
return successful;
}
PIPELINE_DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PPOCRv2,
fd_c_ppocrv2_wrapper)
/** Internal helper: deep-copy a fastdeploy::vision::OCRResult into a newly
 * heap-allocated FD_C_OCRResult (boxes, text, rec_scores, cls_scores,
 * cls_labels, type).
 *
 * The caller takes ownership of the returned struct and every nested
 * new[]-allocated buffer.
 *
 * Fix: text strings were copied with strncpy(dst, src, len) into len+1
 * buffers, which never writes the NUL terminator; strcpy always
 * terminates.
 */
FD_C_OCRResult* FD_C_OCRResultToC(fastdeploy::vision::OCRResult* ocr_result) {
  FD_C_OCRResult* fd_c_ocr_result = new FD_C_OCRResult();
  // copy boxes: 8 ints (4 corner coordinates) per detected text box
  const int boxes_coordinate_dim = 8;
  fd_c_ocr_result->boxes.size = ocr_result->boxes.size();
  fd_c_ocr_result->boxes.data =
      new FD_C_OneDimArrayInt32[fd_c_ocr_result->boxes.size];
  for (size_t i = 0; i < ocr_result->boxes.size(); i++) {
    fd_c_ocr_result->boxes.data[i].size = boxes_coordinate_dim;
    fd_c_ocr_result->boxes.data[i].data = new int[boxes_coordinate_dim];
    for (int j = 0; j < boxes_coordinate_dim; j++) {
      fd_c_ocr_result->boxes.data[i].data[j] = ocr_result->boxes[i][j];
    }
  }
  // copy text (strcpy guarantees NUL termination)
  fd_c_ocr_result->text.size = ocr_result->text.size();
  fd_c_ocr_result->text.data = new FD_C_Cstr[fd_c_ocr_result->text.size];
  for (size_t i = 0; i < ocr_result->text.size(); i++) {
    fd_c_ocr_result->text.data[i].size = ocr_result->text[i].length();
    fd_c_ocr_result->text.data[i].data =
        new char[ocr_result->text[i].length() + 1];
    strcpy(fd_c_ocr_result->text.data[i].data, ocr_result->text[i].c_str());
  }
  // copy rec_scores
  fd_c_ocr_result->rec_scores.size = ocr_result->rec_scores.size();
  fd_c_ocr_result->rec_scores.data =
      new float[fd_c_ocr_result->rec_scores.size];
  memcpy(fd_c_ocr_result->rec_scores.data, ocr_result->rec_scores.data(),
         sizeof(float) * fd_c_ocr_result->rec_scores.size);
  // copy cls_scores
  fd_c_ocr_result->cls_scores.size = ocr_result->cls_scores.size();
  fd_c_ocr_result->cls_scores.data =
      new float[fd_c_ocr_result->cls_scores.size];
  memcpy(fd_c_ocr_result->cls_scores.data, ocr_result->cls_scores.data(),
         sizeof(float) * fd_c_ocr_result->cls_scores.size);
  // copy cls_labels
  fd_c_ocr_result->cls_labels.size = ocr_result->cls_labels.size();
  fd_c_ocr_result->cls_labels.data =
      new int32_t[fd_c_ocr_result->cls_labels.size];
  memcpy(fd_c_ocr_result->cls_labels.data, ocr_result->cls_labels.data(),
         sizeof(int32_t) * fd_c_ocr_result->cls_labels.size);
  // copy type
  fd_c_ocr_result->type = static_cast<FD_C_ResultType>(ocr_result->type);
  return fd_c_ocr_result;
}
/** Run the PPOCRv2 pipeline on a batch of images; deep-copies one
 * FD_C_OCRResult per image into `results` (caller owns the buffers).
 * Outputs are untouched when BatchPredict() fails.
 *
 * Fix: `results->data[i] = *FD_C_OCRResultToC(...)` leaked the
 * heap-allocated shell struct returned by FD_C_OCRResultToC on every
 * iteration. Keep the pointer, shallow-copy its contents (the nested
 * buffers are then owned by results->data[i]) and delete the shell.
 */
FD_C_Bool FD_C_PPOCRv2WrapperBatchPredict(
    FD_C_PPOCRv2Wrapper* fd_c_ppocrv2_wrapper, FD_C_OneDimMat imgs,
    FD_C_OneDimOCRResult* results) {
  std::vector<cv::Mat> imgs_vec;
  std::vector<fastdeploy::vision::OCRResult> results_out;
  for (size_t i = 0; i < imgs.size; i++) {
    imgs_vec.push_back(*(reinterpret_cast<cv::Mat*>(imgs.data[i])));
  }
  auto& model = CHECK_AND_CONVERT_FD_TYPE(PPOCRv2Wrapper, fd_c_ppocrv2_wrapper);
  bool successful = model->BatchPredict(imgs_vec, &results_out);
  if (successful) {
    // copy results back to FD_C_OneDimOCRResult
    results->size = results_out.size();
    results->data = new FD_C_OCRResult[results->size];
    for (size_t i = 0; i < results_out.size(); i++) {
      FD_C_OCRResult* converted = FD_C_OCRResultToC(&results_out[i]);
      results->data[i] = *converted;  // shallow copy takes over nested buffers
      delete converted;               // free only the shell, not the buffers
    }
  }
  return successful;
}
// PPOCRv3
/** Assemble a PPOCRv3 pipeline from detector, classifier and recognizer
 * wrappers.
 *
 * The pipeline object receives raw pointers to the three stage models, so
 * the input wrappers must stay alive for the lifetime of the returned
 * wrapper. The caller owns the returned wrapper.
 */
FD_C_PPOCRv3Wrapper* FD_C_CreatePPOCRv3Wrapper(
    FD_C_DBDetectorWrapper* fd_c_det_model_wrapper,
    FD_C_ClassifierWrapper* fd_c_cls_model_wrapper,
    FD_C_RecognizerWrapper* fd_c_rec_model_wrapper) {
  auto& det = CHECK_AND_CONVERT_FD_TYPE(DBDetectorWrapper,
                                        fd_c_det_model_wrapper);
  auto& cls = CHECK_AND_CONVERT_FD_TYPE(ClassifierWrapper,
                                        fd_c_cls_model_wrapper);
  auto& rec = CHECK_AND_CONVERT_FD_TYPE(RecognizerWrapper,
                                        fd_c_rec_model_wrapper);
  auto* wrapper = new FD_C_PPOCRv3Wrapper();
  wrapper->ppocrv3_model.reset(
      new fastdeploy::pipeline::PPOCRv3(det.get(), cls.get(), rec.get()));
  return wrapper;
}
PIPELINE_DECLARE_AND_IMPLEMENT_DESTROY_WRAPPER_FUNCTION(PPOCRv3,
fd_c_ppocrv3_wrapper)
// Run the full PPOCRv3 pipeline on one image and copy the aggregated OCR
// result into the caller-provided fd_c_ocr_result (shallow copy of the
// C result struct). Returns the underlying Predict() status.
FD_C_Bool FD_C_PPOCRv3WrapperPredict(FD_C_PPOCRv3Wrapper* fd_c_ppocrv3_wrapper,
FD_C_Mat img,
FD_C_OCRResult* fd_c_ocr_result) {
cv::Mat* im = reinterpret_cast<cv::Mat*>(img);
auto& model = CHECK_AND_CONVERT_FD_TYPE(PPOCRv3Wrapper, fd_c_ppocrv3_wrapper);
// Temporary wrapper used to receive the C++-side result.
// NOTE(review): fd_c_ocr_result_wrapper (and the FD_C_OCRResult* returned by
// FD_C_OCRResultWrapperGetData) are never released in this function — this
// looks like a leak; confirm the ownership convention of the result-wrapper
// API before changing it.
FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper =
FD_C_CreateOCRResultWrapper();
auto& ocr_result =
CHECK_AND_CONVERT_FD_TYPE(OCRResultWrapper, fd_c_ocr_result_wrapper);
bool successful = model->Predict(im, ocr_result.get());
if (successful) {
// Shallow-copy the converted result into the caller's struct.
FD_C_OCRResult* res = FD_C_OCRResultWrapperGetData(fd_c_ocr_result_wrapper);
*fd_c_ocr_result = *res;
}
return successful;
}
PIPELINE_DECLARE_AND_IMPLEMENT_INITIALIZED_FUNCTION(PPOCRv3,
fd_c_ppocrv3_wrapper)
/** Run the PPOCRv3 pipeline on a batch of images; deep-copies one
 * FD_C_OCRResult per image into `results` (caller owns the buffers).
 * Outputs are untouched when BatchPredict() fails.
 *
 * Fix: `results->data[i] = *FD_C_OCRResultToC(...)` leaked the
 * heap-allocated shell struct returned by FD_C_OCRResultToC on every
 * iteration. Keep the pointer, shallow-copy its contents (the nested
 * buffers are then owned by results->data[i]) and delete the shell.
 */
FD_C_Bool FD_C_PPOCRv3WrapperBatchPredict(
    FD_C_PPOCRv3Wrapper* fd_c_ppocrv3_wrapper, FD_C_OneDimMat imgs,
    FD_C_OneDimOCRResult* results) {
  std::vector<cv::Mat> imgs_vec;
  std::vector<fastdeploy::vision::OCRResult> results_out;
  for (size_t i = 0; i < imgs.size; i++) {
    imgs_vec.push_back(*(reinterpret_cast<cv::Mat*>(imgs.data[i])));
  }
  auto& model = CHECK_AND_CONVERT_FD_TYPE(PPOCRv3Wrapper, fd_c_ppocrv3_wrapper);
  bool successful = model->BatchPredict(imgs_vec, &results_out);
  if (successful) {
    // copy results back to FD_C_OneDimOCRResult
    results->size = results_out.size();
    results->data = new FD_C_OCRResult[results->size];
    for (size_t i = 0; i < results_out.size(); i++) {
      FD_C_OCRResult* converted = FD_C_OCRResultToC(&results_out[i]);
      results->data[i] = *converted;  // shallow copy takes over nested buffers
      delete converted;               // free only the shell, not the buffers
    }
  }
  return successful;
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,348 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy_capi/fd_common.h"
#include "fastdeploy_capi/fd_type.h"
#include "fastdeploy_capi/runtime_option.h"
#include "fastdeploy_capi/vision/result.h"
#include "fastdeploy_capi/vision/ocr/ppocr/base_define.h"
#ifdef __cplusplus
extern "C" {
#endif
// Recognizer
typedef struct FD_C_RecognizerWrapper FD_C_RecognizerWrapper;
/** \brief Create a new FD_C_RecognizerWrapper object
*
* \param[in] model_file Path of model file, e.g ./ch_PP-OCRv3_rec_infer/model.pdmodel.
* \param[in] params_file Path of parameter file, e.g ./ch_PP-OCRv3_rec_infer/model.pdiparams, if the model format is ONNX, this parameter will be ignored.
* \param[in] label_path Path of label file used by OCR recognition model. e.g ./ppocr_keys_v1.txt
* \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends`.
* \param[in] model_format Model format of the loaded model, default is Paddle format.
*
* \return Return a pointer to FD_C_RecognizerWrapper object
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_RecognizerWrapper*
FD_C_CreateRecognizerWrapper(
const char* model_file, const char* params_file, const char* label_path,
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
const FD_C_ModelFormat model_format);
/** \brief Destroy a FD_C_RecognizerWrapper object
*
* \param[in] fd_c_recognizer_wrapper pointer to FD_C_RecognizerWrapper object
*/
OCR_DECLARE_DESTROY_WRAPPER_FUNCTION(Recognizer, fd_c_recognizer_wrapper);
/** \brief Predict the ocr result for an input image
*
* \param[in] fd_c_recognizer_wrapper pointer to FD_C_RecognizerWrapper object
* \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
* \param[in] text The text result of rec model will be written into this parameter.
 * \param[in] rec_score The score result of rec model will be written into this parameter.
 * \return true if the prediction succeeded, otherwise false.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_RecognizerWrapperPredict(
__fd_keep FD_C_RecognizerWrapper* fd_c_recognizer_wrapper, FD_C_Mat img,
FD_C_Cstr* text, float* rec_score);
/** \brief Check if the model is initialized successfully
*
* \param[in] fd_c_recognizer_wrapper pointer to FD_C_RecognizerWrapper object
*
* \return Return a bool of value true if initialized successfully
*/
OCR_DECLARE_INITIALIZED_FUNCTION(Recognizer, fd_c_recognizer_wrapper);
/** \brief Predict the ocr results for a batch of input images
*
* \param[in] fd_c_recognizer_wrapper pointer to FD_C_RecognizerWrapper object
* \param[in] imgs The list of input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
* \param[in] texts The list of text results of rec model will be written into this vector.
 * \param[in] rec_scores The list of score results of rec model will be written into this vector.
 *
 * \return true if the prediction succeeded, otherwise false
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_RecognizerWrapperBatchPredict(
__fd_keep FD_C_RecognizerWrapper* fd_c_recognizer_wrapper, FD_C_OneDimMat imgs,
FD_C_OneDimArrayCstr* texts, FD_C_OneDimArrayFloat* rec_scores);
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_RecognizerWrapperBatchPredictWithIndex(
__fd_keep FD_C_RecognizerWrapper* fd_c_recognizer_wrapper, FD_C_OneDimMat imgs,
FD_C_OneDimArrayCstr* texts, FD_C_OneDimArrayFloat* rec_scores,
size_t start_index, size_t end_index,
FD_C_OneDimArrayInt32 indices);
// Classifier
typedef struct FD_C_ClassifierWrapper FD_C_ClassifierWrapper;
/** \brief Create a new FD_C_ClassifierWrapper object
*
* \param[in] model_file Path of model file, e.g ./ch_ppocr_mobile_v2.0_cls_infer/model.pdmodel.
* \param[in] params_file Path of parameter file, e.g ./ch_ppocr_mobile_v2.0_cls_infer/model.pdiparams, if the model format is ONNX, this parameter will be ignored.
* \param[in] fd_c_runtime_option_wrapper RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends`.
* \param[in] model_format Model format of the loaded model, default is Paddle format.
*
* \return Return a pointer to FD_C_ClassifierWrapper object
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_ClassifierWrapper*
FD_C_CreateClassifierWrapper(
const char* model_file, const char* params_file,
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
const FD_C_ModelFormat model_format);
/** \brief Destroy a FD_C_ClassifierWrapper object
*
* \param[in] fd_c_classifier_wrapper pointer to FD_C_ClassifierWrapper object
*/
OCR_DECLARE_DESTROY_WRAPPER_FUNCTION(Classifier, fd_c_classifier_wrapper);
/** \brief Predict the input image and get OCR classification model cls_result.
*
* \param[in] fd_c_classifier_wrapper pointer to FD_C_ClassifierWrapper object
* \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 * \param[in] cls_label The label result of cls model will be written into this param.
 * \param[in] cls_score The score result of cls model will be written into this param.
 * \return true if the prediction succeeded, otherwise false.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_ClassifierWrapperPredict(
__fd_keep FD_C_ClassifierWrapper* fd_c_classifier_wrapper, FD_C_Mat img,
int32_t* cls_label, float* cls_score);
/** \brief Check if the model is initialized successfully
*
* \param[in] fd_c_classifier_wrapper pointer to FD_C_ClassifierWrapper object
*
* \return Return a bool of value true if initialized successfully
*/
OCR_DECLARE_INITIALIZED_FUNCTION(Classifier, fd_c_classifier_wrapper);
/** \brief BatchPredict the input image and get OCR classification model cls_result.
*
* \param[in] fd_c_classifier_wrapper pointer to FD_C_ClassifierWrapper object
* \param[in] imgs The list of input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 * \param[in] cls_labels The label results of cls model will be written into this vector.
 * \param[in] cls_scores The score results of cls model will be written into this vector.
 * \return true if the prediction succeeded, otherwise false.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_ClassifierWrapperBatchPredict(
__fd_keep FD_C_ClassifierWrapper* fd_c_classifier_wrapper, FD_C_OneDimMat imgs,
FD_C_OneDimArrayInt32* cls_labels, FD_C_OneDimArrayFloat* cls_scores);
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_ClassifierWrapperBatchPredictWithIndex(
__fd_keep FD_C_ClassifierWrapper* fd_c_classifier_wrapper, FD_C_OneDimMat imgs,
FD_C_OneDimArrayInt32* cls_labels, FD_C_OneDimArrayFloat* cls_scores,
size_t start_index, size_t end_index);
// DBDetector
typedef struct FD_C_DBDetectorWrapper FD_C_DBDetectorWrapper;
/** \brief Create a new FD_C_DBDetectorWrapper object
*
* \param[in] model_file Path of model file, e.g ./ch_PP-OCRv3_det_infer/model.pdmodel.
* \param[in] params_file Path of parameter file, e.g ./ch_PP-OCRv3_det_infer/model.pdiparams, if the model format is ONNX, this parameter will be ignored.
* \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends`.
* \param[in] model_format Model format of the loaded model, default is Paddle format.
*
* \return Return a pointer to FD_C_DBDetectorWrapper object
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_DBDetectorWrapper*
FD_C_CreateDBDetectorWrapper(
const char* model_file, const char* params_file,
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
const FD_C_ModelFormat model_format);
/** \brief Destroy a FD_C_DBDetectorWrapper object
*
* \param[in] fd_c_dbdetector_wrapper pointer to FD_C_DBDetectorWrapper object
*/
OCR_DECLARE_DESTROY_WRAPPER_FUNCTION(DBDetector, fd_c_dbdetector_wrapper);
/** \brief Predict the input image and get OCR detection model result.
*
* \param[in] fd_c_dbdetector_wrapper pointer to FD_C_DBDetectorWrapper object
* \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 * \param[in] boxes_result The output of OCR detection model result will be written to this structure.
 * \return true if the prediction succeeded, otherwise false.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_DBDetectorWrapperPredict(
__fd_keep FD_C_DBDetectorWrapper* fd_c_dbdetector_wrapper, FD_C_Mat img,
FD_C_TwoDimArrayInt32* boxes_result);
/** \brief Check if the model is initialized successfully
*
* \param[in] fd_c_dbdetector_wrapper pointer to FD_C_DBDetectorWrapper object
*
* \return Return a bool of value true if initialized successfully
*/
OCR_DECLARE_INITIALIZED_FUNCTION(DBDetector, fd_c_dbdetector_wrapper);
/** \brief BatchPredict the input image and get OCR detection model result.
*
* \param[in] fd_c_dbdetector_wrapper pointer to FD_C_DBDetectorWrapper object
* \param[in] imgs The list input of image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 * \param[in] det_results The output of OCR detection model result will be written to this structure.
 *
 * \return true if the prediction succeeded, otherwise false.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_DBDetectorWrapperBatchPredict(
__fd_keep FD_C_DBDetectorWrapper* fd_c_dbdetector_wrapper, FD_C_OneDimMat imgs,
FD_C_ThreeDimArrayInt32* det_results);
// PPOCRv2
typedef struct FD_C_PPOCRv2Wrapper FD_C_PPOCRv2Wrapper;
/** \brief Set up the detection model path, classification model path and recognition model path respectively.
*
* \param[in] det_model Path of detection model, e.g ./ch_PP-OCRv2_det_infer
* \param[in] cls_model Path of classification model, e.g ./ch_ppocr_mobile_v2.0_cls_infer
* \param[in] rec_model Path of recognition model, e.g ./ch_PP-OCRv2_rec_infer
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_PPOCRv2Wrapper*
FD_C_CreatePPOCRv2Wrapper(
FD_C_DBDetectorWrapper* det_model,
FD_C_ClassifierWrapper* cls_model,
FD_C_RecognizerWrapper* rec_model);
/** \brief Destroy a FD_C_PPOCRv2Wrapper object
*
* \param[in] fd_c_ppocrv2_wrapper pointer to FD_C_PPOCRv2Wrapper object
*/
OCR_DECLARE_DESTROY_WRAPPER_FUNCTION(PPOCRv2, fd_c_ppocrv2_wrapper);
/** \brief Predict the input image and get OCR result.
*
* \param[in] fd_c_ppocrv2_wrapper pointer to FD_C_PPOCRv2Wrapper object
* \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 * \param[in] result The output OCR result will be written to this structure.
 * \return true if the prediction succeeded, otherwise false.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_PPOCRv2WrapperPredict(
__fd_keep FD_C_PPOCRv2Wrapper* fd_c_ppocrv2_wrapper, FD_C_Mat img,
FD_C_OCRResult* result);
/** \brief Check if the model is initialized successfully
*
* \param[in] fd_c_ppocrv2_wrapper pointer to FD_C_PPOCRv2Wrapper object
*
* \return Return a bool of value true if initialized successfully
*/
OCR_DECLARE_INITIALIZED_FUNCTION(PPOCRv2, fd_c_ppocrv2_wrapper);
/** \brief BatchPredict the input image and get OCR result.
*
* \param[in] fd_c_ppocrv2_wrapper pointer to FD_C_PPOCRv2Wrapper object
* \param[in] imgs The list of input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 * \param[in] batch_result The output list of OCR results will be written to this structure.
 * \return true if the prediction succeeded, otherwise false.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_PPOCRv2WrapperBatchPredict(
__fd_keep FD_C_PPOCRv2Wrapper* fd_c_ppocrv2_wrapper, FD_C_OneDimMat imgs,
FD_C_OneDimOCRResult* batch_result);
// PPOCRv3
typedef struct FD_C_PPOCRv3Wrapper FD_C_PPOCRv3Wrapper;
/** \brief Set up the detection model path, classification model path and recognition model path respectively.
*
* \param[in] det_model Path of detection model, e.g ./ch_PP-OCRv2_det_infer
* \param[in] cls_model Path of classification model, e.g ./ch_ppocr_mobile_v2.0_cls_infer
* \param[in] rec_model Path of recognition model, e.g ./ch_PP-OCRv2_rec_infer
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_PPOCRv3Wrapper*
FD_C_CreatePPOCRv3Wrapper(
FD_C_DBDetectorWrapper* det_model,
FD_C_ClassifierWrapper* cls_model,
FD_C_RecognizerWrapper* rec_model);
/** \brief Destroy a FD_C_PPOCRv3Wrapper object
*
* \param[in] fd_c_ppocrv3_wrapper pointer to FD_C_PPOCRv3Wrapper object
*/
OCR_DECLARE_DESTROY_WRAPPER_FUNCTION(PPOCRv3, fd_c_ppocrv3_wrapper);
/** \brief Predict the input image and get OCR result.
*
* \param[in] fd_c_ppocrv3_wrapper pointer to FD_C_PPOCRv3Wrapper object
* \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 * \param[in] result The output OCR result will be written to this structure.
 * \return true if the prediction succeeded, otherwise false.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_PPOCRv3WrapperPredict(
__fd_keep FD_C_PPOCRv3Wrapper* fd_c_ppocrv3_wrapper, FD_C_Mat img,
FD_C_OCRResult* result);
/** \brief Check if the model is initialized successfully
*
* \param[in] fd_c_ppocrv3_wrapper pointer to FD_C_PPOCRv3Wrapper object
*
* \return Return a bool of value true if initialized successfully
*/
OCR_DECLARE_INITIALIZED_FUNCTION(PPOCRv3, fd_c_ppocrv3_wrapper);
/** \brief BatchPredict the input image and get OCR result.
*
* \param[in] fd_c_ppocrv3_wrapper pointer to FD_C_PPOCRv3Wrapper object
* \param[in] imgs The list of input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 * \param[in] batch_result The output list of OCR results will be written to this structure.
 * \return true if the prediction succeeded, otherwise false.
*/
FASTDEPLOY_CAPI_EXPORT extern FD_C_Bool FD_C_PPOCRv3WrapperBatchPredict(
__fd_keep FD_C_PPOCRv3Wrapper* fd_c_ppocrv3_wrapper, FD_C_OneDimMat imgs,
FD_C_OneDimOCRResult* batch_result);
#ifdef __cplusplus
} // extern "C"
#endif

View File

@@ -258,6 +258,146 @@ char* FD_C_DetectionResultWrapperStr(
return cstr;
}
// OCR Results
// Allocates a wrapper that owns a fresh, empty fastdeploy::vision::OCRResult.
// The caller releases it with FD_C_DestroyOCRResultWrapper.
FD_C_OCRResultWrapper* FD_C_CreateOCRResultWrapper() {
  auto* wrapper = new FD_C_OCRResultWrapper();
  wrapper->ocr_result.reset(new fastdeploy::vision::OCRResult());
  return wrapper;
}
// Frees the wrapper; its owning unique_ptr releases the underlying
// fastdeploy::vision::OCRResult as well. Passing nullptr is safe
// (delete on a null pointer is a no-op).
void FD_C_DestroyOCRResultWrapper(
    __fd_take FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper) {
  delete fd_c_ocr_result_wrapper;
}
// Releases a heap-allocated FD_C_OCRResult produced by
// FD_C_OCRResultWrapperGetData, including every nested array it owns.
// Safe to call with nullptr.
void FD_C_DestroyOCRResult(__fd_take FD_C_OCRResult* fd_c_ocr_result) {
  if (fd_c_ocr_result == nullptr) {
    return;
  }
  // Free each box's coordinate array before the box list itself.
  FD_C_TwoDimArrayInt32& boxes = fd_c_ocr_result->boxes;
  for (size_t i = 0; i < boxes.size; ++i) {
    delete[] boxes.data[i].data;
  }
  delete[] boxes.data;
  // Each text entry owns its own character buffer.
  FD_C_OneDimArrayCstr& text = fd_c_ocr_result->text;
  for (size_t i = 0; i < text.size; ++i) {
    delete[] text.data[i].data;
  }
  delete[] text.data;
  // Flat score/label arrays.
  delete[] fd_c_ocr_result->rec_scores.data;
  delete[] fd_c_ocr_result->cls_scores.data;
  delete[] fd_c_ocr_result->cls_labels.data;
  delete fd_c_ocr_result;
}
// Deep-copies the wrapped fastdeploy::vision::OCRResult into a newly
// allocated C struct. The caller owns the returned pointer and must release
// it with FD_C_DestroyOCRResult.
FD_C_OCRResult* FD_C_OCRResultWrapperGetData(
    __fd_keep FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper) {
  auto& ocr_result =
      CHECK_AND_CONVERT_FD_TYPE(OCRResultWrapper, fd_c_ocr_result_wrapper);
  FD_C_OCRResult* fd_c_ocr_result = new FD_C_OCRResult();
  // copy boxes: each detected box is stored as 8 int coordinates.
  const int boxes_coordinate_dim = 8;
  fd_c_ocr_result->boxes.size = ocr_result->boxes.size();
  fd_c_ocr_result->boxes.data =
      new FD_C_OneDimArrayInt32[fd_c_ocr_result->boxes.size];
  for (size_t i = 0; i < ocr_result->boxes.size(); i++) {
    fd_c_ocr_result->boxes.data[i].size = boxes_coordinate_dim;
    fd_c_ocr_result->boxes.data[i].data = new int[boxes_coordinate_dim];
    for (int j = 0; j < boxes_coordinate_dim; j++) {
      fd_c_ocr_result->boxes.data[i].data[j] = ocr_result->boxes[i][j];
    }
  }
  // copy text: allocate length + 1 bytes and explicitly NUL-terminate.
  // Fix: the previous strncpy with a count equal to the string length left
  // the final byte uninitialized, producing non-terminated C strings that
  // FD_C_CreateOCRResultWrapperFromData later reads as NUL-terminated.
  fd_c_ocr_result->text.size = ocr_result->text.size();
  fd_c_ocr_result->text.data = new FD_C_Cstr[fd_c_ocr_result->text.size];
  for (size_t i = 0; i < ocr_result->text.size(); i++) {
    size_t len = ocr_result->text[i].length();
    fd_c_ocr_result->text.data[i].size = len;
    fd_c_ocr_result->text.data[i].data = new char[len + 1];
    memcpy(fd_c_ocr_result->text.data[i].data, ocr_result->text[i].c_str(),
           len);
    fd_c_ocr_result->text.data[i].data[len] = '\0';
  }
  // copy rec_scores
  fd_c_ocr_result->rec_scores.size = ocr_result->rec_scores.size();
  fd_c_ocr_result->rec_scores.data =
      new float[fd_c_ocr_result->rec_scores.size];
  memcpy(fd_c_ocr_result->rec_scores.data, ocr_result->rec_scores.data(),
         sizeof(float) * fd_c_ocr_result->rec_scores.size);
  // copy cls_scores
  fd_c_ocr_result->cls_scores.size = ocr_result->cls_scores.size();
  fd_c_ocr_result->cls_scores.data =
      new float[fd_c_ocr_result->cls_scores.size];
  memcpy(fd_c_ocr_result->cls_scores.data, ocr_result->cls_scores.data(),
         sizeof(float) * fd_c_ocr_result->cls_scores.size);
  // copy cls_labels
  fd_c_ocr_result->cls_labels.size = ocr_result->cls_labels.size();
  fd_c_ocr_result->cls_labels.data =
      new int32_t[fd_c_ocr_result->cls_labels.size];
  memcpy(fd_c_ocr_result->cls_labels.data, ocr_result->cls_labels.data(),
         sizeof(int32_t) * fd_c_ocr_result->cls_labels.size);
  // copy type tag
  fd_c_ocr_result->type = static_cast<FD_C_ResultType>(ocr_result->type);
  return fd_c_ocr_result;
}
// Builds a wrapper holding a fastdeploy::vision::OCRResult populated from a
// C-side FD_C_OCRResult. The input struct is only read; the wrapper owns its
// own deep copy and is released with FD_C_DestroyOCRResultWrapper.
FD_C_OCRResultWrapper* FD_C_CreateOCRResultWrapperFromData(
    __fd_keep FD_C_OCRResult* fd_c_ocr_result) {
  FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper =
      FD_C_CreateOCRResultWrapper();
  auto& ocr_result =
      CHECK_AND_CONVERT_FD_TYPE(OCRResultWrapper, fd_c_ocr_result_wrapper);
  // boxes: every box carries exactly 8 integer coordinates.
  const int boxes_coordinate_dim = 8;
  const size_t num_boxes = fd_c_ocr_result->boxes.size;
  ocr_result->boxes.resize(num_boxes);
  for (size_t i = 0; i < num_boxes; i++) {
    const int32_t* src_box = fd_c_ocr_result->boxes.data[i].data;
    for (int j = 0; j < boxes_coordinate_dim; j++) {
      ocr_result->boxes[i][j] = src_box[j];
    }
  }
  // text: each entry is treated as a NUL-terminated C string.
  const size_t num_texts = fd_c_ocr_result->text.size;
  ocr_result->text.resize(num_texts);
  for (size_t i = 0; i < num_texts; i++) {
    ocr_result->text[i] = fd_c_ocr_result->text.data[i].data;
  }
  // rec_scores
  ocr_result->rec_scores.resize(fd_c_ocr_result->rec_scores.size);
  memcpy(ocr_result->rec_scores.data(), fd_c_ocr_result->rec_scores.data,
         sizeof(float) * fd_c_ocr_result->rec_scores.size);
  // cls_scores
  ocr_result->cls_scores.resize(fd_c_ocr_result->cls_scores.size);
  memcpy(ocr_result->cls_scores.data(), fd_c_ocr_result->cls_scores.data,
         sizeof(float) * fd_c_ocr_result->cls_scores.size);
  // cls_labels
  ocr_result->cls_labels.resize(fd_c_ocr_result->cls_labels.size);
  memcpy(ocr_result->cls_labels.data(), fd_c_ocr_result->cls_labels.data,
         sizeof(int32_t) * fd_c_ocr_result->cls_labels.size);
  // result-type tag
  ocr_result->type =
      static_cast<fastdeploy::vision::ResultType>(fd_c_ocr_result->type);
  return fd_c_ocr_result_wrapper;
}
// Renders the wrapped OCRResult via OCRResult::Str() into a heap-allocated,
// NUL-terminated C string. The caller takes ownership of the returned buffer.
char* FD_C_OCRResultWrapperStr(FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper) {
  auto& ocr_result =
      CHECK_AND_CONVERT_FD_TYPE(OCRResultWrapper, fd_c_ocr_result_wrapper);
  const std::string info = ocr_result->Str();
  char* cstr = new char[info.size() + 1];
  // size() + 1 includes the terminating NUL from c_str().
  memcpy(cstr, info.c_str(), info.size() + 1);
  return cstr;
}
#ifdef __cplusplus
}
#endif

View File

@@ -19,6 +19,7 @@
typedef struct FD_C_ClassifyResultWrapper FD_C_ClassifyResultWrapper;
typedef struct FD_C_DetectionResultWrapper FD_C_DetectionResultWrapper;
typedef struct FD_C_OCRResultWrapper FD_C_OCRResultWrapper;
#ifdef __cplusplus
extern "C" {
@@ -60,6 +61,21 @@ typedef struct FD_C_OneDimDetectionResult {
FD_C_DetectionResult* data;
} FD_C_OneDimDetectionResult;
// C mirror of fastdeploy::vision::OCRResult. All arrays are deep copies
// owned by this struct; release with FD_C_DestroyOCRResult.
typedef struct FD_C_OCRResult {
  FD_C_TwoDimArrayInt32 boxes;       // one entry per detected box, 8 ints each
  FD_C_OneDimArrayCstr text;         // recognized text, one string per box
  FD_C_OneDimArrayFloat rec_scores;  // recognition confidence scores
  FD_C_OneDimArrayFloat cls_scores;  // classification model scores
  FD_C_OneDimArrayInt32 cls_labels;  // classification model labels
  FD_C_ResultType type;              // result-type tag copied from the C++ side
} FD_C_OCRResult;

typedef struct FD_C_OneDimOCRResult {
  size_t size;           // number of OCR results in `data`
  FD_C_OCRResult* data;  // array of `size` OCR results
} FD_C_OneDimOCRResult;
// Classification Results
/** \brief Create a new FD_C_ClassifyResultWrapper object
@@ -170,6 +186,63 @@ FASTDEPLOY_CAPI_EXPORT extern __fd_give char*
FD_C_DetectionResultWrapperStr(
__fd_keep FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper);
// OCR Results
/** \brief Create a new FD_C_OCRResultWrapper object
*
* \return Return a pointer to FD_C_OCRResultWrapper object
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_OCRResultWrapper*
FD_C_CreateOCRResultWrapper();
/** \brief Destroy a FD_C_OCRResultWrapper object
*
* \param[in] fd_c_ocr_result_wrapper pointer to FD_C_OCRResultWrapper object
*/
FASTDEPLOY_CAPI_EXPORT extern void FD_C_DestroyOCRResultWrapper(
__fd_take FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper);
/** \brief Destroy a FD_C_OCRResult object
*
* \param[in] fd_c_ocr_result pointer to FD_C_OCRResult object
*/
FASTDEPLOY_CAPI_EXPORT extern void FD_C_DestroyOCRResult(
__fd_take FD_C_OCRResult* fd_c_ocr_result);
/** \brief Get a FD_C_OCRResult object from FD_C_OCRResultWrapper object
*
* \param[in] fd_c_ocr_result_wrapper pointer to FD_C_OCRResultWrapper object
* \return Return a pointer to FD_C_OCRResult object
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_OCRResult*
FD_C_OCRResultWrapperGetData(
__fd_keep FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper);
/** \brief Create a new FD_C_OCRResultWrapper object from FD_C_OCRResult object
*
* \param[in] fd_c_ocr_result pointer to FD_C_OCRResult object
* \return Return a pointer to FD_C_OCRResultWrapper object
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_OCRResultWrapper*
FD_C_CreateOCRResultWrapperFromData(
__fd_keep FD_C_OCRResult* fd_c_ocr_result);
/** \brief Print OCRResult formated information
*
* \param[in] fd_c_ocr_result_wrapper pointer to FD_C_OCRResultWrapper object
* \return Return a string pointer
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give char*
FD_C_OCRResultWrapperStr(
__fd_keep FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper);
#ifdef __cplusplus
} // extern "C"
#endif

View File

@@ -54,6 +54,48 @@ FD_C_Mat FD_C_VisDetectionWithLabel(FD_C_Mat im,
return new cv::Mat(result);
}
// Visualizes a classification result on top of `im` and returns a new image.
// The caller owns the returned FD_C_Mat; `im` and the result struct are only
// read.
FD_C_Mat FD_C_VisClassification(FD_C_Mat im,
                                FD_C_ClassifyResult* fd_c_classify_result,
                                int top_k, float score_threshold,
                                float font_size) {
  // Convert the C result struct into a C++ ClassifyResult via a temporary
  // wrapper.
  FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper =
      FD_C_CreateClassifyResultWrapperFromData(fd_c_classify_result);
  auto& classify_result = CHECK_AND_CONVERT_FD_TYPE(
      ClassifyResultWrapper, fd_c_classify_result_wrapper);
  cv::Mat result = fastdeploy::vision::VisClassification(
      *(reinterpret_cast<cv::Mat*>(im)), *classify_result, top_k,
      score_threshold, font_size);
  // Fix: release the temporary wrapper; the original leaked it on every call.
  FD_C_DestroyClassifyResultWrapper(fd_c_classify_result_wrapper);
  return new cv::Mat(result);
}
// Visualizes a classification result with user-provided label strings and
// returns a new image. The caller owns the returned FD_C_Mat.
FD_C_Mat FD_C_VisClassificationWithLabel(
    FD_C_Mat im, FD_C_ClassifyResult* fd_c_classify_result,
    FD_C_OneDimArrayCstr* labels, int top_k, float score_threshold,
    float font_size) {
  // Collect the custom label strings (size_t index avoids the original
  // signed/unsigned comparison).
  std::vector<std::string> labels_in;
  labels_in.reserve(labels->size);
  for (size_t i = 0; i < labels->size; i++) {
    labels_in.emplace_back(labels->data[i].data);
  }
  // Convert the C result struct into a C++ ClassifyResult via a temporary
  // wrapper.
  FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper =
      FD_C_CreateClassifyResultWrapperFromData(fd_c_classify_result);
  auto& classify_result = CHECK_AND_CONVERT_FD_TYPE(
      ClassifyResultWrapper, fd_c_classify_result_wrapper);
  cv::Mat result = fastdeploy::vision::VisClassification(
      *(reinterpret_cast<cv::Mat*>(im)), *classify_result, labels_in, top_k,
      score_threshold, font_size);
  // Fix: release the temporary wrapper; the original leaked it on every call.
  FD_C_DestroyClassifyResultWrapper(fd_c_classify_result_wrapper);
  return new cv::Mat(result);
}
// Visualizes an OCR result (boxes/text) on top of `im` and returns a new
// image. The caller owns the returned FD_C_Mat.
FD_C_Mat FD_C_VisOcr(FD_C_Mat im, FD_C_OCRResult* fd_c_ocr_result) {
  // Convert the C OCR result into the C++ OCRResult via a temporary wrapper.
  FD_C_OCRResultWrapper* fd_c_ocr_result_wrapper =
      FD_C_CreateOCRResultWrapperFromData(fd_c_ocr_result);
  auto& ocr_result =
      CHECK_AND_CONVERT_FD_TYPE(OCRResultWrapper, fd_c_ocr_result_wrapper);
  cv::Mat result = fastdeploy::vision::VisOcr(*(reinterpret_cast<cv::Mat*>(im)),
                                              *ocr_result);
  // Fix: release the temporary wrapper; the original leaked it on every call.
  FD_C_DestroyOCRResultWrapper(fd_c_ocr_result_wrapper);
  return new cv::Mat(result);
}
#ifdef __cplusplus
}
#endif

View File

@@ -53,6 +53,43 @@ FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_Mat FD_C_VisDetectionWithLabel(
float score_threshold,
int line_size, float font_size);
/** \brief Show the visualized results for classification models
*
* \param[in] im the input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
* \param[in] result the result produced by model
* \param[in] top_k the length of return values, e.g., if topk==2, the result will include the 2 most possible class label for input image.
* \param[in] score_threshold threshold for top_k scores, the class will not be shown if the score is less than score_threshold
* \param[in] font_size font size
* \return cv::Mat type stores the visualized results
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_Mat FD_C_VisClassification(
FD_C_Mat im, FD_C_ClassifyResult* result, int top_k,
float score_threshold, float font_size);
/** \brief Show the visualized results with custom labels for classification models
*
* \param[in] im the input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
* \param[in] result the result produced by model
* \param[in] labels custom labels for user, the visualized result will show the corresponding custom labels
* \param[in] top_k the length of return values, e.g., if topk==2, the result will include the 2 most possible class label for input image.
* \param[in] score_threshold threshold for top_k scores, the class will not be shown if the score is less than score_threshold
* \param[in] font_size font size
* \return cv::Mat type stores the visualized results
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_Mat FD_C_VisClassificationWithLabel(
FD_C_Mat im, FD_C_ClassifyResult* result,
FD_C_OneDimArrayCstr* labels, int top_k,
float score_threshold, float font_size);
/** \brief Show the visualized results for Ocr models
*
* \param[in] im the input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
* \param[in] result the result produced by model
* \return cv::Mat type stores the visualized results
*/
FASTDEPLOY_CAPI_EXPORT extern __fd_give FD_C_Mat FD_C_VisOcr(FD_C_Mat im, FD_C_OCRResult* ocr_result);
#ifdef __cplusplus
} // extern "C"
#endif

View File

@@ -33,7 +33,7 @@ public class PPYOLOE {
custom_option = new RuntimeOption();
}
fd_ppyoloe_wrapper =
FD_C_CreatesPPYOLOEWrapper(model_file, params_file, config_file,
FD_C_CreatePPYOLOEWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -84,8 +84,8 @@ public class PPYOLOE {
// below are underlying C api
private IntPtr fd_ppyoloe_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPPYOLOEWrapper")]
private static extern IntPtr FD_C_CreatesPPYOLOEWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatePPYOLOEWrapper")]
private static extern IntPtr FD_C_CreatePPYOLOEWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPPYOLOEWrapper")]
@@ -136,7 +136,7 @@ public class PicoDet {
custom_option = new RuntimeOption();
}
fd_picodet_wrapper =
FD_C_CreatesPicoDetWrapper(model_file, params_file, config_file,
FD_C_CreatePicoDetWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -187,8 +187,8 @@ public class PicoDet {
// below are underlying C api
private IntPtr fd_picodet_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPicoDetWrapper")]
private static extern IntPtr FD_C_CreatesPicoDetWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatePicoDetWrapper")]
private static extern IntPtr FD_C_CreatePicoDetWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPicoDetWrapper")]
@@ -241,7 +241,7 @@ public class PPYOLO {
custom_option = new RuntimeOption();
}
fd_ppyolo_wrapper =
FD_C_CreatesPPYOLOWrapper(model_file, params_file, config_file,
FD_C_CreatePPYOLOWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -292,8 +292,8 @@ public class PPYOLO {
// below are underlying C api
private IntPtr fd_ppyolo_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPPYOLOWrapper")]
private static extern IntPtr FD_C_CreatesPPYOLOWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatePPYOLOWrapper")]
private static extern IntPtr FD_C_CreatePPYOLOWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPPYOLOWrapper")]
@@ -345,7 +345,7 @@ public class YOLOv3 {
custom_option = new RuntimeOption();
}
fd_yolov3_wrapper =
FD_C_CreatesYOLOv3Wrapper(model_file, params_file, config_file,
FD_C_CreateYOLOv3Wrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -396,8 +396,8 @@ public class YOLOv3 {
// below are underlying C api
private IntPtr fd_yolov3_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesYOLOv3Wrapper")]
private static extern IntPtr FD_C_CreatesYOLOv3Wrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateYOLOv3Wrapper")]
private static extern IntPtr FD_C_CreateYOLOv3Wrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyYOLOv3Wrapper")]
@@ -449,7 +449,7 @@ public class PaddleYOLOX {
custom_option = new RuntimeOption();
}
fd_paddleyolox_wrapper =
FD_C_CreatesPaddleYOLOXWrapper(model_file, params_file, config_file,
FD_C_CreatePaddleYOLOXWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -500,8 +500,8 @@ public class PaddleYOLOX {
// below are underlying C api
private IntPtr fd_paddleyolox_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPaddleYOLOXWrapper")]
private static extern IntPtr FD_C_CreatesPaddleYOLOXWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatePaddleYOLOXWrapper")]
private static extern IntPtr FD_C_CreatePaddleYOLOXWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPaddleYOLOXWrapper")]
@@ -553,7 +553,7 @@ public class FasterRCNN {
custom_option = new RuntimeOption();
}
fd_fasterrcnn_wrapper =
FD_C_CreatesFasterRCNNWrapper(model_file, params_file, config_file,
FD_C_CreateFasterRCNNWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -604,8 +604,8 @@ public class FasterRCNN {
// below are underlying C api
private IntPtr fd_fasterrcnn_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesFasterRCNNWrapper")]
private static extern IntPtr FD_C_CreatesFasterRCNNWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateFasterRCNNWrapper")]
private static extern IntPtr FD_C_CreateFasterRCNNWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyFasterRCNNWrapper")]
@@ -657,7 +657,7 @@ public class MaskRCNN {
custom_option = new RuntimeOption();
}
fd_maskrcnn_wrapper =
FD_C_CreatesMaskRCNNWrapper(model_file, params_file, config_file,
FD_C_CreateMaskRCNNWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -708,8 +708,8 @@ public class MaskRCNN {
// below are underlying C api
private IntPtr fd_maskrcnn_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesMaskRCNNWrapper")]
private static extern IntPtr FD_C_CreatesMaskRCNNWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateMaskRCNNWrapper")]
private static extern IntPtr FD_C_CreateMaskRCNNWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyMaskRCNNWrapper")]
@@ -761,7 +761,7 @@ public class SSD {
custom_option = new RuntimeOption();
}
fd_ssd_wrapper =
FD_C_CreatesSSDWrapper(model_file, params_file, config_file,
FD_C_CreateSSDWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -812,8 +812,8 @@ public class SSD {
// below are underlying C api
private IntPtr fd_ssd_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesSSDWrapper")]
private static extern IntPtr FD_C_CreatesSSDWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateSSDWrapper")]
private static extern IntPtr FD_C_CreateSSDWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroySSDWrapper")]
@@ -865,7 +865,7 @@ public class PaddleYOLOv5 {
custom_option = new RuntimeOption();
}
fd_paddleyolov5_wrapper =
FD_C_CreatesPaddleYOLOv5Wrapper(model_file, params_file, config_file,
FD_C_CreatePaddleYOLOv5Wrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -916,8 +916,8 @@ public class PaddleYOLOv5 {
// below are underlying C api
private IntPtr fd_paddleyolov5_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPaddleYOLOv5Wrapper")]
private static extern IntPtr FD_C_CreatesPaddleYOLOv5Wrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatePaddleYOLOv5Wrapper")]
private static extern IntPtr FD_C_CreatePaddleYOLOv5Wrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPaddleYOLOv5Wrapper")]
@@ -969,7 +969,7 @@ public class PaddleYOLOv6 {
custom_option = new RuntimeOption();
}
fd_paddleyolov6_wrapper =
FD_C_CreatesPaddleYOLOv6Wrapper(model_file, params_file, config_file,
FD_C_CreatePaddleYOLOv6Wrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1020,8 +1020,8 @@ public class PaddleYOLOv6 {
// below are underlying C api
private IntPtr fd_paddleyolov6_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPaddleYOLOv6Wrapper")]
private static extern IntPtr FD_C_CreatesPaddleYOLOv6Wrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatePaddleYOLOv6Wrapper")]
private static extern IntPtr FD_C_CreatePaddleYOLOv6Wrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPaddleYOLOv6Wrapper")]
@@ -1073,7 +1073,7 @@ public class PaddleYOLOv7 {
custom_option = new RuntimeOption();
}
fd_paddleyolov7_wrapper =
FD_C_CreatesPaddleYOLOv7Wrapper(model_file, params_file, config_file,
FD_C_CreatePaddleYOLOv7Wrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1124,8 +1124,8 @@ public class PaddleYOLOv7 {
// below are underlying C api
private IntPtr fd_paddleyolov7_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPaddleYOLOv7Wrapper")]
private static extern IntPtr FD_C_CreatesPaddleYOLOv7Wrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatePaddleYOLOv7Wrapper")]
private static extern IntPtr FD_C_CreatePaddleYOLOv7Wrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPaddleYOLOv7Wrapper")]
@@ -1177,7 +1177,7 @@ public class PaddleYOLOv8 {
custom_option = new RuntimeOption();
}
fd_paddleyolov8_wrapper =
FD_C_CreatesPaddleYOLOv8Wrapper(model_file, params_file, config_file,
FD_C_CreatePaddleYOLOv8Wrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1228,8 +1228,8 @@ public class PaddleYOLOv8 {
// below are underlying C api
private IntPtr fd_paddleyolov8_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPaddleYOLOv8Wrapper")]
private static extern IntPtr FD_C_CreatesPaddleYOLOv8Wrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatePaddleYOLOv8Wrapper")]
private static extern IntPtr FD_C_CreatePaddleYOLOv8Wrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPaddleYOLOv8Wrapper")]
@@ -1281,7 +1281,7 @@ public class RTMDet {
custom_option = new RuntimeOption();
}
fd_rtmdet_wrapper =
FD_C_CreatesRTMDetWrapper(model_file, params_file, config_file,
FD_C_CreateRTMDetWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1332,8 +1332,8 @@ public class RTMDet {
// below are underlying C api
private IntPtr fd_rtmdet_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesRTMDetWrapper")]
private static extern IntPtr FD_C_CreatesRTMDetWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateRTMDetWrapper")]
private static extern IntPtr FD_C_CreateRTMDetWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyRTMDetWrapper")]
@@ -1385,7 +1385,7 @@ public class CascadeRCNN {
custom_option = new RuntimeOption();
}
fd_cascadercnn_wrapper =
FD_C_CreatesCascadeRCNNWrapper(model_file, params_file, config_file,
FD_C_CreateCascadeRCNNWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1436,8 +1436,8 @@ public class CascadeRCNN {
// below are underlying C api
private IntPtr fd_cascadercnn_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesCascadeRCNNWrapper")]
private static extern IntPtr FD_C_CreatesCascadeRCNNWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateCascadeRCNNWrapper")]
private static extern IntPtr FD_C_CreateCascadeRCNNWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyCascadeRCNNWrapper")]
@@ -1489,7 +1489,7 @@ public class PSSDet {
custom_option = new RuntimeOption();
}
fd_pssdet_wrapper =
FD_C_CreatesPSSDetWrapper(model_file, params_file, config_file,
FD_C_CreatePSSDetWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1540,8 +1540,8 @@ public class PSSDet {
// below are underlying C api
private IntPtr fd_pssdet_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesPSSDetWrapper")]
private static extern IntPtr FD_C_CreatesPSSDetWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatePSSDetWrapper")]
private static extern IntPtr FD_C_CreatePSSDetWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyPSSDetWrapper")]
@@ -1593,7 +1593,7 @@ public class RetinaNet {
custom_option = new RuntimeOption();
}
fd_retinanet_wrapper =
FD_C_CreatesRetinaNetWrapper(model_file, params_file, config_file,
FD_C_CreateRetinaNetWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1644,8 +1644,8 @@ public class RetinaNet {
// below are underlying C api
private IntPtr fd_retinanet_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesRetinaNetWrapper")]
private static extern IntPtr FD_C_CreatesRetinaNetWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateRetinaNetWrapper")]
private static extern IntPtr FD_C_CreateRetinaNetWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyRetinaNetWrapper")]
@@ -1697,7 +1697,7 @@ public class FCOS {
custom_option = new RuntimeOption();
}
fd_fcos_wrapper =
FD_C_CreatesFCOSWrapper(model_file, params_file, config_file,
FD_C_CreateFCOSWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1748,8 +1748,8 @@ public class FCOS {
// below are underlying C api
private IntPtr fd_fcos_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesFCOSWrapper")]
private static extern IntPtr FD_C_CreatesFCOSWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateFCOSWrapper")]
private static extern IntPtr FD_C_CreateFCOSWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyFCOSWrapper")]
@@ -1801,7 +1801,7 @@ public class TTFNet {
custom_option = new RuntimeOption();
}
fd_ttfnet_wrapper =
FD_C_CreatesTTFNetWrapper(model_file, params_file, config_file,
FD_C_CreateTTFNetWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1852,8 +1852,8 @@ public class TTFNet {
// below are underlying C api
private IntPtr fd_ttfnet_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesTTFNetWrapper")]
private static extern IntPtr FD_C_CreatesTTFNetWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateTTFNetWrapper")]
private static extern IntPtr FD_C_CreateTTFNetWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyTTFNetWrapper")]
@@ -1905,7 +1905,7 @@ public class TOOD {
custom_option = new RuntimeOption();
}
fd_tood_wrapper =
FD_C_CreatesTOODWrapper(model_file, params_file, config_file,
FD_C_CreateTOODWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -1956,8 +1956,8 @@ public class TOOD {
// below are underlying C api
private IntPtr fd_tood_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesTOODWrapper")]
private static extern IntPtr FD_C_CreatesTOODWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateTOODWrapper")]
private static extern IntPtr FD_C_CreateTOODWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyTOODWrapper")]
@@ -2009,7 +2009,7 @@ public class GFL {
custom_option = new RuntimeOption();
}
fd_gfl_wrapper =
FD_C_CreatesGFLWrapper(model_file, params_file, config_file,
FD_C_CreateGFLWrapper(model_file, params_file, config_file,
custom_option.GetWrapperPtr(), model_format);
}
@@ -2060,8 +2060,8 @@ public class GFL {
// below are underlying C api
private IntPtr fd_gfl_wrapper;
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreatesGFLWrapper")]
private static extern IntPtr FD_C_CreatesGFLWrapper(
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_CreateGFLWrapper")]
private static extern IntPtr FD_C_CreateGFLWrapper(
string model_file, string params_file, string config_file,
IntPtr fd_runtime_option_wrapper, ModelFormat model_format);
[DllImport("fastdeploy.dll", EntryPoint = "FD_C_DestroyGFLWrapper")]

View File

@@ -27,3 +27,44 @@ struct ClassifyResult {
- **label_ids**(list of int): Member variable which indicates the classification results of a single image. Its number is determined by the topk passed in when using the classification model, e.g. it can return the top 5 classification results.
- **scores**(list of float): Member variable which indicates the confidence level of a single image on the corresponding classification result. Its number is determined by the topk passed in when using the classification model, e.g. it can return the top 5 classification confidence level.
## C# Definition
`fastdeploy.vision.ClassifyResult`
```C#
public struct ClassifyResult {
public List<int> label_ids;
public List<float> scores;
}
```
- **label_ids**(list of int): Member variable which indicates the classification results of a single image. Its number is determined by the topk passed in when using the classification model, e.g. it can return the top 5 classification results.
- **scores**(list of float): Member variable which indicates the confidence level of a single image on the corresponding classification result. Its number is determined by the topk passed in when using the classification model, e.g. it can return the top 5 classification confidence level.
## C Definition
```c
typedef struct FD_C_ClassifyResult {
FD_C_OneDimArrayInt32 label_ids;
FD_C_OneDimArrayFloat scores;
} FD_C_ClassifyResult;
```
- **label_ids**(FD_C_OneDimArrayInt32): Member variable which indicates the classification results of a single image. Its number is determined by the topk passed in when using the classification model, e.g. it can return the top 5 classification results. FD_C_OneDimArrayInt32 includes two fields, i.e. size and data, in which size represents the number of elements, and data is the array to store elements.
```c
typedef struct FD_C_OneDimArrayInt32 {
size_t size;
int32_t* data;
} FD_C_OneDimArrayInt32;
```
- **scores**(FD_C_OneDimArrayFloat): Member variable which indicates the confidence level of a single image on the corresponding classification result. Its number is determined by the topk passed in when using the classification model, e.g. it can return the top 5 classification confidence level. FD_C_OneDimArrayFloat includes two fields, i.e. size and data, in which size represents the number of elements, and data is the array to store elements.
```c
typedef struct FD_C_OneDimArrayFloat {
size_t size;
float* data;
} FD_C_OneDimArrayFloat;
```

View File

@@ -27,3 +27,44 @@ struct ClassifyResult {
- **label_ids**(list of int): 成员变量表示单张图片的分类结果其个数根据在使用分类模型时传入的topk决定例如可以返回top 5的分类结果
- **scores**(list of float): 成员变量表示单张图片在相应分类结果上的置信度其个数根据在使用分类模型时传入的topk决定例如可以返回top 5的分类置信度
## C# 定义
`fastdeploy.vision.ClassifyResult`
```C#
public struct ClassifyResult {
public List<int> label_ids;
public List<float> scores;
}
```
- **label_ids**(list of int): 成员变量表示单张图片的分类结果其个数根据在使用分类模型时传入的topk决定例如可以返回top 5的分类结果
- **scores**(list of float): 成员变量表示单张图片在相应分类结果上的置信度其个数根据在使用分类模型时传入的topk决定例如可以返回top 5的分类置信度
## C定义
```c
typedef struct FD_C_ClassifyResult {
FD_C_OneDimArrayInt32 label_ids;
FD_C_OneDimArrayFloat scores;
} FD_C_ClassifyResult;
```
- **label_ids**(FD_C_OneDimArrayInt32): 成员变量,表示单张图片的分类结果,其个数根据在使用分类模型时传入的topk决定,例如可以返回top 5的分类结果。FD_C_OneDimArrayInt32包含两个字段,即size和data,其中size表示数组的大小,data表示存储结果的数组。
```c
typedef struct FD_C_OneDimArrayInt32 {
size_t size;
int32_t* data;
} FD_C_OneDimArrayInt32;
```
- **scores**(FD_C_OneDimArrayFloat): 成员变量,表示单张图片在相应分类结果上的置信度,其个数根据在使用分类模型时传入的topk决定,例如可以返回top 5的分类置信度。FD_C_OneDimArrayFloat包含两个字段,即size和data,其中size表示数组的大小,data表示存储结果的数组。
```c
typedef struct FD_C_OneDimArrayFloat {
size_t size;
float* data;
} FD_C_OneDimArrayFloat;
```

View File

@@ -64,3 +64,92 @@ fastdeploy.vision.Mask
```
- **data**: Member variable which indicates a detected mask.
- **shape**: Member variable which indicates the shape of the mask, e.g. (h,w).
## C# Definition
```c#
fastdeploy.vision.DetectionResult
```
```C#
public struct DetectionResult {
public List<float[]> boxes;
public List<float> scores;
public List<int> label_ids;
public List<Mask> masks;
public bool contain_masks;
}
```
- **boxes**(list of array(float)): Member variable which indicates the coordinates of all detected target boxes in a single frame. It is a list, and each element in it is also a list of length 4, representing a box with 4 float values representing xmin, ymin, xmax, ymax, i.e. the coordinates of the top left and bottom right corner.
- **scores**(list of float): Member variable which indicates the confidence level of all targets detected in a single image.
- **label_ids**(list of int): Member variable which indicates all target categories detected in a single image.
- **masks**: Member variable which indicates all detected instance masks of a single image, where the number of elements and the shape size are the same as `boxes`.
- **contain_masks**: Member variable which indicates whether the detected result contains instance masks, which is generally true for the instance segmentation model.
```C#
public struct Mask {
public List<byte> data;
public List<long> shape;
}
```
- **data**: Member variable which indicates a detected mask.
- **shape**: Member variable which indicates the shape of the mask, e.g. (h,w).
## C Definition
```c
typedef struct FD_C_DetectionResult {
FD_C_TwoDimArrayFloat boxes;
FD_C_OneDimArrayFloat scores;
FD_C_OneDimArrayInt32 label_ids;
FD_C_OneDimMask masks;
FD_C_Bool contain_masks;
} FD_C_DetectionResult;
```
- **boxes**(FD_C_TwoDimArrayFloat): Member variable which indicates the coordinates of all detected target boxes in a single frame. It is a list, and each element in it is also a list of length 4, representing a box with 4 float values representing xmin, ymin, xmax, ymax, i.e. the coordinates of the top left and bottom right corner. FD_C_TwoDimArrayFloat includes two fields, i.e. size and data, in which size represents the number of elements, and data is the array to store elements of type FD_C_OneDimArrayFloat.
```c
typedef struct FD_C_TwoDimArrayFloat {
size_t size;
FD_C_OneDimArrayFloat* data;
} FD_C_TwoDimArrayFloat;
```
- **scores**(FD_C_OneDimArrayFloat): Member variable which indicates the confidence level of all targets detected in a single image. FD_C_OneDimArrayFloat includes two fields, i.e. size and data, in which size represents the number of elements, and data is the array to store elements.
```c
typedef struct FD_C_OneDimArrayFloat {
size_t size;
float* data;
} FD_C_OneDimArrayFloat;
```
- **label_ids**(FD_C_OneDimArrayInt32): Member variable which indicates all target categories detected in a single image. FD_C_OneDimArrayInt32 includes two fields, i.e. size and data, in which size represents the number of elements, and data is the array to store elements.
```c
typedef struct FD_C_OneDimArrayInt32 {
size_t size;
int32_t* data;
} FD_C_OneDimArrayInt32;
```
- **masks**(FD_C_OneDimMask): Member variable which indicates all detected instance masks of a single image, where the number of elements and the shape size are the same as `boxes`.
```c
typedef struct FD_C_OneDimMask {
size_t size;
FD_C_Mask* data;
} FD_C_OneDimMask;
```
```c
typedef struct FD_C_Mask {
FD_C_OneDimArrayUint8 data;
FD_C_OneDimArrayInt64 shape;
} FD_C_Mask;
```
- **contain_masks**: Member variable which indicates whether the detected result contains instance masks, which is generally true for the instance segmentation model.

View File

@@ -1,4 +1,4 @@
简体中文 [English](detection_result.md)
简体中文 [English](detection_result.md)
# DetectionResult 目标检测结果
DetectionResult代码定义在`fastdeploy/vision/common/result.h`中,用于表明图像检测出来的目标框、目标类别和目标置信度。
@@ -63,3 +63,94 @@ fastdeploy.vision.Mask
```
- **data**: 成员变量表示检测到的一个mask
- **shape**: 成员变量表示mask的shape如 (h,w)
## C# 定义
```c#
fastdeploy.vision.DetectionResult
```
```C#
public struct DetectionResult {
public List<float[]> boxes;
public List<float> scores;
public List<int> label_ids;
public List<Mask> masks;
public bool contain_masks;
}
```
- **boxes**(list of array(float)): 成员变量表示单张图片检测出来的所有目标框坐标。boxes是一个list其每个元素为一个长度为4的数组 表示为一个框每个框以4个float数值依次表示xmin, ymin, xmax, ymax 即左上角和右下角坐标
- **scores**(list of float): 成员变量,表示单张图片检测出来的所有目标置信度
- **label_ids**(list of int): 成员变量,表示单张图片检测出来的所有目标类别
- **masks**: 成员变量表示单张图片检测出来的所有实例mask其元素个数及shape大小与`boxes`一致
- **contain_masks**: 成员变量表示检测结果中是否包含实例mask实例分割模型的结果此项一般为True.
```C#
public struct Mask {
public List<byte> data;
public List<long> shape;
}
```
- **data**: 成员变量表示检测到的一个mask
- **shape**: 成员变量表示mask的shape如 (h,w)
## C定义
```c
typedef struct FD_C_DetectionResult {
FD_C_TwoDimArrayFloat boxes;
FD_C_OneDimArrayFloat scores;
FD_C_OneDimArrayInt32 label_ids;
FD_C_OneDimMask masks;
FD_C_Bool contain_masks;
} FD_C_DetectionResult;
```
- **boxes**(FD_C_TwoDimArrayFloat): 成员变量,表示单张图片检测出来的所有目标框坐标。boxes是一个list,其每个元素为一个长度为4的数组,表示为一个框,每个框以4个float数值依次表示xmin, ymin, xmax, ymax,即左上角和右下角坐标。FD_C_TwoDimArrayFloat表示一个二维数组,size表示所包含的一维数组的个数,data表示FD_C_OneDimArrayFloat的一维数组。
```c
typedef struct FD_C_TwoDimArrayFloat {
size_t size;
FD_C_OneDimArrayFloat* data;
} FD_C_TwoDimArrayFloat;
```
- **scores**(FD_C_OneDimArrayFloat): 成员变量,表示单张图片检测出来的所有目标置信度。FD_C_OneDimArrayFloat包含两个字段,即size和data,其中size表示数组的大小,data表示存储结果的数组。
```c
typedef struct FD_C_OneDimArrayFloat {
size_t size;
float* data;
} FD_C_OneDimArrayFloat;
```
- **label_ids**(FD_C_OneDimArrayInt32): 成员变量,表示单张图片检测出来的所有目标类别。FD_C_OneDimArrayInt32包含两个字段,即size和data,其中size表示数组的大小,data表示存储结果的数组。
```c
typedef struct FD_C_OneDimArrayInt32 {
size_t size;
int32_t* data;
} FD_C_OneDimArrayInt32;
```
- **masks**(FD_C_OneDimMask): 成员变量表示单张图片检测出来的所有实例mask其元素个数及shape大小与`boxes`一致
```c
typedef struct FD_C_OneDimMask {
size_t size;
FD_C_Mask* data;
} FD_C_OneDimMask;
```
```c
typedef struct FD_C_Mask {
FD_C_OneDimArrayUint8 data;
FD_C_OneDimArrayInt64 shape;
} FD_C_Mask;
```
- **contain_masks**: 成员变量表示检测结果中是否包含实例mask实例分割模型的结果此项一般为True.

View File

@@ -12,6 +12,8 @@
| ENABLE_OPENVINO_BACKEND | Linux(x64)/Windows(x64)/Mac OSX(x86) | 默认OFF是否编译集成OpenVINO后端 |
| ENABLE_VISION | Linux(x64)/Windows(x64)/Mac OSX(x86) | 默认OFF是否编译集成视觉模型的部署模块 |
| ENABLE_TEXT | Linux(x64)/Windows(x64)/Mac OSX(x86) | 默认OFF是否编译集成文本NLP模型的部署模块 |
| ENABLE_CAPI | Linux(x64)/Windows(x64)/Mac OSX(x86) | 默认OFF是否编译集成C API |
| ENABLE_CSHARPAPI | Windows(x64) | 默认OFF是否编译集成C# API |
第三方库依赖指定(不设定如下参数,会自动下载预编译库)
| 选项 | 说明 |
@@ -67,7 +69,8 @@ cmake .. -G "Visual Studio 16 2019" -A x64 ^
-DENABLE_OPENVINO_BACKEND=ON ^
-DENABLE_VISION=ON ^
-DENABLE_TEXT=ON ^
-DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy" ^
-DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy"
% nuget restore (please execute it when ENABLE_CSHARPAPI is ON, to prepare dependencies in C#)
msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=x64
msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64
```

View File

@@ -16,6 +16,8 @@
| ENABLE_TEXT | Linux(x64)/Windows(x64) | 默认OFF是否编译集成文本NLP模型的部署模块 |
| CUDA_DIRECTORY | Linux(x64)/Windows(x64) | 默认/usr/local/cuda要求CUDA>=11.2 |
| TRT_DIRECTORY | Linux(x64)/Windows(x64) | 默认为空要求TensorRT>=8.4 指定路径如/Download/TensorRT-8.5 |
| ENABLE_CAPI | Linux(x64)/Windows(x64)/Mac OSX(x86) | 默认OFF是否编译集成C API |
| ENABLE_CSHARPAPI | Windows(x64) | 默认OFF是否编译集成C# API |
第三方库依赖指定(不设定如下参数,会自动下载预编译库)
| 选项 | 说明 |
@@ -86,6 +88,7 @@ cmake .. -G "Visual Studio 16 2019" -A x64 ^
-DTRT_DIRECTORY="D:\Paddle\TensorRT-8.4.1.5" ^
-DCUDA_DIRECTORY="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2" ^
-DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy"
% nuget restore (please execute it when ENABLE_CSHARPAPI is ON, to prepare dependencies in C#)
msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=x64
msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64
```

View File

@@ -9,10 +9,12 @@ Please do not modify other cmake paramters exclude the following options.
| Option | Supported Platform | Description |
|:------------------------|:------- | :--------------------------------------------------------------------------|
| ENABLE_ORT_BACKEND | Linux(x64/aarch64)/Windows(x64)/Mac OSX(arm64/x86) | Default OFF, whether to integrate ONNX Runtime backend |
| ENABLE_PADDLE_BACKEND | Linux(x64)/Windows(x64) | Default OFF, whether to integrate Paddle Inference backend |
| ENABLE_PADDLE_BACKEND | Linux(x64)/Windows(x64) | Default OFF, whether to integrate Paddle Inference backend |
| ENABLE_OPENVINO_BACKEND | Linux(x64)/Windows(x64)/Mac OSX(x86) | Default OFF, whether to integrate OpenVINO backend |
| ENABLE_VISION | Linux(x64/aarch64)/Windows(x64)/Mac OSX(arm64/x86) | Default OFF, whether to integrate vision models |
| ENABLE_TEXT | Linux(x64/aarch64)/Windows(x64)/Mac OSX(arm64/x86) | Default OFF, whether to integrate text models |
| ENABLE_CAPI | Linux(x64)/Windows(x64)/Mac OSX(x86) | Default OFF, whether to integrate C API |
| ENABLE_CSHARPAPI | Windows(x64) | Default OFF, whether to integrate C# API |
The configuration for third libraries (Optional, if the following option is not defined, the prebuilt third libraries will be downloaded automatically while building FastDeploy).
| Option | Description |
@@ -51,7 +53,7 @@ make install
### Windows
Prerequisite for Compiling on Windows:
Prerequisite for Compiling on Windows:
- Windows 10/11 x64
- Visual Studio 2019
@@ -68,6 +70,7 @@ cmake .. -G "Visual Studio 16 2019" -A x64 \
-DENABLE_OPENVINO_BACKEND=ON \
-DENABLE_VISION=ON \
-DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy"
% nuget restore (please execute it when ENABLE_CSHARPAPI is ON, to prepare dependencies in C#)
msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=x64
msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64
```
@@ -78,7 +81,7 @@ If you use CMake GUI, please refer to [How to Compile with CMakeGUI + Visual Stu
## How to Build and Install Python SDK
Prerequisite for Compiling:
Prerequisite for Compiling:
- gcc/g++ >= 5.4 (8.2 is recommended)
- cmake >= 3.18.0

View File

@@ -9,13 +9,15 @@ Please do not modify other cmake paramters exclude the following options.
| Option | Supported Platform | Description |
|:------------------------|:------- | :--------------------------------------------------------------------------|
| ENABLE_ORT_BACKEND | Linux(x64)/Windows(x64) | Default OFF, whether to integrate ONNX Runtime backend |
| ENABLE_PADDLE_BACKEND | Linux(x64)/Windows(x64) | Default OFF, whether to integrate Paddle Inference backend |
| ENABLE_TRT_BACKEND | Linux(x64)/Windows(x64) | Default OFF, whether to integrate TensorRT backend |
| ENABLE_PADDLE_BACKEND | Linux(x64)/Windows(x64) | Default OFF, whether to integrate Paddle Inference backend |
| ENABLE_TRT_BACKEND | Linux(x64)/Windows(x64) | Default OFF, whether to integrate TensorRT backend |
| ENABLE_OPENVINO_BACKEND | Linux(x64)/Windows(x64) | Default OFF, whether to integrate OpenVINO backend(Only CPU is supported) |
| ENABLE_VISION | Linux(x64)/Windows(x64) | Default OFF, whether to integrate vision models |
| ENABLE_TEXT | Linux(x64)/Windows(x64) | Default OFF, whether to integrate text models |
| CUDA_DIRECTORY | Linux(x64)/Windows(x64) | Default /usr/local/cuda, require CUDA>=11.2 |
| TRT_DIRECTORY | Linux(x64)/Windows(x64) | Default empty, require TensorRT>=8.4, e.g. /Download/TensorRT-8.5 |
| ENABLE_CAPI | Linux(x64)/Windows(x64)/Mac OSX(x86) | Default OFF, whether to integrate C API |
| ENABLE_CSHARPAPI | Windows(x64) | Default OFF, whether to integrate C# API |
The configuration for third libraries (Optional, if the following option is not defined, the prebuilt third libraries will be downloaded automatically while building FastDeploy).
| Option | Description |
@@ -85,6 +87,7 @@ cmake .. -G "Visual Studio 16 2019" -A x64 \
-DTRT_DIRECTORY="D:\Paddle\TensorRT-8.4.1.5" \
-DCUDA_DIRECTORY="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2" \
-DCMAKE_INSTALL_PREFIX="D:\Paddle\compiled_fastdeploy"
% nuget restore (please execute it when ENABLE_CSHARPAPI is ON, to prepare dependencies in C#)
msbuild fastdeploy.sln /m /p:Configuration=Release /p:Platform=x64
msbuild INSTALL.vcxproj /m /p:Configuration=Release /p:Platform=x64
```

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.23)
cmake_minimum_required(VERSION 3.10)
project(silero_vad)
set(CMAKE_CXX_STANDARD 11)

View File

@@ -93,7 +93,7 @@ void Vad::setAudioCofig(int sr, int frame_ms, float threshold,
Vad::speech_pad_ms_ = speech_pad_ms;
}
bool Vad::Preprocess(std::vector<float> audioWindowData) {
bool Vad::Preprocess(std::vector<float>& audioWindowData) {
inputTensors_.resize(4);
inputTensors_[0].name = "input";
inputTensors_[0].SetExternalData(input_node_dims_, fastdeploy::FDDataType::FP32,

View File

@@ -50,7 +50,7 @@ class Vad : public fastdeploy::FastDeployModel {
private:
bool Initialize();
bool Preprocess(std::vector<float> audioWindowData);
bool Preprocess(std::vector<float>& audioWindowData);
bool Postprocess();

View File

@@ -84,11 +84,13 @@ int main(int argc, char* argv[]) {
runtime_option.SetModelPath(model_file, "", fd::ModelFormat::TORCHSCRIPT);
runtime_option.UsePorosBackend();
runtime_option.UseGpu(0);
runtime_option.is_dynamic = true;
// Compile runtime
std::unique_ptr<fd::Runtime> runtime =
std::unique_ptr<fd::Runtime>(new fd::Runtime());
runtime->Init(runtime_option);
if (!runtime->Compile(prewarm_datas, runtime_option)) {
std::cerr << "--- Init FastDeploy Runitme Failed! "
<< "\n--- Model: " << model_file << std::endl;
@@ -114,4 +116,4 @@ int main(int argc, char* argv[]) {
output_tensors[0].PrintInfo();
return 0;
}
}

View File

@@ -0,0 +1,13 @@
PROJECT(infer_demo C)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# 指定下载解压后的fastdeploy库路径
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# 添加FastDeploy依赖头文件
include_directories(${FASTDEPLOY_INCS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.c)
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})

View File

@@ -0,0 +1,183 @@
English | [简体中文](README_CN.md)
# PaddleClas C Deployment Example
This directory provides `infer.c` as an example to quickly finish the deployment of PaddleClas models on CPU/GPU.
Before deployment, two steps require confirmation.
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md).
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md).
Taking ResNet50_vd inference on Linux as an example, the compilation test can be completed by executing the following command in this directory. FastDeploy version 1.0.4 or above (x.x.x>=1.0.4) is required to support this model.
```bash
mkdir build
cd build
# Download FastDeploy precompiled library. Users can choose your appropriate version in the`FastDeploy Precompiled Library` mentioned above
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
# Download ResNet50_vd model file and test images
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
tar -xvf ResNet50_vd_infer.tgz
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
# CPU inference
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 0
# GPU inference
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 1
```
The above command works for Linux or MacOS. Refer to
- [How to use FastDeploy C++ SDK in Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md) for SDK use-pattern in Windows
## PaddleClas C Interface
### RuntimeOption
```c
FD_C_RuntimeOptionWrapper* FD_C_CreateRuntimeOptionWrapper()
```
> Create a RuntimeOption object, and return a pointer to manipulate it.
>
> **Return**
>
> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): Pointer to manipulate RuntimeOption object.
```c
void FD_C_RuntimeOptionWrapperUseCpu(
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper)
```
> Enable Cpu inference.
>
> **Params**
>
> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): Pointer to manipulate RuntimeOption object.
```c
void FD_C_RuntimeOptionWrapperUseGpu(
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
int gpu_id)
```
> Enable GPU inference.
>
> **Params**
>
> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): Pointer to manipulate RuntimeOption object.
> * **gpu_id**(int): gpu id
### Model
```c
FD_C_PaddleClasModelWrapper* FD_C_CreatePaddleClasModelWrapper(
const char* model_file, const char* params_file, const char* config_file,
FD_C_RuntimeOptionWrapper* runtime_option,
const FD_C_ModelFormat model_format)
```
> Create a PaddleClas model object, and return a pointer to manipulate it.
>
> **Params**
>
> * **model_file**(const char*): Model file path
> * **params_file**(const char*): Parameter file path
> * **config_file**(const char*): Configuration file path, which is the deployment yaml file exported by PaddleClas.
> * **runtime_option**(FD_C_RuntimeOptionWrapper*): Backend inference configuration. None by default, which is the default configuration
> * **model_format**(FD_C_ModelFormat): Model format. Paddle format by default
>
> **Return**
> * **fd_c_ppclas_wrapper**(FD_C_PaddleClasModelWrapper*): Pointer to manipulate PaddleClas object.
#### Read and write image
```c
FD_C_Mat FD_C_Imread(const char* imgpath)
```
> Read an image, and return a pointer to cv::Mat.
>
> **Params**
>
> * **imgpath**(const char*): image path
>
> **Return**
>
> * **imgmat**(FD_C_Mat): pointer to cv::Mat object which holds the image.
```c
FD_C_Bool FD_C_Imwrite(const char* savepath, FD_C_Mat img);
```
> Write image to a file.
>
> **Params**
>
> * **savepath**(const char*): save path
> * **img**(FD_C_Mat): pointer to cv::Mat object
>
> **Return**
>
> * **result**(FD_C_Bool): bool to indicate success or failure
#### Prediction
```c
FD_C_Bool FD_C_PaddleClasModelWrapperPredict(
__fd_take FD_C_PaddleClasModelWrapper* fd_c_ppclas_wrapper, FD_C_Mat img,
FD_C_ClassifyResult* fd_c_ppclas_result)
```
>
> Predict an image, and generate classification result.
>
> **Params**
> * **fd_c_ppclas_wrapper**(FD_C_PaddleClasModelWrapper*): pointer to manipulate PaddleClas object
> * **img**(FD_C_Mat): pointer to cv::Mat object, which can be obtained by the FD_C_Imread interface
> * **fd_c_ppclas_result** (FD_C_ClassifyResult*): The classification result, including label_id, and the corresponding confidence. Refer to [Visual Model Prediction Results](../../../../../docs/api/vision_results/) for the description of ClassifyResult
#### Result
```c
FD_C_ClassifyResultWrapper* FD_C_CreateClassifyResultWrapperFromData(
FD_C_ClassifyResult* fd_c_classify_result)
```
>
> Create a pointer to FD_C_ClassifyResultWrapper structure, which contains `fastdeploy::vision::ClassifyResult` object in C++. You can call methods in C++ ClassifyResult object by C API with this pointer.
>
> **Params**
> * **fd_c_classify_result**(FD_C_ClassifyResult*): pointer to FD_C_ClassifyResult structure
>
> **Return**
> * **fd_c_classify_result_wrapper**(FD_C_ClassifyResultWrapper*): pointer to FD_C_ClassifyResultWrapper structure
```c
char* FD_C_ClassifyResultWrapperStr(
FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper);
```
>
> Call the Str() method of the `fastdeploy::vision::ClassifyResult` object contained in the FD_C_ClassifyResultWrapper structure, and return a string describing the information in the result.
>
> **Params**
> * **fd_c_classify_result_wrapper**(FD_C_ClassifyResultWrapper*): pointer to FD_C_ClassifyResultWrapper structure
>
> **Return**
> * **str**(char*): a string to describe information in result
- [Model Description](../../)
- [Python Deployment](../python)
- [Visual Model prediction results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../docs/en/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,189 @@
[English](README.md) | 简体中文
# PaddleClas C 部署示例
本目录下提供`infer_xxx.c`来调用C API快速完成PaddleClas系列模型在CPU/GPU上部署的示例。
在部署前,需确认以下两个步骤
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. 根据开发环境下载预编译部署库和samples代码参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
以Linux上ResNet50_vd推理为例在本目录执行如下命令即可完成编译测试支持此模型需保证FastDeploy版本1.0.4以上(x.x.x>=1.0.4)
```bash
mkdir build
cd build
# 下载FastDeploy预编译库用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
tar xvf fastdeploy-linux-x64-x.x.x.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
make -j
# 下载ResNet50_vd模型文件和测试图片
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
tar -xvf ResNet50_vd_infer.tgz
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
# CPU推理
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 0
# GPU推理
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 1
```
以上命令只适用于Linux或MacOS, Windows下SDK的使用方式请参考:
- [如何在Windows中使用FastDeploy C++ SDK](../../../../../docs/cn/faq/use_sdk_on_windows.md)
如果用户使用华为昇腾NPU部署, 请参考以下方式在部署前初始化部署环境:
- [如何使用华为昇腾NPU部署](../../../../../docs/cn/faq/use_sdk_on_ascend.md)
## PaddleClas C API接口
### 配置
```c
FD_C_RuntimeOptionWrapper* FD_C_CreateRuntimeOptionWrapper()
```
> 创建一个RuntimeOption的配置对象并且返回操作它的指针。
>
> **返回**
>
> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): 指向RuntimeOption对象的指针
```c
void FD_C_RuntimeOptionWrapperUseCpu(
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper)
```
> 开启CPU推理
>
> **参数**
>
> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): 指向RuntimeOption对象的指针
```c
void FD_C_RuntimeOptionWrapperUseGpu(
FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
int gpu_id)
```
> 开启GPU推理
>
> **参数**
>
> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): 指向RuntimeOption对象的指针
> * **gpu_id**(int): 显卡号
### 模型
```c
FD_C_PaddleClasModelWrapper* FD_C_CreatePaddleClasModelWrapper(
const char* model_file, const char* params_file, const char* config_file,
FD_C_RuntimeOptionWrapper* runtime_option,
const FD_C_ModelFormat model_format)
```
> 创建一个PaddleClas的模型并且返回操作它的指针。
>
> **参数**
>
> * **model_file**(const char*): 模型文件路径
> * **params_file**(const char*): 参数文件路径
> * **config_file**(const char*): 配置文件路径即PaddleClas导出的部署yaml文件
> * **runtime_option**(FD_C_RuntimeOptionWrapper*): 指向RuntimeOption的指针表示后端推理配置
> * **model_format**(FD_C_ModelFormat): 模型格式
>
> **返回**
> * **fd_c_ppclas_wrapper**(FD_C_PaddleClasModelWrapper*): 指向PaddleClas模型对象的指针
#### 读写图像
```c
FD_C_Mat FD_C_Imread(const char* imgpath)
```
> 读取一个图像并且返回cv::Mat的指针。
>
> **参数**
>
> * **imgpath**(const char*): 图像文件路径
>
> **返回**
>
> * **imgmat**(FD_C_Mat): 指向图像数据cv::Mat的指针。
```c
FD_C_Bool FD_C_Imwrite(const char* savepath, FD_C_Mat img);
```
> 将图像写入文件中。
>
> **参数**
>
> * **savepath**(const char*): 保存图像的路径
> * **img**(FD_C_Mat): 指向图像数据的指针
>
> **返回**
>
> * **result**(FD_C_Bool): 表示操作是否成功
#### Predict函数
```c
FD_C_Bool FD_C_PaddleClasModelWrapperPredict(
__fd_take FD_C_PaddleClasModelWrapper* fd_c_ppclas_wrapper, FD_C_Mat img,
FD_C_ClassifyResult* fd_c_ppclas_result)
```
>
> 模型预测接口,输入图像,直接生成分类结果。
>
> **参数**
> * **fd_c_ppclas_wrapper**(FD_C_PaddleClasModelWrapper*): 指向PaddleClas模型的指针
> * **img**FD_C_Mat: 输入图像的指针指向cv::Mat对象可以调用FD_C_Imread读取图像获取
> * **fd_c_ppclas_result**FD_C_ClassifyResult*): 分类结果包括label_id以及相应的置信度, ClassifyResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
#### Predict结果
```c
FD_C_ClassifyResultWrapper* FD_C_CreateClassifyResultWrapperFromData(
FD_C_ClassifyResult* fd_c_classify_result)
```
>
> 创建一个FD_C_ClassifyResultWrapper对象的指针FD_C_ClassifyResultWrapper中包含了C++的`fastdeploy::vision::ClassifyResult`对象通过该指针使用C API可以访问调用对应C++中的函数。
>
>
> **参数**
> * **fd_c_classify_result**(FD_C_ClassifyResult*): 指向FD_C_ClassifyResult对象的指针
>
> **返回**
> * **fd_c_classify_result_wrapper**(FD_C_ClassifyResultWrapper*): 指向FD_C_ClassifyResultWrapper的指针
```c
char* FD_C_ClassifyResultWrapperStr(
FD_C_ClassifyResultWrapper* fd_c_classify_result_wrapper);
```
>
> 调用FD_C_ClassifyResultWrapper所包含的`fastdeploy::vision::ClassifyResult`对象的Str()方法,返回相关结果内数据信息的字符串。
>
> **参数**
> * **fd_c_classify_result_wrapper**(FD_C_ClassifyResultWrapper*): 指向FD_C_ClassifyResultWrapper对象的指针
>
> **返回**
> * **str**(char*): 表示结果数据信息的字符串
- [模型介绍](../../)
- [Python部署](../python)
- [视觉模型预测结果](../../../../../docs/api/vision_results/)
- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,156 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <stdlib.h>
#include "fastdeploy_capi/vision.h"
// Platform-specific path separator used when composing model/config file paths.
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
// Run PaddleClas classification on CPU.
// model_dir:  directory containing inference.pdmodel, inference.pdiparams and
//             inference_cls.yaml (files exported by PaddleClas).
// image_file: path to the input image.
void CpuInfer(const char* model_dir, const char* image_file) {
  char model_file[100];
  char params_file[100];
  char config_file[100];
  // Pass the real buffer size to snprintf (it always NUL-terminates);
  // avoids the hand-maintained `max_size = 99` constant. Overlong paths
  // are truncated silently.
  snprintf(model_file, sizeof(model_file), "%s%c%s", model_dir, sep,
           "inference.pdmodel");
  snprintf(params_file, sizeof(params_file), "%s%c%s", model_dir, sep,
           "inference.pdiparams");
  snprintf(config_file, sizeof(config_file), "%s%c%s", model_dir, sep,
           "inference_cls.yaml");
  FD_C_RuntimeOptionWrapper* option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapperUseCpu(option);
  FD_C_PaddleClasModelWrapper* model = FD_C_CreatePaddleClasModelWrapper(
      model_file, params_file, config_file, option, PADDLE);
  if (!FD_C_PaddleClasModelWrapperInitialized(model)) {
    printf("Failed to initialize.\n");
    FD_C_DestroyRuntimeOptionWrapper(option);
    FD_C_DestroyPaddleClasModelWrapper(model);
    return;
  }
  FD_C_Mat im = FD_C_Imread(image_file);
  FD_C_ClassifyResult* result =
      (FD_C_ClassifyResult*)malloc(sizeof(FD_C_ClassifyResult));
  if (!FD_C_PaddleClasModelWrapperPredict(model, im, result)) {
    printf("Failed to predict.\n");
    FD_C_DestroyRuntimeOptionWrapper(option);
    FD_C_DestroyPaddleClasModelWrapper(model);
    FD_C_DestroyMat(im);
    free(result);
    return;
  }
  // Print the result. You can access fields of FD_C_ClassifyResult directly
  // (see the ClassifyResult API doc), or wrap it in
  // FD_C_ClassifyResultWrapper, which contains the C++
  // fastdeploy::vision::ClassifyResult, and use FD_C_ClassifyResultWrapperStr
  // to call fastdeploy::vision::ClassifyResult::Str(). For convenience we use
  // the wrapper here.
  FD_C_ClassifyResultWrapper* result_wrapper =
      FD_C_CreateClassifyResultWrapperFromData(result);
  printf("%s", FD_C_ClassifyResultWrapperStr(result_wrapper));
  FD_C_DestroyRuntimeOptionWrapper(option);
  FD_C_DestroyPaddleClasModelWrapper(model);
  FD_C_DestroyClassifyResultWrapper(result_wrapper);
  FD_C_DestroyClassifyResult(result);
  FD_C_DestroyMat(im);
}
// Run PaddleClas classification on GPU 0.
// model_dir:  directory containing inference.pdmodel, inference.pdiparams and
//             inference_cls.yaml (files exported by PaddleClas).
// image_file: path to the input image.
void GpuInfer(const char* model_dir, const char* image_file) {
  char model_file[100];
  char params_file[100];
  char config_file[100];
  // Pass the real buffer size to snprintf (it always NUL-terminates);
  // avoids the hand-maintained `max_size = 99` constant. Overlong paths
  // are truncated silently.
  snprintf(model_file, sizeof(model_file), "%s%c%s", model_dir, sep,
           "inference.pdmodel");
  snprintf(params_file, sizeof(params_file), "%s%c%s", model_dir, sep,
           "inference.pdiparams");
  snprintf(config_file, sizeof(config_file), "%s%c%s", model_dir, sep,
           "inference_cls.yaml");
  FD_C_RuntimeOptionWrapper* option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapperUseGpu(option, 0);  // device id 0
  FD_C_PaddleClasModelWrapper* model = FD_C_CreatePaddleClasModelWrapper(
      model_file, params_file, config_file, option, PADDLE);
  if (!FD_C_PaddleClasModelWrapperInitialized(model)) {
    printf("Failed to initialize.\n");
    FD_C_DestroyRuntimeOptionWrapper(option);
    FD_C_DestroyPaddleClasModelWrapper(model);
    return;
  }
  FD_C_Mat im = FD_C_Imread(image_file);
  FD_C_ClassifyResult* result =
      (FD_C_ClassifyResult*)malloc(sizeof(FD_C_ClassifyResult));
  if (!FD_C_PaddleClasModelWrapperPredict(model, im, result)) {
    printf("Failed to predict.\n");
    FD_C_DestroyRuntimeOptionWrapper(option);
    FD_C_DestroyPaddleClasModelWrapper(model);
    FD_C_DestroyMat(im);
    free(result);
    return;
  }
  // Print the result. You can access fields of FD_C_ClassifyResult directly
  // (see the ClassifyResult API doc), or wrap it in
  // FD_C_ClassifyResultWrapper, which contains the C++
  // fastdeploy::vision::ClassifyResult, and use FD_C_ClassifyResultWrapperStr
  // to call fastdeploy::vision::ClassifyResult::Str(). For convenience we use
  // the wrapper here.
  FD_C_ClassifyResultWrapper* result_wrapper =
      FD_C_CreateClassifyResultWrapperFromData(result);
  printf("%s", FD_C_ClassifyResultWrapperStr(result_wrapper));
  FD_C_DestroyRuntimeOptionWrapper(option);
  FD_C_DestroyPaddleClasModelWrapper(model);
  FD_C_DestroyClassifyResultWrapper(result_wrapper);
  FD_C_DestroyClassifyResult(result);
  FD_C_DestroyMat(im);
}
// Entry point. argv[1]: model directory, argv[2]: image path,
// argv[3]: run option (0 = CPU, 1 = GPU).
int main(int argc, char* argv[]) {
  if (argc < 4) {
    // NOTE: usage example fixed — the original referenced a PPYOLOE model
    // directory copied from the detection demo.
    printf(
        "Usage: infer_demo path/to/model_dir path/to/image run_option, "
        "e.g ./infer_demo ./ResNet50_vd_infer ./test.jpeg 0"
        "\n");
    printf(
        "The data type of run_option is int, 0: run with cpu; 1: run with gpu"
        "\n");
    return -1;
  }
  const int run_option = atoi(argv[3]);  // parse once instead of per-branch
  if (run_option == 0) {
    CpuInfer(argv[1], argv[2]);
  } else if (run_option == 1) {
    GpuInfer(argv[1], argv[2]);
  } else {
    // Previously an unknown value was silently ignored.
    printf("Unsupported run_option: %d\n", run_option);
    return -1;
  }
  return 0;
}

View File

@@ -0,0 +1,22 @@
# CMake project for the FastDeploy PaddleClas C# demo (built with Visual Studio).
PROJECT(infer_demo CSharp)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Set the C# language version (defaults to 3.0 if not set).
set(CMAKE_CSharp_FLAGS "/langversion:10")
set(CMAKE_DOTNET_TARGET_FRAMEWORK "net6.0")
set(CMAKE_DOTNET_SDK "Microsoft.NET.Sdk")
# Path of the downloaded and extracted FastDeploy SDK.
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeployCSharp.cmake)
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cs)
# .NET assembly references for the target (provided by FastDeployCSharp.cmake).
set_property(TARGET infer_demo PROPERTY VS_DOTNET_REFERENCES
${FASTDEPLOY_DOTNET_REFERENCES}
)
# NuGet package references (e.g. OpenCvSharp), restored via `nuget restore`.
set_property(TARGET infer_demo
PROPERTY VS_PACKAGE_REFERENCES ${FASTDEPLOY_PACKAGE_REFERENCES})

View File

@@ -0,0 +1,99 @@
English | [简体中文](README_CN.md)
# PaddleClas C# Deployment Example
This directory provides example `infer.cs` to quickly complete the deployment of PaddleClas models on CPU/GPU.
Before deployment, two steps require confirmation
- 1. Software and hardware should meet the requirements. Please refer to [FastDeploy Environment Requirements](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
- 2. Download the precompiled deployment library and samples code according to your development environment. Refer to [FastDeploy Precompiled Library](../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)
Please follow below instructions to compile and test in Windows. FastDeploy version 1.0.4 or above (x.x.x>=1.0.4) is required to support this model.
## 1. Download C# package management tool nuget client
> https://dist.nuget.org/win-x86-commandline/v6.4.0/nuget.exe
Add nuget program into system variable **PATH**
## 2. Download model and image for test
> https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz # (extract after downloading)
> https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
## 3. Compile example code
Open `x64 Native Tools Command Prompt for VS 2019` command tool on Windows, cd to the demo path of paddleclas and execute commands
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\classification\paddleclas\csharp
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=D:\Download\fastdeploy-win-x64-gpu-x.x.x -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2"
nuget restore
msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
```
For more information about how to use FastDeploy SDK to compile a project with Visual Studio 2019. Please refer to
- [Using the FastDeploy C++ SDK on Windows Platform](../../../../../docs/en/faq/use_sdk_on_windows.md)
## 4. Execute compiled program
fastdeploy.dll and related dynamic libraries are required by the program. FastDeploy provides a script to copy all required dlls to your program path.
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x
fastdeploy_init.bat install %cd% D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\classification\paddleclas\csharp\build\Release
```
Then you can run your program and test the model with image
```shell
cd Release
# CPU inference
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 0
# GPU inference
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 1
```
## PaddleClas C# Interface
### Model Class
```c#
fastdeploy.vision.classification.PaddleClasModel(
string model_file,
string params_file,
    string config_file,
fastdeploy.RuntimeOption runtime_option = null,
fastdeploy.ModelFormat model_format = ModelFormat.PADDLE)
```
> PaddleClasModel initialization.
> **Params**
>> * **model_file**(str): Model file path
>> * **params_file**(str): Parameter file path
>> * **config_file**(str): Configuration file path, which is the deployment yaml file exported by PaddleClas
>> * **runtime_option**(RuntimeOption): Backend inference configuration. null by default, which is the default configuration
>> * **model_format**(ModelFormat): Model format. Paddle format by default
#### Predict Function
```c#
fastdeploy.ClassifyResult Predict(OpenCvSharp.Mat im)
```
> Model prediction interface. Input images and output results directly.
>
> **Params**
>
>> * **im**(Mat): Input image in HWC layout with BGR channel order
>
> **Return**
>
>> * **result**(ClassifyResult): The classification result, including label_id, and the corresponding confidence. Refer to [Visual Model Prediction Results](../../../../../docs/api/vision_results/) for the description of ClassifyResult
- [Model Description](../../)
- [Python Deployment](../python)
- [Vision Model prediction results](../../../../../docs/api/vision_results/)
- [How to switch the model inference backend engine](../../../../../docs/en/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,101 @@
[English](README.md) | 简体中文
# PaddleClas C#部署示例
本目录下提供`infer.cs`来调用C# API快速完成PaddleClas系列模型在CPU/GPU上部署的示例。
在部署前,需确认以下两个步骤
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. 根据开发环境下载预编译部署库和samples代码参考[FastDeploy预编译库](../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
以Linux上ResNet50_vd推理为例在本目录执行如下命令即可完成编译测试支持此模型需保证FastDeploy版本1.0.4以上(x.x.x>=1.0.4)
## 1. 下载C#包管理程序nuget客户端
> https://dist.nuget.org/win-x86-commandline/v6.4.0/nuget.exe
下载完成后将该程序添加到环境变量**PATH**中
## 2. 下载模型文件和测试图片
> https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz # (下载后解压缩)
> https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
## 3. 编译示例代码
本文档编译的示例代码可在解压的库中找到编译工具依赖VS 2019的安装**Windows打开x64 Native Tools Command Prompt for VS 2019命令工具**,通过如下命令开始编译
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\classification\paddleclas\csharp
mkdir build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=D:\Download\fastdeploy-win-x64-gpu-x.x.x -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2"
nuget restore
msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
```
关于使用Visual Studio 2019创建sln工程或者CMake工程等方式编译的更详细信息可参考如下文档
- [在 Windows 使用 FastDeploy C++ SDK](../../../../../docs/cn/faq/use_sdk_on_windows.md)
- [FastDeploy C++库在Windows上的多种使用方式](../../../../../docs/cn/faq/use_sdk_on_windows_build.md)
## 4. 运行可执行程序
注意Windows上运行时需要将FastDeploy依赖的库拷贝至可执行程序所在目录, 或者配置环境变量。FastDeploy提供了工具帮助我们快速将所有依赖库拷贝至可执行程序所在目录,通过如下命令将所有依赖的dll文件拷贝至可执行程序所在的目录(可能生成的可执行文件在Release下还有一层目录这里假设生成的可执行文件在Release处)
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x
fastdeploy_init.bat install %cd% D:\Download\fastdeploy-win-x64-gpu-x.x.x\examples\vision\classification\paddleclas\csharp\build\Release
```
将dll拷贝到当前路径后准备好模型和图片使用如下命令运行可执行程序即可
```shell
cd Release
# CPU推理
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 0
# GPU推理
./infer_demo ResNet50_vd_infer ILSVRC2012_val_00000010.jpeg 1
```
## PaddleClas C#接口
### 模型
```c#
fastdeploy.vision.classification.PaddleClasModel(
string model_file,
string params_file,
string config_file,
fastdeploy.RuntimeOption runtime_option = null,
fastdeploy.ModelFormat model_format = ModelFormat.PADDLE)
```
> PaddleClasModel模型加载和初始化。
> **参数**
>> * **model_file**(str): 模型文件路径
>> * **params_file**(str): 参数文件路径
>> * **config_file**(str): 配置文件路径即PaddleClas导出的部署yaml文件
>> * **runtime_option**(RuntimeOption): 后端推理配置默认为null即采用默认配置
>> * **model_format**(ModelFormat): 模型格式默认为PADDLE格式
#### Predict函数
```c#
fastdeploy.ClassifyResult Predict(OpenCvSharp.Mat im)
```
> 模型预测接口,输入图像直接输出检测结果。
>
> **参数**
>
>> * **im**(Mat): 输入图像注意需为HWCBGR格式
>>
> **返回值**
>
>> * **result**: 分类结果包括label_id以及相应的置信度, ClassifyResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
- [模型介绍](../../)
- [Python部署](../python)
- [视觉模型预测结果](../../../../../docs/api/vision_results/)
- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md)

View File

@@ -0,0 +1,58 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.IO;
using System.Runtime.InteropServices;
using OpenCvSharp;
using fastdeploy;
namespace Test
{
  /// <summary>
  /// FastDeploy PaddleClas classification demo:
  /// loads a Paddle model and classifies a single image on CPU or GPU.
  /// </summary>
  public class TestPaddleClas
  {
    /// <param name="args">
    /// args[0]: model directory; args[1]: image path;
    /// args[2]: run option (0 = CPU, otherwise GPU).
    /// </param>
    public static void Main(string[] args)
    {
      if (args.Length < 3) {
        Console.WriteLine(
            "Usage: infer_demo path/to/model_dir path/to/image run_option, " +
            "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
        );
        Console.WriteLine( "The data type of run_option is int, 0: run with cpu; 1: run with gpu");
        return;
      }
      string model_dir = args[0];
      string image_path = args[1];
      // Paths of the files exported by PaddleClas.
      string model_file = model_dir + "\\" + "inference.pdmodel";
      string params_file = model_dir + "\\" + "inference.pdiparams";
      string config_file = model_dir + "\\" + "inference_cls.yaml";
      RuntimeOption runtimeoption = new RuntimeOption();
      int device_option = Int32.Parse(args[2]);
      if(device_option==0){
        runtimeoption.UseCpu();
      }else{
        runtimeoption.UseGpu();
      }
      fastdeploy.vision.classification.PaddleClasModel model = new fastdeploy.vision.classification.PaddleClasModel(model_file, params_file, config_file, runtimeoption, ModelFormat.PADDLE);
      if(!model.Initialized()){
        Console.WriteLine("Failed to initialize.\n");
        // Bug fix: bail out instead of predicting with an uninitialized model.
        return;
      }
      Mat image = Cv2.ImRead(image_path);
      fastdeploy.vision.ClassifyResult res = model.Predict(image);
      Console.WriteLine(res.ToString());
    }
  }
}

View File

@@ -78,7 +78,7 @@ void FD_C_RuntimeOptionWrapperUseGpu(
```c
FD_C_PPYOLOEWrapper* FD_C_CreatesPPYOLOEWrapper(
FD_C_PPYOLOEWrapper* FD_C_CreatePPYOLOEWrapper(
const char* model_file, const char* params_file, const char* config_file,
FD_C_RuntimeOptionWrapper* runtime_option,
const FD_C_ModelFormat model_format)
@@ -137,7 +137,7 @@ FD_C_Bool FD_C_Imwrite(const char* savepath, FD_C_Mat img);
```c
FD_C_Bool FD_C_PPYOLOEWrapperPredict(
__fd_take FD_C_PPYOLOEWrapper* fd_c_ppyoloe_wrapper, FD_C_Mat img,
FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper)
FD_C_DetectionResult* fd_c_detection_result)
```
>
> Predict an image, and generate detection result.
@@ -145,37 +145,11 @@ FD_C_Bool FD_C_PPYOLOEWrapperPredict(
> **Params**
> * **fd_c_ppyoloe_wrapper**(FD_C_PPYOLOEWrapper*): pointer to manipulate PPYOLOE object
> * **img**FD_C_Mat: pointer to cv::Mat object, which can be obained by FD_C_Imread interface
> * **result**FD_C_DetectionResultWrapper*): Detection result, including detection box and confidence of each box. Refer to [Vision Model Prediction Result](../../../../../docs/api/vision_results/) for DetectionResult
> * **fd_c_detection_result**FD_C_DetectionResult*): Detection result, including detection box and confidence of each box. Refer to [Vision Model Prediction Result](../../../../../docs/api/vision_results/) for DetectionResult
#### Result
```c
FD_C_DetectionResultWrapper* FD_C_CreateDetectionResultWrapper();
```
>
> Create a DetectionResult object to keep the detection resultreturn a pointer to manipulate it.
>
> **Return**
> * **fd_c_detection_result_wrapper**(FD_C_DetectionResultWrapper*): pointer to manipulate DetectionResult object
```c
FD_C_DetectionResult* FD_C_DetectionResultWrapperGetData(
FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper)
```
>
> Get the C DetectionResult structure from FD_C_DetectionResultWrapper, which can access the fileds directly.
>
> **Params**
> * **fd_c_detection_result_wrapper**(FD_C_DetectionResultWrapper*): pointer to manipulate DetectionResult object
>
> **Return**
> * **fd_c_detection_result**(FD_C_DetectionResult*): pointer to C DetectionResult structure
```c
FD_C_Mat FD_C_VisDetection(FD_C_Mat im, FD_C_DetectionResult* fd_detection_result,
float score_threshold, int line_size, float font_size);

View File

@@ -82,7 +82,7 @@ void FD_C_RuntimeOptionWrapperUseGpu(
```c
FD_C_PPYOLOEWrapper* FD_C_CreatesPPYOLOEWrapper(
FD_C_PPYOLOEWrapper* FD_C_CreatePPYOLOEWrapper(
const char* model_file, const char* params_file, const char* config_file,
FD_C_RuntimeOptionWrapper* runtime_option,
const FD_C_ModelFormat model_format)
@@ -141,7 +141,7 @@ FD_C_Bool FD_C_Imwrite(const char* savepath, FD_C_Mat img);
```c
FD_C_Bool FD_C_PPYOLOEWrapperPredict(
__fd_take FD_C_PPYOLOEWrapper* fd_c_ppyoloe_wrapper, FD_C_Mat img,
FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper)
FD_C_DetectionResult* fd_c_detection_result)
```
>
> 模型预测接口,输入图像直接并生成检测结果。
@@ -149,37 +149,11 @@ FD_C_Bool FD_C_PPYOLOEWrapperPredict(
> **参数**
> * **fd_c_ppyoloe_wrapper**(FD_C_PPYOLOEWrapper*): 指向PPYOLOE模型的指针
> * **img**FD_C_Mat: 输入图像的指针指向cv::Mat对象可以调用FD_C_Imread读取图像获取
> * **result**FD_C_DetectionResultWrapper*): 指向检测结果的指针,检测结果包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
> * **fd_c_detection_result**FD_C_DetectionResult*): 指向检测结果的指针,检测结果包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
#### Predict结果
```c
FD_C_DetectionResultWrapper* FD_C_CreateDetectionResultWrapper();
```
>
> 创建一个DetectionResult对象用来保存推理的结果并返回所创建的DetectionResult对象的指针。
>
> **返回**
> * **fd_c_detection_result_wrapper**(FD_C_DetectionResultWrapper*): 指向DetectionResult对象的指针
```c
FD_C_DetectionResult* FD_C_DetectionResultWrapperGetData(
FD_C_DetectionResultWrapper* fd_c_detection_result_wrapper)
```
>
> 从DetectionResult对象中提取纯C结构的DetectionResult结果并返回结构指针通过该指针可直接返回结构中的字段。
>
> **参数**
> * **fd_c_detection_result_wrapper**(FD_C_DetectionResultWrapper*): 指向DetectionResult对象的指针
>
> **返回**
> * **fd_c_detection_result**(FD_C_DetectionResult*): 指向纯C结构的DetectionResult的指针
```c
FD_C_Mat FD_C_VisDetection(FD_C_Mat im, FD_C_DetectionResult* fd_detection_result,
float score_threshold, int line_size, float font_size);
@@ -189,7 +163,7 @@ FD_C_Mat FD_C_VisDetection(FD_C_Mat im, FD_C_DetectionResult* fd_detection_resul
>
> **参数**
> * **im**(FD_C_Mat): 指向输入图像的指针
> * **fd_detection_result**(FD_C_DetectionResult*): 指向纯C结构DetectionResult的指针
> * **fd_detection_result**(FD_C_DetectionResult*): 指向FD_C_DetectionResult结构的指针
> * **score_threshold**(float): 检测阈值
> * **line_size**(int): 检测框线大小
> * **font_size**(float): 检测框字体大小

View File

@@ -35,21 +35,30 @@ void CpuInfer(const char* model_dir, const char* image_file) {
FD_C_RuntimeOptionWrapper* option = FD_C_CreateRuntimeOptionWrapper();
FD_C_RuntimeOptionWrapperUseCpu(option);
FD_C_PPYOLOEWrapper* model = FD_C_CreatesPPYOLOEWrapper(
FD_C_PPYOLOEWrapper* model = FD_C_CreatePPYOLOEWrapper(
model_file, params_file, config_file, option, PADDLE);
FD_C_Mat im = FD_C_Imread(image_file);
FD_C_DetectionResultWrapper* result_wrapper =
FD_C_CreateDetectionResultWrapper();
if (!FD_C_PPYOLOEWrapperPredict(model, im, result_wrapper)) {
printf("Failed to predict.\n");
if (!FD_C_PPYOLOEWrapperInitialized(model)) {
printf("Failed to initialize.\n");
FD_C_DestroyRuntimeOptionWrapper(option);
FD_C_DestroyPPYOLOEWrapper(model);
return;
}
FD_C_Mat im = FD_C_Imread(image_file);
FD_C_DetectionResult* result =
FD_C_DetectionResultWrapperGetData(result_wrapper);
(FD_C_DetectionResult*)malloc(sizeof(FD_C_DetectionResult));
if (!FD_C_PPYOLOEWrapperPredict(model, im, result)) {
printf("Failed to predict.\n");
FD_C_DestroyRuntimeOptionWrapper(option);
FD_C_DestroyPPYOLOEWrapper(model);
FD_C_DestroyMat(im);
free(result);
return;
}
FD_C_Mat vis_im = FD_C_VisDetection(im, result, 0.5, 1, 0.5);
FD_C_Imwrite("vis_result.jpg", vis_im);
@@ -57,7 +66,6 @@ void CpuInfer(const char* model_dir, const char* image_file) {
FD_C_DestroyRuntimeOptionWrapper(option);
FD_C_DestroyPPYOLOEWrapper(model);
FD_C_DestroyDetectionResultWrapper(result_wrapper);
FD_C_DestroyDetectionResult(result);
FD_C_DestroyMat(im);
FD_C_DestroyMat(vis_im);
@@ -75,21 +83,30 @@ void GpuInfer(const char* model_dir, const char* image_file) {
FD_C_RuntimeOptionWrapper* option = FD_C_CreateRuntimeOptionWrapper();
FD_C_RuntimeOptionWrapperUseGpu(option, 0);
FD_C_PPYOLOEWrapper* model = FD_C_CreatesPPYOLOEWrapper(
FD_C_PPYOLOEWrapper* model = FD_C_CreatePPYOLOEWrapper(
model_file, params_file, config_file, option, PADDLE);
FD_C_Mat im = FD_C_Imread(image_file);
FD_C_DetectionResultWrapper* result_wrapper =
FD_C_CreateDetectionResultWrapper();
if (!FD_C_PPYOLOEWrapperPredict(model, im, result_wrapper)) {
printf("Failed to predict.\n");
if (!FD_C_PPYOLOEWrapperInitialized(model)) {
printf("Failed to initialize.\n");
FD_C_DestroyRuntimeOptionWrapper(option);
FD_C_DestroyPPYOLOEWrapper(model);
return;
}
FD_C_Mat im = FD_C_Imread(image_file);
FD_C_DetectionResult* result =
FD_C_DetectionResultWrapperGetData(result_wrapper);
(FD_C_DetectionResult*)malloc(sizeof(FD_C_DetectionResult));
if (!FD_C_PPYOLOEWrapperPredict(model, im, result)) {
printf("Failed to predict.\n");
FD_C_DestroyRuntimeOptionWrapper(option);
FD_C_DestroyPPYOLOEWrapper(model);
FD_C_DestroyMat(im);
free(result);
return;
}
FD_C_Mat vis_im = FD_C_VisDetection(im, result, 0.5, 1, 0.5);
FD_C_Imwrite("vis_result.jpg", vis_im);
@@ -97,7 +114,6 @@ void GpuInfer(const char* model_dir, const char* image_file) {
FD_C_DestroyRuntimeOptionWrapper(option);
FD_C_DestroyPPYOLOEWrapper(model);
FD_C_DestroyDetectionResultWrapper(result_wrapper);
FD_C_DestroyDetectionResult(result);
FD_C_DestroyMat(im);
FD_C_DestroyMat(vis_im);

View File

@@ -39,7 +39,7 @@ msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
## 4. 运行可执行程序
注意Windows上运行时需要将FastDeploy依赖的库拷贝至可执行程序所在目录, 或者配置环境变量。FastDeploy提供了工具帮助我们快速将所有依赖库拷贝至可执行程序所在目录,通过如下命令将所有依赖的dll文件拷贝至可执行程序所在的目录
注意Windows上运行时需要将FastDeploy依赖的库拷贝至可执行程序所在目录, 或者配置环境变量。FastDeploy提供了工具帮助我们快速将所有依赖库拷贝至可执行程序所在目录,通过如下命令将所有依赖的dll文件拷贝至可执行程序所在的目录(可能生成的可执行文件在Release下还有一层目录这里假设生成的可执行文件在Release处)
```shell
cd D:\Download\fastdeploy-win-x64-gpu-x.x.x

View File

@@ -44,13 +44,16 @@ namespace Test
}else{
runtimeoption.UseGpu();
}
vision.detection.PPYOLOE model = new vision.detection.PPYOLOE(model_file, params_file, config_file, runtimeoption, ModelFormat.PADDLE);
fastdeploy.vision.detection.PPYOLOE model = new fastdeploy.vision.detection.PPYOLOE(model_file, params_file, config_file, runtimeoption, ModelFormat.PADDLE);
if(!model.Initialized()){
Console.WriteLine("Failed to initialize.\n");
}
Mat image = Cv2.ImRead(image_path);
vision.DetectionResult res = model.Predict(image);
Mat res_img = vision.Visualize.VisDetection(image, res, 0, 1, 0.5f);
fastdeploy.vision.DetectionResult res = model.Predict(image);
Console.WriteLine(res.ToString());
Mat res_img = fastdeploy.vision.Visualize.VisDetection(image, res, 0, 1, 0.5f);
Cv2.ImShow("result.png", res_img);
Cv2.WaitKey(0);
}
}

View File

@@ -0,0 +1,13 @@
# CMake project for the FastDeploy PP-OCR C demo.
PROJECT(infer_demo C)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# Path of the downloaded and extracted FastDeploy SDK.
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
# Add FastDeploy header directories.
include_directories(${FASTDEPLOY_INCS})
add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.c)
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})

View File

@@ -0,0 +1,262 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <stdlib.h>
#include "fastdeploy_capi/vision.h"
// Platform-specific path separator used when composing model file paths.
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
// Run the PP-OCRv2 pipeline (detection -> classification -> recognition)
// on CPU and visualize the result to ./vis_result.jpg.
// det_model_dir/cls_model_dir/rec_model_dir: directories each containing
//   inference.pdmodel and inference.pdiparams.
// rec_label_file: label file for the recognizer. image_file: input image.
void CpuInfer(const char* det_model_dir, const char* cls_model_dir,
              const char* rec_model_dir, const char* rec_label_file,
              const char* image_file) {
  char det_model_file[100];
  char det_params_file[100];
  char cls_model_file[100];
  char cls_params_file[100];
  char rec_model_file[100];
  char rec_params_file[100];
  // Pass the real buffer size to snprintf (it always NUL-terminates);
  // avoids the hand-maintained `max_size = 99` constant.
  snprintf(det_model_file, sizeof(det_model_file), "%s%c%s", det_model_dir,
           sep, "inference.pdmodel");
  snprintf(det_params_file, sizeof(det_params_file), "%s%c%s", det_model_dir,
           sep, "inference.pdiparams");
  snprintf(cls_model_file, sizeof(cls_model_file), "%s%c%s", cls_model_dir,
           sep, "inference.pdmodel");
  snprintf(cls_params_file, sizeof(cls_params_file), "%s%c%s", cls_model_dir,
           sep, "inference.pdiparams");
  snprintf(rec_model_file, sizeof(rec_model_file), "%s%c%s", rec_model_dir,
           sep, "inference.pdmodel");
  snprintf(rec_params_file, sizeof(rec_params_file), "%s%c%s", rec_model_dir,
           sep, "inference.pdiparams");
  // Each stage gets its own runtime option, all configured for CPU.
  FD_C_RuntimeOptionWrapper* det_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapper* cls_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapper* rec_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapperUseCpu(det_option);
  FD_C_RuntimeOptionWrapperUseCpu(cls_option);
  FD_C_RuntimeOptionWrapperUseCpu(rec_option);
  FD_C_DBDetectorWrapper* det_model = FD_C_CreateDBDetectorWrapper(
      det_model_file, det_params_file, det_option, PADDLE);
  FD_C_ClassifierWrapper* cls_model = FD_C_CreateClassifierWrapper(
      cls_model_file, cls_params_file, cls_option, PADDLE);
  FD_C_RecognizerWrapper* rec_model = FD_C_CreateRecognizerWrapper(
      rec_model_file, rec_params_file, rec_label_file, rec_option, PADDLE);
  FD_C_PPOCRv2Wrapper* ppocr_v2 =
      FD_C_CreatePPOCRv2Wrapper(det_model, cls_model, rec_model);
  if (!FD_C_PPOCRv2WrapperInitialized(ppocr_v2)) {
    printf("Failed to initialize.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv2Wrapper(ppocr_v2);
    return;
  }
  FD_C_Mat im = FD_C_Imread(image_file);
  FD_C_OCRResult* result = (FD_C_OCRResult*)malloc(sizeof(FD_C_OCRResult));
  if (!FD_C_PPOCRv2WrapperPredict(ppocr_v2, im, result)) {
    printf("Failed to predict.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv2Wrapper(ppocr_v2);
    FD_C_DestroyMat(im);
    free(result);
    return;
  }
  // Print the result. You can access fields of FD_C_OCRResult directly
  // (see the OCRResult API doc), or wrap it in FD_C_OCRResultWrapper, which
  // contains the C++ fastdeploy::vision::OCRResult, and use
  // FD_C_OCRResultWrapperStr to call fastdeploy::vision::OCRResult::Str().
  // For convenience we use the wrapper here.
  FD_C_OCRResultWrapper* result_wrapper =
      FD_C_CreateOCRResultWrapperFromData(result);
  printf("%s", FD_C_OCRResultWrapperStr(result_wrapper));
  FD_C_Mat vis_im = FD_C_VisOcr(im, result);
  FD_C_Imwrite("vis_result.jpg", vis_im);
  printf("Visualized result saved in ./vis_result.jpg\n");
  FD_C_DestroyRuntimeOptionWrapper(det_option);
  FD_C_DestroyRuntimeOptionWrapper(cls_option);
  FD_C_DestroyRuntimeOptionWrapper(rec_option);
  FD_C_DestroyClassifierWrapper(cls_model);
  FD_C_DestroyDBDetectorWrapper(det_model);
  FD_C_DestroyRecognizerWrapper(rec_model);
  FD_C_DestroyPPOCRv2Wrapper(ppocr_v2);
  FD_C_DestroyOCRResultWrapper(result_wrapper);
  FD_C_DestroyOCRResult(result);
  FD_C_DestroyMat(im);
  // Bug fix: the visualization Mat was previously leaked.
  FD_C_DestroyMat(vis_im);
}
/** \brief Run the PP-OCRv2 pipeline (detector + classifier + recognizer)
 * on GPU 0 and write the visualized result to ./vis_result.jpg.
 *
 * \param[in] det_model_dir  Directory holding the detection model files
 *                           (inference.pdmodel / inference.pdiparams).
 * \param[in] cls_model_dir  Directory holding the classification model files.
 * \param[in] rec_model_dir  Directory holding the recognition model files.
 * \param[in] rec_label_file Path of the recognition label file.
 * \param[in] image_file     Path of the image to run inference on.
 */
void GpuInfer(const char* det_model_dir, const char* cls_model_dir,
              const char* rec_model_dir, const char* rec_label_file,
              const char* image_file) {
  char det_model_file[100];
  char det_params_file[100];
  char cls_model_file[100];
  char cls_params_file[100];
  char rec_model_file[100];
  char rec_params_file[100];
  // Compose "<dir><sep>inference.pdmodel"-style paths. snprintf truncates
  // (but still NUL-terminates) if a directory name exceeds the buffers.
  snprintf(det_model_file, sizeof(det_model_file), "%s%c%s", det_model_dir,
           sep, "inference.pdmodel");
  snprintf(det_params_file, sizeof(det_params_file), "%s%c%s", det_model_dir,
           sep, "inference.pdiparams");
  snprintf(cls_model_file, sizeof(cls_model_file), "%s%c%s", cls_model_dir,
           sep, "inference.pdmodel");
  snprintf(cls_params_file, sizeof(cls_params_file), "%s%c%s", cls_model_dir,
           sep, "inference.pdiparams");
  snprintf(rec_model_file, sizeof(rec_model_file), "%s%c%s", rec_model_dir,
           sep, "inference.pdmodel");
  snprintf(rec_params_file, sizeof(rec_params_file), "%s%c%s", rec_model_dir,
           sep, "inference.pdiparams");
  // One runtime option per sub-model; all three are placed on GPU 0.
  FD_C_RuntimeOptionWrapper* det_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapper* cls_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapper* rec_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapperUseGpu(det_option, 0);
  FD_C_RuntimeOptionWrapperUseGpu(cls_option, 0);
  FD_C_RuntimeOptionWrapperUseGpu(rec_option, 0);
  FD_C_DBDetectorWrapper* det_model = FD_C_CreateDBDetectorWrapper(
      det_model_file, det_params_file, det_option, PADDLE);
  FD_C_ClassifierWrapper* cls_model = FD_C_CreateClassifierWrapper(
      cls_model_file, cls_params_file, cls_option, PADDLE);
  FD_C_RecognizerWrapper* rec_model = FD_C_CreateRecognizerWrapper(
      rec_model_file, rec_params_file, rec_label_file, rec_option, PADDLE);
  FD_C_PPOCRv2Wrapper* ppocr_v2 =
      FD_C_CreatePPOCRv2Wrapper(det_model, cls_model, rec_model);
  if (!FD_C_PPOCRv2WrapperInitialized(ppocr_v2)) {
    printf("Failed to initialize.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv2Wrapper(ppocr_v2);
    return;
  }
  FD_C_Mat im = FD_C_Imread(image_file);
  FD_C_OCRResult* result = (FD_C_OCRResult*)malloc(sizeof(FD_C_OCRResult));
  // malloc can fail; never hand a NULL result buffer to Predict.
  if (result == NULL) {
    printf("Failed to allocate the OCR result.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv2Wrapper(ppocr_v2);
    FD_C_DestroyMat(im);
    return;
  }
  if (!FD_C_PPOCRv2WrapperPredict(ppocr_v2, im, result)) {
    printf("Failed to predict.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv2Wrapper(ppocr_v2);
    FD_C_DestroyMat(im);
    free(result);
    return;
  }
  // Print the result. You can access the fields of FD_C_OCRResult directly
  // (see the OCRResult API doc), or wrap it in an FD_C_OCRResultWrapper,
  // which contains the C++ structure fastdeploy::vision::OCRResult, and use
  // FD_C_OCRResultWrapperStr to call fastdeploy::vision::OCRResult::Str().
  // For convenience, the wrapper route is used here.
  FD_C_OCRResultWrapper* result_wrapper =
      FD_C_CreateOCRResultWrapperFromData(result);
  printf("%s", FD_C_OCRResultWrapperStr(result_wrapper));
  FD_C_Mat vis_im = FD_C_VisOcr(im, result);
  FD_C_Imwrite("vis_result.jpg", vis_im);
  printf("Visualized result saved in ./vis_result.jpg\n");
  // Release all resources, including the visualization Mat, which was
  // previously leaked.
  FD_C_DestroyRuntimeOptionWrapper(det_option);
  FD_C_DestroyRuntimeOptionWrapper(cls_option);
  FD_C_DestroyRuntimeOptionWrapper(rec_option);
  FD_C_DestroyClassifierWrapper(cls_model);
  FD_C_DestroyDBDetectorWrapper(det_model);
  FD_C_DestroyRecognizerWrapper(rec_model);
  FD_C_DestroyPPOCRv2Wrapper(ppocr_v2);
  FD_C_DestroyOCRResultWrapper(result_wrapper);
  FD_C_DestroyOCRResult(result);
  FD_C_DestroyMat(vis_im);
  FD_C_DestroyMat(im);
}
/** \brief Demo entry point.
 *
 * Expects six arguments: det_model_dir, cls_model_dir, rec_model_dir,
 * rec_label_file, image_file and run_option (0 = CPU, 1 = GPU).
 *
 * \return 0 on success, -1 on bad usage or an invalid run_option.
 */
int main(int argc, char* argv[]) {
  if (argc < 7) {
    printf(
        "Usage: infer_demo path/to/det_model path/to/cls_model "
        "path/to/rec_model path/to/rec_label_file path/to/image "
        "run_option, "
        "e.g ./infer_demo ./ch_PP-OCRv2_det_infer "
        "./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer "
        "./ppocr_keys_v1.txt ./12.jpg 0\n");
    printf(
        "The data type of run_option is int, 0: run with cpu; 1: run with gpu"
        "\n");
    return -1;
  }
  // Parse the device flag once instead of calling atoi() in each branch.
  int run_option = atoi(argv[6]);
  if (run_option == 0) {
    CpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
  } else if (run_option == 1) {
    GpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
  } else {
    // Previously unknown values were silently ignored; report them instead.
    printf("Invalid run_option %s, expected 0 (cpu) or 1 (gpu).\n", argv[6]);
    return -1;
  }
  return 0;
}

View File

@@ -0,0 +1,13 @@
# Build script for the FastDeploy PP-OCR C API demo.
# cmake_minimum_required must be called before project() per CMake policy.
CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
PROJECT(infer_demo C)

# Path of the downloaded and extracted FastDeploy SDK.
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

# Add the FastDeploy dependency headers.
include_directories(${FASTDEPLOY_INCS})

add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.c)
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})

View File

@@ -0,0 +1,262 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <stdlib.h>
#include "fastdeploy_capi/vision.h"
// Platform-specific path separator used when composing model file paths.
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
/** \brief Run the PP-OCRv3 pipeline (detector + classifier + recognizer)
 * on CPU and write the visualized result to ./vis_result.jpg.
 *
 * \param[in] det_model_dir  Directory holding the detection model files
 *                           (inference.pdmodel / inference.pdiparams).
 * \param[in] cls_model_dir  Directory holding the classification model files.
 * \param[in] rec_model_dir  Directory holding the recognition model files.
 * \param[in] rec_label_file Path of the recognition label file.
 * \param[in] image_file     Path of the image to run inference on.
 */
void CpuInfer(const char* det_model_dir, const char* cls_model_dir,
              const char* rec_model_dir, const char* rec_label_file,
              const char* image_file) {
  char det_model_file[100];
  char det_params_file[100];
  char cls_model_file[100];
  char cls_params_file[100];
  char rec_model_file[100];
  char rec_params_file[100];
  // Compose "<dir><sep>inference.pdmodel"-style paths. snprintf truncates
  // (but still NUL-terminates) if a directory name exceeds the buffers.
  snprintf(det_model_file, sizeof(det_model_file), "%s%c%s", det_model_dir,
           sep, "inference.pdmodel");
  snprintf(det_params_file, sizeof(det_params_file), "%s%c%s", det_model_dir,
           sep, "inference.pdiparams");
  snprintf(cls_model_file, sizeof(cls_model_file), "%s%c%s", cls_model_dir,
           sep, "inference.pdmodel");
  snprintf(cls_params_file, sizeof(cls_params_file), "%s%c%s", cls_model_dir,
           sep, "inference.pdiparams");
  snprintf(rec_model_file, sizeof(rec_model_file), "%s%c%s", rec_model_dir,
           sep, "inference.pdmodel");
  snprintf(rec_params_file, sizeof(rec_params_file), "%s%c%s", rec_model_dir,
           sep, "inference.pdiparams");
  // One runtime option per sub-model; all three run on CPU.
  FD_C_RuntimeOptionWrapper* det_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapper* cls_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapper* rec_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapperUseCpu(det_option);
  FD_C_RuntimeOptionWrapperUseCpu(cls_option);
  FD_C_RuntimeOptionWrapperUseCpu(rec_option);
  FD_C_DBDetectorWrapper* det_model = FD_C_CreateDBDetectorWrapper(
      det_model_file, det_params_file, det_option, PADDLE);
  FD_C_ClassifierWrapper* cls_model = FD_C_CreateClassifierWrapper(
      cls_model_file, cls_params_file, cls_option, PADDLE);
  FD_C_RecognizerWrapper* rec_model = FD_C_CreateRecognizerWrapper(
      rec_model_file, rec_params_file, rec_label_file, rec_option, PADDLE);
  FD_C_PPOCRv3Wrapper* ppocr_v3 =
      FD_C_CreatePPOCRv3Wrapper(det_model, cls_model, rec_model);
  if (!FD_C_PPOCRv3WrapperInitialized(ppocr_v3)) {
    printf("Failed to initialize.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
    return;
  }
  FD_C_Mat im = FD_C_Imread(image_file);
  FD_C_OCRResult* result = (FD_C_OCRResult*)malloc(sizeof(FD_C_OCRResult));
  // malloc can fail; never hand a NULL result buffer to Predict.
  if (result == NULL) {
    printf("Failed to allocate the OCR result.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
    FD_C_DestroyMat(im);
    return;
  }
  if (!FD_C_PPOCRv3WrapperPredict(ppocr_v3, im, result)) {
    printf("Failed to predict.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
    FD_C_DestroyMat(im);
    free(result);
    return;
  }
  // Print the result. You can access the fields of FD_C_OCRResult directly
  // (see the OCRResult API doc), or wrap it in an FD_C_OCRResultWrapper,
  // which contains the C++ structure fastdeploy::vision::OCRResult, and use
  // FD_C_OCRResultWrapperStr to call fastdeploy::vision::OCRResult::Str().
  // For convenience, the wrapper route is used here.
  FD_C_OCRResultWrapper* result_wrapper =
      FD_C_CreateOCRResultWrapperFromData(result);
  printf("%s", FD_C_OCRResultWrapperStr(result_wrapper));
  FD_C_Mat vis_im = FD_C_VisOcr(im, result);
  FD_C_Imwrite("vis_result.jpg", vis_im);
  printf("Visualized result saved in ./vis_result.jpg\n");
  // Release all resources, including the visualization Mat, which was
  // previously leaked.
  FD_C_DestroyRuntimeOptionWrapper(det_option);
  FD_C_DestroyRuntimeOptionWrapper(cls_option);
  FD_C_DestroyRuntimeOptionWrapper(rec_option);
  FD_C_DestroyClassifierWrapper(cls_model);
  FD_C_DestroyDBDetectorWrapper(det_model);
  FD_C_DestroyRecognizerWrapper(rec_model);
  FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
  FD_C_DestroyOCRResultWrapper(result_wrapper);
  FD_C_DestroyOCRResult(result);
  FD_C_DestroyMat(vis_im);
  FD_C_DestroyMat(im);
}
/** \brief Run the PP-OCRv3 pipeline (detector + classifier + recognizer)
 * on GPU 0 and write the visualized result to ./vis_result.jpg.
 *
 * \param[in] det_model_dir  Directory holding the detection model files
 *                           (inference.pdmodel / inference.pdiparams).
 * \param[in] cls_model_dir  Directory holding the classification model files.
 * \param[in] rec_model_dir  Directory holding the recognition model files.
 * \param[in] rec_label_file Path of the recognition label file.
 * \param[in] image_file     Path of the image to run inference on.
 */
void GpuInfer(const char* det_model_dir, const char* cls_model_dir,
              const char* rec_model_dir, const char* rec_label_file,
              const char* image_file) {
  char det_model_file[100];
  char det_params_file[100];
  char cls_model_file[100];
  char cls_params_file[100];
  char rec_model_file[100];
  char rec_params_file[100];
  // Compose "<dir><sep>inference.pdmodel"-style paths. snprintf truncates
  // (but still NUL-terminates) if a directory name exceeds the buffers.
  snprintf(det_model_file, sizeof(det_model_file), "%s%c%s", det_model_dir,
           sep, "inference.pdmodel");
  snprintf(det_params_file, sizeof(det_params_file), "%s%c%s", det_model_dir,
           sep, "inference.pdiparams");
  snprintf(cls_model_file, sizeof(cls_model_file), "%s%c%s", cls_model_dir,
           sep, "inference.pdmodel");
  snprintf(cls_params_file, sizeof(cls_params_file), "%s%c%s", cls_model_dir,
           sep, "inference.pdiparams");
  snprintf(rec_model_file, sizeof(rec_model_file), "%s%c%s", rec_model_dir,
           sep, "inference.pdmodel");
  snprintf(rec_params_file, sizeof(rec_params_file), "%s%c%s", rec_model_dir,
           sep, "inference.pdiparams");
  // One runtime option per sub-model; all three are placed on GPU 0.
  FD_C_RuntimeOptionWrapper* det_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapper* cls_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapper* rec_option = FD_C_CreateRuntimeOptionWrapper();
  FD_C_RuntimeOptionWrapperUseGpu(det_option, 0);
  FD_C_RuntimeOptionWrapperUseGpu(cls_option, 0);
  FD_C_RuntimeOptionWrapperUseGpu(rec_option, 0);
  FD_C_DBDetectorWrapper* det_model = FD_C_CreateDBDetectorWrapper(
      det_model_file, det_params_file, det_option, PADDLE);
  FD_C_ClassifierWrapper* cls_model = FD_C_CreateClassifierWrapper(
      cls_model_file, cls_params_file, cls_option, PADDLE);
  FD_C_RecognizerWrapper* rec_model = FD_C_CreateRecognizerWrapper(
      rec_model_file, rec_params_file, rec_label_file, rec_option, PADDLE);
  FD_C_PPOCRv3Wrapper* ppocr_v3 =
      FD_C_CreatePPOCRv3Wrapper(det_model, cls_model, rec_model);
  if (!FD_C_PPOCRv3WrapperInitialized(ppocr_v3)) {
    printf("Failed to initialize.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
    return;
  }
  FD_C_Mat im = FD_C_Imread(image_file);
  FD_C_OCRResult* result = (FD_C_OCRResult*)malloc(sizeof(FD_C_OCRResult));
  // malloc can fail; never hand a NULL result buffer to Predict.
  if (result == NULL) {
    printf("Failed to allocate the OCR result.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
    FD_C_DestroyMat(im);
    return;
  }
  if (!FD_C_PPOCRv3WrapperPredict(ppocr_v3, im, result)) {
    printf("Failed to predict.\n");
    FD_C_DestroyRuntimeOptionWrapper(det_option);
    FD_C_DestroyRuntimeOptionWrapper(cls_option);
    FD_C_DestroyRuntimeOptionWrapper(rec_option);
    FD_C_DestroyClassifierWrapper(cls_model);
    FD_C_DestroyDBDetectorWrapper(det_model);
    FD_C_DestroyRecognizerWrapper(rec_model);
    FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
    FD_C_DestroyMat(im);
    free(result);
    return;
  }
  // Print the result. You can access the fields of FD_C_OCRResult directly
  // (see the OCRResult API doc), or wrap it in an FD_C_OCRResultWrapper,
  // which contains the C++ structure fastdeploy::vision::OCRResult, and use
  // FD_C_OCRResultWrapperStr to call fastdeploy::vision::OCRResult::Str().
  // For convenience, the wrapper route is used here.
  FD_C_OCRResultWrapper* result_wrapper =
      FD_C_CreateOCRResultWrapperFromData(result);
  printf("%s", FD_C_OCRResultWrapperStr(result_wrapper));
  FD_C_Mat vis_im = FD_C_VisOcr(im, result);
  FD_C_Imwrite("vis_result.jpg", vis_im);
  printf("Visualized result saved in ./vis_result.jpg\n");
  // Release all resources, including the visualization Mat, which was
  // previously leaked.
  FD_C_DestroyRuntimeOptionWrapper(det_option);
  FD_C_DestroyRuntimeOptionWrapper(cls_option);
  FD_C_DestroyRuntimeOptionWrapper(rec_option);
  FD_C_DestroyClassifierWrapper(cls_model);
  FD_C_DestroyDBDetectorWrapper(det_model);
  FD_C_DestroyRecognizerWrapper(rec_model);
  FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
  FD_C_DestroyOCRResultWrapper(result_wrapper);
  FD_C_DestroyOCRResult(result);
  FD_C_DestroyMat(vis_im);
  FD_C_DestroyMat(im);
}
/** \brief Demo entry point.
 *
 * Expects six arguments: det_model_dir, cls_model_dir, rec_model_dir,
 * rec_label_file, image_file and run_option (0 = CPU, 1 = GPU).
 *
 * \return 0 on success, -1 on bad usage or an invalid run_option.
 */
int main(int argc, char* argv[]) {
  if (argc < 7) {
    printf(
        "Usage: infer_demo path/to/det_model path/to/cls_model "
        "path/to/rec_model path/to/rec_label_file path/to/image "
        "run_option, "
        "e.g ./infer_demo ./ch_PP-OCRv3_det_infer "
        "./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer "
        "./ppocr_keys_v1.txt ./12.jpg 0\n");
    printf(
        "The data type of run_option is int, 0: run with cpu; 1: run with gpu"
        "\n");
    return -1;
  }
  // Parse the device flag once instead of calling atoi() in each branch.
  int run_option = atoi(argv[6]);
  if (run_option == 0) {
    CpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
  } else if (run_option == 1) {
    GpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
  } else {
    // Previously unknown values were silently ignored; report them instead.
    printf("Invalid run_option %s, expected 0 (cpu) or 1 (gpu).\n", argv[6]);
    return -1;
  }
  return 0;
}

View File

@@ -51,7 +51,7 @@ void BindRuntime(pybind11::module& m) {
warm_datas[i][j].nbytes());
}
}
return self.Compile(warm_tensors, _option);
return self.Compile(warm_tensors);
})
.def("infer",
[](Runtime& self, std::map<std::string, pybind11::array>& data) {

View File

@@ -58,7 +58,10 @@ class BaseBackend {
virtual bool Initialized() const { return initialized_; }
virtual bool Init(const RuntimeOption& option) {
FDERROR << "Not Implement Yet." << std::endl;
FDERROR << "Not Implement for "
<< option.backend << " in "
<< option.device << "."
<< std::endl;
return false;
}
@@ -89,59 +92,59 @@ class BaseBackend {
return nullptr;
}
benchmark::BenchmarkOption benchmark_option_;
benchmark::BenchmarkResult benchmark_result_;
benchmark::BenchmarkOption benchmark_option_;
benchmark::BenchmarkResult benchmark_result_;
};
/** \brief Macros for Runtime benchmark profiling.
* The param 'base_loop' for 'RUNTIME_PROFILE_LOOP_BEGIN'
* indicates that the least number of times the loop
/** \brief Macros for Runtime benchmark profiling.
* The param 'base_loop' for 'RUNTIME_PROFILE_LOOP_BEGIN'
* indicates that the least number of times the loop
* will repeat when profiling mode is not enabled.
* In most cases, the value should be 1, i.e., results are
* obtained by running the inference process once, when
* the profile mode is turned off, such as ONNX Runtime,
* OpenVINO, TensorRT, Paddle Inference, Paddle Lite,
* RKNPU2, SOPHGO etc.
*
* In most cases, the value should be 1, i.e., results are
* obtained by running the inference process once, when
* the profile mode is turned off, such as ONNX Runtime,
* OpenVINO, TensorRT, Paddle Inference, Paddle Lite,
* RKNPU2, SOPHGO etc.
*
* example code @code
* // OpenVINOBackend::Infer
* // OpenVINOBackend::Infer
* RUNTIME_PROFILE_LOOP_H2D_D2H_BEGIN
* // do something ....
* // do something ....
* RUNTIME_PROFILE_LOOP_BEGIN(1)
* // The codes which wrapped by 'BEGIN(1) ~ END' scope
* // The codes which wrapped by 'BEGIN(1) ~ END' scope
* // will only run once when profiling mode is not enabled.
* request_.infer();
* request_.infer();
* RUNTIME_PROFILE_LOOP_END
* // do something ....
* // do something ....
* RUNTIME_PROFILE_LOOP_H2D_D2H_END
*
*
* @endcode In this case, No global variables inside a function
* are wrapped by BEGIN and END, which may be required for
* are wrapped by BEGIN and END, which may be required for
* subsequent tasks. But, some times we need to set 'base_loop'
* as 0, such as POROS.
*
*
* * example code @code
* // PorosBackend::Infer
* RUNTIME_PROFILE_LOOP_H2D_D2H_BEGIN
* // do something ....
* // do something ....
* RUNTIME_PROFILE_LOOP_BEGIN(0) // set 'base_loop' as 0
* // The codes which wrapped by 'BEGIN(0) ~ END' scope
* // The codes which wrapped by 'BEGIN(0) ~ END' scope
* // will not run when profiling mode is not enabled.
* auto poros_outputs = _poros_module->forward(poros_inputs);
* auto poros_outputs = _poros_module->forward(poros_inputs);
* RUNTIME_PROFILE_LOOP_END
* // Run another inference beyond the scope of 'BEGIN ~ END'
* // to get valid outputs for subsequent tasks.
* auto poros_outputs = _poros_module->forward(poros_inputs);
* auto poros_outputs = _poros_module->forward(poros_inputs);
* // do something .... will use 'poros_outputs' ...
* if (poros_outputs.isTensor()) {
* // ...
* }
* RUNTIME_PROFILE_LOOP_H2D_D2H_END
*
*
* @endcode In this case, 'poros_outputs' inside a function
* are wrapped by BEGIN and END, which may be required for
* are wrapped by BEGIN and END, which may be required for
* subsequent tasks. So, we set 'base_loop' as 0 and lanuch
* another infer to get the valid outputs beyond the scope
* another infer to get the valid outputs beyond the scope
* of 'BEGIN ~ END' for subsequent tasks.
*/

View File

@@ -51,6 +51,20 @@ class PorosBackend : public BaseBackend {
void BuildOption(const PorosBackendOption& option);
bool Init(const RuntimeOption& option) {
if (!(Supported(option.model_format, Backend::POROS)
&& Supported(option.device, Backend::POROS))) {
return false;
}
if (option.model_from_memory_) {
FDERROR << "Poros backend doesn't support load model "
<< "from memory, please load model from disk."
<< std::endl;
return false;
}
return true;
}
bool Compile(const std::string& model_file,
std::vector<std::vector<FDTensor>>& prewarm_tensors,
const PorosBackendOption& option = PorosBackendOption());

View File

@@ -417,25 +417,28 @@ Runtime* Runtime::Clone(void* stream, int device_id) {
return runtime;
}
// only for poros backend
bool Runtime::Compile(std::vector<std::vector<FDTensor>>& prewarm_tensors,
const RuntimeOption& _option) {
void Runtime::CreatePorosBackend() {
#ifdef ENABLE_POROS_BACKEND
backend_ = utils::make_unique<PorosBackend>();
FDASSERT(backend_->Init(option), "Failed to initialize Poros backend.");
#else
FDASSERT(false,
"PorosBackend is not available, please compiled with "
"ENABLE_POROS_BACKEND=ON.");
#endif
FDINFO << "Runtime initialized with Backend::POROS in " << option.device
<< "." << std::endl;
}
// only for poros backend
bool Runtime::Compile(std::vector<std::vector<FDTensor>>& prewarm_tensors) {
#ifdef ENABLE_POROS_BACKEND
FDASSERT(
option.model_format == ModelFormat::TORCHSCRIPT,
"PorosBackend only support model format of ModelFormat::TORCHSCRIPT.");
if (option.device != Device::CPU && option.device != Device::GPU) {
FDERROR << "PorosBackend only supports CPU/GPU, but now its "
<< option.device << "." << std::endl;
return false;
}
option.poros_option.device = option.device;
option.poros_option.device_id = option.device_id;
option.poros_option.enable_fp16 = option.trt_option.enable_fp16;
option.poros_option.max_batch_size = option.trt_option.max_batch_size;
option.poros_option.max_workspace_size = option.trt_option.max_workspace_size;
backend_ = utils::make_unique<PorosBackend>();
auto casted_backend = dynamic_cast<PorosBackend*>(backend_.get());
FDASSERT(
casted_backend->Compile(option.model_file, prewarm_tensors,

View File

@@ -99,11 +99,9 @@ struct FASTDEPLOY_DECL Runtime {
/** \brief Compile TorchScript Module, only for Poros backend
*
* \param[in] prewarm_tensors Prewarm datas for compile
* \param[in] _option Runtime option
* \return true if compile successed, otherwise false
*/
bool Compile(std::vector<std::vector<FDTensor>>& prewarm_tensors,
const RuntimeOption& _option);
bool Compile(std::vector<std::vector<FDTensor>>& prewarm_tensors);
/** \brief Get profile time of Runtime after the profile process is done.
*/
double GetProfileTime() {
@@ -118,6 +116,7 @@ struct FASTDEPLOY_DECL Runtime {
void CreateLiteBackend();
void CreateRKNPU2Backend();
void CreateSophgoNPUBackend();
void CreatePorosBackend();
std::unique_ptr<BaseBackend> backend_;
std::vector<FDTensor> input_tensors_;
std::vector<FDTensor> output_tensors_;

View File

@@ -230,7 +230,10 @@ ModelState::ModelState(TRITONBACKEND_Model* triton_model)
ParseBoolValue(value_string, &pd_enable_mkldnn));
runtime_options_->SetPaddleMKLDNN(pd_enable_mkldnn);
} else if (param_key == "use_paddle_log") {
runtime_options_->EnablePaddleLogInfo();
bool use_paddle_log;
THROW_IF_BACKEND_MODEL_ERROR(
ParseBoolValue(value_string, &use_paddle_log));
runtime_options_->paddle_infer_option.enable_log_info = use_paddle_log;
} else if (param_key == "num_streams") {
int num_streams;
THROW_IF_BACKEND_MODEL_ERROR(
@@ -241,6 +244,8 @@ ModelState::ModelState(TRITONBACKEND_Model* triton_model)
ParseBoolValue(value_string, &is_clone_));
} else if (param_key == "use_ipu") {
// runtime_options_->UseIpu();
} else if (param_key == "encryption_key") {
runtime_options_->SetEncryptionKey(value_string);
}
}
}
@@ -312,10 +317,15 @@ ModelState::ModelState(TRITONBACKEND_Model* triton_model)
} else if (param_key == "use_paddle") {
runtime_options_->EnablePaddleToTrt();
} else if (param_key == "use_paddle_log") {
runtime_options_->EnablePaddleLogInfo();
bool use_paddle_log;
THROW_IF_BACKEND_MODEL_ERROR(
ParseBoolValue(value_string, &use_paddle_log));
runtime_options_->paddle_infer_option.enable_log_info = use_paddle_log;
} else if (param_key == "is_clone") {
THROW_IF_BACKEND_MODEL_ERROR(
ParseBoolValue(value_string, &is_clone_));
} else if (param_key == "encryption_key") {
runtime_options_->SetEncryptionKey(value_string);
}
}
}