[Doc] add doxygen docs for c sharp api (#1495)
add doxygen docs for c sharp api

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
@@ -24,20 +24,20 @@ public enum ModelFormat {
 }

 public enum rknpu2_CpuName {
-  RK356X = 0, /* run on RK356X. */
+  RK356X = 0, ///< run on RK356X.
-  RK3588 = 1, /* default,run on RK3588. */
+  RK3588 = 1, ///< default,run on RK3588.
   UNDEFINED,
 }

 public enum rknpu2_CoreMask {
-  RKNN_NPU_CORE_AUTO = 0, //< default, run on NPU core randomly.
+  RKNN_NPU_CORE_AUTO = 0, ///< default, run on NPU core randomly.
-  RKNN_NPU_CORE_0 = 1, //< run on NPU core 0.
+  RKNN_NPU_CORE_0 = 1, ///< run on NPU core 0.
-  RKNN_NPU_CORE_1 = 2, //< run on NPU core 1.
+  RKNN_NPU_CORE_1 = 2, ///< run on NPU core 1.
-  RKNN_NPU_CORE_2 = 4, //< run on NPU core 2.
+  RKNN_NPU_CORE_2 = 4, ///< run on NPU core 2.
   RKNN_NPU_CORE_0_1 =
-      RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, //< run on NPU core 1 and core 2.
+      RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, ///< run on NPU core 1 and core 2.
   RKNN_NPU_CORE_0_1_2 =
-      RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, //< run on NPU core 1 and core 2.
+      RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, ///< run on NPU core 1 and core 2.
   RKNN_NPU_CORE_UNDEFINED,
 }
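The RKNN_NPU_CORE_* members are bit flags, so the combined entries above are simply the bitwise OR of the single-core masks. A minimal C# sketch of what that composition means for a caller (the demo class is illustrative; only the enum shown above, under the fastdeploy namespace used later in this diff, is assumed):

using fastdeploy;

class CoreMaskDemo {
  static void Main() {
    // RKNN_NPU_CORE_0_1 is defined as RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1,
    // so its numeric value is 1 | 2 == 3.
    rknpu2_CoreMask mask = rknpu2_CoreMask.RKNN_NPU_CORE_0_1;
    System.Console.WriteLine((int)mask);  // 3

    // Individual cores can still be tested inside a combined mask.
    bool usesCore0 = ((int)mask & (int)rknpu2_CoreMask.RKNN_NPU_CORE_0) != 0;
    System.Console.WriteLine(usesCore0);  // True
  }
}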
@@ -18,6 +18,8 @@ using System.Runtime.InteropServices;

 namespace fastdeploy {

+/*! @brief Option object used when create a new Runtime object
+ */
 public class RuntimeOption {

   public RuntimeOption() {
@@ -28,26 +30,41 @@ public class RuntimeOption {
     FD_C_DestroyRuntimeOptionWrapper(fd_runtime_option_wrapper);
   }

+  /** \brief Set path of model file and parameter file
+   *
+   * \param[in] model_path Path of model file, e.g ResNet50/model.pdmodel for Paddle format model / ResNet50/model.onnx for ONNX format model
+   * \param[in] params_path Path of parameter file, this only used when the model format is Paddle, e.g Resnet50/model.pdiparams
+   * \param[in] format Format of the loaded model
+   */
   public void SetModelPath(string model_path, string params_path = "",
                            ModelFormat format = ModelFormat.PADDLE) {
     FD_C_RuntimeOptionWrapperSetModelPath(fd_runtime_option_wrapper, model_path,
                                           params_path, format);
   }

+  /** \brief Specify the memory buffer of model and parameter. Used when model and params are loaded directly from memory
+   *
+   * \param[in] model_buffer The string of model memory buffer
+   * \param[in] params_buffer The string of parameters memory buffer
+   * \param[in] format Format of the loaded model
+   */
   public void SetModelBuffer(string model_buffer, string params_buffer = "",
                              ModelFormat format = ModelFormat.PADDLE) {
     FD_C_RuntimeOptionWrapperSetModelBuffer(
         fd_runtime_option_wrapper, model_buffer, params_buffer, format);
   }

+  /// Use cpu to inference, the runtime will inference on CPU by default
   public void UseCpu() {
     FD_C_RuntimeOptionWrapperUseCpu(fd_runtime_option_wrapper);
   }

+  /// Use Nvidia GPU to inference
   public void UseGpu(int gpu_id = 0) {
     FD_C_RuntimeOptionWrapperUseGpu(fd_runtime_option_wrapper, gpu_id);
   }

+  /// Use RKNPU2 e.g RK3588/RK356X to inference
   public void
   UseRKNPU2(rknpu2_CpuName rknpu2_name = rknpu2_CpuName.RK3588,
             rknpu2_CoreMask rknpu2_core = rknpu2_CoreMask.RKNN_NPU_CORE_0) {
@@ -55,14 +72,38 @@ public class RuntimeOption {
                                         rknpu2_core);
   }

+  /// Use TimVX e.g RV1126/A311D to inference
   public void UseTimVX() {
     FD_C_RuntimeOptionWrapperUseTimVX(fd_runtime_option_wrapper);
   }

+  /// Use Huawei Ascend to inference
   public void UseAscend() {
     FD_C_RuntimeOptionWrapperUseAscend(fd_runtime_option_wrapper);
   }

+  /// \brief Turn on KunlunXin XPU.
+  ///
+  /// \param kunlunxin_id the KunlunXin XPU card to use (default is 0).
+  /// \param l3_workspace_size The size of the video memory allocated by the l3
+  ///         cache, the maximum is 16M.
+  /// \param locked Whether the allocated L3 cache can be locked. If false,
+  ///       it means that the L3 cache is not locked, and the allocated L3
+  ///       cache can be shared by multiple models, and multiple models
+  ///       sharing the L3 cache will be executed sequentially on the card.
+  /// \param autotune Whether to autotune the conv operator in the model. If
+  ///       true, when the conv operator of a certain dimension is executed
+  ///       for the first time, it will automatically search for a better
+  ///       algorithm to improve the performance of subsequent conv operators
+  ///       of the same dimension.
+  /// \param autotune_file Specify the path of the autotune file. If
+  ///       autotune_file is specified, the algorithm specified in the
+  ///       file will be used and autotune will not be performed again.
+  /// \param precision Calculation accuracy of multi_encoder
+  /// \param adaptive_seqlen Is the input of multi_encoder variable length
+  /// \param enable_multi_stream Whether to enable the multi stream of
+  ///        KunlunXin XPU.
+  ///
   public void
   UseKunlunXin(int kunlunxin_id = 0, int l3_workspace_size = 0xfffc00,
                bool locked = false, bool autotune = true,
@@ -74,6 +115,7 @@ public class RuntimeOption {
                                           enable_multi_stream);
   }

+  /// Use Sophgo to inference
   public void UseSophgo() {
     FD_C_RuntimeOptionWrapperUseSophgo(fd_runtime_option_wrapper);
   }
@@ -83,6 +125,9 @@ public class RuntimeOption {
                                               external_stream);
   }

+  /*
+   * @brief Set number of cpu threads while inference on CPU, by default it will decided by the different backends
+   */
   public void SetCpuThreadNum(int thread_num) {
     FD_C_RuntimeOptionWrapperSetCpuThreadNum(fd_runtime_option_wrapper,
                                              thread_num);
@@ -97,38 +142,47 @@ public class RuntimeOption {
     FD_C_RuntimeOptionWrapperUsePaddleBackend(fd_runtime_option_wrapper);
   }

+  /// Set Paddle Inference as inference backend, support CPU/GPU
   public void UsePaddleInferBackend() {
     FD_C_RuntimeOptionWrapperUsePaddleInferBackend(fd_runtime_option_wrapper);
   }

+  /// Set ONNX Runtime as inference backend, support CPU/GPU
   public void UseOrtBackend() {
     FD_C_RuntimeOptionWrapperUseOrtBackend(fd_runtime_option_wrapper);
   }

+  /// Set SOPHGO Runtime as inference backend, support SOPHGO
   public void UseSophgoBackend() {
     FD_C_RuntimeOptionWrapperUseSophgoBackend(fd_runtime_option_wrapper);
   }

+  /// Set TensorRT as inference backend, only support GPU
   public void UseTrtBackend() {
     FD_C_RuntimeOptionWrapperUseTrtBackend(fd_runtime_option_wrapper);
   }

+  /// Set Poros backend as inference backend, support CPU/GPU
   public void UsePorosBackend() {
     FD_C_RuntimeOptionWrapperUsePorosBackend(fd_runtime_option_wrapper);
   }

+  /// Set OpenVINO as inference backend, only support CPU
   public void UseOpenVINOBackend() {
     FD_C_RuntimeOptionWrapperUseOpenVINOBackend(fd_runtime_option_wrapper);
   }

+  /// Set Paddle Lite as inference backend, only support arm cpu
   public void UseLiteBackend() {
     FD_C_RuntimeOptionWrapperUseLiteBackend(fd_runtime_option_wrapper);
   }

+  /// Set Paddle Lite as inference backend, only support arm cpu
   public void UsePaddleLiteBackend() {
     FD_C_RuntimeOptionWrapperUsePaddleLiteBackend(fd_runtime_option_wrapper);
   }

   public void SetPaddleMKLDNN(bool pd_mkldnn = true) {
     FD_C_RuntimeOptionWrapperSetPaddleMKLDNN(fd_runtime_option_wrapper,
                                              pd_mkldnn);
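Taken together, these setters are what a caller chains on a RuntimeOption before handing it to a model. A minimal sketch using only the methods shown in the hunks above; the paths and the demo class are illustrative placeholders:

using fastdeploy;

class RuntimeOptionDemo {
  static void Main() {
    var option = new RuntimeOption();
    // Point the option at a Paddle format model (model file + params file).
    option.SetModelPath("ResNet50/model.pdmodel", "ResNet50/model.pdiparams",
                        ModelFormat.PADDLE);
    option.UseCpu();              // run on CPU, which is also the default device
    option.SetCpuThreadNum(4);    // let the backend use 4 CPU threads
    option.UseOrtBackend();       // pick ONNX Runtime as the inference backend
  }
}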
@@ -23,8 +23,18 @@ namespace fastdeploy {
 namespace vision {
 namespace classification {

+/*! @brief PaddleClas serials model object used when to load a PaddleClas model exported by PaddleClas repository
+ */
 public class PaddleClasModel {

+  /** \brief Set path of model file and configuration file, and the configuration of runtime
+   *
+   * \param[in] model_file Path of model file, e.g resnet/model.pdmodel
+   * \param[in] params_file Path of parameter file, e.g resnet/model.pdiparams, if the model format is ONNX, this parameter will be ignored
+   * \param[in] config_file Path of configuration file for deployment, e.g resnet/infer_cfg.yml
+   * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends`
+   * \param[in] model_format Model format of the loaded model, default is Paddle format
+   */
   public PaddleClasModel(string model_file, string params_file,
                          string config_file, RuntimeOption custom_option = null,
                          ModelFormat model_format = ModelFormat.PADDLE) {
@@ -40,11 +50,17 @@ public class PaddleClasModel {
     FD_C_DestroyPaddleClasModelWrapper(fd_paddleclas_model_wrapper);
   }

+  /// Get model's name
   public string ModelName() {
     return "PaddleClas/Model";
   }

+  /** \brief DEPRECATED Predict the classification result for an input image, remove at 1.0 version
+   *
+   * \param[in] im The input image data, comes from cv::imread()
+   *
+   * \return ClassifyResult
+   */
   public ClassifyResult Predict(Mat img) {
     FD_ClassifyResult fd_classify_result = new FD_ClassifyResult();
     if(! FD_C_PaddleClasModelWrapperPredict(
@@ -59,6 +75,12 @@ public class PaddleClasModel {
     return classify_result;
   }

+  /** \brief Predict the classification results for a batch of input images
+   *
+   * \param[in] imgs, The input image list, each element comes from cv::imread()
+   *
+   * \return List<ClassifyResult>
+   */
   public List<ClassifyResult> BatchPredict(List<Mat> imgs){
     FD_OneDimMat imgs_in = new FD_OneDimMat();
     imgs_in.size = (nuint)imgs.Count;
@@ -86,6 +108,7 @@ public class PaddleClasModel {
     return results_out;
   }

+  /// Check whether model is initialized successfully
   public bool Initialized() {
     return FD_C_PaddleClasModelWrapperInitialized(fd_paddleclas_model_wrapper);
   }
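A hedged usage sketch for the class above. It assumes OpenCvSharp supplies the Mat type the binding consumes (the wrapper reads img.CvPtr), and the model/params/config paths are placeholders for a real PaddleClas export:

using OpenCvSharp;                       // assumed source of Mat / Cv2.ImRead
using fastdeploy;
using fastdeploy.vision.classification;

class ClasDemo {
  static void Main() {
    var option = new RuntimeOption();
    option.UseCpu();
    var model = new PaddleClasModel("resnet/model.pdmodel",
                                    "resnet/model.pdiparams",
                                    "resnet/infer_cfg.yml", option);
    if (!model.Initialized()) {
      System.Console.WriteLine("model failed to initialize");
      return;
    }
    Mat img = Cv2.ImRead("test.jpg");
    var result = model.Predict(img);     // returns a ClassifyResult
  }
}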
@@ -24,6 +24,9 @@ namespace vision {
 namespace detection {

 // YOLOv5

+/*! @brief YOLOv5 model
+ */
 public class YOLOv5 {

   public YOLOv5( string model_file, string params_file,
@@ -39,6 +42,12 @@ public class YOLOv5 {

   ~YOLOv5() { FD_C_DestroyYOLOv5Wrapper(fd_yolov5_wrapper); }

+  /** \brief Predict the detection result for an input image
+   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+   *
+   * \return DetectionResult
+   */
   public DetectionResult Predict(Mat img) {
     FD_DetectionResult fd_detection_result = new FD_DetectionResult();
     if(! FD_C_YOLOv5WrapperPredict(fd_yolov5_wrapper, img.CvPtr,
@@ -53,6 +62,12 @@ public class YOLOv5 {
     return detection_result;
   }

+  /** \brief Predict the detection result for an input image list
+   * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
+   *
+   * \return List<DetectionResult>
+   */
   public List<DetectionResult> BatchPredict(List<Mat> imgs){
     FD_OneDimMat imgs_in = new FD_OneDimMat();
     imgs_in.size = (nuint)imgs.Count;
@@ -80,6 +95,8 @@ public class YOLOv5 {
     return results_out;
   }

+  /// Check whether model is initialized successfully
   public bool Initialized() {
     return FD_C_YOLOv5WrapperInitialized(fd_yolov5_wrapper);
   }
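A usage sketch for YOLOv5. The diff only shows the first two constructor parameters, so the call below assumes the remaining ones (runtime option, model format) have defaults; the ONNX path and OpenCvSharp image loading are likewise illustrative assumptions:

using OpenCvSharp;                  // assumed source of Mat / Cv2.ImRead
using fastdeploy;
using fastdeploy.vision.detection;

class Yolov5Demo {
  static void Main() {
    // Assumed: trailing constructor parameters default to a CPU RuntimeOption
    // and an ONNX model format.
    var model = new YOLOv5("yolov5s.onnx", "");
    if (!model.Initialized()) return;

    var result = model.Predict(Cv2.ImRead("test.jpg"));  // DetectionResult
  }
}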
@@ -123,6 +140,9 @@ public class YOLOv5 {

 // YOLOv7

+/*! @brief YOLOv7 model
+ */
 public class YOLOv7 {

   public YOLOv7( string model_file, string params_file,
@@ -138,6 +158,12 @@ public class YOLOv7 {

   ~YOLOv7() { FD_C_DestroyYOLOv7Wrapper(fd_yolov7_wrapper); }

+  /** \brief Predict the detection result for an input image
+   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+   *
+   * \return DetectionResult
+   */
   public DetectionResult Predict(Mat img) {
     FD_DetectionResult fd_detection_result = new FD_DetectionResult();
     if(! FD_C_YOLOv7WrapperPredict(fd_yolov7_wrapper, img.CvPtr,
@@ -152,6 +178,12 @@ public class YOLOv7 {
     return detection_result;
   }

+  /** \brief Predict the detection result for an input image list
+   * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
+   *
+   * \return List<DetectionResult>
+   */
   public List<DetectionResult> BatchPredict(List<Mat> imgs){
     FD_OneDimMat imgs_in = new FD_OneDimMat();
     imgs_in.size = (nuint)imgs.Count;
@@ -179,6 +211,8 @@ public class YOLOv7 {
     return results_out;
   }

+  /// Check whether model is initialized successfully
   public bool Initialized() {
     return FD_C_YOLOv7WrapperInitialized(fd_yolov7_wrapper);
   }
@@ -221,6 +255,9 @@ public class YOLOv7 {

 // YOLOv8

+/*! @brief YOLOv8 model
+ */
 public class YOLOv8 {

   public YOLOv8( string model_file, string params_file,
@@ -236,6 +273,12 @@ public class YOLOv8 {

   ~YOLOv8() { FD_C_DestroyYOLOv8Wrapper(fd_yolov8_wrapper); }

+  /** \brief Predict the detection result for an input image
+   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+   *
+   * \return DetectionResult
+   */
   public DetectionResult Predict(Mat img) {
     FD_DetectionResult fd_detection_result = new FD_DetectionResult();
     if(! FD_C_YOLOv8WrapperPredict(fd_yolov8_wrapper, img.CvPtr,
@@ -250,6 +293,12 @@ public class YOLOv8 {
     return detection_result;
   }

+  /** \brief Predict the detection result for an input image list
+   * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
+   *
+   * \return List<DetectionResult>
+   */
   public List<DetectionResult> BatchPredict(List<Mat> imgs){
     FD_OneDimMat imgs_in = new FD_OneDimMat();
     imgs_in.size = (nuint)imgs.Count;
@@ -277,6 +326,8 @@ public class YOLOv8 {
     return results_out;
   }

+  /// Check whether model is initialized successfully
   public bool Initialized() {
     return FD_C_YOLOv8WrapperInitialized(fd_yolov8_wrapper);
   }
@@ -321,6 +372,9 @@ public class YOLOv8 {

 // YOLOv6

+/*! @brief YOLOv6 model
+ */
 public class YOLOv6 {

   public YOLOv6( string model_file, string params_file,
@@ -352,6 +406,8 @@ public class YOLOv6 {
     return detection_result;
   }

+  /// Check whether model is initialized successfully
   public bool Initialized() {
     return FD_C_YOLOv6WrapperInitialized(fd_yolov6_wrapper);
   }
@@ -390,6 +446,9 @@ public class YOLOv6 {

 // YOLOR

+/*! @brief YOLOR model
+ */
 public class YOLOR {

   public YOLOR( string model_file, string params_file,
@@ -421,6 +480,8 @@ public class YOLOR {
     return detection_result;
   }

+  /// Check whether model is initialized successfully
   public bool Initialized() {
     return FD_C_YOLORWrapperInitialized(fd_yolor_wrapper);
   }
@@ -460,6 +521,9 @@ public class YOLOR {

 // YOLOX

+/*! @brief YOLOX model
+ */
 public class YOLOX {

   public YOLOX( string model_file, string params_file,
@@ -491,6 +555,8 @@ public class YOLOX {
     return detection_result;
   }

+  /// Check whether model is initialized successfully
   public bool Initialized() {
     return FD_C_YOLOXWrapperInitialized(fd_yolox_wrapper);
   }
@@ -23,9 +23,17 @@ namespace fastdeploy {
 namespace vision {
 namespace detection {

-// PPYOLOE
+/*! @brief PPYOLOE model
+ */
 public class PPYOLOE {
+  /** \brief Set path of model file and configuration file, and the configuration of runtime
+   *
+   * \param[in] model_file Path of model file, e.g ppyoloe/model.pdmodel
+   * \param[in] params_file Path of parameter file, e.g picodet/model.pdiparams, if the model format is ONNX, this parameter will be ignored
+   * \param[in] config_file Path of configuration file for deployment, e.g picodet/infer_cfg.yml
+   * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends`
+   * \param[in] model_format Model format of the loaded model, default is Paddle format
+   */
   public PPYOLOE(string model_file, string params_file, string config_file,
                  RuntimeOption custom_option = null,
                  ModelFormat model_format = ModelFormat.PADDLE) {
@@ -39,6 +47,11 @@ public class PPYOLOE {

   ~PPYOLOE() { FD_C_DestroyPPYOLOEWrapper(fd_ppyoloe_wrapper); }

+  /** \brief Predict the detection result for an input image
+   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+   *
+   * \return DetectionResult
+   */
   public DetectionResult Predict(Mat img) {
     FD_DetectionResult fd_detection_result = new FD_DetectionResult();
     if(! FD_C_PPYOLOEWrapperPredict(fd_ppyoloe_wrapper, img.CvPtr,
@@ -53,6 +66,11 @@ public class PPYOLOE {
     return detection_result;
   }

+  /** \brief Predict the detection result for an input image list
+   * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
+   *
+   * \return List<DetectionResult>
+   */
   public List<DetectionResult> BatchPredict(List<Mat> imgs){
     FD_OneDimMat imgs_in = new FD_OneDimMat();
     imgs_in.size = (nuint)imgs.Count;
@@ -80,6 +98,7 @@ public class PPYOLOE {
     return results_out;
   }

+  /// Check whether model is initialized successfully
   public bool Initialized() {
     return FD_C_PPYOLOEWrapperInitialized(fd_ppyoloe_wrapper);
   }
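For the PaddleDetection-style classes such as PPYOLOE, the constructor additionally takes the deployment config file, and BatchPredict returns one result per input image. A sketch under the same assumptions as the earlier examples (OpenCvSharp Mat, placeholder paths):

using System.Collections.Generic;
using OpenCvSharp;                  // assumed source of Mat / Cv2.ImRead
using fastdeploy;
using fastdeploy.vision.detection;

class PpyoloeDemo {
  static void Main() {
    var option = new RuntimeOption();
    option.UseGpu(0);               // or option.UseCpu();
    var model = new PPYOLOE("ppyoloe/model.pdmodel",
                            "ppyoloe/model.pdiparams",
                            "ppyoloe/infer_cfg.yml", option);
    if (!model.Initialized()) return;

    var imgs = new List<Mat> { Cv2.ImRead("a.jpg"), Cv2.ImRead("b.jpg") };
    var results = model.BatchPredict(imgs);  // List<DetectionResult>, one per image
    System.Console.WriteLine(results.Count);
  }
}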
@@ -129,6 +148,9 @@ public class PPYOLOE {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PicoDet
|
// PicoDet
|
||||||
|
|
||||||
|
/*! @brief PicoDet model
|
||||||
|
*/
|
||||||
public class PicoDet {
|
public class PicoDet {
|
||||||
|
|
||||||
public PicoDet(string model_file, string params_file, string config_file,
|
public PicoDet(string model_file, string params_file, string config_file,
|
||||||
@@ -144,6 +166,12 @@ public class PicoDet {
|
|||||||
|
|
||||||
~PicoDet() { FD_C_DestroyPicoDetWrapper(fd_picodet_wrapper); }
|
~PicoDet() { FD_C_DestroyPicoDetWrapper(fd_picodet_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_PicoDetWrapperPredict(fd_picodet_wrapper, img.CvPtr,
|
if(! FD_C_PicoDetWrapperPredict(fd_picodet_wrapper, img.CvPtr,
|
||||||
@@ -158,6 +186,12 @@ public class PicoDet {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -185,6 +219,8 @@ public class PicoDet {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_PicoDetWrapperInitialized(fd_picodet_wrapper);
|
return FD_C_PicoDetWrapperInitialized(fd_picodet_wrapper);
|
||||||
}
|
}
|
||||||
@@ -236,6 +272,9 @@ public class PicoDet {
|
|||||||
|
|
||||||
// PPYOLO
|
// PPYOLO
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief PPYOLO model
|
||||||
|
*/
|
||||||
public class PPYOLO {
|
public class PPYOLO {
|
||||||
|
|
||||||
public PPYOLO(string model_file, string params_file, string config_file,
|
public PPYOLO(string model_file, string params_file, string config_file,
|
||||||
@@ -251,6 +290,12 @@ public class PPYOLO {
|
|||||||
|
|
||||||
~PPYOLO() { FD_C_DestroyPPYOLOWrapper(fd_ppyolo_wrapper); }
|
~PPYOLO() { FD_C_DestroyPPYOLOWrapper(fd_ppyolo_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_PPYOLOWrapperPredict(fd_ppyolo_wrapper, img.CvPtr,
|
if(! FD_C_PPYOLOWrapperPredict(fd_ppyolo_wrapper, img.CvPtr,
|
||||||
@@ -265,6 +310,12 @@ public class PPYOLO {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -292,6 +343,8 @@ public class PPYOLO {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_PPYOLOWrapperInitialized(fd_ppyolo_wrapper);
|
return FD_C_PPYOLOWrapperInitialized(fd_ppyolo_wrapper);
|
||||||
}
|
}
|
||||||
@@ -342,6 +395,9 @@ public class PPYOLO {
|
|||||||
|
|
||||||
// YOLOv3
|
// YOLOv3
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief YOLOv3 model
|
||||||
|
*/
|
||||||
public class YOLOv3 {
|
public class YOLOv3 {
|
||||||
|
|
||||||
public YOLOv3(string model_file, string params_file, string config_file,
|
public YOLOv3(string model_file, string params_file, string config_file,
|
||||||
@@ -357,6 +413,12 @@ public class YOLOv3 {
|
|||||||
|
|
||||||
~YOLOv3() { FD_C_DestroyYOLOv3Wrapper(fd_yolov3_wrapper); }
|
~YOLOv3() { FD_C_DestroyYOLOv3Wrapper(fd_yolov3_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_YOLOv3WrapperPredict(fd_yolov3_wrapper, img.CvPtr,
|
if(! FD_C_YOLOv3WrapperPredict(fd_yolov3_wrapper, img.CvPtr,
|
||||||
@@ -371,6 +433,12 @@ public class YOLOv3 {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -398,6 +466,8 @@ public class YOLOv3 {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_YOLOv3WrapperInitialized(fd_yolov3_wrapper);
|
return FD_C_YOLOv3WrapperInitialized(fd_yolov3_wrapper);
|
||||||
}
|
}
|
||||||
@@ -448,6 +518,9 @@ public class YOLOv3 {
|
|||||||
|
|
||||||
// PaddleYOLOX
|
// PaddleYOLOX
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief PaddleYOLOX model
|
||||||
|
*/
|
||||||
public class PaddleYOLOX {
|
public class PaddleYOLOX {
|
||||||
|
|
||||||
public PaddleYOLOX(string model_file, string params_file, string config_file,
|
public PaddleYOLOX(string model_file, string params_file, string config_file,
|
||||||
@@ -463,6 +536,12 @@ public class PaddleYOLOX {
|
|||||||
|
|
||||||
~PaddleYOLOX() { FD_C_DestroyPaddleYOLOXWrapper(fd_paddleyolox_wrapper); }
|
~PaddleYOLOX() { FD_C_DestroyPaddleYOLOXWrapper(fd_paddleyolox_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_PaddleYOLOXWrapperPredict(fd_paddleyolox_wrapper, img.CvPtr,
|
if(! FD_C_PaddleYOLOXWrapperPredict(fd_paddleyolox_wrapper, img.CvPtr,
|
||||||
@@ -477,6 +556,12 @@ public class PaddleYOLOX {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -504,6 +589,8 @@ public class PaddleYOLOX {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_PaddleYOLOXWrapperInitialized(fd_paddleyolox_wrapper);
|
return FD_C_PaddleYOLOXWrapperInitialized(fd_paddleyolox_wrapper);
|
||||||
}
|
}
|
||||||
@@ -554,6 +641,9 @@ public class PaddleYOLOX {
|
|||||||
|
|
||||||
// FasterRCNN
|
// FasterRCNN
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief FasterRCNN model
|
||||||
|
*/
|
||||||
public class FasterRCNN {
|
public class FasterRCNN {
|
||||||
|
|
||||||
public FasterRCNN(string model_file, string params_file, string config_file,
|
public FasterRCNN(string model_file, string params_file, string config_file,
|
||||||
@@ -569,6 +659,12 @@ public class FasterRCNN {
|
|||||||
|
|
||||||
~FasterRCNN() { FD_C_DestroyFasterRCNNWrapper(fd_fasterrcnn_wrapper); }
|
~FasterRCNN() { FD_C_DestroyFasterRCNNWrapper(fd_fasterrcnn_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_FasterRCNNWrapperPredict(fd_fasterrcnn_wrapper, img.CvPtr,
|
if(! FD_C_FasterRCNNWrapperPredict(fd_fasterrcnn_wrapper, img.CvPtr,
|
||||||
@@ -583,6 +679,12 @@ public class FasterRCNN {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -610,6 +712,8 @@ public class FasterRCNN {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_FasterRCNNWrapperInitialized(fd_fasterrcnn_wrapper);
|
return FD_C_FasterRCNNWrapperInitialized(fd_fasterrcnn_wrapper);
|
||||||
}
|
}
|
||||||
@@ -660,6 +764,9 @@ public class FasterRCNN {
|
|||||||
|
|
||||||
// MaskRCNN
|
// MaskRCNN
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief MaskRCNN model
|
||||||
|
*/
|
||||||
public class MaskRCNN {
|
public class MaskRCNN {
|
||||||
|
|
||||||
public MaskRCNN(string model_file, string params_file, string config_file,
|
public MaskRCNN(string model_file, string params_file, string config_file,
|
||||||
@@ -675,6 +782,12 @@ public class MaskRCNN {
|
|||||||
|
|
||||||
~MaskRCNN() { FD_C_DestroyMaskRCNNWrapper(fd_maskrcnn_wrapper); }
|
~MaskRCNN() { FD_C_DestroyMaskRCNNWrapper(fd_maskrcnn_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_MaskRCNNWrapperPredict(fd_maskrcnn_wrapper, img.CvPtr,
|
if(! FD_C_MaskRCNNWrapperPredict(fd_maskrcnn_wrapper, img.CvPtr,
|
||||||
@@ -689,6 +802,12 @@ public class MaskRCNN {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -716,6 +835,8 @@ public class MaskRCNN {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_MaskRCNNWrapperInitialized(fd_maskrcnn_wrapper);
|
return FD_C_MaskRCNNWrapperInitialized(fd_maskrcnn_wrapper);
|
||||||
}
|
}
|
||||||
@@ -766,6 +887,9 @@ public class MaskRCNN {
|
|||||||
|
|
||||||
// SSD
|
// SSD
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief SSD model
|
||||||
|
*/
|
||||||
public class SSD {
|
public class SSD {
|
||||||
|
|
||||||
public SSD(string model_file, string params_file, string config_file,
|
public SSD(string model_file, string params_file, string config_file,
|
||||||
@@ -781,6 +905,12 @@ public class SSD {
|
|||||||
|
|
||||||
~SSD() { FD_C_DestroySSDWrapper(fd_ssd_wrapper); }
|
~SSD() { FD_C_DestroySSDWrapper(fd_ssd_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_SSDWrapperPredict(fd_ssd_wrapper, img.CvPtr,
|
if(! FD_C_SSDWrapperPredict(fd_ssd_wrapper, img.CvPtr,
|
||||||
@@ -795,6 +925,12 @@ public class SSD {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -822,6 +958,8 @@ public class SSD {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_SSDWrapperInitialized(fd_ssd_wrapper);
|
return FD_C_SSDWrapperInitialized(fd_ssd_wrapper);
|
||||||
}
|
}
|
||||||
@@ -872,6 +1010,9 @@ public class SSD {
|
|||||||
|
|
||||||
// PaddleYOLOv5
|
// PaddleYOLOv5
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief PaddleYOLOv5 model
|
||||||
|
*/
|
||||||
public class PaddleYOLOv5 {
|
public class PaddleYOLOv5 {
|
||||||
|
|
||||||
public PaddleYOLOv5(string model_file, string params_file, string config_file,
|
public PaddleYOLOv5(string model_file, string params_file, string config_file,
|
||||||
@@ -887,6 +1028,12 @@ public class PaddleYOLOv5 {
|
|||||||
|
|
||||||
~PaddleYOLOv5() { FD_C_DestroyPaddleYOLOv5Wrapper(fd_paddleyolov5_wrapper); }
|
~PaddleYOLOv5() { FD_C_DestroyPaddleYOLOv5Wrapper(fd_paddleyolov5_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_PaddleYOLOv5WrapperPredict(fd_paddleyolov5_wrapper, img.CvPtr,
|
if(! FD_C_PaddleYOLOv5WrapperPredict(fd_paddleyolov5_wrapper, img.CvPtr,
|
||||||
@@ -901,6 +1048,12 @@ public class PaddleYOLOv5 {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -928,6 +1081,8 @@ public class PaddleYOLOv5 {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_PaddleYOLOv5WrapperInitialized(fd_paddleyolov5_wrapper);
|
return FD_C_PaddleYOLOv5WrapperInitialized(fd_paddleyolov5_wrapper);
|
||||||
}
|
}
|
||||||
@@ -978,6 +1133,9 @@ public class PaddleYOLOv5 {
|
|||||||
|
|
||||||
// PaddleYOLOv6
|
// PaddleYOLOv6
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief PaddleYOLOv6 model
|
||||||
|
*/
|
||||||
public class PaddleYOLOv6 {
|
public class PaddleYOLOv6 {
|
||||||
|
|
||||||
public PaddleYOLOv6(string model_file, string params_file, string config_file,
|
public PaddleYOLOv6(string model_file, string params_file, string config_file,
|
||||||
@@ -993,6 +1151,12 @@ public class PaddleYOLOv6 {
|
|||||||
|
|
||||||
~PaddleYOLOv6() { FD_C_DestroyPaddleYOLOv6Wrapper(fd_paddleyolov6_wrapper); }
|
~PaddleYOLOv6() { FD_C_DestroyPaddleYOLOv6Wrapper(fd_paddleyolov6_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_PaddleYOLOv6WrapperPredict(fd_paddleyolov6_wrapper, img.CvPtr,
|
if(! FD_C_PaddleYOLOv6WrapperPredict(fd_paddleyolov6_wrapper, img.CvPtr,
|
||||||
@@ -1007,6 +1171,12 @@ public class PaddleYOLOv6 {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -1034,6 +1204,8 @@ public class PaddleYOLOv6 {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_PaddleYOLOv6WrapperInitialized(fd_paddleyolov6_wrapper);
|
return FD_C_PaddleYOLOv6WrapperInitialized(fd_paddleyolov6_wrapper);
|
||||||
}
|
}
|
||||||
@@ -1084,6 +1256,9 @@ public class PaddleYOLOv6 {
|
|||||||
|
|
||||||
// PaddleYOLOv7
|
// PaddleYOLOv7
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief PaddleYOLOv7 model
|
||||||
|
*/
|
||||||
public class PaddleYOLOv7 {
|
public class PaddleYOLOv7 {
|
||||||
|
|
||||||
public PaddleYOLOv7(string model_file, string params_file, string config_file,
|
public PaddleYOLOv7(string model_file, string params_file, string config_file,
|
||||||
@@ -1099,6 +1274,12 @@ public class PaddleYOLOv7 {
|
|||||||
|
|
||||||
~PaddleYOLOv7() { FD_C_DestroyPaddleYOLOv7Wrapper(fd_paddleyolov7_wrapper); }
|
~PaddleYOLOv7() { FD_C_DestroyPaddleYOLOv7Wrapper(fd_paddleyolov7_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_PaddleYOLOv7WrapperPredict(fd_paddleyolov7_wrapper, img.CvPtr,
|
if(! FD_C_PaddleYOLOv7WrapperPredict(fd_paddleyolov7_wrapper, img.CvPtr,
|
||||||
@@ -1113,6 +1294,12 @@ public class PaddleYOLOv7 {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -1140,6 +1327,8 @@ public class PaddleYOLOv7 {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_PaddleYOLOv7WrapperInitialized(fd_paddleyolov7_wrapper);
|
return FD_C_PaddleYOLOv7WrapperInitialized(fd_paddleyolov7_wrapper);
|
||||||
}
|
}
|
||||||
@@ -1190,6 +1379,9 @@ public class PaddleYOLOv7 {
|
|||||||
|
|
||||||
// PaddleYOLOv8
|
// PaddleYOLOv8
|
||||||
|
|
||||||
|
|
||||||
|
/*! @brief PaddleYOLOv8 model
|
||||||
|
*/
|
||||||
public class PaddleYOLOv8 {
|
public class PaddleYOLOv8 {
|
||||||
|
|
||||||
public PaddleYOLOv8(string model_file, string params_file, string config_file,
|
public PaddleYOLOv8(string model_file, string params_file, string config_file,
|
||||||
@@ -1205,6 +1397,12 @@ public class PaddleYOLOv8 {
|
|||||||
|
|
||||||
~PaddleYOLOv8() { FD_C_DestroyPaddleYOLOv8Wrapper(fd_paddleyolov8_wrapper); }
|
~PaddleYOLOv8() { FD_C_DestroyPaddleYOLOv8Wrapper(fd_paddleyolov8_wrapper); }
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image
|
||||||
|
* \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return DetectionResult
|
||||||
|
*/
|
||||||
public DetectionResult Predict(Mat img) {
|
public DetectionResult Predict(Mat img) {
|
||||||
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
|
||||||
if(! FD_C_PaddleYOLOv8WrapperPredict(fd_paddleyolov8_wrapper, img.CvPtr,
|
if(! FD_C_PaddleYOLOv8WrapperPredict(fd_paddleyolov8_wrapper, img.CvPtr,
|
||||||
@@ -1219,6 +1417,12 @@ public class PaddleYOLOv8 {
|
|||||||
return detection_result;
|
return detection_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** \brief Predict the detection result for an input image list
|
||||||
|
* \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
|
||||||
|
*
|
||||||
|
* \return List<DetectionResult>
|
||||||
|
*/
|
||||||
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
public List<DetectionResult> BatchPredict(List<Mat> imgs){
|
||||||
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
FD_OneDimMat imgs_in = new FD_OneDimMat();
|
||||||
imgs_in.size = (nuint)imgs.Count;
|
imgs_in.size = (nuint)imgs.Count;
|
||||||
@@ -1246,6 +1450,8 @@ public class PaddleYOLOv8 {
|
|||||||
return results_out;
|
return results_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Check whether model is initialized successfully
|
||||||
public bool Initialized() {
|
public bool Initialized() {
|
||||||
return FD_C_PaddleYOLOv8WrapperInitialized(fd_paddleyolov8_wrapper);
|
return FD_C_PaddleYOLOv8WrapperInitialized(fd_paddleyolov8_wrapper);
|
||||||
}
|
}
|
||||||
@@ -1296,6 +1502,9 @@ public class PaddleYOLOv8 {

// RTMDet

/*! @brief RTMDet model
 */
public class RTMDet {

public RTMDet(string model_file, string params_file, string config_file,
@@ -1311,6 +1520,12 @@ public class RTMDet {

~RTMDet() { FD_C_DestroyRTMDetWrapper(fd_rtmdet_wrapper); }

/** \brief Predict the detection result for an input image
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return DetectionResult
 */
public DetectionResult Predict(Mat img) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
if(! FD_C_RTMDetWrapperPredict(fd_rtmdet_wrapper, img.CvPtr,
@@ -1325,6 +1540,12 @@ public class RTMDet {
return detection_result;
}

/** \brief Predict the detection result for an input image list
 * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return List<DetectionResult>
 */
public List<DetectionResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -1352,6 +1573,8 @@ public class RTMDet {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_RTMDetWrapperInitialized(fd_rtmdet_wrapper);
}
@@ -1402,6 +1625,9 @@ public class RTMDet {

// CascadeRCNN

/*! @brief CascadeRCNN model
 */
public class CascadeRCNN {

public CascadeRCNN(string model_file, string params_file, string config_file,
@@ -1417,6 +1643,12 @@ public class CascadeRCNN {

~CascadeRCNN() { FD_C_DestroyCascadeRCNNWrapper(fd_cascadercnn_wrapper); }

/** \brief Predict the detection result for an input image
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return DetectionResult
 */
public DetectionResult Predict(Mat img) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
if(! FD_C_CascadeRCNNWrapperPredict(fd_cascadercnn_wrapper, img.CvPtr,
@@ -1431,6 +1663,12 @@ public class CascadeRCNN {
return detection_result;
}

/** \brief Predict the detection result for an input image list
 * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return List<DetectionResult>
 */
public List<DetectionResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -1458,6 +1696,8 @@ public class CascadeRCNN {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_CascadeRCNNWrapperInitialized(fd_cascadercnn_wrapper);
}
@@ -1508,6 +1748,9 @@ public class CascadeRCNN {

// PSSDet

/*! @brief PSSDet model
 */
public class PSSDet {

public PSSDet(string model_file, string params_file, string config_file,
@@ -1523,6 +1766,12 @@ public class PSSDet {

~PSSDet() { FD_C_DestroyPSSDetWrapper(fd_pssdet_wrapper); }

/** \brief Predict the detection result for an input image
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return DetectionResult
 */
public DetectionResult Predict(Mat img) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
if(! FD_C_PSSDetWrapperPredict(fd_pssdet_wrapper, img.CvPtr,
@@ -1537,6 +1786,12 @@ public class PSSDet {
return detection_result;
}

/** \brief Predict the detection result for an input image list
 * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return List<DetectionResult>
 */
public List<DetectionResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -1564,6 +1819,8 @@ public class PSSDet {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_PSSDetWrapperInitialized(fd_pssdet_wrapper);
}
@@ -1614,6 +1871,9 @@ public class PSSDet {

// RetinaNet

/*! @brief RetinaNet model
 */
public class RetinaNet {

public RetinaNet(string model_file, string params_file, string config_file,
@@ -1629,6 +1889,12 @@ public class RetinaNet {

~RetinaNet() { FD_C_DestroyRetinaNetWrapper(fd_retinanet_wrapper); }

/** \brief Predict the detection result for an input image
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return DetectionResult
 */
public DetectionResult Predict(Mat img) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
if(! FD_C_RetinaNetWrapperPredict(fd_retinanet_wrapper, img.CvPtr,
@@ -1643,6 +1909,12 @@ public class RetinaNet {
return detection_result;
}

/** \brief Predict the detection result for an input image list
 * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return List<DetectionResult>
 */
public List<DetectionResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -1670,6 +1942,8 @@ public class RetinaNet {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_RetinaNetWrapperInitialized(fd_retinanet_wrapper);
}
@@ -1720,6 +1994,9 @@ public class RetinaNet {

// FCOS

/*! @brief FCOS model
 */
public class FCOS {

public FCOS(string model_file, string params_file, string config_file,
@@ -1735,6 +2012,12 @@ public class FCOS {

~FCOS() { FD_C_DestroyFCOSWrapper(fd_fcos_wrapper); }

/** \brief Predict the detection result for an input image
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return DetectionResult
 */
public DetectionResult Predict(Mat img) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
if(! FD_C_FCOSWrapperPredict(fd_fcos_wrapper, img.CvPtr,
@@ -1749,6 +2032,12 @@ public class FCOS {
return detection_result;
}

/** \brief Predict the detection result for an input image list
 * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return List<DetectionResult>
 */
public List<DetectionResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -1776,6 +2065,8 @@ public class FCOS {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_FCOSWrapperInitialized(fd_fcos_wrapper);
}
@@ -1826,6 +2117,9 @@ public class FCOS {

// TTFNet

/*! @brief TTFNet model
 */
public class TTFNet {

public TTFNet(string model_file, string params_file, string config_file,
@@ -1841,6 +2135,12 @@ public class TTFNet {

~TTFNet() { FD_C_DestroyTTFNetWrapper(fd_ttfnet_wrapper); }

/** \brief Predict the detection result for an input image
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return DetectionResult
 */
public DetectionResult Predict(Mat img) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
if(! FD_C_TTFNetWrapperPredict(fd_ttfnet_wrapper, img.CvPtr,
@@ -1855,6 +2155,12 @@ public class TTFNet {
return detection_result;
}

/** \brief Predict the detection result for an input image list
 * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return List<DetectionResult>
 */
public List<DetectionResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -1882,6 +2188,8 @@ public class TTFNet {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_TTFNetWrapperInitialized(fd_ttfnet_wrapper);
}
@@ -1932,6 +2240,9 @@ public class TTFNet {

// TOOD

/*! @brief TOOD model
 */
public class TOOD {

public TOOD(string model_file, string params_file, string config_file,
@@ -1947,6 +2258,12 @@ public class TOOD {

~TOOD() { FD_C_DestroyTOODWrapper(fd_tood_wrapper); }

/** \brief Predict the detection result for an input image
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return DetectionResult
 */
public DetectionResult Predict(Mat img) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
if(! FD_C_TOODWrapperPredict(fd_tood_wrapper, img.CvPtr,
@@ -1961,6 +2278,12 @@ public class TOOD {
return detection_result;
}

/** \brief Predict the detection result for an input image list
 * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return List<DetectionResult>
 */
public List<DetectionResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -1988,6 +2311,8 @@ public class TOOD {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_TOODWrapperInitialized(fd_tood_wrapper);
}
@@ -2038,6 +2363,9 @@ public class TOOD {

// GFL

/*! @brief GFL model
 */
public class GFL {

public GFL(string model_file, string params_file, string config_file,
@@ -2053,6 +2381,12 @@ public class GFL {

~GFL() { FD_C_DestroyGFLWrapper(fd_gfl_wrapper); }

/** \brief Predict the detection result for an input image
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return DetectionResult
 */
public DetectionResult Predict(Mat img) {
FD_DetectionResult fd_detection_result = new FD_DetectionResult();
if(! FD_C_GFLWrapperPredict(fd_gfl_wrapper, img.CvPtr,
@@ -2067,6 +2401,12 @@ public class GFL {
return detection_result;
}

/** \brief Predict the detection result for an input image list
 * \param[in] im The input image list, all the elements come from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return List<DetectionResult>
 */
public List<DetectionResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -2094,6 +2434,8 @@ public class GFL {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_GFLWrapperInitialized(fd_gfl_wrapper);
}
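The hunks above attach the same Doxygen contract to every PaddleDetection class in this file: Predict takes one cv::imread-style Mat and returns a DetectionResult, BatchPredict takes a List<Mat>, and Initialized reports whether loading succeeded. A minimal usage sketch for one of these classes follows; the model paths, the fastdeploy.vision.detection namespace spelling, the defaulted trailing constructor parameters, and the use of OpenCvSharp for Mat are illustrative assumptions, not part of this diff.

using System;
using System.Collections.Generic;
using OpenCvSharp;                  // assumed source of Mat and Cv2.ImRead
using fastdeploy.vision;            // assumed namespace of DetectionResult
using fastdeploy.vision.detection;  // assumed namespace of the classes above

class DetectionDemo {
  static void Main() {
    // Hypothetical exported PaddleDetection model directory.
    var model = new RTMDet("rtmdet/model.pdmodel",
                           "rtmdet/model.pdiparams",
                           "rtmdet/infer_cfg.yml");
    if (!model.Initialized()) {
      Console.WriteLine("Model failed to initialize.");
      return;
    }
    Mat img = Cv2.ImRead("test.jpg");                 // HWC, BGR, as documented above
    DetectionResult result = model.Predict(img);      // single image
    List<DetectionResult> batch = model.BatchPredict(new List<Mat> { img });
    Console.WriteLine(result.ToString());
  }
}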
@@ -27,8 +27,18 @@ namespace ocr {

// Recognizer

/*! @brief Recognizer object is used to load the recognition model provided by PaddleOCR.
 */
public class Recognizer {

/** \brief Set path of model file, and the configuration of runtime
 *
 * \param[in] model_file Path of model file, e.g ./ch_PP-OCRv3_rec_infer/model.pdmodel.
 * \param[in] params_file Path of parameter file, e.g ./ch_PP-OCRv3_rec_infer/model.pdiparams, if the model format is ONNX, this parameter will be ignored.
 * \param[in] label_path Path of label file used by OCR recognition model. e.g ./ppocr_keys_v1.txt
 * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends`.
 * \param[in] model_format Model format of the loaded model, default is Paddle format.
 */
public Recognizer(string model_file, string params_file,
string label_path,
RuntimeOption custom_option = null,
@@ -45,11 +55,17 @@ public class Recognizer {
FD_C_DestroyRecognizerWrapper(fd_recognizer_model_wrapper);
}

/// Get model's name
public string ModelName() {
return "ppocr/ocr_rec";
}

/** \brief Predict the input image and get OCR recognition model result.
 *
 * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return The output of OCR recognition model result
 */
public OCRRecognizerResult Predict(Mat img) {
OCRRecognizerResult ocr_recognizer_result = new OCRRecognizerResult();
FD_Cstr text = new FD_Cstr();
@@ -64,6 +80,12 @@ public class Recognizer {
return ocr_recognizer_result;
}

/** \brief BatchPredict the input image and get OCR recognition model result.
 *
 * \param[in] images The list of input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return The output of OCR recognition model result.
 */
public List<OCRRecognizerResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -152,6 +174,7 @@ public class Recognizer {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_RecognizerWrapperInitialized(fd_recognizer_model_wrapper);
}
@@ -219,8 +242,17 @@ public class Recognizer {

// Classifier

/*! @brief Classifier object is used to load the classification model provided by PaddleOCR.
 */
public class Classifier {

/** \brief Set path of model file, and the configuration of runtime
 *
 * \param[in] model_file Path of model file, e.g ./ch_ppocr_mobile_v2.0_cls_infer/model.pdmodel.
 * \param[in] params_file Path of parameter file, e.g ./ch_ppocr_mobile_v2.0_cls_infer/model.pdiparams, if the model format is ONNX, this parameter will be ignored.
 * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends`.
 * \param[in] model_format Model format of the loaded model, default is Paddle format.
 */
public Classifier(string model_file, string params_file,
RuntimeOption custom_option = null,
ModelFormat model_format = ModelFormat.PADDLE) {
@@ -236,11 +268,17 @@ public class Classifier {
FD_C_DestroyClassifierWrapper(fd_classifier_model_wrapper);
}

/// Get model's name
public string ModelName() {
return "ppocr/ocr_cls";
}

/** \brief Predict the input image and get OCR classification model cls_result.
 *
 * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return OCRClassifierResult
 */
public OCRClassifierResult Predict(Mat img) {
OCRClassifierResult ocr_classify_result = new OCRClassifierResult();
if(! FD_C_ClassifierWrapperPredict(
@@ -252,6 +290,12 @@ public class Classifier {
return ocr_classify_result;
}

/** \brief BatchPredict the input image and get OCR classification model result.
 *
 * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return List<OCRClassifierResult>
 */
public List<OCRClassifierResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -334,6 +378,7 @@ public class Classifier {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_ClassifierWrapperInitialized(fd_classifier_model_wrapper);
}
@@ -395,8 +440,17 @@ public class Classifier {

// DBDetector

/*! @brief DBDetector object is used to load the detection model provided by PaddleOCR.
 */
public class DBDetector {

/** \brief Set path of model file, and the configuration of runtime
 *
 * \param[in] model_file Path of model file, e.g ./ch_PP-OCRv3_det_infer/model.pdmodel.
 * \param[in] params_file Path of parameter file, e.g ./ch_PP-OCRv3_det_infer/model.pdiparams, if the model format is ONNX, this parameter will be ignored.
 * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends`.
 * \param[in] model_format Model format of the loaded model, default is Paddle format.
 */
public DBDetector(string model_file, string params_file,
RuntimeOption custom_option = null,
ModelFormat model_format = ModelFormat.PADDLE) {
@@ -412,11 +466,17 @@ public class DBDetector {
FD_C_DestroyDBDetectorWrapper(fd_dbdetector_model_wrapper);
}

/// Get model's name
public string ModelName() {
return "ppocr/ocr_det";
}

/** \brief Predict the input image and get OCR detection model result.
 *
 * \param[in] img The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return OCRDBDetectorResult
 */
public OCRDBDetectorResult Predict(Mat img) {
OCRDBDetectorResult ocr_detector_result = new OCRDBDetectorResult();
FD_TwoDimArrayInt32 fd_box_result = new FD_TwoDimArrayInt32();
@@ -441,6 +501,12 @@ public class DBDetector {
return ocr_detector_result;
}

/** \brief BatchPredict the input image and get OCR detection model result.
 *
 * \param[in] images The list input of image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return List<OCRDBDetectorResult>
 */
public List<OCRDBDetectorResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -484,6 +550,7 @@ public class DBDetector {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_DBDetectorWrapperInitialized(fd_dbdetector_model_wrapper);
}
@@ -541,8 +608,16 @@ namespace pipeline {

// PPOCRv2

/*! @brief PPOCRv2 is used to load PP-OCRv2 series models provided by PaddleOCR.
 */
public class PPOCRv2 {

/** \brief Set up the detection model path, classification model path and recognition model path respectively.
 *
 * \param[in] det_model Path of detection model, e.g ./ch_PP-OCRv2_det_infer
 * \param[in] cls_model Path of classification model, e.g ./ch_ppocr_mobile_v2.0_cls_infer
 * \param[in] rec_model Path of recognition model, e.g ./ch_PP-OCRv2_rec_infer
 */
public PPOCRv2(DBDetector ppocrv2, Classifier classifier,
Recognizer recognizer) {
fd_ppocrv2_wrapper = FD_C_CreatePPOCRv2Wrapper(
@@ -560,6 +635,12 @@ public class PPOCRv2 {
return "PPOCRv2";
}

/** \brief Predict the input image and get OCR result.
 *
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return OCRResult
 */
public OCRResult Predict(Mat img) {
FD_OCRResult fd_ocr_result = new FD_OCRResult();
if(! FD_C_PPOCRv2WrapperPredict(
@@ -573,6 +654,12 @@ public class PPOCRv2 {
return ocr_detector_result;
}

/** \brief BatchPredict the input image and get OCR result.
 *
 * \param[in] images The list of input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return List<OCRResult>
 */
public List<OCRResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -601,6 +688,7 @@ public class PPOCRv2 {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_PPOCRv2WrapperInitialized(fd_ppocrv2_wrapper);
}
@@ -643,8 +731,16 @@ public class PPOCRv2 {

// PPOCRv3

/*! @brief PPOCRv3 is used to load PP-OCRv3 series models provided by PaddleOCR.
 */
public class PPOCRv3 {

/** \brief Set up the detection model path, classification model path and recognition model path respectively.
 *
 * \param[in] det_model Path of detection model, e.g ./ch_PP-OCRv3_det_infer
 * \param[in] cls_model Path of classification model, e.g ./ch_ppocr_mobile_v2.0_cls_infer
 * \param[in] rec_model Path of recognition model, e.g ./ch_PP-OCRv3_rec_infer
 */
public PPOCRv3(DBDetector ppocrv3, Classifier classifier,
Recognizer recognizer) {
fd_ppocrv3_wrapper = FD_C_CreatePPOCRv3Wrapper(
@@ -662,6 +758,12 @@ public class PPOCRv3 {
return "PPOCRv3";
}

/** \brief Predict the input image and get OCR result.
 *
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return OCRResult
 */
public OCRResult Predict(Mat img) {
FD_OCRResult fd_ocr_result = new FD_OCRResult();
if(! FD_C_PPOCRv3WrapperPredict(
@@ -675,6 +777,12 @@ public class PPOCRv3 {
return ocr_detector_result;
}

/** \brief BatchPredict the input image and get OCR result.
 *
 * \param[in] images The list of input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format.
 *
 * \return List<OCRResult>
 */
public List<OCRResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -703,6 +811,7 @@ public class PPOCRv3 {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_PPOCRv3WrapperInitialized(fd_ppocrv3_wrapper);
}
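The OCR hunks above document the three single-stage models (DBDetector, Classifier, Recognizer) and the PPOCRv2/PPOCRv3 pipelines that compose them. A minimal end-to-end sketch under the same caveats as before (paths, namespace spellings and the OpenCvSharp Mat helper are illustrative assumptions):

using OpenCvSharp;
using fastdeploy.vision.ocr;   // assumed namespace of DBDetector/Classifier/Recognizer
using fastdeploy.pipeline;     // assumed namespace of PPOCRv3

class OcrDemo {
  static void Main() {
    // Hypothetical local copies of the PP-OCRv3 models named in the comments above.
    var det = new DBDetector("ch_PP-OCRv3_det_infer/model.pdmodel",
                             "ch_PP-OCRv3_det_infer/model.pdiparams");
    var cls = new Classifier("ch_ppocr_mobile_v2.0_cls_infer/model.pdmodel",
                             "ch_ppocr_mobile_v2.0_cls_infer/model.pdiparams");
    var rec = new Recognizer("ch_PP-OCRv3_rec_infer/model.pdmodel",
                             "ch_PP-OCRv3_rec_infer/model.pdiparams",
                             "ppocr_keys_v1.txt");
    var ocr = new PPOCRv3(det, cls, rec);
    Mat img = Cv2.ImRead("doc.jpg");
    var result = ocr.Predict(img);   // OCRResult: boxes, text, rec_scores, ...
    System.Console.WriteLine(result.ToString());
  }
}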
@@ -37,9 +37,11 @@ public enum ResultType {
HEADPOSE
}

/*! Mask structure, used in DetectionResult for instance segmentation models
 */
public class Mask {
public List<byte> data;
public List<byte> data; /// Mask data buffer
public List<long> shape;
public List<long> shape; /// Shape of mask
public ResultType type;
public Mask() {
this.data = new List<byte>();
@@ -47,6 +49,7 @@ public class Mask {
this.type = ResultType.MASK;
}

/// convert the result to string to print
public override string ToString() {
string information = "Mask(" ;
int ndim = this.shape.Count;
@@ -63,16 +66,19 @@ public class Mask {

}

/*! @brief Classify result structure for all the image classify models
 */
public class ClassifyResult {
public List<int> label_ids;
public List<int> label_ids; /// Classify result for an image
public List<float> scores;
public List<float> scores; /// The confidence for each classify result
public ResultType type;
public ClassifyResult() {
this.label_ids = new List<int>();
this.scores = new List<float>();
this.type = ResultType.CLASSIFY;
}

/// convert the result to string to print
public string ToString() {
string information;
information = "ClassifyResult(\nlabel_ids: ";
@@ -89,12 +95,14 @@ public class ClassifyResult {
}
}

/*! @brief Detection result structure for all the object detection models and instance segmentation models
 */
public class DetectionResult {
public List<float[]> boxes;
public List<float[]> boxes; /// Member variable which indicates the coordinates of all detected target boxes in a single image, each box is represented by 4 float values in order of xmin, ymin, xmax, ymax, i.e. the coordinates of the top left and bottom right corner.
public List<float> scores;
public List<float> scores; /// Member variable which indicates the confidence level of all targets detected in a single image
public List<int> label_ids;
public List<int> label_ids; /// Member variable which indicates all target categories detected in a single image
public List<Mask> masks;
public List<Mask> masks; /// Member variable which indicates all detected instance masks of a single image
public bool contain_masks;
public bool contain_masks; /// Member variable which indicates whether the detected result contains instance masks
public ResultType type;
public DetectionResult() {
this.boxes = new List<float[]>();
@@ -105,7 +113,7 @@ public class DetectionResult {
this.type = ResultType.DETECTION;
}

/// convert the result to string to print
public string ToString() {
string information;
if (!contain_masks) {
@@ -130,12 +138,14 @@ public class DetectionResult {

}

/*! @brief OCR result structure for all the OCR models.
 */
public class OCRResult {
public List<int[]> boxes;
public List<int[]> boxes; /// Member variable which indicates the coordinates of all detected target boxes in a single image. Each box is represented by 8 int values to indicate the 4 coordinates of the box, in the order of lower left, lower right, upper right, upper left.
public List<string> text;
public List<string> text; /// Member variable which indicates the content of the recognized text in multiple text boxes
public List<float> rec_scores;
public List<float> rec_scores; /// Member variable which indicates the confidence level of the recognized text.
public List<float> cls_scores;
public List<float> cls_scores; /// Member variable which indicates the confidence level of the classification result of the text box
public List<int> cls_labels;
public List<int> cls_labels; /// Member variable which indicates the directional category of the textbox
public ResultType type;

public OCRResult() {
@@ -146,6 +156,8 @@ public class OCRResult {
this.cls_labels = new List<int>();
this.type = ResultType.OCR;
}

/// convert the result to string to print
public string ToString() {
string no_result = "";
if (boxes.Count > 0) {
@@ -225,11 +237,13 @@ public class OCRRecognizerResult{
public float rec_score;
}

/*! @brief Segmentation result structure for all the segmentation models
 */
public class SegmentationResult{
public List<byte> label_map;
public List<byte> label_map; /// `label_map` stores the pixel-level category labels for input image.
public List<float> score_map;
public List<float> score_map; /// `score_map` stores the probability of the predicted label for each pixel of input image.
public List<long> shape;
public List<long> shape; /// The output shape, means [H, W]
public bool contain_score_map;
public bool contain_score_map; /// SegmentationResult whether containing score_map
public ResultType type;
public SegmentationResult() {
this.label_map = new List<byte>();
@@ -239,6 +253,7 @@ public class SegmentationResult{
this.type = ResultType.SEGMENTATION;
}

/// convert the result to string to print
public string ToString() {
string information;
information = "SegmentationResult Image masks 10 rows x 10 cols: \n";
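The annotated fields above spell out how DetectionResult is laid out: boxes, scores and label_ids are parallel lists, and masks is only meaningful when contain_masks is true. A short sketch of consuming such a result; the variable `result` is assumed to come from one of the detection models documented earlier:

// Hypothetical: result is a DetectionResult returned by some model.Predict(img).
for (int i = 0; i < result.boxes.Count; ++i) {
  float[] box = result.boxes[i];   // xmin, ymin, xmax, ymax
  System.Console.WriteLine(
      $"label={result.label_ids[i]} score={result.scores[i]:F3} " +
      $"box=({box[0]},{box[1]})-({box[2]},{box[3]})");
  if (result.contain_masks) {
    Mask mask = result.masks[i];   // per-instance mask: raw bytes in data, dims in shape
    System.Console.WriteLine($"mask shape: [{string.Join(",", mask.shape)}]");
  }
}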
@@ -23,8 +23,18 @@ namespace fastdeploy {
namespace vision {
namespace segmentation {

/*! @brief PaddleSeg serials model object used when to load a PaddleSeg model exported by PaddleSeg repository
 */
public class PaddleSegModel {

/** \brief Set path of model file and configuration file, and the configuration of runtime
 *
 * \param[in] model_file Path of model file, e.g unet/model.pdmodel
 * \param[in] params_file Path of parameter file, e.g unet/model.pdiparams, if the model format is ONNX, this parameter will be ignored
 * \param[in] config_file Path of configuration file for deployment, e.g unet/deploy.yml
 * \param[in] custom_option RuntimeOption for inference, the default will use cpu, and choose the backend defined in `valid_cpu_backends`
 * \param[in] model_format Model format of the loaded model, default is Paddle format
 */
public PaddleSegModel(string model_file, string params_file,
string config_file, RuntimeOption custom_option = null,
ModelFormat model_format = ModelFormat.PADDLE) {
@@ -40,11 +50,17 @@ public class PaddleSegModel {
FD_C_DestroyPaddleSegModelWrapper(fd_paddleseg_model_wrapper);
}

/// Get model's name
public string ModelName() {
return "PaddleSeg";
}

/** \brief DEPRECATED Predict the segmentation result for an input image
 *
 * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 *
 * \return SegmentationResult
 */
public SegmentationResult Predict(Mat img) {
FD_SegmentationResult fd_segmentation_result = new FD_SegmentationResult();
if(! FD_C_PaddleSegModelWrapperPredict(
@@ -59,6 +75,12 @@ public class PaddleSegModel {
return segmentation_result;
}

/** \brief Predict the segmentation results for a batch of input images
 *
 * \param[in] imgs, The input image list, each element comes from cv::imread()
 *
 * \return List<SegmentationResult>
 */
public List<SegmentationResult> BatchPredict(List<Mat> imgs){
FD_OneDimMat imgs_in = new FD_OneDimMat();
imgs_in.size = (nuint)imgs.Count;
@@ -86,6 +108,7 @@ public class PaddleSegModel {
return results_out;
}

/// Check whether model is initialized successfully
public bool Initialized() {
return FD_C_PaddleSegModelWrapperInitialized(fd_paddleseg_model_wrapper);
}
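PaddleSegModel follows the same Predict/BatchPredict/Initialized pattern as the detection classes. A minimal sketch, assuming a PaddleSeg export laid out as in the comments above (unet/model.pdmodel, unet/model.pdiparams, unet/deploy.yml) and the same OpenCvSharp assumption:

using OpenCvSharp;
using fastdeploy.vision.segmentation;   // assumed namespace of PaddleSegModel

class SegDemo {
  static void Main() {
    var model = new PaddleSegModel("unet/model.pdmodel",
                                   "unet/model.pdiparams",
                                   "unet/deploy.yml");
    if (!model.Initialized()) return;
    Mat img = Cv2.ImRead("street.jpg");
    var seg = model.Predict(img);   // SegmentationResult: label_map, optional score_map, shape [H, W]
    System.Console.WriteLine(seg.ToString());
  }
}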
@@ -24,6 +24,15 @@ namespace vision {

public class Visualize {

/** \brief Show the visualized results for detection models
 *
 * \param[in] im the input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 * \param[in] result the result produced by model
 * \param[in] score_threshold threshold for result scores, the bounding box will not be shown if the score is less than score_threshold
 * \param[in] line_size line size for bounding boxes
 * \param[in] font_size font size for text
 * \return Mat type stores the visualized results
 */
public static Mat VisDetection(Mat im, DetectionResult detection_result,
float score_threshold = 0.0f,
int line_size = 1, float font_size = 0.5f) {
@@ -35,7 +44,16 @@ public class Visualize {
return new Mat(result_ptr);
}

/** \brief Show the visualized results with custom labels for detection models
 *
 * \param[in] im the input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 * \param[in] result the result produced by model
 * \param[in] labels the visualized result will show the bounding box contain class label
 * \param[in] score_threshold threshold for result scores, the bounding box will not be shown if the score is less than score_threshold
 * \param[in] line_size line size for bounding boxes
 * \param[in] font_size font size for text
 * \return Mat type stores the visualized results
 */
public static Mat VisDetection(Mat im, DetectionResult detection_result,
string[] labels,
float score_threshold = 0.0f,
@@ -50,6 +68,12 @@ public class Visualize {
return new Mat(result_ptr);
}

/** \brief Show the visualized results for Ocr models
 *
 * \param[in] im the input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 * \param[in] result the result produced by model
 * \return Mat type stores the visualized results
 */
public static Mat VisOcr(Mat im, OCRResult ocr_result){
FD_OCRResult fd_ocr_result =
ConvertResult.ConvertOCRResultToCResult(ocr_result);
@@ -58,6 +82,13 @@ public class Visualize {
return new Mat(result_ptr);
}

/** \brief Show the visualized results for segmentation models
 *
 * \param[in] im the input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
 * \param[in] result the result produced by model
 * \param[in] weight transparent weight of visualized result image
 * \return Mat type stores the visualized results
 */
public static Mat VisSegmentation(Mat im,
SegmentationResult segmentation_result,
float weight = 0.5f){
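Each Visualize helper above returns a new Mat rather than drawing in place, so the usual pattern is to render a result and write it to disk. A sketch using the detection overload, under the same assumptions as the earlier examples (img and result are assumed to come from a detection model):

// Hypothetical continuation of the detection sketch above.
Mat vis = fastdeploy.vision.Visualize.VisDetection(img, result,
                                                   score_threshold: 0.5f,
                                                   line_size: 2,
                                                   font_size: 0.5f);
Cv2.ImWrite("vis_result.jpg", vis);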
2495	docs/api_docs/csharp/Doxyfile	Normal file
File diff suppressed because it is too large

7	docs/api_docs/csharp/main_page.md	Normal file
@@ -0,0 +1,7 @@
# FastDeploy C# API Summary

- Github: [https://github.com/PaddlePaddle/FastDeploy](https://github.com/PaddlePaddle/FastDeploy)
- [Installation](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/en/build_and_install)
- [Usage Documents](https://github.com/PaddlePaddle/FastDeploy/blob/develop/csharp)
- [Release Notes](https://github.com/PaddlePaddle/FastDeploy/releases)
- [Examples](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples)