diff --git a/fastdeploy/vision/classification/contrib/resnet.h b/fastdeploy/vision/classification/contrib/resnet.h
index f5db8b1be..fa557c715 100644
--- a/fastdeploy/vision/classification/contrib/resnet.h
+++ b/fastdeploy/vision/classification/contrib/resnet.h
@@ -50,12 +50,16 @@ class FASTDEPLOY_DECL ResNet : public FastDeployModel {
 */
 virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {224, 224}
 */
 std::vector<int> size;
- /// Mean parameters for normalize, size should be the the same as channels
+ /*! @brief
+ Mean parameters for normalize, size should be the same as channels, default mean_vals = {0.485f, 0.456f, 0.406f}
+ */
 std::vector<float> mean_vals;
- /// Std parameters for normalize, size should be the the same as channels
+ /*! @brief
+ Std parameters for normalize, size should be the same as channels, default std_vals = {0.229f, 0.224f, 0.225f}
+ */
 std::vector<float> std_vals;
diff --git a/fastdeploy/vision/detection/contrib/nanodet_plus.h b/fastdeploy/vision/detection/contrib/nanodet_plus.h
index 9923e4d37..45ed40fe7 100644
--- a/fastdeploy/vision/detection/contrib/nanodet_plus.h
+++ b/fastdeploy/vision/detection/contrib/nanodet_plus.h
@@ -54,7 +54,7 @@ class FASTDEPLOY_DECL NanoDetPlus : public FastDeployModel {
 float nms_iou_threshold = 0.5f);
 /*! @brief
- Argument for image preprocessing step, tuple of input size (width, height), e.g (320, 320)
+ Argument for image preprocessing step, tuple of input size (width, height), default (320, 320)
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/scaledyolov4.h b/fastdeploy/vision/detection/contrib/scaledyolov4.h
index a0108cfa5..c7b5fb57d 100644
--- a/fastdeploy/vision/detection/contrib/scaledyolov4.h
+++ b/fastdeploy/vision/detection/contrib/scaledyolov4.h
@@ -51,7 +51,7 @@ class FASTDEPLOY_DECL ScaledYOLOv4 : public FastDeployModel {
 float nms_iou_threshold = 0.5);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolor.h b/fastdeploy/vision/detection/contrib/yolor.h
index 0f8e23537..e98da3ee1 100644
--- a/fastdeploy/vision/detection/contrib/yolor.h
+++ b/fastdeploy/vision/detection/contrib/yolor.h
@@ -39,7 +39,7 @@ class FASTDEPLOY_DECL YOLOR : public FastDeployModel {
 virtual std::string ModelName() const { return "YOLOR"; }
 /** \brief Predict the detection result for an input image
  *
- * \param[in] im The input image data, comes from cv::imread(), is a 3-D array with layout HWC, BGR format
+ * \param[in] im The input image data, comes from cv::imread()
 * \param[in] result The output detection result will be writen to this structure
 * \param[in] conf_threshold confidence threashold for postprocessing, default is 0.25
 * \param[in] nms_iou_threshold iou threashold for NMS, default is 0.5
@@ -50,7 +50,7 @@ class FASTDEPLOY_DECL YOLOR : public FastDeployModel {
 float nms_iou_threshold = 0.5);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolov5.h b/fastdeploy/vision/detection/contrib/yolov5.h
index 15d98e6f2..7be906a9d 100644
--- a/fastdeploy/vision/detection/contrib/yolov5.h
+++ b/fastdeploy/vision/detection/contrib/yolov5.h
@@ -78,7 +78,7 @@ class FASTDEPLOY_DECL YOLOv5 : public FastDeployModel {
 float max_wh = 7680.0);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size_;
 // padding value, size should be the same as channels
@@ -96,7 +96,9 @@ class FASTDEPLOY_DECL YOLOv5 : public FastDeployModel {
 int stride_;
 // for offseting the boxes by classes when using NMS
 float max_wh_;
- /// for different strategies to get boxes when postprocessing
+ /*! @brief
+ Argument for image postprocessing step, whether to use the multi-label strategy to get boxes, default true
+ */
 bool multi_label_;

 private:
diff --git a/fastdeploy/vision/detection/contrib/yolov5lite.h b/fastdeploy/vision/detection/contrib/yolov5lite.h
index 8bbcf331a..edaa18a63 100644
--- a/fastdeploy/vision/detection/contrib/yolov5lite.h
+++ b/fastdeploy/vision/detection/contrib/yolov5lite.h
@@ -54,7 +54,7 @@ class FASTDEPLOY_DECL YOLOv5Lite : public FastDeployModel {
 void UseCudaPreprocessing(int max_img_size = 3840 * 2160);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
@@ -84,7 +84,7 @@ class FASTDEPLOY_DECL YOLOv5Lite : public FastDeployModel {
 decode module. Please set it 'true' manually if the model file
 was exported with decode module.
 false : ONNX files without decode module.
- true : ONNX file with decode module.
+ true : ONNX file with decode module. Default false.
 */
 bool is_decode_exported;
diff --git a/fastdeploy/vision/detection/contrib/yolov6.h b/fastdeploy/vision/detection/contrib/yolov6.h
index 1e0af6fd3..bb6c988cc 100644
--- a/fastdeploy/vision/detection/contrib/yolov6.h
+++ b/fastdeploy/vision/detection/contrib/yolov6.h
@@ -57,7 +57,7 @@ class FASTDEPLOY_DECL YOLOv6 : public FastDeployModel {
 void UseCudaPreprocessing(int max_img_size = 3840 * 2160);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
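For the preprocessing fields documented in these detection headers, a minimal usage sketch in Python; the same attributes are exposed through the Python bindings changed later in this diff, and the model and image paths below are placeholders:

import cv2
import fastdeploy as fd

# "yolov5s.onnx" and "test.jpg" are hypothetical local files.
model = fd.vision.detection.YOLOv5("yolov5s.onnx")
print(model.size)        # -> [640, 640], the documented default
model.size = [416, 416]  # override the resize target before running prediction
im = cv2.imread("test.jpg")
result = model.predict(im)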
diff --git a/fastdeploy/vision/detection/contrib/yolov7.h b/fastdeploy/vision/detection/contrib/yolov7.h
index 2eb038f71..cdf56969e 100644
--- a/fastdeploy/vision/detection/contrib/yolov7.h
+++ b/fastdeploy/vision/detection/contrib/yolov7.h
@@ -54,7 +54,7 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
 void UseCudaPreprocessing(int max_img_size = 3840 * 2160);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolov7end2end_ort.h b/fastdeploy/vision/detection/contrib/yolov7end2end_ort.h
index 9434d69ed..b68589579 100644
--- a/fastdeploy/vision/detection/contrib/yolov7end2end_ort.h
+++ b/fastdeploy/vision/detection/contrib/yolov7end2end_ort.h
@@ -48,7 +48,7 @@ class FASTDEPLOY_DECL YOLOv7End2EndORT : public FastDeployModel {
 float conf_threshold = 0.25);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolov7end2end_trt.h b/fastdeploy/vision/detection/contrib/yolov7end2end_trt.h
index f6ce6e943..10b95d02a 100644
--- a/fastdeploy/vision/detection/contrib/yolov7end2end_trt.h
+++ b/fastdeploy/vision/detection/contrib/yolov7end2end_trt.h
@@ -53,7 +53,7 @@ class FASTDEPLOY_DECL YOLOv7End2EndTRT : public FastDeployModel {
 void UseCudaPreprocessing(int max_img_size = 3840 * 2160);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
diff --git a/fastdeploy/vision/detection/contrib/yolox.h b/fastdeploy/vision/detection/contrib/yolox.h
index c040c28a8..8ad029b95 100644
--- a/fastdeploy/vision/detection/contrib/yolox.h
+++ b/fastdeploy/vision/detection/contrib/yolox.h
@@ -52,7 +52,7 @@ class FASTDEPLOY_DECL YOLOX : public FastDeployModel {
 float nms_iou_threshold = 0.5);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
@@ -61,7 +61,7 @@ class FASTDEPLOY_DECL YOLOX : public FastDeployModel {
 whether the model_file was exported with decode module.
 The official YOLOX/tools/export_onnx.py script will export ONNX
 file without decode module. Please set it 'true' manually if the model file
- was exported with decode module.
+ was exported with decode module. Default false.
 */
 bool is_decode_exported;
 // downsample strides for YOLOX to generate anchors,
diff --git a/fastdeploy/vision/facedet/contrib/retinaface.h b/fastdeploy/vision/facedet/contrib/retinaface.h
index e7011df89..c05deedff 100644
--- a/fastdeploy/vision/facedet/contrib/retinaface.h
+++ b/fastdeploy/vision/facedet/contrib/retinaface.h
@@ -65,7 +65,7 @@ class FASTDEPLOY_DECL RetinaFace : public FastDeployModel {
 */
 std::vector<int> downsample_strides;
 /*! @brief
- Argument for image postprocessing step, min sizes, width and height for each anchor
+ Argument for image postprocessing step, min sizes, width and height for each anchor, default min_sizes = {{16, 32}, {64, 128}, {256, 512}}
 */
 std::vector<std::vector<int>> min_sizes;
 /*! @brief
diff --git a/fastdeploy/vision/facedet/contrib/scrfd.h b/fastdeploy/vision/facedet/contrib/scrfd.h
index 58dd8807b..38da3af42 100644
--- a/fastdeploy/vision/facedet/contrib/scrfd.h
+++ b/fastdeploy/vision/facedet/contrib/scrfd.h
@@ -77,14 +77,16 @@ class FASTDEPLOY_DECL SCRFD : public FastDeployModel {
 */
 int landmarks_per_face;
 /*! @brief
- Argument for image postprocessing step, the outputs of onnx file with key points features or not
+ Argument for image postprocessing step, whether the outputs of the onnx file contain key points features, default true
 */
 bool use_kps;
 /*! @brief
- Argument for image postprocessing step, the upperbond number of boxes processed by nms
+ Argument for image postprocessing step, the upper bound number of boxes processed by nms, default 30000
 */
 int max_nms;
- /// Argument for image postprocessing step, anchor number of each stride
+ /*! @brief
+ Argument for image postprocessing step, anchor number of each stride, default 2
+ */
 unsigned int num_anchors;

 private:
diff --git a/fastdeploy/vision/facedet/contrib/yolov5face.h b/fastdeploy/vision/facedet/contrib/yolov5face.h
index 10479052d..199ed35df 100644
--- a/fastdeploy/vision/facedet/contrib/yolov5face.h
+++ b/fastdeploy/vision/facedet/contrib/yolov5face.h
@@ -51,7 +51,7 @@ class FASTDEPLOY_DECL YOLOv5Face : public FastDeployModel {
 float nms_iou_threshold = 0.5);
 /*! @brief
- Argument for image preprocessing step, tuple of (width, height), decide the target size after resize
+ Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default size = {640, 640}
 */
 std::vector<int> size;
 // padding value, size should be the same as channels
@@ -72,7 +72,7 @@ class FASTDEPLOY_DECL YOLOv5Face : public FastDeployModel {
 /*! @brief
 Argument for image postprocessing step,
 setup the number of landmarks for per face (if have), default 5 in
 official yolov5face note that, the outupt tensor's shape must be:
- (1,n,4+1+2*landmarks_per_face+1=box+obj+landmarks+cls)
+ (1,n,4+1+2*landmarks_per_face+1=box+obj+landmarks+cls), default 5
 */
 int landmarks_per_face;
diff --git a/fastdeploy/vision/faceid/contrib/insightface_rec.h b/fastdeploy/vision/faceid/contrib/insightface_rec.h
index 12f882d7a..2e66d3d71 100644
--- a/fastdeploy/vision/faceid/contrib/insightface_rec.h
+++ b/fastdeploy/vision/faceid/contrib/insightface_rec.h
@@ -44,9 +44,13 @@ class FASTDEPLOY_DECL InsightFaceRecognitionModel : public FastDeployModel {
 Argument for image preprocessing step, tuple of (width, height), decide the target size after resize, default (112, 112)
 */
 std::vector<int> size;
- /// Argument for image preprocessing step, alpha values for normalization
+ /*! @brief
+ Argument for image preprocessing step, alpha values for normalization, default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f}
+ */
 std::vector<float> alpha;
- /// Argument for image preprocessing step, beta values for normalization
+ /*! @brief
+ Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f}
+ */
 std::vector<float> beta;
 /*! @brief
 Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
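The alpha/beta pairs documented above describe a per-channel linear map applied during preprocessing; a small illustrative sketch in plain NumPy (not the FastDeploy preprocessor itself) of what the default values do:

import numpy as np

# Assumed interpretation: each channel is transformed as out = pixel * alpha + beta.
alpha = np.array([1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5])
beta = np.array([-1.0, -1.0, -1.0])

pixels = np.array([0.0, 127.5, 255.0])  # example channel values
print(pixels * alpha + beta)            # -> [-1.  0.  1.], i.e. [0, 255] maps to [-1, 1]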
diff --git a/fastdeploy/vision/matting/contrib/modnet.h b/fastdeploy/vision/matting/contrib/modnet.h
index 75148b60a..09810a62e 100644
--- a/fastdeploy/vision/matting/contrib/modnet.h
+++ b/fastdeploy/vision/matting/contrib/modnet.h
@@ -44,11 +44,11 @@ class FASTDEPLOY_DECL MODNet : public FastDeployModel {
 */
 std::vector<int> size;
 /*! @brief
- Argument for image preprocessing step, parameters for normalization, size should be the the same as channels
+ Argument for image preprocessing step, parameters for normalization, size should be the same as channels, default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f}
 */
 std::vector<float> alpha;
 /*! @brief
- Argument for image preprocessing step, parameters for normalization, size should be the the same as channels
+ Argument for image preprocessing step, parameters for normalization, size should be the same as channels, default beta = {-1.f, -1.f, -1.f}
 */
 std::vector<float> beta;
 /*! @brief
diff --git a/python/fastdeploy/vision/classification/contrib/resnet.py b/python/fastdeploy/vision/classification/contrib/resnet.py
index 52f45933b..46383c159 100644
--- a/python/fastdeploy/vision/classification/contrib/resnet.py
+++ b/python/fastdeploy/vision/classification/contrib/resnet.py
@@ -56,21 +56,21 @@ class ResNet(FastDeployModel):
 @property
 def size(self):
 """
- Returns the preprocess image size
+ Returns the preprocess image size, default size = [224, 224]
 """
 return self._model.size

 @property
 def mean_vals(self):
 """
- Returns the mean value of normlization
+ Returns the mean value of normalization, default mean_vals = [0.485, 0.456, 0.406]
 """
 return self._model.mean_vals

 @property
 def std_vals(self):
 """
- Returns the std value of normlization
+ Returns the std value of normalization, default std_vals = [0.229, 0.224, 0.225]
 """
 return self._model.std_vals
diff --git a/python/fastdeploy/vision/classification/contrib/yolov5cls.py b/python/fastdeploy/vision/classification/contrib/yolov5cls.py
index 8a4744e56..5f401fa1d 100644
--- a/python/fastdeploy/vision/classification/contrib/yolov5cls.py
+++ b/python/fastdeploy/vision/classification/contrib/yolov5cls.py
@@ -52,7 +52,7 @@ class YOLOv5Cls(FastDeployModel):
 @property
 def size(self):
 """
- Returns the preprocess image size
+ Returns the preprocess image size, default is (224, 224)
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/nanodet_plus.py b/python/fastdeploy/vision/detection/contrib/nanodet_plus.py
index b5a83fe2b..30dfd1257 100644
--- a/python/fastdeploy/vision/detection/contrib/nanodet_plus.py
+++ b/python/fastdeploy/vision/detection/contrib/nanodet_plus.py
@@ -56,7 +56,7 @@ class NanoDetPlus(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (320, 320)
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/scaled_yolov4.py b/python/fastdeploy/vision/detection/contrib/scaled_yolov4.py
index 1e46ba1a1..f9466fe80 100644
--- a/python/fastdeploy/vision/detection/contrib/scaled_yolov4.py
+++ b/python/fastdeploy/vision/detection/contrib/scaled_yolov4.py
@@ -56,7 +56,8 @@ class ScaledYOLOv4(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolor.py b/python/fastdeploy/vision/detection/contrib/yolor.py
index 6326630e1..1e2454419 100644
--- a/python/fastdeploy/vision/detection/contrib/yolor.py
+++ b/python/fastdeploy/vision/detection/contrib/yolor.py
@@ -56,7 +56,7 @@ class YOLOR(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolov5.py b/python/fastdeploy/vision/detection/contrib/yolov5.py
index a5068df5e..5ecef307b 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov5.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov5.py
@@ -81,7 +81,7 @@ class YOLOv5(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
 """
 return self._model.size
@@ -117,6 +117,9 @@ class YOLOv5(FastDeployModel):
 @property
 def multi_label(self):
+ """
+ Argument for image postprocessing step, whether to use the multi-label strategy to get boxes, default True
+ """
 return self._model.multi_label

 @size.setter
diff --git a/python/fastdeploy/vision/detection/contrib/yolov5lite.py b/python/fastdeploy/vision/detection/contrib/yolov5lite.py
index 606dc98c4..c04a348a6 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov5lite.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov5lite.py
@@ -56,7 +56,7 @@ class YOLOv5Lite(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
 """
 return self._model.size
@@ -96,7 +96,8 @@ class YOLOv5Lite(FastDeployModel):
 whether the model_file was exported with decode module.
 The official YOLOv5Lite/export.py script will export ONNX file without decode module.
 Please set it 'true' manually if the model file was exported with decode module.
- false : ONNX files without decode module. true : ONNX file with decode module.
+ False : ONNX files without decode module. True : ONNX file with decode module.
+ default False
 """
 return self._model.is_decode_exported
diff --git a/python/fastdeploy/vision/detection/contrib/yolov6.py b/python/fastdeploy/vision/detection/contrib/yolov6.py
index 9f9533114..61a9c0728 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov6.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov6.py
@@ -56,7 +56,7 @@ class YOLOv6(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolov7.py b/python/fastdeploy/vision/detection/contrib/yolov7.py
index 53ef24a10..033450485 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov7.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov7.py
@@ -56,7 +56,7 @@ class YOLOv7(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolov7end2end_ort.py b/python/fastdeploy/vision/detection/contrib/yolov7end2end_ort.py
index e16ec6a90..47a07feff 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov7end2end_ort.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov7end2end_ort.py
@@ -54,7 +54,7 @@ class YOLOv7End2EndORT(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolov7end2end_trt.py b/python/fastdeploy/vision/detection/contrib/yolov7end2end_trt.py
index 4a2621b44..7059c9d1d 100644
--- a/python/fastdeploy/vision/detection/contrib/yolov7end2end_trt.py
+++ b/python/fastdeploy/vision/detection/contrib/yolov7end2end_trt.py
@@ -54,7 +54,7 @@ class YOLOv7End2EndTRT(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/detection/contrib/yolox.py b/python/fastdeploy/vision/detection/contrib/yolox.py
index c121cd802..ae042b150 100644
--- a/python/fastdeploy/vision/detection/contrib/yolox.py
+++ b/python/fastdeploy/vision/detection/contrib/yolox.py
@@ -56,7 +56,7 @@ class YOLOX(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
 """
 return self._model.size
@@ -71,6 +71,7 @@ class YOLOX(FastDeployModel):
 whether the model_file was exported with decode module.
 The official YOLOX/tools/export_onnx.py script will export ONNX
 file without decode module. Please set it 'true' manually if the model file
 was exported with decode module.
+ Default False.
 """
 return self._model.is_decode_exported
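A hedged sketch of how the is_decode_exported flag above might be toggled from Python; the model path is hypothetical, and it assumes the property is writable in the same way as size:

import fastdeploy as fd

# "yolox_s.onnx" is a placeholder path to a YOLOX export.
model = fd.vision.detection.YOLOX("yolox_s.onnx")
print(model.is_decode_exported)  # False by default: the official export has no decode module
model.is_decode_exported = True  # only if your ONNX file was exported with the decode module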
""" return self._model.is_decode_exported diff --git a/python/fastdeploy/vision/facedet/contrib/retinaface.py b/python/fastdeploy/vision/facedet/contrib/retinaface.py index 9afa5055c..895aeebf2 100644 --- a/python/fastdeploy/vision/facedet/contrib/retinaface.py +++ b/python/fastdeploy/vision/facedet/contrib/retinaface.py @@ -56,7 +56,7 @@ class RetinaFace(FastDeployModel): @property def size(self): """ - Argument for image preprocessing step, the preprocess image size, tuple of (width, height) + Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (640, 640) """ return self._model.size @@ -77,7 +77,7 @@ class RetinaFace(FastDeployModel): @property def min_sizes(self): """ - Argument for image postprocessing step, min sizes, width and height for each anchor + Argument for image postprocessing step, min sizes, width and height for each anchor, default min_sizes = [[16, 32], [64, 128], [256, 512]] """ return self._model.min_sizes diff --git a/python/fastdeploy/vision/facedet/contrib/scrfd.py b/python/fastdeploy/vision/facedet/contrib/scrfd.py index fa8e1cfd7..96171088c 100644 --- a/python/fastdeploy/vision/facedet/contrib/scrfd.py +++ b/python/fastdeploy/vision/facedet/contrib/scrfd.py @@ -56,7 +56,7 @@ class SCRFD(FastDeployModel): @property def size(self): """ - Argument for image preprocessing step, the preprocess image size, tuple of (width, height) + Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (640, 640) """ return self._model.size @@ -87,22 +87,40 @@ class SCRFD(FastDeployModel): @property def downsample_strides(self): + """ + Argument for image postprocessing step, + downsample strides (namely, steps) for SCRFD to generate anchors, + will take (8,16,32) as default values + """ return self._model.downsample_strides @property def landmarks_per_face(self): + """ + Argument for image postprocessing step, landmarks_per_face, default 5 in SCRFD + """ return self._model.landmarks_per_face @property def use_kps(self): + """ + Argument for image postprocessing step, + the outputs of onnx file with key points features or not, default true + """ return self._model.use_kps @property def max_nms(self): + """ + Argument for image postprocessing step, the upperbond number of boxes processed by nms, default 30000 + """ return self._model.max_nms @property def num_anchors(self): + """ + Argument for image postprocessing step, anchor number of each stride, default 2 + """ return self._model.num_anchors @size.setter diff --git a/python/fastdeploy/vision/facedet/contrib/ultraface.py b/python/fastdeploy/vision/facedet/contrib/ultraface.py index 8d84a6d86..d4a007c17 100644 --- a/python/fastdeploy/vision/facedet/contrib/ultraface.py +++ b/python/fastdeploy/vision/facedet/contrib/ultraface.py @@ -56,7 +56,7 @@ class UltraFace(FastDeployModel): @property def size(self): """ - Argument for image preprocessing step, the preprocess image size, tuple of (width, height) + Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (320, 240) """ return self._model.size diff --git a/python/fastdeploy/vision/facedet/contrib/yolov5face.py b/python/fastdeploy/vision/facedet/contrib/yolov5face.py index be09e840a..50acb2012 100644 --- a/python/fastdeploy/vision/facedet/contrib/yolov5face.py +++ b/python/fastdeploy/vision/facedet/contrib/yolov5face.py @@ -56,7 +56,7 @@ class YOLOv5Face(FastDeployModel): @property def size(self): """ - Argument for image preprocessing step, the 
diff --git a/python/fastdeploy/vision/facedet/contrib/ultraface.py b/python/fastdeploy/vision/facedet/contrib/ultraface.py
index 8d84a6d86..d4a007c17 100644
--- a/python/fastdeploy/vision/facedet/contrib/ultraface.py
+++ b/python/fastdeploy/vision/facedet/contrib/ultraface.py
@@ -56,7 +56,7 @@ class UltraFace(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (320, 240)
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/facedet/contrib/yolov5face.py b/python/fastdeploy/vision/facedet/contrib/yolov5face.py
index be09e840a..50acb2012 100644
--- a/python/fastdeploy/vision/facedet/contrib/yolov5face.py
+++ b/python/fastdeploy/vision/facedet/contrib/yolov5face.py
@@ -56,7 +56,7 @@ class YOLOv5Face(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
 """
 return self._model.size
diff --git a/python/fastdeploy/vision/faceid/contrib/adaface.py b/python/fastdeploy/vision/faceid/contrib/adaface.py
index c0b6d9b1d..140cdb504 100644
--- a/python/fastdeploy/vision/faceid/contrib/adaface.py
+++ b/python/fastdeploy/vision/faceid/contrib/adaface.py
@@ -52,35 +52,36 @@ class AdaFace(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
 """
 return self._model.size

 @property
 def alpha(self):
 """
- Argument for image preprocessing step, alpha value for normalization
+ Argument for image preprocessing step, alpha values for normalization, default alpha = [1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5]
 """
 return self._model.alpha

 @property
 def beta(self):
 """
- Argument for image preprocessing step, beta value for normalization
+ Argument for image preprocessing step, beta values for normalization, default beta = [-1.0, -1.0, -1.0]
+
 """
 return self._model.beta

 @property
 def swap_rb(self):
 """
- Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
+ Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
 """
 return self._model.swap_rb

 @property
 def l2_normalize(self):
 """
- Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default;
+ Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False
 """
 return self._model.l2_normalize
diff --git a/python/fastdeploy/vision/faceid/contrib/arcface.py b/python/fastdeploy/vision/faceid/contrib/arcface.py
index be0a09d95..f4341b170 100644
--- a/python/fastdeploy/vision/faceid/contrib/arcface.py
+++ b/python/fastdeploy/vision/faceid/contrib/arcface.py
@@ -54,35 +54,35 @@ class ArcFace(FastDeployModel):
 @property
 def size(self):
 """
- Argument for image preprocessing step, the preprocess image size, tuple of (width, height)
+ Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112)
 """
 return self._model.size

 @property
 def alpha(self):
 """
- Argument for image preprocessing step, alpha value for normalization
+ Argument for image preprocessing step, alpha values for normalization, default alpha = [1.0 / 127.5, 1.0 / 127.5, 1.0 / 127.5]
 """
 return self._model.alpha

 @property
 def beta(self):
 """
- Argument for image preprocessing step, beta value for normalization
+ Argument for image preprocessing step, beta values for normalization, default beta = [-1.0, -1.0, -1.0]
 """
 return self._model.beta

 @property
 def swap_rb(self):
 """
- Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true.
+ Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True.
""" return self._model.swap_rb @property def l2_normalize(self): """ - Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default; + Argument for image preprocessing step, whether to apply l2 normalize to embedding values, default False; """ return self._model.l2_normalize diff --git a/python/fastdeploy/vision/faceid/contrib/cosface.py b/python/fastdeploy/vision/faceid/contrib/cosface.py index 982f3c481..61d1f2cb9 100644 --- a/python/fastdeploy/vision/faceid/contrib/cosface.py +++ b/python/fastdeploy/vision/faceid/contrib/cosface.py @@ -53,28 +53,28 @@ class CosFace(FastDeployModel): @property def size(self): """ - Argument for image preprocessing step, the preprocess image size, tuple of (width, height) + Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112) """ return self._model.size @property def alpha(self): """ - Argument for image preprocessing step, alpha value for normalization + Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f] """ return self._model.alpha @property def beta(self): """ - Argument for image preprocessing step, beta value for normalization + Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f} """ return self._model.beta @property def swap_rb(self): """ - Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true. + Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True. """ return self._model.swap_rb diff --git a/python/fastdeploy/vision/faceid/contrib/insightface_rec.py b/python/fastdeploy/vision/faceid/contrib/insightface_rec.py index 2793b88f4..ea4aed81a 100644 --- a/python/fastdeploy/vision/faceid/contrib/insightface_rec.py +++ b/python/fastdeploy/vision/faceid/contrib/insightface_rec.py @@ -53,28 +53,28 @@ class InsightFaceRecognitionModel(FastDeployModel): @property def size(self): """ - Argument for image preprocessing step, the preprocess image size, tuple of (width, height) + Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112) """ return self._model.size @property def alpha(self): """ - Argument for image preprocessing step, alpha value for normalization + Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f] """ return self._model.alpha @property def beta(self): """ - Argument for image preprocessing step, beta value for normalization + Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f} """ return self._model.beta @property def swap_rb(self): """ - Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true. + Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True. 
""" return self._model.swap_rb diff --git a/python/fastdeploy/vision/faceid/contrib/partial_fc.py b/python/fastdeploy/vision/faceid/contrib/partial_fc.py index de31b0a27..0798af56e 100644 --- a/python/fastdeploy/vision/faceid/contrib/partial_fc.py +++ b/python/fastdeploy/vision/faceid/contrib/partial_fc.py @@ -53,28 +53,28 @@ class PartialFC(FastDeployModel): @property def size(self): """ - Argument for image preprocessing step, the preprocess image size, tuple of (width, height) + Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112) """ return self._model.size @property def alpha(self): """ - Argument for image preprocessing step, alpha value for normalization + Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f] """ return self._model.alpha @property def beta(self): """ - Argument for image preprocessing step, beta value for normalization + Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f} """ return self._model.beta @property def swap_rb(self): """ - Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true. + Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True. """ return self._model.swap_rb diff --git a/python/fastdeploy/vision/faceid/contrib/vpl.py b/python/fastdeploy/vision/faceid/contrib/vpl.py index 3a8df5f16..5db5b4e67 100644 --- a/python/fastdeploy/vision/faceid/contrib/vpl.py +++ b/python/fastdeploy/vision/faceid/contrib/vpl.py @@ -53,28 +53,28 @@ class VPL(FastDeployModel): @property def size(self): """ - Argument for image preprocessing step, the preprocess image size, tuple of (width, height) + Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default (112, 112) """ return self._model.size @property def alpha(self): """ - Argument for image preprocessing step, alpha value for normalization + Argument for image preprocessing step, alpha value for normalization, default alpha = [1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f] """ return self._model.alpha @property def beta(self): """ - Argument for image preprocessing step, beta value for normalization + Argument for image preprocessing step, beta values for normalization, default beta = {-1.f, -1.f, -1.f} """ return self._model.beta @property def swap_rb(self): """ - Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true. + Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True. 
""" return self._model.swap_rb diff --git a/python/fastdeploy/vision/matting/contrib/modnet.py b/python/fastdeploy/vision/matting/contrib/modnet.py index 33fb5a025..da8f6c1d0 100644 --- a/python/fastdeploy/vision/matting/contrib/modnet.py +++ b/python/fastdeploy/vision/matting/contrib/modnet.py @@ -53,28 +53,28 @@ class MODNet(FastDeployModel): @property def size(self): """ - Argument for image preprocessing step, the preprocess image size, tuple of (width, height) + Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [256,256] """ return self._model.size @property def alpha(self): """ - Argument for image preprocessing step, alpha value for normalization + Argument for image preprocessing step, alpha value for normalization, default alpha = {1.f / 127.5f, 1.f / 127.5f, 1.f / 127.5f} """ return self._model.alpha @property def beta(self): """ - Argument for image preprocessing step, beta value for normalization + Argument for image preprocessing step, beta value for normalization, default beta = {-1.f, -1.f, -1.f} """ return self._model.beta @property def swap_rb(self): """ - Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default true. + Argument for image preprocessing step, whether to swap the B and R channel, such as BGR->RGB, default True. """ return self._model.swap_rb