Roadmap: face detector yn face recognizer sf (#1232)

objdetect: FaceDetectorYN + FaceRecognizerSF
This commit is contained in:
diegohce
2024-10-02 15:33:11 -03:00
committed by GitHub
parent 2a83b2f91e
commit 47b74f755d
8 changed files with 731 additions and 3 deletions

View File

@@ -44,6 +44,8 @@ jobs:
run: |
mkdir -p ${GITHUB_WORKSPACE}/testdata
curl -sL https://github.com/onnx/models/raw/main/validated/vision/classification/inception_and_googlenet/googlenet/model/googlenet-9.onnx > ${GITHUB_WORKSPACE}/testdata/googlenet-9.onnx
curl -sL https://github.com/opencv/opencv_zoo/raw/refs/heads/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx > ${GITHUB_WORKSPACE}/testdata/face_recognition_sface_2021dec.onnx
curl -sL https://github.com/opencv/opencv_zoo/raw/refs/heads/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx > ${GITHUB_WORKSPACE}/testdata/face_detection_yunet_2023mar.onnx
- name: Run main tests
run: xvfb-run -a --error-file /var/log/xvfb_error.log --server-args="-screen 0 1024x768x24 +extension RANDR" go test -v -coverprofile=/tmp/coverage.out -count=1 -tags matprofile .
env:

View File

@@ -53,6 +53,8 @@ jobs:
run: |
mkdir -p ${GITHUB_WORKSPACE}/testdata
curl -sL https://github.com/onnx/models/raw/main/validated/vision/classification/inception_and_googlenet/googlenet/model/googlenet-9.onnx > ${GITHUB_WORKSPACE}/testdata/googlenet-9.onnx
curl -sL https://github.com/opencv/opencv_zoo/raw/refs/heads/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx > ${GITHUB_WORKSPACE}/testdata/face_recognition_sface_2021dec.onnx
curl -sL https://github.com/opencv/opencv_zoo/raw/refs/heads/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx > ${GITHUB_WORKSPACE}/testdata/face_detection_yunet_2023mar.onnx
- name: Run main tests
run: go test -v -tags matprofile .
env:

View File

@@ -90,6 +90,8 @@ jobs:
- name: Install ONNX test model
run: |
curl -sL https://github.com/onnx/models/raw/main/validated/vision/classification/inception_and_googlenet/googlenet/model/googlenet-9.onnx > ./testdata/googlenet-9.onnx
curl -sL https://github.com/opencv/opencv_zoo/raw/refs/heads/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx > ./testdata/face_recognition_sface_2021dec.onnx
curl -sL https://github.com/opencv/opencv_zoo/raw/refs/heads/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx > ./testdata/face_detection_yunet_2023mar.onnx
- name: Install GOTURN test model
shell: bash
run: |

View File

@@ -164,9 +164,9 @@ Your pull requests will be greatly appreciated!
- [ ] [detectBoard](https://docs.opencv.org/4.x/d9/df5/classcv_1_1aruco_1_1CharucoDetector.html#aacbea601612a3a0feaa45ebb7fb255fd)
- [ ] [detectDiamonds](https://docs.opencv.org/4.x/d9/df5/classcv_1_1aruco_1_1CharucoDetector.html#a50342803f68deb1e6b0b79f61d4b1a73)
- [ ] Face Detection
- [ ] [FaceDetectorYN](https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html)
- [ ] [FaceRecognizerSF](https://docs.opencv.org/4.x/da/d09/classcv_1_1FaceRecognizerSF.html)
- [X] Face Detection
- [X] [FaceDetectorYN](https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html)
- [X] [FaceRecognizerSF](https://docs.opencv.org/4.x/da/d09/classcv_1_1FaceRecognizerSF.html)
- [X] **dnn. Deep Neural Network module**
- [ ] ml. Machine Learning

View File

@@ -175,4 +175,140 @@ bool QRCodeDetector_DetectAndDecodeMulti(QRCodeDetector qr, Mat input, CStrings*
decoded->length = decodedCodes.size();
decoded->strs = strs;
return res;
}
// FaceDetectorYN_Create loads a YuNet face detector from model/config files.
// Returns a heap-allocated cv::Ptr wrapper owned by the caller; release it
// with FaceDetectorYN_Close.
FaceDetectorYN FaceDetectorYN_Create(const char* model, const char* config, Size size) {
    return new cv::Ptr<cv::FaceDetectorYN>(
        cv::FaceDetectorYN::create(cv::String(model), cv::String(config),
                                   cv::Size(size.width, size.height)));
}
// FaceDetectorYN_Create_WithParams loads a YuNet face detector from files with
// explicit score/NMS thresholds, top-k, and DNN backend/target ids.
// Returns a heap-allocated cv::Ptr wrapper; release with FaceDetectorYN_Close.
FaceDetectorYN FaceDetectorYN_Create_WithParams(const char* model, const char* config, Size size, float score_threshold, float nms_threshold, int top_k, int backend_id, int target_id) {
    return new cv::Ptr<cv::FaceDetectorYN>(
        cv::FaceDetectorYN::create(cv::String(model), cv::String(config),
                                   cv::Size(size.width, size.height),
                                   score_threshold, nms_threshold, top_k,
                                   backend_id, target_id));
}
// FaceDetectorYN_Create_FromBytes loads a YuNet face detector from in-memory
// buffers instead of files.
//
// framework: name of the originating framework (e.g. "onnx")
// bufferModel/model_size: serialized model bytes
// bufferConfig/config_size: serialized config bytes (size may be 0)
//
// Returns a heap-allocated cv::Ptr wrapper; release with FaceDetectorYN_Close.
FaceDetectorYN FaceDetectorYN_Create_FromBytes(const char* framework, void* bufferModel, int model_size, void* bufferConfig, int config_size, Size size) {
    const uchar* model_bytes = static_cast<const uchar*>(bufferModel);
    const uchar* config_bytes = static_cast<const uchar*>(bufferConfig);
    // Range-construct the vectors instead of per-byte push_back loops:
    // one allocation each, no incremental reallocation.
    std::vector<uchar> model_buf(model_bytes, model_bytes + model_size);
    std::vector<uchar> config_buf(config_bytes, config_bytes + config_size);
    return new cv::Ptr<cv::FaceDetectorYN>(
        cv::FaceDetectorYN::create(cv::String(framework), model_buf, config_buf,
                                   cv::Size(size.width, size.height)));
}
// FaceDetectorYN_Create_FromBytes_WithParams loads a YuNet face detector from
// in-memory buffers with explicit score/NMS thresholds, top-k, and DNN
// backend/target ids.
//
// Returns a heap-allocated cv::Ptr wrapper; release with FaceDetectorYN_Close.
FaceDetectorYN FaceDetectorYN_Create_FromBytes_WithParams(const char* framework, void* bufferModel, int model_size, void* bufferConfig, int config_size, Size size, float score_threshold, float nms_threshold, int top_k, int backend_id, int target_id) {
    const uchar* model_bytes = static_cast<const uchar*>(bufferModel);
    const uchar* config_bytes = static_cast<const uchar*>(bufferConfig);
    // Range-construct the vectors instead of per-byte push_back loops:
    // one allocation each, no incremental reallocation.
    std::vector<uchar> model_buf(model_bytes, model_bytes + model_size);
    std::vector<uchar> config_buf(config_bytes, config_bytes + config_size);
    return new cv::Ptr<cv::FaceDetectorYN>(
        cv::FaceDetectorYN::create(cv::String(framework), model_buf, config_buf,
                                   cv::Size(size.width, size.height),
                                   score_threshold, nms_threshold, top_k,
                                   backend_id, target_id));
}
// FaceDetectorYN_Close releases the cv::Ptr wrapper created by one of the
// FaceDetectorYN_Create* functions; the underlying detector is freed when the
// last cv::Ptr reference is dropped.
void FaceDetectorYN_Close(FaceDetectorYN fd) {
    delete fd;
}
// FaceDetectorYN_Detect runs face detection on image; results are written into
// faces (one row per detected face). Returns the OpenCV detect() status value.
int FaceDetectorYN_Detect(FaceDetectorYN detector, Mat image, Mat faces) {
    return (*detector)->detect(*image, *faces);
}
// FaceDetectorYN_GetInputSize reports the detector's network input size,
// converted from cv::Size to the C-ABI Size struct.
Size FaceDetectorYN_GetInputSize(FaceDetectorYN detector) {
    const cv::Size current = (*detector)->getInputSize();
    Size out;
    out.width = current.width;
    out.height = current.height;
    return out;
}
// FaceDetectorYN_GetNMSThreshold returns the detector's NMS threshold.
float FaceDetectorYN_GetNMSThreshold(FaceDetectorYN detector) {
    return (*detector)->getNMSThreshold();
}
// FaceDetectorYN_GetScoreThreshold returns the detector's score threshold.
float FaceDetectorYN_GetScoreThreshold(FaceDetectorYN detector) {
    return (*detector)->getScoreThreshold();
}
// FaceDetectorYN_GetTopK returns the number of boxes kept before NMS.
int FaceDetectorYN_GetTopK(FaceDetectorYN detector) {
    return (*detector)->getTopK();
}
// FaceDetectorYN_SetInputSize overrides the network input size; call before
// detecting on images whose size differs from the one used at creation.
void FaceDetectorYN_SetInputSize(FaceDetectorYN detector, Size input_size) {
    (*detector)->setInputSize(cv::Size(input_size.width, input_size.height));
}
// FaceDetectorYN_SetNMSThreshold updates the NMS threshold.
void FaceDetectorYN_SetNMSThreshold(FaceDetectorYN detector, float nms_threshold) {
    (*detector)->setNMSThreshold(nms_threshold);
}
// FaceDetectorYN_SetScoreThreshold updates the detection score threshold.
void FaceDetectorYN_SetScoreThreshold(FaceDetectorYN detector, float score_threshold) {
    (*detector)->setScoreThreshold(score_threshold);
}
// FaceDetectorYN_SetTopK updates the number of boxes kept before NMS.
void FaceDetectorYN_SetTopK(FaceDetectorYN detector, int top_k) {
    (*detector)->setTopK(top_k);
}
// FaceRecognizerSF_Create loads a SFace recognizer with default backend id 0
// and target id 0; thin wrapper over FaceRecognizerSF_Create_WithParams.
FaceRecognizerSF FaceRecognizerSF_Create(const char* model, const char* config) {
    return FaceRecognizerSF_Create_WithParams(model, config, 0, 0);
}
// FaceRecognizerSF_Create_WithParams loads a SFace recognizer with explicit
// DNN backend/target ids. Returns a heap-allocated cv::Ptr wrapper; release
// with FaceRecognizerSF_Close.
FaceRecognizerSF FaceRecognizerSF_Create_WithParams(const char* model, const char* config, int backend_id, int target_id) {
    return new cv::Ptr<cv::FaceRecognizerSF>(
        cv::FaceRecognizerSF::create(model, config, backend_id, target_id));
}
// FaceRecognizerSF_Close releases the cv::Ptr wrapper created by a
// FaceRecognizerSF_Create* function.
void FaceRecognizerSF_Close(FaceRecognizerSF fr) {
    delete fr;
}
// FaceRecognizerSF_AlignCrop aligns the face described by face_box within
// src_img and writes the cropped result into aligned_img.
void FaceRecognizerSF_AlignCrop(FaceRecognizerSF recognizer, Mat src_img, Mat face_box, Mat aligned_img) {
    (*recognizer)->alignCrop(*src_img, *face_box, *aligned_img);
}
// FaceRecognizerSF_Feature extracts a face feature vector from an aligned
// face image into face_feature.
void FaceRecognizerSF_Feature(FaceRecognizerSF recognizer, Mat aligned_img, Mat face_feature) {
    (*recognizer)->feature(*aligned_img, *face_feature);
}
// FaceRecognizerSF_Match compares two face features with distance type 0;
// thin wrapper over FaceRecognizerSF_Match_WithParams.
float FaceRecognizerSF_Match(FaceRecognizerSF fr, Mat face_feature1, Mat face_feature2) {
    return FaceRecognizerSF_Match_WithParams(fr, face_feature1, face_feature2, 0);
}
// FaceRecognizerSF_Match_WithParams compares two face features using the
// given distance type and narrows OpenCV's double result to float for the
// C ABI.
float FaceRecognizerSF_Match_WithParams(FaceRecognizerSF recognizer, Mat face_feature1, Mat face_feature2, int dis_type) {
    return static_cast<float>(
        (*recognizer)->match(*face_feature1, *face_feature2, dis_type));
}

View File

@@ -269,3 +269,294 @@ func (a *QRCodeDetector) DetectAndDecodeMulti(input Mat, decoded []string, point
}
return bool(success)
}
// FaceDetectorYN wraps an OpenCV cv::FaceDetectorYN (YuNet) face detector.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html
type FaceDetectorYN struct {
	p C.FaceDetectorYN // owning C-side handle; released by Close
}
// NewFaceDetectorYN Creates an instance of face detector with given parameters.
//
// modelPath: the path to the requested model
//
// configPath: the path to the config file for compability, which is not requested for ONNX models
//
// size: the size of the input image
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html#a5f7fb43c60c95ca5ebab78483de02516
func NewFaceDetectorYN(modelPath string, configPath string, size image.Point) FaceDetectorYN {
	cModel := C.CString(modelPath)
	defer C.free(unsafe.Pointer(cModel))

	cConfig := C.CString(configPath)
	defer C.free(unsafe.Pointer(cConfig))

	cSize := C.Size{width: C.int(size.X), height: C.int(size.Y)}

	return FaceDetectorYN{p: C.FaceDetectorYN_Create(cModel, cConfig, cSize)}
}
// NewFaceDetectorYNWithParams Creates an instance of face detector with given
// parameters, including score/NMS thresholds, topK, and DNN backend/target ids.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html#a5f7fb43c60c95ca5ebab78483de02516
func NewFaceDetectorYNWithParams(modelPath string, configPath string, size image.Point, scoreThreshold float32, nmsThreshold float32, topK int, backendId int, targetId int) FaceDetectorYN {
	cModel := C.CString(modelPath)
	defer C.free(unsafe.Pointer(cModel))

	cConfig := C.CString(configPath)
	defer C.free(unsafe.Pointer(cConfig))

	cSize := C.Size{width: C.int(size.X), height: C.int(size.Y)}

	handle := C.FaceDetectorYN_Create_WithParams(cModel, cConfig, cSize,
		C.float(scoreThreshold), C.float(nmsThreshold),
		C.int(topK), C.int(backendId), C.int(targetId))
	return FaceDetectorYN{p: handle}
}
// NewFaceDetectorYNFromBytes Creates an instance of face detector from
// in-memory model/config buffers rather than files.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html#aa0796a4bfe2d4709bef81abbae9a927a
func NewFaceDetectorYNFromBytes(framework string, bufferModel []byte, bufferConfig []byte, size image.Point) FaceDetectorYN {
	cFramework := C.CString(framework)
	defer C.free(unsafe.Pointer(cFramework))

	cSize := C.Size{width: C.int(size.X), height: C.int(size.Y)}

	handle := C.FaceDetectorYN_Create_FromBytes(cFramework,
		unsafe.Pointer(unsafe.SliceData(bufferModel)), C.int(len(bufferModel)),
		unsafe.Pointer(unsafe.SliceData(bufferConfig)), C.int(len(bufferConfig)),
		cSize)
	return FaceDetectorYN{p: handle}
}
// NewFaceDetectorYNFromBytesWithParams Creates an instance of face detector with given parameters.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html#aa0796a4bfe2d4709bef81abbae9a927a
func NewFaceDetectorYNFromBytesWithParams(framework string, bufferModel []byte, bufferConfig []byte, size image.Point, scoreThreshold float32, nmsThreshold float32, topK int, backendId int, targetId int) FaceDetectorYN {
	c_framework := C.CString(framework)
	defer C.free(unsafe.Pointer(c_framework))
	c_size := C.Size{
		width:  C.int(size.X),
		height: C.int(size.Y),
	}
	return FaceDetectorYN{p: C.FaceDetectorYN_Create_FromBytes_WithParams(c_framework,
		unsafe.Pointer(unsafe.SliceData(bufferModel)), C.int(len(bufferModel)),
		unsafe.Pointer(unsafe.SliceData(bufferConfig)), C.int(len(bufferConfig)), c_size,
		C.float(scoreThreshold), C.float(nmsThreshold), C.int(topK), C.int(backendId), C.int(targetId))}
}
// Close releases the native FaceDetectorYN resources.
func (fd *FaceDetectorYN) Close() {
	C.FaceDetectorYN_Close(fd.p)
}
// Detect Detects faces in the input image.
//
// image: an image to detect
//
// faces: detection results stored in a 2D cv::Mat of shape [num_faces, 15],
// where each row is:
//
//	0-1:   x, y of bbox top left corner
//	2-3:   width, height of bbox
//	4-5:   x, y of right eye
//	6-7:   x, y of left eye
//	8-9:   x, y of nose tip
//	10-11: x, y of right corner of mouth
//	12-13: x, y of left corner of mouth
//	14:    face score
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html#ac05bd075ca3e6edc0e328927aae6f45b
func (fd *FaceDetectorYN) Detect(image Mat, faces *Mat) int {
	return int(C.FaceDetectorYN_Detect(fd.p, image.p, faces.p))
}
// GetInputSize returns the current network input size as an image.Point
// (X = width, Y = height).
func (fd *FaceDetectorYN) GetInputSize() image.Point {
	cSize := C.FaceDetectorYN_GetInputSize(fd.p)
	return image.Pt(int(cSize.width), int(cSize.height))
}
// GetNMSThreshold returns the non-maximum-suppression threshold used to
// suppress overlapping bounding boxes.
func (fd *FaceDetectorYN) GetNMSThreshold() float32 {
	return float32(C.FaceDetectorYN_GetNMSThreshold(fd.p))
}

// GetMNSThreshold returns the non-maximum-suppression threshold.
//
// Deprecated: the method name contains a typo ("MNS"); use GetNMSThreshold.
// Kept for backward compatibility with existing callers.
func (fd *FaceDetectorYN) GetMNSThreshold() float32 {
	return fd.GetNMSThreshold()
}
// GetScoreThreshold returns the score threshold used to filter out low-score
// detections.
func (fd *FaceDetectorYN) GetScoreThreshold() float32 {
	return float32(C.FaceDetectorYN_GetScoreThreshold(fd.p))
}
// GetTopK returns the number of bounding boxes preserved before NMS.
func (fd *FaceDetectorYN) GetTopK() int {
	return int(C.FaceDetectorYN_GetTopK(fd.p))
}
// SetInputSize Set the size for the network input, which overwrites the input
// size of creating model. Call this method when the size of input image does
// not match the input size when creating model.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html#a072418e5ce7beeb69c41edda75c41d2e
func (fd *FaceDetectorYN) SetInputSize(sz image.Point) {
	cSize := C.Size{width: C.int(sz.X), height: C.int(sz.Y)}
	C.FaceDetectorYN_SetInputSize(fd.p, cSize)
}
// SetNMSThreshold Set the Non-maximum-suppression threshold to suppress
// bounding boxes that have IoU greater than the given value.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html#ab6011efee7e12dca3857d82de5269ac5
func (fd *FaceDetectorYN) SetNMSThreshold(nmsThreshold float32) {
	C.FaceDetectorYN_SetNMSThreshold(fd.p, C.float(nmsThreshold))
}
// SetScoreThreshold Set the score threshold to filter out bounding boxes of
// score less than the given value.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html#a37f3c23b82158fac7fdad967d315f85a
func (fd *FaceDetectorYN) SetScoreThreshold(scoreThreshold float32) {
	C.FaceDetectorYN_SetScoreThreshold(fd.p, C.float(scoreThreshold))
}
// SetTopK Set the number of bounding boxes preserved before NMS.
//
// For further details, please see:
// https://docs.opencv.org/4.x/df/d20/classcv_1_1FaceDetectorYN.html#aa88d20e1e2df75ea36b851534089856a
func (fd *FaceDetectorYN) SetTopK(topK int) {
	C.FaceDetectorYN_SetTopK(fd.p, C.int(topK))
}
// FaceRecognizerSFDisType selects the distance metric used by
// FaceRecognizerSF.MatchWithParams; values mirror cv::FaceRecognizerSF::DisType.
type FaceRecognizerSFDisType int

const (
	// FaceRecognizerSFDisTypeCosine selects the cosine distance (DisType 0).
	FaceRecognizerSFDisTypeCosine FaceRecognizerSFDisType = 0
	// FaceRecognizerSFDisTypeNormL2 selects the L2-norm distance (DisType 1).
	FaceRecognizerSFDisTypeNormL2 FaceRecognizerSFDisType = 1
)
// FaceRecognizerSF wraps an OpenCV cv::FaceRecognizerSF (SFace) face
// recognition model.
//
// For further details, please see:
// https://docs.opencv.org/4.x/da/d09/classcv_1_1FaceRecognizerSF.html
type FaceRecognizerSF struct {
	p C.FaceRecognizerSF // owning C-side handle; released by Close
}
// NewFaceRecognizerSF Creates an instance with given parameters.
//
// model: the path of the onnx model used for face recognition
//
// config: the path to the config file for compability, which is not requested for ONNX models
//
// For further details, please see:
// https://docs.opencv.org/4.x/da/d09/classcv_1_1FaceRecognizerSF.html#a04df90b0cd7d26d350acd92621a35743
func NewFaceRecognizerSF(modelPath string, configPath string) FaceRecognizerSF {
	cModel := C.CString(modelPath)
	defer C.free(unsafe.Pointer(cModel))

	cConfig := C.CString(configPath)
	defer C.free(unsafe.Pointer(cConfig))

	return FaceRecognizerSF{p: C.FaceRecognizerSF_Create(cModel, cConfig)}
}
// NewFaceRecognizerSFWithParams Creates an instance with given parameters.
//
// model: the path of the onnx model used for face recognition
//
// config: the path to the config file for compability, which is not requested for ONNX models
//
// backend_id: the id of backend
//
// target_id: the id of target device
//
// For further details, please see:
// https://docs.opencv.org/4.x/da/d09/classcv_1_1FaceRecognizerSF.html#a04df90b0cd7d26d350acd92621a35743
func NewFaceRecognizerSFWithParams(modelPath string, configPath string, backendId int, targetId int) FaceRecognizerSF {
	cModel := C.CString(modelPath)
	defer C.free(unsafe.Pointer(cModel))

	cConfig := C.CString(configPath)
	defer C.free(unsafe.Pointer(cConfig))

	handle := C.FaceRecognizerSF_Create_WithParams(cModel, cConfig, C.int(backendId), C.int(targetId))
	return FaceRecognizerSF{p: handle}
}
// Close Releases the native FaceRecognizerSF resources.
func (fr *FaceRecognizerSF) Close() {
	C.FaceRecognizerSF_Close(fr.p)
}
// AlignCrop Aligns detected face with the source input image and crops it.
//
// srcImg: input image
//
// faceBox: the detected face result from the input image
//
// alignedImg: output aligned image
//
// For further details, please see:
// https://docs.opencv.org/4.x/da/d09/classcv_1_1FaceRecognizerSF.html#a84492908abecbc9362b4ddc8d46b8345
func (fr *FaceRecognizerSF) AlignCrop(srcImg Mat, faceBox Mat, alignedImg *Mat) {
	C.FaceRecognizerSF_AlignCrop(fr.p, srcImg.p, faceBox.p, alignedImg.p)
}
// Feature Extracts face feature from aligned image.
//
// alignedImg: input aligned image
//
// faceFeature: output face feature
//
// For further details, please see:
// https://docs.opencv.org/4.x/da/d09/classcv_1_1FaceRecognizerSF.html#ab1b4a3c12213e89091a490c573dc5aba
func (fr *FaceRecognizerSF) Feature(alignedImg Mat, faceFeature *Mat) {
	C.FaceRecognizerSF_Feature(fr.p, alignedImg.p, faceFeature.p)
}
// Match Calculates the distance between two face features using the default
// distance type.
//
// faceFeature1: the first input feature
//
// faceFeature2: the second input feature of the same size and the same type as face_feature1
//
// For further details, please see:
// https://docs.opencv.org/4.x/da/d09/classcv_1_1FaceRecognizerSF.html#a2f0362ca1e64320a1f3ba7e1386d0219
func (fr *FaceRecognizerSF) Match(faceFeature1 Mat, faceFeature2 Mat) float32 {
	return float32(C.FaceRecognizerSF_Match(fr.p, faceFeature1.p, faceFeature2.p))
}
// MatchWithParams Calculates the distance between two face features.
//
// faceFeature1: the first input feature
//
// faceFeature2: the second input feature of the same size and the same type as face_feature1
//
// disType: defines how to calculate the distance between two face features
//
// For further details, please see:
// https://docs.opencv.org/4.x/da/d09/classcv_1_1FaceRecognizerSF.html#a2f0362ca1e64320a1f3ba7e1386d0219
func (fr *FaceRecognizerSF) MatchWithParams(faceFeature1 Mat, faceFeature2 Mat, disType FaceRecognizerSFDisType) float32 {
	return float32(C.FaceRecognizerSF_Match_WithParams(fr.p, faceFeature1.p, faceFeature2.p, C.int(disType)))
}

View File

@@ -14,10 +14,14 @@ extern "C" {
typedef cv::CascadeClassifier* CascadeClassifier;
typedef cv::HOGDescriptor* HOGDescriptor;
typedef cv::QRCodeDetector* QRCodeDetector;
typedef cv::Ptr<cv::FaceDetectorYN>* FaceDetectorYN;
typedef cv::Ptr<cv::FaceRecognizerSF>* FaceRecognizerSF;
#else
typedef void* CascadeClassifier;
typedef void* HOGDescriptor;
typedef void* QRCodeDetector;
typedef void* FaceDetectorYN;
typedef void* FaceRecognizerSF;
#endif
// CascadeClassifier
@@ -48,6 +52,31 @@ void QRCodeDetector_Close(QRCodeDetector qr);
bool QRCodeDetector_DetectMulti(QRCodeDetector qr, Mat input, Mat points);
bool QRCodeDetector_DetectAndDecodeMulti(QRCodeDetector qr, Mat input, CStrings* decoded ,Mat points, struct Mats* mats);
// FaceDetectorYN — C bindings for cv::FaceDetectorYN (YuNet face detector).
// Parameter names below match the definitions in the .cpp file (the original
// declarations misspelled nms_threshold as "mms_threshold").
FaceDetectorYN FaceDetectorYN_Create(const char* model, const char* config, Size size);
FaceDetectorYN FaceDetectorYN_Create_WithParams(const char* model, const char* config, Size size, float score_threshold, float nms_threshold, int top_k, int backend_id, int target_id);
FaceDetectorYN FaceDetectorYN_Create_FromBytes(const char* framework, void* bufferModel, int model_size, void* bufferConfig, int config_size, Size size);
FaceDetectorYN FaceDetectorYN_Create_FromBytes_WithParams(const char* framework, void* bufferModel, int model_size, void* bufferConfig, int config_size, Size size, float score_threshold, float nms_threshold, int top_k, int backend_id, int target_id);
void FaceDetectorYN_Close(FaceDetectorYN fd);
int FaceDetectorYN_Detect(FaceDetectorYN fd, Mat image, Mat faces);
Size FaceDetectorYN_GetInputSize(FaceDetectorYN fd);
float FaceDetectorYN_GetNMSThreshold(FaceDetectorYN fd);
float FaceDetectorYN_GetScoreThreshold(FaceDetectorYN fd);
int FaceDetectorYN_GetTopK(FaceDetectorYN fd);
void FaceDetectorYN_SetInputSize(FaceDetectorYN fd, Size input_size);
void FaceDetectorYN_SetNMSThreshold(FaceDetectorYN fd, float nms_threshold);
void FaceDetectorYN_SetScoreThreshold(FaceDetectorYN fd, float score_threshold);
void FaceDetectorYN_SetTopK(FaceDetectorYN fd, int top_k);
// FaceRecognizerSF — C bindings for cv::FaceRecognizerSF (SFace recognition).
FaceRecognizerSF FaceRecognizerSF_Create(const char* model, const char* config);
FaceRecognizerSF FaceRecognizerSF_Create_WithParams(const char* model, const char* config, int backend_id, int target_id);
void FaceRecognizerSF_Close(FaceRecognizerSF fr);
void FaceRecognizerSF_AlignCrop(FaceRecognizerSF fr, Mat src_img, Mat face_box, Mat aligned_img);
void FaceRecognizerSF_Feature(FaceRecognizerSF fr, Mat aligned_img, Mat face_feature);
float FaceRecognizerSF_Match(FaceRecognizerSF fr, Mat face_feature1, Mat face_feature2);
float FaceRecognizerSF_Match_WithParams(FaceRecognizerSF fr, Mat face_feature1, Mat face_feature2, int dis_type);
#ifdef __cplusplus
}
#endif

View File

@@ -3,6 +3,7 @@ package gocv
import (
"image"
"image/color"
"os"
"testing"
)
@@ -214,3 +215,268 @@ func padQr(qr *Mat) Mat {
CopyMakeBorder(qrCodes0, &out, d, d, d, d, BorderConstant, color.RGBA{255, 255, 255, 255})
return out
}
// TestFaceDetectorYN exercises the file-based constructor, the getter/setter
// round-trips, and a detection pass on the bundled test image.
func TestFaceDetectorYN(t *testing.T) {
	img := IMRead("images/face.jpg", IMReadAnyColor)
	defer img.Close()

	// Size() is (rows, cols); image.Pt expects (x=width, y=height).
	s := image.Pt(img.Size()[1], img.Size()[0])

	faces := NewMat()
	defer faces.Close()

	fd := NewFaceDetectorYN("testdata/face_detection_yunet_2023mar.onnx", "", s)
	defer fd.Close()

	sz := fd.GetInputSize()
	// BUG FIX: the original used &&, which only failed when BOTH dimensions
	// were wrong; either dimension differing is an error.
	if sz.X != 640 || sz.Y != 480 {
		t.Error("error on FaceDetectorYN.GetInputSize()")
	}
	fd.SetInputSize(sz)

	// Round-trip the thresholds and topK through their setters.
	t1 := fd.GetMNSThreshold()
	fd.SetNMSThreshold(t1)

	t2 := fd.GetScoreThreshold()
	fd.SetScoreThreshold(t2)

	topK := fd.GetTopK()
	fd.SetTopK(topK)

	fd.Detect(img, &faces)

	facesCount := faces.Rows()
	if facesCount < 1 {
		t.Error("no face detected")
	}
}
// TestFaceDetectorYNWithParams exercises the parameterized file-based
// constructor, getter/setter round-trips, and a detection pass.
func TestFaceDetectorYNWithParams(t *testing.T) {
	img := IMRead("images/face.jpg", IMReadAnyColor)
	defer img.Close()

	// Size() is (rows, cols); image.Pt expects (x=width, y=height).
	s := image.Pt(img.Size()[1], img.Size()[0])

	faces := NewMat()
	defer faces.Close()

	fd := NewFaceDetectorYNWithParams("testdata/face_detection_yunet_2023mar.onnx", "", s, 0.9, 0.3, 5000, 0, 0)
	defer fd.Close()

	sz := fd.GetInputSize()
	// BUG FIX: the original used &&, which only failed when BOTH dimensions
	// were wrong; either dimension differing is an error.
	if sz.X != 640 || sz.Y != 480 {
		t.Error("error on FaceDetectorYN.GetInputSize()")
	}
	fd.SetInputSize(sz)

	t1 := fd.GetMNSThreshold()
	fd.SetNMSThreshold(t1)

	t2 := fd.GetScoreThreshold()
	fd.SetScoreThreshold(t2)

	topK := fd.GetTopK()
	fd.SetTopK(topK)

	fd.Detect(img, &faces)

	facesCount := faces.Rows()
	if facesCount < 1 {
		t.Error("no face detected")
	}
}
// TestFaceDetectorYNFromBytes exercises the buffer-based constructor,
// getter/setter round-trips, and a detection pass.
func TestFaceDetectorYNFromBytes(t *testing.T) {
	modelBuffer, err := os.ReadFile("testdata/face_detection_yunet_2023mar.onnx")
	if err != nil {
		// BUG FIX: was t.Errorf, which continued and built a detector from an
		// empty buffer; without the model bytes the rest of the test is moot.
		t.Fatalf("%s reading testdata/face_detection_yunet_2023mar.onnx", err.Error())
	}

	img := IMRead("images/face.jpg", IMReadAnyColor)
	defer img.Close()

	// Size() is (rows, cols); image.Pt expects (x=width, y=height).
	s := image.Pt(img.Size()[1], img.Size()[0])

	faces := NewMat()
	defer faces.Close()

	fd := NewFaceDetectorYNFromBytes("onnx", modelBuffer, []byte(""), s)
	defer fd.Close()

	sz := fd.GetInputSize()
	// BUG FIX: the original used &&, which only failed when BOTH dimensions
	// were wrong; either dimension differing is an error.
	if sz.X != 640 || sz.Y != 480 {
		t.Error("error on FaceDetectorYN.GetInputSize()")
	}
	fd.SetInputSize(sz)

	t1 := fd.GetMNSThreshold()
	fd.SetNMSThreshold(t1)

	t2 := fd.GetScoreThreshold()
	fd.SetScoreThreshold(t2)

	topK := fd.GetTopK()
	fd.SetTopK(topK)

	fd.Detect(img, &faces)

	facesCount := faces.Rows()
	if facesCount < 1 {
		t.Error("no face detected")
	}
}
// TestFaceDetectorYNFromBytesWithParams exercises the parameterized
// buffer-based constructor, getter/setter round-trips, and a detection pass.
func TestFaceDetectorYNFromBytesWithParams(t *testing.T) {
	modelBuffer, err := os.ReadFile("testdata/face_detection_yunet_2023mar.onnx")
	if err != nil {
		// BUG FIX: was t.Errorf, which continued and built a detector from an
		// empty buffer; without the model bytes the rest of the test is moot.
		t.Fatalf("%s reading testdata/face_detection_yunet_2023mar.onnx", err.Error())
	}

	img := IMRead("images/face.jpg", IMReadAnyColor)
	defer img.Close()

	// Size() is (rows, cols); image.Pt expects (x=width, y=height).
	s := image.Pt(img.Size()[1], img.Size()[0])

	faces := NewMat()
	defer faces.Close()

	fd := NewFaceDetectorYNFromBytesWithParams("onnx", modelBuffer, []byte(""), s, 0.9, 0.3, 5000, 0, 0)
	defer fd.Close()

	sz := fd.GetInputSize()
	// BUG FIX: the original used &&, which only failed when BOTH dimensions
	// were wrong; either dimension differing is an error.
	if sz.X != 640 || sz.Y != 480 {
		t.Error("error on FaceDetectorYN.GetInputSize()")
	}
	fd.SetInputSize(sz)

	t1 := fd.GetMNSThreshold()
	fd.SetNMSThreshold(t1)

	t2 := fd.GetScoreThreshold()
	fd.SetScoreThreshold(t2)

	topK := fd.GetTopK()
	fd.SetTopK(topK)

	fd.Detect(img, &faces)

	facesCount := faces.Rows()
	if facesCount < 1 {
		t.Error("no face detected")
	}
}
// TestFaceRecognizerSF runs the full detect → crop → align → feature → match
// pipeline with the default FaceRecognizerSF constructor, matching a face
// feature against itself.
func TestFaceRecognizerSF(t *testing.T) {
	rons := IMRead("images/face.jpg", IMReadUnchanged)
	defer rons.Close()
	// Size() is (rows, cols); image.Pt expects (x=width, y=height).
	ronsImgSz := rons.Size()
	s := image.Pt(ronsImgSz[1], ronsImgSz[0])
	fd := NewFaceDetectorYN("testdata/face_detection_yunet_2023mar.onnx", "", s)
	defer fd.Close()
	ronsFaces := NewMat()
	defer ronsFaces.Close()
	detectRv := fd.Detect(rons, &ronsFaces)
	t.Log("detect rv is", detectRv)
	facesCount := ronsFaces.Rows()
	if facesCount < 1 {
		t.Error("no face detected")
	}
	// Row 0 of the detection result: cols 0-1 are bbox x,y; cols 2-3 are
	// bbox width,height. Build the face rectangle from them.
	ronsFaceX0 := ronsFaces.GetFloatAt(0, 0)
	ronsFaceY0 := ronsFaces.GetFloatAt(0, 1)
	ronsFaceX1 := ronsFaces.GetFloatAt(0, 0) + ronsFaces.GetFloatAt(0, 2)
	ronsFaceY1 := ronsFaces.GetFloatAt(0, 1) + ronsFaces.GetFloatAt(0, 3)
	ronsFace := rons.Region(image.Rect(int(ronsFaceX0), int(ronsFaceY0), int(ronsFaceX1), int(ronsFaceY1)))
	defer ronsFace.Close()
	fr := NewFaceRecognizerSF("testdata/face_recognition_sface_2021dec.onnx", "")
	defer fr.Close()
	ronsAligned := NewMat()
	defer ronsAligned.Close()
	fr.AlignCrop(rons, ronsFace, &ronsAligned)
	if ronsAligned.Empty() {
		t.Error("aligned is empty")
	}
	ronsFaceFeature := NewMat()
	defer ronsFaceFeature.Close()
	fr.Feature(ronsAligned, &ronsFaceFeature)
	// Matching a feature against itself; only logged, not asserted.
	match := fr.Match(ronsFaceFeature, ronsFaceFeature)
	t.Log("face feature match: ", match)
}
// TestFaceRecognizerSFWithParams runs the same detect → crop → align →
// feature → match pipeline using the parameterized constructor and an
// explicit cosine distance type.
func TestFaceRecognizerSFWithParams(t *testing.T) {
	rons := IMRead("images/face.jpg", IMReadUnchanged)
	defer rons.Close()
	// Size() is (rows, cols); image.Pt expects (x=width, y=height).
	ronsImgSz := rons.Size()
	s := image.Pt(ronsImgSz[1], ronsImgSz[0])
	fd := NewFaceDetectorYN("testdata/face_detection_yunet_2023mar.onnx", "", s)
	defer fd.Close()
	ronsFaces := NewMat()
	defer ronsFaces.Close()
	detectRv := fd.Detect(rons, &ronsFaces)
	t.Log("detect rv is", detectRv)
	facesCount := ronsFaces.Rows()
	if facesCount < 1 {
		t.Error("no face detected")
	}
	// Row 0 of the detection result: cols 0-1 are bbox x,y; cols 2-3 are
	// bbox width,height. Build the face rectangle from them.
	ronsFaceX0 := ronsFaces.GetFloatAt(0, 0)
	ronsFaceY0 := ronsFaces.GetFloatAt(0, 1)
	ronsFaceX1 := ronsFaces.GetFloatAt(0, 0) + ronsFaces.GetFloatAt(0, 2)
	ronsFaceY1 := ronsFaces.GetFloatAt(0, 1) + ronsFaces.GetFloatAt(0, 3)
	ronsFace := rons.Region(image.Rect(int(ronsFaceX0), int(ronsFaceY0), int(ronsFaceX1), int(ronsFaceY1)))
	defer ronsFace.Close()
	fr := NewFaceRecognizerSFWithParams("testdata/face_recognition_sface_2021dec.onnx", "", 0, 0)
	defer fr.Close()
	ronsAligned := NewMat()
	defer ronsAligned.Close()
	fr.AlignCrop(rons, ronsFace, &ronsAligned)
	if ronsAligned.Empty() {
		t.Error("aligned is empty")
	}
	ronsFaceFeature := NewMat()
	defer ronsFaceFeature.Close()
	fr.Feature(ronsAligned, &ronsFaceFeature)
	// Matching a feature against itself; only logged, not asserted.
	match := fr.MatchWithParams(ronsFaceFeature, ronsFaceFeature, FaceRecognizerSFDisTypeCosine)
	t.Log("face feature match: ", match)
}