Face Detection and Recognition
DNN-based face detection and recognition using the FaceDetectorYN and FaceRecognizerSF classes.
FaceDetectorYN
DNN-based face detector class.
Model download link: Face Detection YuNet
Constructor
Use the static create() method to create an instance.
create (from file)
static Ptr<FaceDetectorYN> create(
const String& model,
const String& config,
const Size& input_size,
float score_threshold = 0.9f,
float nms_threshold = 0.3f,
int top_k = 5000,
int backend_id = 0,
int target_id = 0
)
Path to the requested model file
Path to the config file for compatibility (not required for ONNX models)
Threshold to filter out bounding boxes of score less than the given value
Threshold to suppress bounding boxes that have IoU greater than the given value
Number of bounding boxes to preserve from top rank based on score before NMS
ID of the backend (DNN backend)
ID of the target device to run the network on
Returns: Pointer to FaceDetectorYN instance
create (from buffer)
static Ptr<FaceDetectorYN> create(
const String& framework,
const std::vector<uchar>& bufferModel,
const std::vector<uchar>& bufferConfig,
const Size& input_size,
float score_threshold = 0.9f,
float nms_threshold = 0.3f,
int top_k = 5000,
int backend_id = 0,
int target_id = 0
)
Buffer with content of binary file with model weights
Buffer with content of text file containing network configuration
Methods
setInputSize
Sets the size for the network input.
void setInputSize(const Size& input_size)
Size of the input image. This overwrites the input size used when creating the model.
Call this method when the size of the input image does not match the input size when creating the model.
getInputSize
Gets the current input size.
Returns: Current input size
setScoreThreshold
Sets the score threshold to filter out bounding boxes.
void setScoreThreshold(float score_threshold)
Threshold for filtering out bounding boxes
getScoreThreshold
Gets the current score threshold.
float getScoreThreshold()
Returns: Current score threshold
setNMSThreshold
Sets the Non-maximum-suppression threshold.
void setNMSThreshold(float nms_threshold)
Threshold for NMS operation to suppress bounding boxes that have IoU greater than the given value
getNMSThreshold
Gets the current NMS threshold.
Returns: Current NMS threshold
setTopK
Sets the number of bounding boxes preserved before NMS.
Number of bounding boxes to preserve from top rank based on score
getTopK
Gets the current top K value.
Returns: Current top K value
detect
Detects faces in the input image.
int detect(InputArray image, OutputArray faces)
Input image to detect faces in
Detection results stored in a 2D cv::Mat of shape [num_faces, 15]:
- 0-1: x, y of bbox top left corner
- 2-3: width, height of bbox
- 4-5: x, y of right eye
- 6-7: x, y of left eye
- 8-9: x, y of nose tip
- 10-11: x, y of right corner of mouth
- 12-13: x, y of left corner of mouth
- 14: face score
Returns: Number of faces detected
Example Usage
#include <opencv2/objdetect/face.hpp>
#include <opencv2/imgproc.hpp>
// Build a YuNet-based detector; the empty config path is unused for ONNX models.
auto detector = cv::FaceDetectorYN::create(
    "face_detection_yunet_2023mar.onnx",
    "",
    cv::Size(320, 320),
    0.9f,   // score threshold
    0.3f,   // NMS threshold
    5000    // top-K boxes kept before NMS
);
// The detector must be told the actual image size before detection.
detector->setInputSize(image.size());
// Each detected face occupies one row of the result matrix.
cv::Mat faces;
detector->detect(image, faces);
// Visualize every detection: bounding box plus five landmark points.
for (int row = 0; row < faces.rows; ++row) {
    // Columns 0-3 hold the bounding box (x, y, w, h).
    const int bx = static_cast<int>(faces.at<float>(row, 0));
    const int by = static_cast<int>(faces.at<float>(row, 1));
    const int bw = static_cast<int>(faces.at<float>(row, 2));
    const int bh = static_cast<int>(faces.at<float>(row, 3));
    cv::rectangle(image, cv::Rect(bx, by, bw, bh), cv::Scalar(0, 255, 0), 2);
    // Columns 4-13 hold the landmarks as (x, y) pairs.
    for (int lm = 4; lm < 14; lm += 2) {
        const cv::Point pt(faces.at<float>(row, lm), faces.at<float>(row, lm + 1));
        cv::circle(image, pt, 2, cv::Scalar(255, 0, 0), -1);
    }
}
import cv2
# Build a YuNet-based detector; the '' config path is unused for ONNX models.
face_det = cv2.FaceDetectorYN.create(
    'face_detection_yunet_2023mar.onnx',
    '',
    (320, 320),
    0.9,    # score threshold
    0.3,    # NMS threshold
    5000    # top-K boxes kept before NMS
)
# The detector must be told the actual (width, height) before detection.
face_det.setInputSize(image.shape[1::-1])
# detect() returns (retval, faces); faces is None when nothing is found.
_, detections = face_det.detect(image)
if detections is not None:
    for det in detections:
        # First four values are the bounding box (x, y, w, h).
        bx, by, bw, bh = det[:4].astype(int)
        cv2.rectangle(image, (bx, by), (bx + bw, by + bh), (0, 255, 0), 2)
        # Values 4-13 hold five (x, y) landmark pairs.
        for k in range(4, 14, 2):
            cv2.circle(image, (int(det[k]), int(det[k + 1])),
                       2, (255, 0, 0), -1)
FaceRecognizerSF
DNN-based face recognizer class.
Model download link: Face Recognition SFace
Constructor
Use the static create() method to create an instance.
create (from file)
static Ptr<FaceRecognizerSF> create(
const String& model,
const String& config,
int backend_id = 0,
int target_id = 0
)
Path to the ONNX model used for face recognition
Path to the config file for compatibility (not required for ONNX models)
Returns: Pointer to FaceRecognizerSF instance
create (from buffer)
static Ptr<FaceRecognizerSF> create(
const String& framework,
const std::vector<uchar>& bufferModel,
const std::vector<uchar>& bufferConfig,
int backend_id = 0,
int target_id = 0
)
Name of the framework (ONNX, etc.)
Buffer containing the binary model weights
Buffer containing the network configuration
Enums
DisType
Distance types for calculating distance between face features.
enum DisType {
FR_COSINE = 0, // Cosine distance
FR_NORM_L2 = 1 // L2 norm distance
}
Methods
alignCrop
Aligns detected face with the source input image and crops it.
void alignCrop(
InputArray src_img,
InputArray face_box,
OutputArray aligned_img
) const
Source input image in which the face was detected
Detected face result from the input image (from FaceDetectorYN)
Output aligned and cropped face image
feature
Extracts face feature from aligned image.
void feature(
InputArray aligned_img,
OutputArray face_feature
)
Input aligned face image (from alignCrop)
Output face feature vector
match
Calculates the distance between two face features.
double match(
InputArray face_feature1,
InputArray face_feature2,
int dis_type = FaceRecognizerSF::FR_COSINE
) const
First input feature vector
Second input feature vector of the same size and type as face_feature1
Distance calculation method: FR_COSINE (cosine distance) or FR_NORM_L2 (L2 norm distance)
Returns: Distance between the two face features. For FR_COSINE, higher values indicate more similar faces; for FR_NORM_L2, lower values indicate more similar faces.
Example Usage
#include <opencv2/objdetect/face.hpp>
#include <iostream>  // needed for std::cout below (missing in the original example)
// Create face recognizer; the empty config path is not required for ONNX models.
auto recognizer = cv::FaceRecognizerSF::create(
    "face_recognition_sface_2021dec.onnx",
    ""
);
// Assume we have detected faces using FaceDetectorYN
cv::Mat face1_aligned, face2_aligned;
// Align and crop faces to the canonical layout expected by the recognizer
recognizer->alignCrop(image1, face1_box, face1_aligned);
recognizer->alignCrop(image2, face2_box, face2_aligned);
// Extract one feature vector per aligned face
cv::Mat feature1, feature2;
recognizer->feature(face1_aligned, feature1);
recognizer->feature(face2_aligned, feature2);
// Calculate similarity (cosine: higher score means more similar)
double cosine_score = recognizer->match(
    feature1, feature2,
    cv::FaceRecognizerSF::FR_COSINE
);
// Threshold for face matching (typical: 0.363 for cosine)
bool is_same_person = cosine_score >= 0.363;
std::cout << "Cosine similarity: " << cosine_score << std::endl;
std::cout << "Same person: " << (is_same_person ? "Yes" : "No") << std::endl;
import cv2
# Create the SFace recognizer; the '' config path is unused for ONNX models.
face_rec = cv2.FaceRecognizerSF.create(
    'face_recognition_sface_2021dec.onnx',
    ''
)
# Assume face boxes were already produced by FaceDetectorYN.
# Warp each detected face into the canonical crop the recognizer expects.
crop1 = face_rec.alignCrop(image1, face1_box)
crop2 = face_rec.alignCrop(image2, face2_box)
# Extract one feature vector per aligned crop.
feat1 = face_rec.feature(crop1)
feat2 = face_rec.feature(crop2)
# Compare the two feature vectors using the cosine metric.
cosine_score = face_rec.match(
    feat1, feat2,
    cv2.FaceRecognizerSF_FR_COSINE
)
# Threshold for face matching (typical: 0.363 for cosine)
is_same_person = cosine_score >= 0.363
print(f"Cosine similarity: {cosine_score}")
print(f"Same person: {'Yes' if is_same_person else 'No'}")
Complete Workflow Example
#include <opencv2/objdetect/face.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>  // needed for std::cout/std::cerr (missing in the original example)

// End-to-end example: detect one face per image, align/crop it,
// extract SFace features, and compare them.
int main() {
    // Load images
    cv::Mat img1 = cv::imread("person1.jpg");
    cv::Mat img2 = cv::imread("person2.jpg");
    // imread returns an empty Mat on failure; bail out early instead of
    // feeding invalid input to the detector.
    if (img1.empty() || img2.empty()) {
        std::cerr << "Failed to load input images" << std::endl;
        return 1;
    }
    // Create detector and recognizer (empty config path: not required for ONNX)
    auto detector = cv::FaceDetectorYN::create(
        "face_detection_yunet_2023mar.onnx", "",
        cv::Size(320, 320)
    );
    auto recognizer = cv::FaceRecognizerSF::create(
        "face_recognition_sface_2021dec.onnx", ""
    );
    // Detect faces in both images; the input size must match each image.
    detector->setInputSize(img1.size());
    cv::Mat faces1;
    detector->detect(img1, faces1);
    detector->setInputSize(img2.size());
    cv::Mat faces2;
    detector->detect(img2, faces2);
    if (faces1.rows > 0 && faces2.rows > 0) {
        // Get first face from each image (one detection per row)
        cv::Mat face1_box = faces1.row(0);
        cv::Mat face2_box = faces2.row(0);
        // Align and extract features
        cv::Mat aligned1, aligned2;
        recognizer->alignCrop(img1, face1_box, aligned1);
        recognizer->alignCrop(img2, face2_box, aligned2);
        cv::Mat feature1, feature2;
        recognizer->feature(aligned1, feature1);
        recognizer->feature(aligned2, feature2);
        // Compare faces (default metric: cosine)
        double score = recognizer->match(feature1, feature2);
        std::cout << "Match score: " << score << std::endl;
    }
    return 0;
}
import cv2
# Load the two images to compare.
img1 = cv2.imread('person1.jpg')
img2 = cv2.imread('person2.jpg')
# Build the YuNet detector and the SFace recognizer.
yunet = cv2.FaceDetectorYN.create(
    'face_detection_yunet_2023mar.onnx',
    '',
    (320, 320)
)
sface = cv2.FaceRecognizerSF.create(
    'face_recognition_sface_2021dec.onnx',
    ''
)
# Run detection on each image, updating the input size (w, h) first.
yunet.setInputSize((img1.shape[1], img1.shape[0]))
_, faces1 = yunet.detect(img1)
yunet.setInputSize((img2.shape[1], img2.shape[0]))
_, faces2 = yunet.detect(img2)
# detect() yields None for faces when nothing is found.
if faces1 is not None and faces2 is not None:
    # Use the first detection from each image.
    box1 = faces1[0]
    box2 = faces2[0]
    # Align each face, then extract its feature vector.
    crop1 = sface.alignCrop(img1, box1)
    crop2 = sface.alignCrop(img2, box2)
    feat1 = sface.feature(crop1)
    feat2 = sface.feature(crop2)
    # Compare faces (default metric: cosine)
    score = sface.match(feat1, feat2)
    print(f"Match score: {score}")
See Also