# USAGE
# python build_face_dataset.py
# (the cascade file haarcascade_frontalface_default.xml must be in the working directory;
#  captured images are saved under dataset/<name>)
# Import the necessary packages
from imutils.video import VideoStream
import imutils
import time
import cv2
import os
# The "name" parameter is a short identifier (e.g. the person's initials) for the
# person being enrolled; captured images are saved under dataset/<name>.
def build_face_dataset(name):
    # Load OpenCV's Haar cascade face detector from disk
    detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
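    # Note: with the opencv-python package, the bundled cascade files can also be
    # loaded from OpenCV's own data directory, e.g.:
    # detector = cv2.CascadeClassifier(
    #     cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')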
print("[INFO] starting video stream...")
# Initialize and enable the video stream VideoStreem (parameter src is set to 1, use the built-in camera)
vs = VideoStream(src=1).start()
# If you are using a Raspberry Pi, replace the code on line 17 with the code on line 19
# vs = VideoStream(usePiCamera=True).start()
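    # (The usePiCamera=True path in imutils requires the "picamera" package,
    # which is only available on the Raspberry Pi.)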
    # Pause for two seconds to let the camera sensor warm up
    time.sleep(2.0)
    # Make sure the output directory exists (cv2.imwrite does not create it)
    os.makedirs(os.path.join('dataset', name), exist_ok=True)
    # Initialize the counter of saved face images
    total = 0
    # Loop over frames from the video stream (press 'q' to stop)
    while True:
        # Grab the current frame from the stream
        frame = vs.read()
        # Keep an untouched copy of the frame; this is what gets written to disk
        # when a face image is saved
        orig = frame.copy()
        # Resize the frame so face detection runs faster
        frame = imutils.resize(frame, width=400)
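        # (imutils.resize preserves the aspect ratio when only the width is given,
        # so the 400-pixel-wide frame is not distorted)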
        # Detect faces in the grayscale version of the frame with detectMultiScale:
        #   image: a grayscale image
        #   scaleFactor: how much the image is shrunk at each step of the image pyramid
        #   minNeighbors: how many overlapping candidate detections a box needs
        #                 before it is accepted (reduces false positives)
        #   minSize: the minimum face size, in pixels, that will be detected
        rects = detector.detectMultiScale(
            cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), scaleFactor=1.1,
            minNeighbors=5, minSize=(30, 30))
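        # rects is a list of (x, y, w, h) bounding boxes in the coordinates of the
        # resized frame, so they are drawn on "frame" while the full-resolution
        # "orig" is what actually gets saved to disk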
        # Loop over the face detections and draw a rectangle around each one
        for (x, y, w, h) in rects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Show the annotated frame on screen
        cv2.imshow("Frame", frame)
        # Poll the keyboard: waitKey(1) waits at most 1 millisecond for a key press;
        # masking with 0xFF keeps only the low 8 bits of the return value so the
        # comparison with ord(...) works consistently across platforms
        key = cv2.waitKey(1) & 0xFF
        # Press 'k' to keep this frame: save the original (full-resolution) frame
        # to disk for later use in face recognition, and increment the counter
        if key == ord("k"):
            # Build the output path; zfill(5) zero-pads the counter, giving
            # filenames like 00000.png, 00001.png, ...
            p = os.path.join('dataset', name, "{}.png".format(str(total).zfill(5)))
            cv2.imwrite(p, orig)
            total += 1
        # Press 'q' to quit the loop
        elif key == ord("q"):
            break
    # Report how many face images were saved
    print("[INFO] {} face images stored".format(total))
    # Clean up: close the display window and stop the video stream
    print("[INFO] cleaning up...")
    cv2.destroyAllWindows()
    vs.stop()
if __name__ == '__main__':
    build_face_dataset('zjw')
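    # A minimal sketch of taking the person's name from the command line instead
    # of hard-coding it, assuming an invocation like: python build_face_dataset.py zjw
    #
    #     import sys
    #     build_face_dataset(sys.argv[1] if len(sys.argv) > 1 else 'zjw')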