Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Made Separate functions for the main logic of Face Recognition #32

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
183 changes: 95 additions & 88 deletions FaceRecognizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def rect_to_bb(rect):

def shape_to_np(shape, dtype="int"):
# initialize (x, y) coordinates to zero
coords = np.zeros((shape.num_parts, 2), dtype=dtype)
coords = np.zeros((shape.num_parts, 2), dtype=np.int64)

# loop through 68 facial landmarks and convert them
# to a 2-tuple of (x, y)- coordinates
Expand Down Expand Up @@ -63,106 +63,113 @@ def shape_to_np(shape, dtype="int"):

}

# initialize dlib's face detector and facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
face = FaceAligner(predictor, desiredFaceWidth=256)

# Load input image, resize and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def ProcessingImage(detector, face, predictor):
    """Load the input image, detect faces in it, and hand the detections on.

    Parameters
    ----------
    detector : dlib frontal face detector used to find face rectangles.
    face : FaceAligner instance — not used directly here; downstream code
        reads the module-level ``face`` global instead (TODO confirm this
        parameter is still needed).
    predictor : dlib shape predictor — likewise consumed via the
        module-level ``predictor`` global downstream (TODO confirm).

    Reads the image path from the module-level ``args`` dict and calls
    ``ProcessingDetectedFaces`` with the resized image, its grayscale
    version, and the detected rectangles.
    """
    image = cv2.imread(args["image"])
    # cv2.imread returns None (it does not raise) when the file is missing
    # or unreadable; fail loudly here instead of crashing inside
    # imutils.resize with a cryptic error.
    if image is None:
        raise FileNotFoundError("Could not read image: {}".format(args["image"]))
    image = imutils.resize(image, width=500)
    # convert from BGR (OpenCV's channel order) to grayscale for detection
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # show the input and detect faces in the grayscale image
    # (second argument 1 = upsample once, to find smaller faces)
    cv2.imshow("Input", image)
    rects = detector(gray, 1)
    ProcessingDetectedFaces(image, gray, rects)

def ProcessingDetectedFaces(image, gray, rects):
    """Align, save, and display every detected face, then run recognition.

    For each dlib rectangle in ``rects`` this draws the bounding box and
    the facial landmarks on ``image``, writes an aligned 256px crop to the
    ``foo/`` directory, and shows the original/aligned crops.  It then
    loads the known-face encodings and dispatches to ``embeddings`` for
    the recognition step.

    Parameters
    ----------
    image : BGR image (already resized to width 500 by the caller).
    gray : grayscale version of ``image``.
    rects : iterable of dlib rectangles for the detected faces.

    NOTE(review): relies on the module-level globals ``predictor``,
    ``face`` and ``args`` set up in the ``__main__`` block — consider
    passing them in explicitly.
    """
    # loop over the faces that are detected
    for (i, rect) in enumerate(rects):
        # detected face landmark (x, y)-coordinates converted to a NumPy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # convert dlib's rectangle to an OpenCV bounding box (x, y, w, h)
        (x, y, w, h) = face_utils.rect_to_bb(rect)
        # dlib can report boxes that extend past the image border; clamp the
        # top-left corner so the crop slice below is never negative/empty
        x0, y0 = max(x, 0), max(y, 0)
        faceOrig = imutils.resize(image[y0:y + h, x0:x + w], width=256)
        faceAligned = face.align(image, gray, rect)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # persist the aligned face under a random unique filename
        f = str(uuid.uuid4())
        cv2.imwrite("foo/" + f + ".png", faceAligned)

        # shows the face number (1-based)
        cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # loop over the (x, y) coordinates for each facial landmark and
        # draw them on the image
        for (x, y) in shape:
            cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

        cv2.imshow("Original", faceOrig)
        cv2.imshow("Aligned", faceAligned)
        cv2.waitKey(0)

    # show output with facial landmarks
    cv2.imshow("Landmarks", image)

    # load the known faces and embeddings; use a context manager so the
    # file handle is always closed (the original open(...).read() leaked it)
    print("[INFO] loading encodings...")
    # NOTE(review): pickle.loads on an untrusted file can execute arbitrary
    # code — only load encodings files generated by this project
    with open(args["encodings"], "rb") as fh:
        data = pickle.loads(fh.read())

    # reload the ORIGINAL full-resolution image (the annotated copy above was
    # resized to width 500) and convert BGR -> RGB for face_recognition
    image = cv2.imread(args["image"])
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # detect the (x, y) bounding box for each face in the input image and
    # compute a facial embedding for each detected face
    print("[INFO] recognizing faces...")
    boxes = face_recognition.face_locations(rgb, model=args["detection_method"])
    encodings = face_recognition.face_encodings(rgb, boxes)
    embeddings(data, encodings, image, boxes)

# initialize the list of names of detected faces
names = []

# loop over facial embeddings
for encoding in encodings:
# compares each face in the input image to our known encodings
matches = face_recognition.compare_faces(data["encodings"], encoding)
name = "Unknown"

# check if match is found or not
if True in matches:
#find the indexes of all matches and initialize a dictionary
# to count number of times a match occur
matchedIdxs = [i for (i, b) in enumerate(matches) if b]
counts = {}

# loop over matched indexes and maintain a count for each face
for i in matchedIdxs:
name = data["names"][i]
counts[name] = counts.get(name, 0) + 1

# Select the recognized face with maximum number of matches and
# if there is a tie Python selects first entry from the dictionary
name = max(counts, key=counts.get)

# update the list of names
names.append(name)

# loop over the recognized faces
for ((top, right, bottom, left), name) in zip(boxes, names):
# draw predicted face name on image
cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
0.75, (0, 255, 0), 2)

# Output Image

cv2.imshow("Detected face", image)
cv2.waitKey(0)

rotateImage.rotateFunction(image)
def embeddings(data, encodings, image, boxes):
    """Match each face embedding against the known encodings and label it.

    Parameters
    ----------
    data : dict with keys "encodings" (known face vectors) and "names"
        (labels parallel to those vectors).
    encodings : embeddings computed for the faces in the input image.
    image : the image the faces came from (passed through for drawing).
    boxes : face bounding boxes, parallel to ``encodings``.

    Each face is assigned the known name with the most matching
    encodings, or "Unknown" when nothing matches; ``Final_Process`` then
    draws the results on the image.
    """
    names = []
    for encoding in encodings:
        # compare this face against every known encoding
        matches = face_recognition.compare_faces(data["encodings"], encoding)

        # default label when no known encoding matches
        label = "Unknown"
        if True in matches:
            # tally how many times each known person matched this face
            votes = {}
            for idx, matched in enumerate(matches):
                if matched:
                    candidate = data["names"][idx]
                    votes[candidate] = votes.get(candidate, 0) + 1

            # pick the name with the most votes; on a tie the entry
            # inserted first wins (dicts preserve insertion order)
            label = max(votes, key=votes.get)

        # update the list of names
        names.append(label)

    Final_Process(image, boxes, names)

def Final_Process(image, boxes, names):
    """Draw a labelled rectangle for every recognized face and display it.

    Parameters
    ----------
    image : image to annotate and show.
    boxes : (top, right, bottom, left) tuples from face_recognition.
    names : labels parallel to ``boxes``.

    Blocks until a key is pressed, then hands the image to rotateImage.
    """
    # loop over the recognized faces
    for (top, right, bottom, left), label in zip(boxes, names):
        # bounding box for this face
        cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)

        # put the name just above the box, unless that would leave the image
        if top - 15 > 15:
            text_y = top - 15
        else:
            text_y = top + 15
        cv2.putText(image, label, (left, text_y), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (0, 255, 0), 2)

    # show the final annotated image and wait for a key press
    cv2.imshow("Detected face", image)
    cv2.waitKey(0)
    rotateImage.rotateFunction(image)

if __name__ == "__main__":
    # initialize dlib's face detector and facial landmark predictor
    # NOTE: these three names must stay at module level — the processing
    # functions above read `predictor` and `face` as module globals
    detector = dlib.get_frontal_face_detector()
    # path to the landmark model comes from the parsed CLI arguments
    predictor = dlib.shape_predictor(args["shape_predictor"])
    # aligner that produces the 256px-wide aligned face crops
    face = FaceAligner(predictor, desiredFaceWidth=256)
    ProcessingImage(detector, face, predictor)