I want to build face recognition with FaceNet, but most of the websites I have referred to use TensorFlow v1 instead of v2. I changed the program slightly so that it runs on TF v2, but the resulting image does not recognize any face. Does anyone have an idea what is wrong with my code?
import cv2
import numpy as np
import mtcnn
from architecture import *
from train_v2 import normalize,l2_normalizer
from scipy.spatial.distance import cosine
from tensorflow.keras.models import load_model
import pickle
def get_face(img, box):
    x1, y1, width, height = box
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height
    face = img[y1:y2, x1:x2]
    return face, (x1, y1), (x2, y2)

def get_encode(face_encoder, face, size):
    face = normalize(face)
    face = cv2.resize(face, size)
    encode = face_encoder.predict(np.expand_dims(face, axis=0))[0]
    return encode

def load_pickle(path):
    with open(path, 'rb') as f:
        encoding_dict = pickle.load(f)
    return encoding_dict

#required_shape = (160,160)
face_encoder = InceptionResNetV2()
path_m = "facenet_keras_weights.h5"
face_encoder.load_weights(path_m)
people_dir = 'Faces'
encodings_path = 'encodings/encodings.pkl'
test_img_path = 'friends.jpg'
test_res_path = 'result/friends.jpg'
recognition_t = 0.3
required_size = (160, 160)
face_detector = mtcnn.MTCNN()
encoding_dict = load_pickle(encodings_path)
img = cv2.imread(test_img_path)
# plt_show(img)
def detect(img, detector, encoder, encoding_dict):
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = detector.detect_faces(img_rgb)
    for res in results:
        face, pt_1, pt_2 = get_face(img_rgb, res['box'])
        encode = get_encode(encoder, face, required_size)
        encode = l2_normalizer.transform(np.expand_dims(encode, axis=0))[0]
        name = 'unknown'
        distance = float("inf")
        for db_name, db_encode in encoding_dict.items():
            dist = cosine(db_encode, encode)
            if dist < recognition_t and dist < distance:
                name = db_name
                distance = dist
        if name == 'unknown':
            cv2.rectangle(img, pt_1, pt_2, (0, 0, 255), 2)
            cv2.putText(img, name, pt_1, cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
        else:
            cv2.rectangle(img, pt_1, pt_2, (0, 255, 0), 2)
            cv2.putText(img, name + f'__{distance:.2f}', pt_1, cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
cv2.imwrite(test_res_path, img)
cv2.imshow('Image', img)
cv2.waitKey(0)
Why don't you run it within the deepface framework for Python?
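A minimal verification call could look like the sketch below (assuming deepface is installed with pip install deepface; img1.jpg and img2.jpg are placeholder file names, not files from your project):

# pip install deepface
from deepface import DeepFace

# compare two face images using the Facenet model
result = DeepFace.verify(img1_path="img1.jpg",
                         img2_path="img2.jpg",
                         model_name="Facenet")
print(result["verified"])  # True if both images show the same person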
This is going to verify whether the two images show the same person or different persons.
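To search one image against a whole folder of known faces, a sketch along these lines should work (the db_path value is a placeholder for your own folder of reference images; depending on the deepface version, find returns a pandas DataFrame or a list of DataFrames, one per detected face):

from deepface import DeepFace

# look up the identity of img1.jpg among the images stored in the database folder
candidates = DeepFace.find(img_path="img1.jpg",
                           db_path="path/to/your/database",
                           model_name="Facenet")
print(candidates)  # candidate matches, sorted by distance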
This is going to look for the identity of img1.jpg in your database folder and return the candidates in pandas DataFrame format.
Deepface builds the Facenet model, downloads its pre-trained weights, and applies the pre-processing stages of a face recognition pipeline (detection and alignment) in the background. You just need to call its verify or find function.
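Since your script already uses MTCNN for detection, note that the detector backend and the recognition model can also be chosen explicitly. This is only a sketch with one possible combination (parameter names as in recent deepface releases):

from deepface import DeepFace

# MTCNN for detection/alignment, Facenet for recognition
result = DeepFace.verify(img1_path="img1.jpg",
                         img2_path="img2.jpg",
                         model_name="Facenet",
                         detector_backend="mtcnn")
print(result["verified"], result["distance"])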