import os
import gradio as gr
import requests
import json
import cv2
import numpy as np
import time
from PIL import Image
from fr.engine.header import *
from fl.engine.header import *
import fr.engine.header as fr_header
import fl.engine.header as fl_header
# Custom CSS injected into the Gradio Blocks app (see launch_demo).
# NOTE(review): this is a runtime string — its content (including the
# commented-out background-color under .block-background, which CSS treats
# as an invalid declaration and ignores) is passed verbatim to the browser.
css = """
.example-image img{
display: flex; /* Use flexbox to align items */
justify-content: center; /* Center the image horizontally */
align-items: center; /* Center the image vertically */
height: 300px; /* Set the height of the container */
object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}
.example-image{
display: flex; /* Use flexbox to align items */
justify-content: center; /* Center the image horizontally */
align-items: center; /* Center the image vertically */
height: 350px; /* Set the height of the container */
object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}
.face-row {
display: flex;
justify-content: space-around; /* Distribute space evenly between elements */
align-items: center; /* Align items vertically */
width: 100%; /* Set the width of the row to 100% */
}
.face-image{
justify-content: center; /* Center the image horizontally */
align-items: center; /* Center the image vertically */
height: 160px; /* Set the height of the container */
width: 160px;
object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}
.face-image img{
justify-content: center; /* Center the image horizontally */
align-items: center; /* Center the image vertically */
height: 160px; /* Set the height of the container */
object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */
}
.markdown-success-container {
background-color: #F6FFED;
padding: 20px;
margin: 20px;
border-radius: 1px;
border: 2px solid green;
text-align: center;
}
.markdown-fail-container {
background-color: #FFF1F0;
padding: 20px;
margin: 20px;
border-radius: 1px;
border: 2px solid red;
text-align: center;
}
.markdown-attribute-container {
display: flex;
justify-content: space-around; /* Distribute space evenly between elements */
align-items: center; /* Align items vertically */
padding: 10px;
margin: 10px;
}
.block-background {
# background-color: #202020; /* Set your desired background color */
border-radius: 5px;
}
"""
# Absolute path of this script and its directory; used to locate the bundled
# SDK data directories ("fr/engine/bin", "fl/engine/bin") in the activators.
file_path = os.path.abspath(__file__)
root_path = os.path.dirname(file_path)
# SDK activation results: 0 means the SDK initialized successfully; -1 (or any
# non-zero code) means activation failed or was never attempted. Set from the
# __main__ guard; read by the Gradio callbacks before calling the engines.
g_fr_activation_result = -1
g_fl_activation_result = -1
# Similarity threshold passed to fr_header.compare_face (same-person cutoff).
MATCH_THRESHOLD = 0.67
# Liveness threshold passed to fl_header.check_liveness (REAL vs spoof cutoff).
SPOOF_THRESHOLD = 0.5
def activate_fr_sdk():
    """Initialize the face-recognition (FR) SDK with the FR_LICENSE_KEY env var.

    Returns:
        int: 0 on success; -1 if the license key env var is missing; otherwise
        the non-zero error code returned by fr_header.init_sdk.
    """
    fr_key = os.environ.get("FR_LICENSE_KEY")
    fr_dict_path = os.path.join(root_path, "fr/engine/bin")
    ret = -1
    if fr_key is None:
        print_warning("Recognition online license key not found!")
    else:
        # init_sdk expects bytes for both the data path and the key.
        ret = fr_header.init_sdk(fr_dict_path.encode('utf-8'), fr_key.encode('utf-8'))
        if ret == 0:
            print_log("Successfully init FR SDK!")
        else:
            # Fixed typo in the original message ("Falied" -> "Failed").
            print_error(f"Failed to init FR SDK, Error code {ret}")
    return ret
def activate_fl_sdk():
    """Initialize the face-liveness (FL) SDK with the FL_LICENSE_KEY env var.

    Returns:
        int: 0 on success; -1 if the license key env var is missing; otherwise
        the non-zero error code returned by fl_header.init_sdk.
    """
    fl_key = os.environ.get("FL_LICENSE_KEY")
    fl_dict_path = os.path.join(root_path, "fl/engine/bin")
    ret = -1
    if fl_key is None:
        print_warning("Liveness Detection online license key not found!")
    else:
        # init_sdk expects bytes for both the data path and the key.
        ret = fl_header.init_sdk(fl_dict_path.encode('utf-8'), fl_key.encode('utf-8'))
        if ret == 0:
            print_log("Successfully init FL SDK!")
        else:
            # Fixed typo in the original message ("Falied" -> "Failed").
            print_error(f"Failed to init FL SDK, Error code {ret}")
    return ret
def convert_fun(input_str):
    """Collapse every run of whitespace (spaces, tabs, line breaks) into a
    single space and strip leading/trailing whitespace."""
    tokens = input_str.split()
    return " ".join(tokens)
# def get_attributes(frame):
# url = "https://recognito.p.rapidapi.com/api/analyze_face"
# try:
# files = {'image': open(frame, 'rb')}
# headers = {"X-RapidAPI-Key": os.environ.get("API_KEY")}
# r = requests.post(url=url, files=files, headers=headers)
# except:
# raise gr.Error("Please select images file!")
# faces = None
# face_crop, one_line_attribute = None, ""
# try:
# image = Image.open(frame)
# face = Image.new('RGBA',(150, 150), (80,80,80,0))
# res = r.json().get('image')
# if res is not None and res:
# face = res.get('detection')
# x1 = face.get('x')
# y1 = face.get('y')
# x2 = x1 + face.get('w')
# y2 = y1 + face.get('h')
# if x1 < 0:
# x1 = 0
# if y1 < 0:
# y1 = 0
# if x2 >= image.width:
# x2 = image.width - 1
# if y2 >= image.height:
# y2 = image.height - 1
# face_crop = image.crop((x1, y1, x2, y2))
# face_image_ratio = face_crop.width / float(face_crop.height)
# resized_w = int(face_image_ratio * 150)
# resized_h = 150
# face_crop = face_crop.resize((int(resized_w), int(resized_h)))
# attr = res.get('attribute')
# age = attr.get('age')
# gender = attr.get('gender')
# emotion = attr.get('emotion')
# ethnicity = attr.get('ethnicity')
# mask = attr.get('face_mask')
# glass = 'No Glasses'
# if attr.get('glasses') == 'USUAL':
# glass = 'Glasses'
# if attr.get('glasses') == 'DARK':
# glass = 'Sunglasses'
# open_eye_thr = 0.3
# left_eye = 'Close'
# if attr.get('eye_left') >= open_eye_thr:
# left_eye = 'Open'
# right_eye = 'Close'
# if attr.get('eye_right') >= open_eye_thr:
# right_eye = 'Open'
# facehair = attr.get('facial_hair')
# haircolor = attr.get('hair_color')
# hairtype = attr.get('hair_type')
# headwear = attr.get('headwear')
# pitch = attr.get('pitch')
# roll = attr.get('roll')
# yaw = attr.get('yaw')
# quality = attr.get('quality')
# attribute = f"""
#
#
#
#
# Attribute |
# Result |
# Score |
# Threshold |
#
#
# Gender |
# {gender} |
# | |
#
#
# Age |
# {int(age)} |
# | |
#
#
# Pitch |
# {"{:.4f}".format(pitch)} |
# | |
#
#
# Yaw |
# {"{:.4f}".format(yaw)} |
# | |
#
#
# Roll |
# {"{:.4f}".format(roll)} |
# | |
#
#
# Emotion |
# {emotion} |
# | |
#
#
# Left Eye |
# {left_eye} |
# {"{:.4f}".format(attr.get('eye_left'))} |
# {open_eye_thr} |
#
#
# Right Eye |
# {right_eye} |
# {"{:.4f}".format(attr.get('eye_right'))} |
# {open_eye_thr} |
#
#
# Mask |
# {mask} |
# | |
#
#
# Glass |
# {glass} |
# | |
#
#
# FaceHair |
# {facehair} |
# | |
#
#
# HairColor |
# {haircolor} |
# | |
#
#
# HairType |
# {hairtype} |
# | |
#
#
# HeadWear |
# {headwear} |
# | |
#
#
# Image Quality |
# {"{:.4f}".format(quality)} |
# | |
#
#
#
# """
# one_line_attribute = convert_fun(attribute)
# except:
# pass
# return face_crop, one_line_attribute
def check_liveness(frame):
    """Run liveness detection on one image file and build the UI artifacts.

    Args:
        frame: Filesystem path of the image selected in the Gradio UI.

    Returns:
        tuple: (face_crop, liveness_result, one_line_attribute) where
            face_crop is a PIL.Image of the detected face resized to 150px
            height (or None if no usable face rectangle was found),
            liveness_result is a markdown string with the verdict and score,
            and one_line_attribute is a single-line markdown string with the
            face rectangle and head-pose angles.
        Returns (None, None, None) when the FL SDK was not activated.

    Raises:
        gr.Error: when the image file cannot be opened (e.g. nothing selected).
    """
    # NOTE(review): a large block of commented-out legacy RapidAPI code was
    # removed here; some of its lines had lost their '#' markers and made the
    # file un-parseable.
    global g_fl_activation_result
    if g_fl_activation_result != 0:
        gr.Warning("FL SDK Activation Failed!")
        return None, None, None

    # Read the raw bytes; the 'with' guarantees the handle is closed (the
    # original code leaked the file object). TypeError covers frame=None
    # (no file selected), matching the original bare-except behavior.
    try:
        with open(frame, 'rb') as image_file:
            image_bytes = image_file.read()
    except (OSError, TypeError):
        raise gr.Error("Please select image file!")

    image_mat = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_COLOR)
    start_time = time.time()
    result, face_rect, score, angles = fl_header.check_liveness(image_mat, SPOOF_THRESHOLD)
    end_time = time.time()
    process_time = (end_time - start_time) * 1000  # ms; kept for debugging

    face_crop, one_line_attribute = None, ""
    try:
        image = Image.open(frame)
        if face_rect is not None:
            # face_rect is assumed to be (x1, y1, x2, y2) — TODO confirm
            # against the fl_header SDK documentation.
            x1 = int(face_rect[0])
            y1 = int(face_rect[1])
            x2 = int(face_rect[2])
            y2 = int(face_rect[3])
            # Clamp the rectangle to the image bounds.
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 >= image.width:
                x2 = image.width - 1
            if y2 >= image.height:
                y2 = image.height - 1
            if (x2 - x1) != 0 and (y2 - y1) != 0:
                # Crop the face and scale to a fixed 150px height, keeping
                # the aspect ratio.
                face_crop = image.crop((x1, y1, x2, y2))
                face_image_ratio = face_crop.width / float(face_crop.height)
                resized_w = int(face_image_ratio * 150)
                resized_h = 150
                face_crop = face_crop.resize((int(resized_w), int(resized_h)))
            if angles is not None:
                yaw = angles[0]
                roll = angles[1]
                pitch = angles[2]
                # Whitespace here is irrelevant: convert_fun collapses it.
                attribute = f"""
                Field |
                Value |
                Face Rect |
                x |
                {x1} |
                y |
                {y1} |
                width |
                {x2 - x1 + 1} |
                height |
                {y2 - y1 + 1} |
                Face Angle |
                Pitch |
                {"{:.4f}".format(pitch)} |
                Yaw |
                {"{:.4f}".format(yaw)} |
                Roll |
                {"{:.4f}".format(roll)} |
                """
                one_line_attribute = convert_fun(attribute)
    except Exception:
        # Best-effort: thumbnail/attribute rendering failures must not break
        # the liveness verdict below.
        pass

    str_score = str("{:.4f}".format(score))
    # Content lines stay at column 0 so gr.Markdown does not treat them as an
    # indented code block.
    if result == "REAL":
        liveness_result = f"""
Liveness Check: REAL
Score: {str_score}
"""
    else:
        liveness_result = f"""
Liveness Check: {result}
Score: {str_score}
"""
    return face_crop, liveness_result, one_line_attribute
def analyze_face(frame):
    """Gradio handler: run the liveness check on *frame* and return its
    face crop, liveness verdict markdown, and attribute markdown as a list."""
    crop, liveness_md, attribute_md = check_liveness(frame)
    return [crop, liveness_md, attribute_md]
def compare_face(frame1, frame2):
    """Compare the faces in two image files with the FR SDK.

    Args:
        frame1: Filesystem path of the first image.
        frame2: Filesystem path of the second image.

    Returns:
        tuple: (face1, face2, matching_result, similarity_score) where face1
        and face2 are PIL face thumbnails (grey placeholders if detection or
        cropping failed), matching_result is an icon image (same / different /
        blank), and similarity_score is a markdown string with the score.
        Returns (None, None, None, None) when the FR SDK was not activated.

    Raises:
        gr.Error: when either image file cannot be opened.
    """
    # NOTE(review): the original carried its legacy RapidAPI implementation
    # (itself containing truncated lines) inside a docstring; that dead code
    # was removed.
    global g_fr_activation_result
    if g_fr_activation_result != 0:
        gr.Warning("FR SDK Activation Failed!")
        return None, None, None, None

    # Read both files; 'with' closes the handles (the original leaked them).
    # TypeError covers a None path (nothing selected), matching the original
    # bare-except behavior.
    try:
        with open(frame1, 'rb') as f1:
            bytes1 = f1.read()
        with open(frame2, 'rb') as f2:
            bytes2 = f2.read()
    except (OSError, TypeError):
        raise gr.Error("Please select images files!")

    image_mat1 = cv2.imdecode(np.frombuffer(bytes1, np.uint8), cv2.IMREAD_COLOR)
    image_mat2 = cv2.imdecode(np.frombuffer(bytes2, np.uint8), cv2.IMREAD_COLOR)
    start_time = time.time()
    result, score, face_bboxes, face_features = fr_header.compare_face(image_mat1, image_mat2, MATCH_THRESHOLD)
    end_time = time.time()
    process_time = (end_time - start_time) * 1000  # ms; kept for debugging

    # Initialize placeholders BEFORE the try: the original assigned `faces`
    # inside the try, so an early exception (swallowed below) led to a
    # NameError at the `faces[0]` access further down.
    faces = [Image.new('RGBA', (150, 150), (80, 80, 80, 0)),
             Image.new('RGBA', (150, 150), (80, 80, 80, 0))]
    face_bboxes_result = []
    try:
        images = [Image.open(frame1), Image.open(frame2)]
        if face_bboxes is not None:
            # One bbox per input image is assumed (bbox i belongs to image i)
            # — TODO confirm against the fr_header SDK documentation.
            for i, bbox in enumerate(face_bboxes):
                x1 = bbox[0]
                y1 = bbox[1]
                x2 = bbox[2]
                y2 = bbox[3]
                # Clamp the rectangle to the image bounds.
                if x1 < 0:
                    x1 = 0
                if y1 < 0:
                    y1 = 0
                if x2 >= images[i].width:
                    x2 = images[i].width - 1
                if y2 >= images[i].height:
                    y2 = images[i].height - 1
                face_bbox_str = f"x1: {x1}, y1: {y1}, x2: {x2}, y2: {y2}"
                face_bboxes_result.append(face_bbox_str)
                # Crop and scale to a fixed 150px height, keeping aspect ratio.
                faces[i] = images[i].crop((x1, y1, x2, y2))
                face_image_ratio = faces[i].width / float(faces[i].height)
                resized_w = int(face_image_ratio * 150)
                resized_h = 150
                faces[i] = faces[i].resize((int(resized_w), int(resized_h)))
    except Exception:
        # Best-effort: thumbnail failures must not break the match verdict.
        pass

    matching_result = Image.open("icons/blank.png")
    similarity_score = ""
    if faces[0] is not None and faces[1] is not None:
        if score is not None:
            str_score = str("{:.4f}".format(score))
            # Markdown content stays at column 0 so gr.Markdown does not
            # render it as an indented code block.
            if result == "SAME PERSON":
                matching_result = Image.open("icons/same.png")
                similarity_score = f"""
Similarity score: {str_score}
"""
            else:
                matching_result = Image.open("icons/different.png")
                similarity_score = f"""
Similarity score: {str_score}
"""
    return faces[0], faces[1], matching_result, similarity_score
def launch_demo(activate_fr_result, activate_fl_result):
    """Build and launch the Gradio demo (two tabs: recognition and liveness).

    Args:
        activate_fr_result: FR SDK activation code (0 = success). Not read
            here — the click handlers consult the module-level globals; kept
            for interface compatibility.
        activate_fl_result: FL SDK activation code (0 = success). Same note.
    """
    with gr.Blocks(css=css) as demo:
        # Header text; lines stay at column 0 so Markdown does not treat them
        # as an indented code block.
        gr.Markdown(
            """
Recognito
www.recognito.vision
✨ NIST FRVT Top #1 Face Recognition Algorithm Developer
📘 Product Documentation
🏠 Visit Recognito
🤝 Contact us for our on-premise Face Recognition, Liveness Detection SDKs deployment
"""
        )
        with gr.Tabs():
            with gr.Tab("Face Recognition"):
                with gr.Row():
                    with gr.Column(scale=2):
                        with gr.Row():
                            with gr.Column(scale=1):
                                compare_face_input1 = gr.Image(label="Image1", type='filepath', elem_classes="example-image")
                                gr.Examples(['examples/1.jpg', 'examples/2.jpg', 'examples/3.jpg', 'examples/4.jpg'],
                                            inputs=compare_face_input1)
                            with gr.Column(scale=1):
                                compare_face_input2 = gr.Image(label="Image2", type='filepath', elem_classes="example-image")
                                gr.Examples(['examples/5.jpg', 'examples/6.jpg', 'examples/7.jpg', 'examples/8.jpg'],
                                            inputs=compare_face_input2)
                    with gr.Blocks():
                        with gr.Column(scale=1, min_width=400, elem_classes="block-background"):
                            compare_face_button = gr.Button("Compare Face", variant="primary", size="lg")
                            with gr.Row(elem_classes="face-row"):
                                face_output1 = gr.Image(value="icons/face.jpg", label="Face 1", scale=0, elem_classes="face-image", show_share_button=False, show_download_button=False, show_fullscreen_button=False)
                                compare_result = gr.Image(value="icons/blank.png", min_width=30, scale=0, show_download_button=False, show_label=False, show_share_button=False, show_fullscreen_button=False)
                                face_output2 = gr.Image(value="icons/face.jpg", label="Face 2", scale=0, elem_classes="face-image", show_share_button=False, show_download_button=False, show_fullscreen_button=False)
                            similarity_markdown = gr.Markdown("")
                            compare_face_button.click(compare_face, inputs=[compare_face_input1, compare_face_input2], outputs=[face_output1, face_output2, compare_result, similarity_markdown])
            with gr.Tab("Face Liveness, Analysis"):
                with gr.Row():
                    with gr.Column(scale=1):
                        face_input = gr.Image(label="Image", type='filepath', elem_classes="example-image")
                        gr.Examples(['examples/att_1.jpg', 'examples/att_2.jpg', 'examples/att_3.jpg', 'examples/att_4.jpg', 'examples/att_5.jpg', 'examples/att_6.jpg', 'examples/att_7.jpg'],
                                    inputs=face_input)
                    with gr.Blocks():
                        with gr.Column(scale=1, elem_classes="block-background"):
                            analyze_face_button = gr.Button("Analyze Face", variant="primary", size="lg")
                            with gr.Row(elem_classes="face-row"):
                                face_output = gr.Image(value="icons/face.jpg", label="Face", scale=0, elem_classes="face-image", show_share_button=False, show_download_button=False, show_fullscreen_button=False)
                            liveness_result = gr.Markdown("")
                            attribute_result = gr.Markdown("")
                            analyze_face_button.click(analyze_face, inputs=face_input, outputs=[face_output, liveness_result, attribute_result])
        # TODO(review): the original footer markup was corrupted in the source
        # (`gr.HTML('` split across lines, contents lost); restore the intended
        # HTML here. An empty placeholder keeps the file syntactically valid.
        gr.HTML("")
    demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)
if __name__ == '__main__':
    # Activate both SDKs once at startup; the Gradio callbacks check these
    # module-level results (0 == success) before calling into the engines.
    g_fr_activation_result = activate_fr_sdk()
    g_fl_activation_result = activate_fl_sdk()
    launch_demo(g_fr_activation_result, g_fl_activation_result)