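"""Recognito face recognition / liveness detection Gradio demo.

Two tabs: "Face Recognition" compares two images via fr_header.compare_face(),
and "Face Liveness, Analysis" runs fl_header.check_liveness() on a single image.
Both SDKs must be activated with license keys (see FR_LICENSE_KEY / FL_LICENSE_KEY below).
"""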
import os
import gradio as gr
import requests
import json
import cv2
import numpy as np
import time
from PIL import Image

from fr.engine.header import *
from fl.engine.header import *
import fr.engine.header as fr_header
import fl.engine.header as fl_header

css = """ |
|
.example-image img{ |
|
display: flex; /* Use flexbox to align items */ |
|
justify-content: center; /* Center the image horizontally */ |
|
align-items: center; /* Center the image vertically */ |
|
height: 300px; /* Set the height of the container */ |
|
object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */ |
|
} |
|
|
|
.example-image{ |
|
display: flex; /* Use flexbox to align items */ |
|
justify-content: center; /* Center the image horizontally */ |
|
align-items: center; /* Center the image vertically */ |
|
height: 350px; /* Set the height of the container */ |
|
object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */ |
|
} |
|
|
|
.face-row { |
|
display: flex; |
|
justify-content: space-around; /* Distribute space evenly between elements */ |
|
align-items: center; /* Align items vertically */ |
|
width: 100%; /* Set the width of the row to 100% */ |
|
} |
|
|
|
.face-image{ |
|
justify-content: center; /* Center the image horizontally */ |
|
align-items: center; /* Center the image vertically */ |
|
height: 160px; /* Set the height of the container */ |
|
width: 160px; |
|
object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */ |
|
} |
|
|
|
.face-image img{ |
|
justify-content: center; /* Center the image horizontally */ |
|
align-items: center; /* Center the image vertically */ |
|
height: 160px; /* Set the height of the container */ |
|
object-fit: contain; /* Preserve aspect ratio while fitting the image within the container */ |
|
} |
|
|
|
.markdown-success-container { |
|
background-color: #F6FFED; |
|
padding: 20px; |
|
margin: 20px; |
|
border-radius: 1px; |
|
border: 2px solid green; |
|
text-align: center; |
|
} |
|
|
|
.markdown-fail-container { |
|
background-color: #FFF1F0; |
|
padding: 20px; |
|
margin: 20px; |
|
border-radius: 1px; |
|
border: 2px solid red; |
|
text-align: center; |
|
} |
|
|
|
.markdown-attribute-container { |
|
display: flex; |
|
justify-content: space-around; /* Distribute space evenly between elements */ |
|
align-items: center; /* Align items vertically */ |
|
padding: 10px; |
|
margin: 10px; |
|
} |
|
|
|
.block-background {
    /* background-color: #202020; */ /* Set your desired background color */
    border-radius: 5px;
}
"""

file_path = os.path.abspath(__file__)
root_path = os.path.dirname(file_path)

g_fr_activation_result = -1
g_fl_activation_result = -1

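# Decision thresholds handed straight to the SDK calls below
# (fr_header.compare_face and fl_header.check_liveness).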
MATCH_THRESHOLD = 0.67
SPOOF_THRESHOLD = 0.5


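# SDK activation: license keys are read from the FR_LICENSE_KEY / FL_LICENSE_KEY
# environment variables; init_sdk() returns 0 on success.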
def activate_fr_sdk():
    fr_key = os.environ.get("FR_LICENSE_KEY")
    fr_dict_path = os.path.join(root_path, "fr/engine/bin")

    ret = -1
    if fr_key is None:
        print_warning("Recognition online license key not found!")
    else:
        ret = fr_header.init_sdk(fr_dict_path.encode('utf-8'), fr_key.encode('utf-8'))

    if ret == 0:
        print_log("Successfully initialized FR SDK!")
    else:
        print_error(f"Failed to init FR SDK, error code {ret}")

    return ret


def activate_fl_sdk():
    fl_key = os.environ.get("FL_LICENSE_KEY")
    fl_dict_path = os.path.join(root_path, "fl/engine/bin")

    ret = -1
    if fl_key is None:
        print_warning("Liveness Detection online license key not found!")
    else:
        ret = fl_header.init_sdk(fl_dict_path.encode('utf-8'), fl_key.encode('utf-8'))

    if ret == 0:
        print_log("Successfully initialized FL SDK!")
    else:
        print_error(f"Failed to init FL SDK, error code {ret}")

    return ret


def convert_fun(input_str):
    # Collapse whitespace runs into single spaces so the generated HTML fits on one line.
    return ' '.join(input_str.split())


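# Single-image liveness check: returns (cropped face image, result HTML, attribute-table HTML).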
def check_liveness(frame):
    global g_fl_activation_result
    if g_fl_activation_result != 0:
        gr.Warning("FL SDK Activation Failed!")
        return None, None, None

    try:
        image = open(frame, 'rb')
    except:
        raise gr.Error("Please select an image file!")

    image_mat = cv2.imdecode(np.frombuffer(image.read(), np.uint8), cv2.IMREAD_COLOR)
    start_time = time.time()
    result, face_rect, score, angles = fl_header.check_liveness(image_mat, SPOOF_THRESHOLD)
    end_time = time.time()
    process_time = (end_time - start_time) * 1000  # elapsed time in ms (not currently displayed)

    face_crop, one_line_attribute = None, ""
    try:
        image = Image.open(frame)

        face = Image.new('RGBA', (150, 150), (80, 80, 80, 0))

        if face_rect is not None:
            x1 = int(face_rect[0])
            y1 = int(face_rect[1])
            x2 = int(face_rect[2])
            y2 = int(face_rect[3])

            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 >= image.width:
                x2 = image.width - 1
            if y2 >= image.height:
                y2 = image.height - 1

            if (x2 - x1) != 0 and (y2 - y1) != 0:
                face_crop = image.crop((x1, y1, x2, y2))
                face_image_ratio = face_crop.width / float(face_crop.height)
                resized_w = int(face_image_ratio * 150)
                resized_h = 150

                face_crop = face_crop.resize((int(resized_w), int(resized_h)))

            if angles is not None:
                yaw = angles[0]
                roll = angles[1]
                pitch = angles[2]

                attribute = f"""
                    <br/>
                    <div class="markdown-attribute-container">
                    <table>
                        <tr>
                            <th>Field</th>
                            <th colspan="2">Value</th>
                        </tr>
                        <tr>
                            <th rowspan="4">Face Rect</th>
                            <td>x</td>
                            <td>{x1}</td>
                        </tr>
                        <tr>
                            <td>y</td>
                            <td>{y1}</td>
                        </tr>
                        <tr>
                            <td>width</td>
                            <td>{x2 - x1 + 1}</td>
                        </tr>
                        <tr>
                            <td>height</td>
                            <td>{y2 - y1 + 1}</td>
                        </tr>
                        <tr>
                            <th rowspan="3">Face Angle</th>
                            <td>Pitch</td>
                            <td>{"{:.4f}".format(pitch)}</td>
                        </tr>
                        <tr>
                            <td>Yaw</td>
                            <td>{"{:.4f}".format(yaw)}</td>
                        </tr>
                        <tr>
                            <td>Roll</td>
                            <td>{"{:.4f}".format(roll)}</td>
                        </tr>
                    </table>
                    </div>
                    """

                one_line_attribute = convert_fun(attribute)
    except:
        pass

    str_score = str("{:.4f}".format(score))
    if result == "REAL":
        liveness_result = f"""<br/><div class="markdown-success-container"><p style="text-align: center; font-size: 20px; color: green;">Liveness Check: REAL<br/>Score: {str_score}</p></div>"""
    else:
        liveness_result = f"""<br/><div class="markdown-fail-container"><p style="text-align: center; font-size: 20px; color: red;">Liveness Check: {result}<br/>Score: {str_score}</p></div>"""

    return face_crop, liveness_result, one_line_attribute


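# Thin wrapper wired to the "Analyze Face" button in the UI below.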
def analyze_face(frame):
    face_crop, liveness_result, attribute = check_liveness(frame)
    return [face_crop, liveness_result, attribute]


def compare_face(frame1, frame2):
    """
    url = "https://recognito.p.rapidapi.com/api/compare_face"
    try:
        files = {'image1': open(frame1, 'rb'), 'image2': open(frame2, 'rb')}
        headers = {"X-RapidAPI-Key": os.environ.get("API_KEY")}

        r = requests.post(url=url, files=files, headers=headers)
    except:
        raise gr.Error("Please select images files!")

    faces = None

    try:
        image1 = Image.open(frame1)
        image2 = Image.open(frame2)

        face1 = Image.new('RGBA', (150, 150), (80, 80, 80, 0))
        face2 = Image.new('RGBA', (150, 150), (80, 80, 80, 0))

        res1 = r.json().get('image1')

        if res1 is not None and res1:
            face = res1.get('detection')
            x1 = face.get('x')
            y1 = face.get('y')
            x2 = x1 + face.get('w')
            y2 = y1 + face.get('h')
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 >= image1.width:
                x2 = image1.width - 1
            if y2 >= image1.height:
                y2 = image1.height - 1

            face1 = image1.crop((x1, y1, x2, y2))
            face_image_ratio = face1.width / float(face1.height)
            resized_w = int(face_image_ratio * 150)
            resized_h = 150

            face1 = face1.resize((int(resized_w), int(resized_h)))

        res2 = r.json().get('image2')
        if res2 is not None and res2:
            face = res2.get('detection')
            x1 = face.get('x')
            y1 = face.get('y')
            x2 = x1 + face.get('w')
            y2 = y1 + face.get('h')

            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 >= image2.width:
                x2 = image2.width - 1
            if y2 >= image2.height:
                y2 = image2.height - 1

            face2 = image2.crop((x1, y1, x2, y2))
            face_image_ratio = face2.width / float(face2.height)
            resized_w = int(face_image_ratio * 150)
            resized_h = 150

            face2 = face2.resize((int(resized_w), int(resized_h)))
    except:
        pass

    matching_result = Image.open("icons/blank.png")
    similarity_score = ""
    if face1 is not None and face2 is not None:
        matching_score = r.json().get('matching_score')
        if matching_score is not None:
            str_score = str("{:.4f}".format(matching_score))
            if matching_score >= 0.7:
                matching_result = Image.open("icons/same.png")
                similarity_score =
            else:
                matching_result = Image.open("icons/different.png")
                similarity_score =

    return [face1, face2, matching_result, similarity_score]
    """
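    # NOTE: the docstring above preserves an alternative RapidAPI-based variant of this
    # function; only the SDK-based code below actually runs.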
    global g_fr_activation_result
    if g_fr_activation_result != 0:
        gr.Warning("FR SDK Activation Failed!")
        return None, None, None, None

    try:
        image1 = open(frame1, 'rb')
        image2 = open(frame2, 'rb')
    except:
        raise gr.Error("Please select image files!")

    image_mat1 = cv2.imdecode(np.frombuffer(image1.read(), np.uint8), cv2.IMREAD_COLOR)
    image_mat2 = cv2.imdecode(np.frombuffer(image2.read(), np.uint8), cv2.IMREAD_COLOR)
    start_time = time.time()
    result, score, face_bboxes, face_features = fr_header.compare_face(image_mat1, image_mat2, MATCH_THRESHOLD)
    end_time = time.time()
    process_time = (end_time - start_time) * 1000  # elapsed time in ms (not currently displayed)

    # Initialize first so a failure inside the try-block cannot leave `faces` undefined.
    faces = [None, None]
    try:
        image1 = Image.open(frame1)
        image2 = Image.open(frame2)
        images = [image1, image2]

        face1 = Image.new('RGBA', (150, 150), (80, 80, 80, 0))
        face2 = Image.new('RGBA', (150, 150), (80, 80, 80, 0))
        faces = [face1, face2]

        face_bboxes_result = []
        if face_bboxes is not None:
            for i, bbox in enumerate(face_bboxes):
                x1 = bbox[0]
                y1 = bbox[1]
                x2 = bbox[2]
                y2 = bbox[3]
                if x1 < 0:
                    x1 = 0
                if y1 < 0:
                    y1 = 0
                if x2 >= images[i].width:
                    x2 = images[i].width - 1
                if y2 >= images[i].height:
                    y2 = images[i].height - 1

                face_bbox_str = f"x1: {x1}, y1: {y1}, x2: {x2}, y2: {y2}"
                face_bboxes_result.append(face_bbox_str)

                faces[i] = images[i].crop((x1, y1, x2, y2))
                face_image_ratio = faces[i].width / float(faces[i].height)
                resized_w = int(face_image_ratio * 150)
                resized_h = 150

                faces[i] = faces[i].resize((int(resized_w), int(resized_h)))
    except:
        pass

    matching_result = Image.open("icons/blank.png")
    similarity_score = ""
    if faces[0] is not None and faces[1] is not None:
        if score is not None:
            str_score = str("{:.4f}".format(score))
            if result == "SAME PERSON":
                matching_result = Image.open("icons/same.png")
                similarity_score = f"""<br/><div class="markdown-success-container"><p style="text-align: center; font-size: 20px; color: green;">Similarity score: {str_score}</p></div>"""
            else:
                matching_result = Image.open("icons/different.png")
                similarity_score = f"""<br/><div class="markdown-fail-container"><p style="text-align: center; font-size: 20px; color: red;">Similarity score: {str_score}</p></div>"""

    return faces[0], faces[1], matching_result, similarity_score


def launch_demo(activate_fr_result, activate_fl_result):
    with gr.Blocks(css=css) as demo:
        gr.Markdown(
            """
            <a href="https://recognito.vision" style="display: flex; align-items: center;">
                <img src="https://recognito.vision/wp-content/uploads/2024/03/Recognito-modified.png" style="width: 8%; margin-right: 15px;"/>
                <div>
                    <p style="font-size: 32px; font-weight: bold; margin: 0;">Recognito</p>
                    <p style="font-size: 18px; margin: 0;">www.recognito.vision</p>
                </div>
            </a>

            <p style="font-size: 20px; font-weight: bold;">✨ NIST FRVT Top #1 Face Recognition Algorithm Developer</p>
            <div style="display: flex; align-items: center;">
                &nbsp;&nbsp;<a href="https://pages.nist.gov/frvt/html/frvt11.html"> <p style="font-size: 14px;">👉🏻 Latest NIST FRVT Report</p></a>
            </div>
            <p style="font-size: 20px; font-weight: bold;">📚 Product Documentation</p>
            <div style="display: flex; align-items: center;">
                &nbsp;&nbsp;<a href="https://docs.recognito.vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/05/book.png" style="width: 48px; margin-right: 5px;"/></a>
            </div>
            <p style="font-size: 20px; font-weight: bold;">🌐 Visit Recognito</p>
            <div style="display: flex; align-items: center;">
                &nbsp;&nbsp;<a href="https://recognito.vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/recognito_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
                <a href="https://www.linkedin.com/company/recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/linkedin_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
                <a href="https://huggingface.co/recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/hf_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
                <a href="https://github.com/recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/github_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
                <a href="https://hub.docker.com/u/recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/docker_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
                <a href="https://www.youtube.com/@recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/04/youtube_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            </div>
            <p style="font-size: 20px; font-weight: bold;">🤝 Contact us for on-premise deployment of our Face Recognition and Liveness Detection SDKs</p>
            <div style="display: flex; align-items: center;">
                &nbsp;&nbsp;<a target="_blank" href="mailto:[email protected]"><img src="https://img.shields.io/badge/[email protected]?logo=gmail " alt="www.recognito.vision"></a>
                <a target="_blank" href="https://wa.me/+14158003112"><img src="https://img.shields.io/badge/whatsapp-+14158003112-blue.svg?logo=whatsapp " alt="www.recognito.vision"></a>
                <a target="_blank" href="https://t.me/recognito_vision"><img src="https://img.shields.io/badge/telegram-@recognito__vision-blue.svg?logo=telegram " alt="www.recognito.vision"></a>
                <a target="_blank" href="https://join.slack.com/t/recognito-workspace/shared_invite/zt-2d4kscqgn-"><img src="https://img.shields.io/badge/slack-recognito__workspace-blue.svg?logo=slack " alt="www.recognito.vision"></a>
            </div>
            <br/><br/><br/>
            """
        )

        with gr.Tabs():
            with gr.Tab("Face Recognition"):
                with gr.Row():
                    with gr.Column(scale=2):
                        with gr.Row():
                            with gr.Column(scale=1):
                                compare_face_input1 = gr.Image(label="Image1", type='filepath', elem_classes="example-image")
                                gr.Examples(['examples/1.jpg', 'examples/2.jpg', 'examples/3.jpg', 'examples/4.jpg'],
                                            inputs=compare_face_input1)
                            with gr.Column(scale=1):
                                compare_face_input2 = gr.Image(label="Image2", type='filepath', elem_classes="example-image")
                                gr.Examples(['examples/5.jpg', 'examples/6.jpg', 'examples/7.jpg', 'examples/8.jpg'],
                                            inputs=compare_face_input2)

                    with gr.Blocks():
                        with gr.Column(scale=1, min_width=400, elem_classes="block-background"):
                            compare_face_button = gr.Button("Compare Face", variant="primary", size="lg")
                            with gr.Row(elem_classes="face-row"):
                                face_output1 = gr.Image(value="icons/face.jpg", label="Face 1", scale=0, elem_classes="face-image", show_share_button=False, show_download_button=False, show_fullscreen_button=False)
                                compare_result = gr.Image(value="icons/blank.png", min_width=30, scale=0, show_download_button=False, show_label=False, show_share_button=False, show_fullscreen_button=False)
                                face_output2 = gr.Image(value="icons/face.jpg", label="Face 2", scale=0, elem_classes="face-image", show_share_button=False, show_download_button=False, show_fullscreen_button=False)
                            similarity_markdown = gr.Markdown("")

                            compare_face_button.click(compare_face, inputs=[compare_face_input1, compare_face_input2], outputs=[face_output1, face_output2, compare_result, similarity_markdown])

            with gr.Tab("Face Liveness, Analysis"):
                with gr.Row():
                    with gr.Column(scale=1):
                        face_input = gr.Image(label="Image", type='filepath', elem_classes="example-image")
                        gr.Examples(['examples/att_1.jpg', 'examples/att_2.jpg', 'examples/att_3.jpg', 'examples/att_4.jpg', 'examples/att_5.jpg', 'examples/att_6.jpg', 'examples/att_7.jpg'],
                                    inputs=face_input)

                    with gr.Blocks():
                        with gr.Column(scale=1, elem_classes="block-background"):
                            analyze_face_button = gr.Button("Analyze Face", variant="primary", size="lg")
                            with gr.Row(elem_classes="face-row"):
                                face_output = gr.Image(value="icons/face.jpg", label="Face", scale=0, elem_classes="face-image", show_share_button=False, show_download_button=False, show_fullscreen_button=False)

                            liveness_result = gr.Markdown("")
                            attribute_result = gr.Markdown("")

                            analyze_face_button.click(analyze_face, inputs=face_input, outputs=[face_output, liveness_result, attribute_result])

        gr.HTML('<a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FRecognito%2FFaceRecognition-LivenessDetection-FaceAnalysis"><img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FRecognito%2FFaceRecognition-LivenessDetection-FaceAnalysis&countColor=%2337d67a&style=flat&labelStyle=upper" /></a>')

    demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)


if __name__ == '__main__':
    g_fr_activation_result = activate_fr_sdk()
    g_fl_activation_result = activate_fl_sdk()
    launch_demo(g_fr_activation_result, g_fl_activation_result)