"""Gradio demo UI for the Recognito face-analysis REST service.

Two tabs:
  * "Face Recognition"  — posts two images to /compare_face and renders the
    matched face pairs with their similarity scores.
  * "Face Attribute"    — posts one image to /detect_face and renders each
    detected face with age/gender/angles/emotion/attribute estimates.

NOTE(review): the original HTML/markdown template strings were lost
(stripped to empty literals) in this copy of the file; the markup below is a
minimal reconstruction that renders the same data fields — confirm against
the upstream repository for the exact styling.
"""

import base64
import io
import json
import os

import gradio as gr
import requests
from PIL import Image as PILImage, ImageDraw, ImageFont

# Base URL of the local inference server this demo talks to.
API_BASE_URL = "http://127.0.0.1:8000"
# Never post without a timeout — a hung server would otherwise block the UI forever.
REQUEST_TIMEOUT = 60


def face_crop(image, face_rect):
    """Crop one face out of *image* and scale it to a fixed 150 px height.

    :param image: source PIL.Image.
    :param face_rect: dict with 'x', 'y', 'width', 'height' pixel keys
                      (as returned by the detection service).
    :return: the cropped face as a new PIL.Image, 150 px tall,
             width scaled to preserve the aspect ratio.
    """
    x = face_rect.get('x')
    y = face_rect.get('y')
    width = face_rect.get('width')
    height = face_rect.get('height')

    # Clamp the rectangle so it stays fully inside the image bounds.
    x = max(x, 0)
    y = max(y, 0)
    if x + width >= image.width:
        width = image.width - x
    if y + height >= image.height:
        height = image.height - y

    # PIL crop box right/lower bounds are EXCLUSIVE, so the box is
    # (x, y, x + width, y + height); the previous "- 1" dropped one
    # pixel row and column from every crop.
    face_image = image.crop((x, y, x + width, y + height))

    # Resize to a fixed 150 px height, keeping the aspect ratio.
    aspect_ratio = face_image.width / float(face_image.height)
    return face_image.resize((int(aspect_ratio * 150), 150))


def pil_image_to_base64(image, format="PNG"):
    """
    Converts a PIL.Image object to a Base64-encoded string.

    :param image: PIL.Image object
    :param format: Format to save the image, e.g., "PNG", "JPEG"
    :return: Base64-encoded string
    """
    buffer = io.BytesIO()
    image.save(buffer, format=format)
    buffer.seek(0)  # Rewind the buffer
    return base64.b64encode(buffer.getvalue()).decode('utf-8')


def _image_to_jpeg_buffer(image):
    """Serialize a PIL.Image to an in-memory JPEG buffer ready for upload.

    :raises Exception: whatever PIL raises if the image cannot be saved.
    """
    buffer = io.BytesIO()
    image.save(buffer, format="JPEG")
    buffer.seek(0)
    return buffer


def _face_img_tag(face_image):
    """Render a cropped face as an inline <img> tag (Base64 data URI)."""
    return (
        '<img src="data:image/png;base64,{base64_image}" '
        'style="height:75px;border-radius:6px;margin:4px;"/>'
    ).format(base64_image=pil_image_to_base64(face_image, format="PNG"))


def _html_table(headers, values):
    """Build a small two-row HTML table: one header row, one value row."""
    head = ''.join('<th style="padding:2px 8px;">{}</th>'.format(h) for h in headers)
    body = ''.join('<td style="padding:2px 8px;text-align:center;">{}</td>'.format(v)
                   for v in values)
    return ('<table style="border-collapse:collapse;font-size:12px;">'
            '<tr>{}</tr><tr>{}</tr></table>').format(head, body)


def compare_face(image1, image2, verifyThreshold):
    """Compare the faces of two images via the /compare_face endpoint.

    :param image1: first PIL.Image (from the Gradio image input).
    :param image2: second PIL.Image.
    :param verifyThreshold: similarity score above which a pair is a match.
    :return: [html, json] pair for the Gradio HTML and JSON output widgets.
    """
    try:
        img_bytes1 = _image_to_jpeg_buffer(image1)
    except Exception:
        return ["Failed to open image1", {"resultCode": "Failed to open image1"}]
    try:
        img_bytes2 = _image_to_jpeg_buffer(image2)
    except Exception:
        return ["Failed to open image2", {"resultCode": "Failed to open image2"}]

    response = requests.post(
        url="{}/compare_face".format(API_BASE_URL),
        files={'image1': img_bytes1, 'image2': img_bytes2},
        timeout=REQUEST_TIMEOUT,
    )
    if not response.ok:
        return [response.text, {"resultCode": response.text}]

    json_result = response.json()
    if json_result.get("resultCode") != "Ok":
        return [json_result.get("resultCode"), json_result]

    # faces1/faces2 are indexed by integer below, so default to lists,
    # not dicts, when the keys are absent.
    faces1 = json_result.get("faces1", [])
    faces2 = json_result.get("faces2", [])

    html = ""
    # Note: "pair", not "result", so the HTTP response above isn't shadowed.
    for pair in json_result.get("results", []):
        score = pair.get('score')
        face_value1 = _face_img_tag(face_crop(image1, faces1[pair.get('face1')]))
        face_value2 = _face_img_tag(face_crop(image2, faces2[pair.get('face2')]))
        match_icon = '&#x2705;' if score > verifyThreshold else '&#x274C;'
        html += (
            '<div style="border:1px solid #ddd;border-radius:8px;'
            'padding:6px;margin:6px 0;">'
            '<div style="display:flex;align-items:center;">'
            '{face_value1}'
            '<span style="font-size:24px;margin:0 8px;">{match_icon}</span>'
            '{face_value2}'
            '</div>'
            '<div>Score: {score}</div>'
            '</div>'
        ).format(face_value1=face_value1, face_value2=face_value2,
                 match_icon=match_icon, score=f"{score:.2f}")
    html += '<br/>'
    return [html, json_result]


def detect_face(image):
    """Detect faces and their attributes via the /detect_face endpoint.

    :param image: PIL.Image to analyze.
    :return: [html, json] pair for the Gradio HTML and JSON output widgets.
    """
    try:
        img_bytes = _image_to_jpeg_buffer(image)
    except Exception:
        return ["Failed to open image", {"resultCode": "Failed to open image"}]

    response = requests.post(
        url="{}/detect_face".format(API_BASE_URL),
        files={'image': img_bytes},
        timeout=REQUEST_TIMEOUT,
    )
    if not response.ok:
        return [response.text, {"resultCode": response.text}]

    json_result = response.json()
    if json_result.get("resultCode") != "Ok":
        return ["No face!", json_result]

    html = ""
    for face in json_result.get("result", []):
        face_rect = face.get("rect", {})
        angles = face.get("angles", {})
        age_gender = face.get("age_gender", {})
        emotion = face.get("emotion", {})
        attribute = face.get("attribute", {})

        face_value = _face_img_tag(face_crop(image, face_rect))

        attr_headers = ["Age", "Gender", "Mask", "Left Eye", "Right Eye",
                        "Yaw", "Roll", "Pitch"]
        attr_values = [
            age_gender.get('age'),
            "Female" if age_gender.get('gender') == 0 else "Male",
            "Yes" if attribute.get('masked') == 1 else "No",
            "Open" if attribute.get('left_eye_opened') == 1 else "Close",
            "Open" if attribute.get('right_eye_opened') == 1 else "Close",
            f"{angles.get('yaw'):.2f}",
            f"{angles.get('roll'):.2f}",
            f"{angles.get('pitch'):.2f}",
        ]

        emotion_keys = ['neutral', 'happy', 'angry', 'surprised',
                        'disgusted', 'sad', 'scared']
        emotion_headers = [k.capitalize() for k in emotion_keys]
        emotion_values = [f"{emotion.get(k):.2f}" for k in emotion_keys]

        html += (
            '<div style="border:1px solid #ddd;border-radius:8px;'
            'padding:6px;margin:6px 0;">'
            '<div>{face_value}</div>'
            '{attr_table}'
            '{emotion_table}'
            '</div>'
        ).format(face_value=face_value,
                 attr_table=_html_table(attr_headers, attr_values),
                 emotion_table=_html_table(emotion_headers, emotion_values))
    html += '<br/>'
    return [html, json_result]


with gr.Blocks() as demo:
    # Header: branding and outbound links (markdown reconstructed; the
    # original header text was lost in this copy — see module docstring).
    gr.Markdown(
        """
        <div style="text-align:center;">
        <h1>Recognito</h1>
        <p><a href="https://www.recognito.vision" target="_blank">www.recognito.vision</a></p>
        <p>
        <a href="https://docs.recognito.vision" target="_blank">&#128216; Product Documentation</a>
        &nbsp;&nbsp;
        <a href="https://www.recognito.vision" target="_blank">&#127968; Visit Recognito</a>
        </p>
        <p>&#129309; Contact us for our on-premise ID Document Verification SDKs deployment</p>
        </div>
        """
    )
    with gr.TabItem("Face Recognition"):
        with gr.Row():
            with gr.Column(scale=7):
                with gr.Row():
                    with gr.Column():
                        image_input1 = gr.Image(type='pil')
                        gr.Examples(['examples/1.webp', 'examples/2.webp',
                                     'examples/3.webp', 'examples/4.webp'],
                                    inputs=image_input1)
                    with gr.Column():
                        image_input2 = gr.Image(type='pil')
                        gr.Examples(['examples/5.webp', 'examples/6.webp',
                                     'examples/7.webp', 'examples/8.webp'],
                                    inputs=image_input2)
                verifyThreshold = gr.Slider(minimum=0, maximum=1, value=0.67,
                                            label="Verify Threshold")
                face_recog_button = gr.Button("Face Recognition")
            with gr.Column(scale=3):
                with gr.TabItem("Output"):
                    recog_html_output = gr.HTML()
                with gr.TabItem("JSON"):
                    recog_json_output = gr.JSON()
    with gr.TabItem("Face Attribute"):
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type='pil')
                gr.Examples(['examples/11.webp', 'examples/12.webp',
                             'examples/13.webp', 'examples/14.webp'],
                            inputs=image_input)
                face_attr_button = gr.Button("Face Attribute")
            with gr.Column():
                with gr.TabItem("Output"):
                    detect_html_output = gr.HTML()
                with gr.TabItem("JSON"):
                    detect_json_output = gr.JSON()

    face_recog_button.click(compare_face,
                            inputs=[image_input1, image_input2, verifyThreshold],
                            outputs=[recog_html_output, recog_json_output])
    face_attr_button.click(detect_face,
                           inputs=[image_input],
                           outputs=[detect_html_output, detect_json_output])


if __name__ == "__main__":
    # Guarded so importing this module for testing does not start the server.
    demo.launch(server_name="0.0.0.0", server_port=7860)