# Virtual hairstyle try-on demo: Gradio UI + MediaPipe face detection.
import os

import cv2
import gradio as gr
import mediapipe as mp
import numpy as np
from PIL import Image
# Load hairstyle overlay images (transparent PNGs) from a folder.
def load_hairstyles(folder="hairstyles"):
    """Return a list of RGBA PIL images for every PNG in *folder*.

    Images are loaded in sorted filename order so slider indices are
    deterministic. Returns an empty list when *folder* is missing (or is
    not a directory) or contains no PNG files.

    Parameters
    ----------
    folder : str
        Directory to scan; defaults to the original hard-coded
        ``"hairstyles"`` so existing callers are unaffected.
    """
    # isdir (not exists) so a plain file named "hairstyles" is not listed.
    if not os.path.isdir(folder):
        return []
    return [
        Image.open(os.path.join(folder, name)).convert("RGBA")
        for name in sorted(os.listdir(folder))
        # Case-insensitive match so "STYLE.PNG" is not silently skipped.
        if name.lower().endswith(".png")
    ]
# Preload every hairstyle overlay once at startup; the UI slider indexes
# this list by position.
hairstyles = load_hairstyles()
# MediaPipe face-detection solution module; detector instances are created
# per call inside apply_hairstyle.
mp_face_detection = mp.solutions.face_detection
# Apply hairstyle using face detection to auto-align on head
def apply_hairstyle(user_img, style_index):
    """Composite the selected hairstyle onto *user_img*, aligned to the face.

    Parameters
    ----------
    user_img : PIL.Image.Image or None
        Uploaded photo; ``None`` (nothing uploaded) yields ``None``.
    style_index : int or float
        Slider value choosing a hairstyle. Gradio sliders may deliver
        floats, so the value is cast to int and clamped to a valid index.

    Returns
    -------
    PIL.Image.Image or None
        RGB composite; the original photo unchanged when no face is
        detected; ``None`` when there is no input or no hairstyles loaded.
    """
    if user_img is None or not hairstyles:
        return None
    # Cast and clamp: sliders can emit floats, and a stale slider value
    # must never raise IndexError if the hairstyle list changed.
    idx = min(max(int(style_index), 0), len(hairstyles) - 1)
    # Convert to an OpenCV BGR array; MediaPipe itself consumes RGB below.
    img_cv2 = np.array(user_img.convert("RGB"))
    img_cv2 = cv2.cvtColor(img_cv2, cv2.COLOR_RGB2BGR)
    h_img, w_img = img_cv2.shape[:2]
    with mp_face_detection.FaceDetection(
        model_selection=1, min_detection_confidence=0.5
    ) as face_detection:
        results = face_detection.process(cv2.cvtColor(img_cv2, cv2.COLOR_BGR2RGB))
        if not results.detections:
            return user_img  # No face detected, return original
        # Use first face
        detection = results.detections[0]
        bbox = detection.location_data.relative_bounding_box
        # Relative [0, 1] bounding box -> pixel coordinates.
        x = int(bbox.xmin * w_img)
        y = int(bbox.ymin * h_img)
        w = int(bbox.width * w_img)
        h = int(bbox.height * h_img)
        # Estimate head top: 60% of the face height above the face box.
        top_y = max(y - int(h * 0.6), 0)
        # Resize the hairstyle slightly wider than the face, preserving
        # aspect ratio; floor of 1 px guards against a zero-size resize
        # when the detected face box is degenerate.
        hairstyle = hairstyles[idx]
        new_width = max(int(w * 1.1), 1)
        new_height = max(int(hairstyle.height * (new_width / hairstyle.width)), 1)
        resized_hair = hairstyle.resize((new_width, new_height))
        # Paste onto a transparent layer centred on the face, then
        # alpha-composite over the photo so the PNG transparency is kept.
        user_img = user_img.convert("RGBA")
        composite = Image.new("RGBA", user_img.size)
        paste_x = x - int((new_width - w) / 2)
        paste_y = top_y
        composite.paste(resized_hair, (paste_x, paste_y), resized_hair)
        final = Image.alpha_composite(user_img, composite)
        return final.convert("RGB")
# --- Gradio interface ----------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## π Salon Virtual Hairstyle Try-On (Face-Aligned, No Adjustment Needed)")
    with gr.Row():
        with gr.Column():
            # Inputs: the user's photo plus a slider indexing the loaded
            # styles (max clamped to 0 when no hairstyles were found).
            photo_in = gr.Image(type="pil", label="π· Upload Your Image")
            style_idx = gr.Slider(0, max(len(hairstyles) - 1, 0), step=1, label="π¨ Select Hairstyle")
            run_btn = gr.Button("β¨ Apply Hairstyle")
        with gr.Column():
            # Output: the composited preview image.
            preview_out = gr.Image(label="π Result Preview")
    # Wire the button to the compositing function.
    run_btn.click(fn=apply_hairstyle, inputs=[photo_in, style_idx], outputs=preview_out)
demo.launch()