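# Gradio demo: virtual hairstyle try-on. MediaPipe face detection locates the face
# so transparent PNG hairstyles from the "hairstyles" folder can be auto-aligned.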
import gradio as gr
from PIL import Image
import os
import cv2
import numpy as np
import mediapipe as mp

# Load hairstyles from 'hairstyles' folder
def load_hairstyles():
    folder = "hairstyles"
    if not os.path.exists(folder):
        return []
    return [
        Image.open(os.path.join(folder, f)).convert("RGBA")
        for f in sorted(os.listdir(folder)) if f.lower().endswith(".png")
    ]

hairstyles = load_hairstyles()
mp_face_detection = mp.solutions.face_detection

# Apply hairstyle using face detection to auto-align on head
def apply_hairstyle(user_img, style_index):
    if user_img is None or not hairstyles:
        return None

    # Convert to CV2 image for detection
    img_cv2 = np.array(user_img.convert("RGB"))
    img_cv2 = cv2.cvtColor(img_cv2, cv2.COLOR_RGB2BGR)
    h_img, w_img = img_cv2.shape[:2]

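    # model_selection=1 uses MediaPipe's full-range face detector (handles faces farther from the camera)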
    with mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence=0.5) as face_detection:
        results = face_detection.process(cv2.cvtColor(img_cv2, cv2.COLOR_BGR2RGB))

        if not results.detections:
            return user_img  # No face detected, return original

        # Use first face
        detection = results.detections[0]
        bbox = detection.location_data.relative_bounding_box
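        # relative_bounding_box is normalized to [0, 1]; convert it to pixel coordinates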
        x = int(bbox.xmin * w_img)
        y = int(bbox.ymin * h_img)
        w = int(bbox.width * w_img)
        h = int(bbox.height * h_img)

        # Estimate the top of the head: shift ~60% of the face height above the detected box
        top_y = max(y - int(h * 0.6), 0)

        # Load and resize hairstyle
        hairstyle = hairstyles[int(style_index)]  # slider value may arrive as a float
        new_width = int(w * 1.1)  # Slightly wider than face
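        # Scale the height proportionally so the PNG keeps its aspect ratio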
        new_height = int(hairstyle.height * (new_width / hairstyle.width))
        resized_hair = hairstyle.resize((new_width, new_height))

        # Create output image
        user_img = user_img.convert("RGBA")
        composite = Image.new("RGBA", user_img.size)
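        # Center the hairstyle horizontally over the face box, anchored at the estimated head top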
        paste_x = x - int((new_width - w) / 2)
        paste_y = top_y
        composite.paste(resized_hair, (paste_x, paste_y), resized_hair)

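        # Alpha-blend the hairstyle layer onto the photo, then drop the alpha channel for display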
        final = Image.alpha_composite(user_img, composite)
        return final.convert("RGB")

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("## πŸ’‡ Salon Virtual Hairstyle Try-On (Face-Aligned, No Adjustment Needed)")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="📷 Upload Your Image")
            style_slider = gr.Slider(0, max(len(hairstyles)-1, 0), step=1, label="🎨 Select Hairstyle")
            apply_btn = gr.Button("✨ Apply Hairstyle")
        with gr.Column():
            result_output = gr.Image(label="🔍 Result Preview")

    apply_btn.click(
        fn=apply_hairstyle,
        inputs=[image_input, style_slider],
        outputs=result_output
    )

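# launch() starts a local Gradio server; pass share=True for a temporary public link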
demo.launch()