Update overlay.py
overlay.py  CHANGED  (+22 -26)
@@ -1,15 +1,13 @@
-# overlay.py
-
 import numpy as np
 from PIL import Image
 import mediapipe as mp
-from baldhead import inference as bald_inference
-from segmentation import remove_face_using_segmentation
 
-
-
-
-
+from baldhead import inference  # shave the hair off the background person
+from segmentation import extract_hair
+
+# MediaPipe Face Detection
+mp_fd = mp.solutions.face_detection.FaceDetection(model_selection=1,
+                                                  min_detection_confidence=0.5)
 
 def get_face_bbox(img: Image.Image) -> tuple[int,int,int,int] | None:
     arr = np.array(img.convert("RGB"))
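Note: the new module-level mp_fd detector uses MediaPipe's full-range face model (model_selection=1), which copes with faces further from the camera than the short-range model (model_selection=0). Only the first line of get_face_bbox appears in this diff; a minimal sketch of how such a helper is typically written against mp_fd follows — the use of relative_bounding_box and the pixel conversion are assumptions, not code from this commit.

    # Hypothetical sketch only -- the real body is not shown in this diff.
    def get_face_bbox(img: Image.Image) -> tuple[int, int, int, int] | None:
        arr = np.array(img.convert("RGB"))
        h, w = arr.shape[:2]
        results = mp_fd.process(arr)            # MediaPipe expects an RGB uint8 array
        if not results.detections:              # no face found
            return None
        rel = results.detections[0].location_data.relative_bounding_box
        x1, y1 = int(rel.xmin * w), int(rel.ymin * h)
        x2, y2 = int((rel.xmin + rel.width) * w), int((rel.ymin + rel.height) * h)
        return x1, y1, x2, y2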
@@ -29,8 +27,8 @@ def compute_scale(w_bg, h_bg, w_src, h_src) -> float:
 
 def compute_offset(bbox_bg, bbox_src, scale) -> tuple[int,int]:
     x1, y1, x2, y2 = bbox_bg
-    bg_cx = x1 + (x2 - x1)
-    bg_cy = y1 + (y2 - y1)
+    bg_cx = x1 + (x2 - x1)//2
+    bg_cy = y1 + (y2 - y1)//2
     sx1, sy1, sx2, sy2 = bbox_src
     src_cx = int((sx1 + (sx2 - sx1)//2) * scale)
     src_cy = int((sy1 + (sy2 - sy1)//2) * scale)
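Note: the //2 is the actual bug fix in this hunk. x1 + (x2 - x1) simplifies to x2, so the old code anchored on the right/bottom edge of the background face box rather than on its centre, shifting the overlay by half a face width. A quick check with made-up numbers:

    # Illustrative numbers, not from the repo
    x1, y1, x2, y2 = 100, 50, 300, 250      # a 200x200 face box
    old_cx = x1 + (x2 - x1)                 # 300 -> the right edge
    new_cx = x1 + (x2 - x1) // 2            # 200 -> the horizontal centre

The remainder of compute_offset is outside this hunk; presumably it returns something like (bg_cx - src_cx, bg_cy - src_cy) so the scaled source face centre lands on the background face centre, and compute_scale (untouched by the commit) presumably returns a ratio of the two face-box sizes — both are assumptions, not shown here.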
@@ -47,8 +45,8 @@ def paste_with_alpha(bg: np.ndarray, src: np.ndarray, offset: tuple[int,int]) ->
         return Image.fromarray(res)
     cs = src[y1-y:y2-y, x1-x:x2-x]
     cd = res[y1:y2, x1:x2]
-    mask = cs[...,3]>0
-    if cd.shape[2]==3:
+    mask = cs[...,3] > 0
+    if cd.shape[2] == 3:
         cd[mask] = cs[mask][...,:3]
     else:
         cd[mask] = cs[mask]
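Note: the changes in this hunk are whitespace-only and do not alter behaviour. For readers new to the pattern, here is a tiny self-contained illustration of the masked copy that paste_with_alpha performs (toy arrays, not repo code):

    import numpy as np

    dst = np.zeros((2, 2, 4), dtype=np.uint8)    # RGBA destination patch
    src = np.zeros((2, 2, 4), dtype=np.uint8)    # RGBA source patch
    src[0, 0] = [255, 0, 0, 255]                 # one opaque red pixel

    mask = src[..., 3] > 0                       # True where the source is opaque
    dst[mask] = src[mask]                        # overwrite only those pixels

    print(dst[0, 0])   # [255   0   0 255]  -- opaque pixel copied
    print(dst[1, 1])   # [0 0 0 0]          -- transparent pixels untouched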
@@ -56,7 +54,7 @@ def paste_with_alpha(bg: np.ndarray, src: np.ndarray, offset: tuple[int,int]) ->
     return Image.fromarray(res)
 
 def overlay_source(background: Image.Image, source: Image.Image):
-    # 1)
+    # 1) detect bboxes
     bbox_bg = get_face_bbox(background)
     bbox_src = get_face_bbox(source)
     if bbox_bg is None:
@@ -64,30 +62,28 @@ def overlay_source(background: Image.Image, source: Image.Image):
     if bbox_src is None:
         return None, "❌ No face in source."
 
-    # 2)
+    # 2) compute scale & resize source
     w_bg, h_bg = bbox_bg[2]-bbox_bg[0], bbox_bg[3]-bbox_bg[1]
     w_src, h_src = bbox_src[2]-bbox_src[0], bbox_src[3]-bbox_src[1]
     scale = compute_scale(w_bg, h_bg, w_src, h_src)
-
-
-
-
-    src_scaled = source.resize((int(source.width*scale), int(source.height*scale)))
+    src_scaled = source.resize(
+        (int(source.width*scale), int(source.height*scale)),
+        Image.Resampling.LANCZOS
+    )
 
-    # 3)
+    # 3) compute offset
     offset = compute_offset(bbox_bg, bbox_src, scale)
 
-    # 4)
-    bg_bald =
+    # 4) baldhead background
+    bg_bald = inference(background)
 
-    # 5)
-    hair_only =
+    # 5) extract hair-only from source
+    hair_only = extract_hair(src_scaled)
 
-    # 6)
+    # 6) paste onto bald background
     result = paste_with_alpha(
         np.array(bg_bald.convert("RGBA")),
         np.array(hair_only),
         offset
     )
-
     return result, f"✅ Scale={scale:.2f}, Offset={offset}"
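Note: this hunk also fills in the two previously empty assignments (bg_bald, hair_only) with real calls. Assuming baldhead.inference returns a PIL image of the background with its hair removed and segmentation.extract_hair returns an RGBA hair cut-out — which is what the calls above imply, though neither module is part of this diff — the updated pipeline can be exercised like this (file names are placeholders):

    from PIL import Image
    from overlay import overlay_source

    background = Image.open("background.jpg")   # person who receives the new hair
    source = Image.open("source.jpg")           # person whose hair is copied

    result, status = overlay_source(background, source)
    print(status)                               # e.g. "✅ Scale=0.87, Offset=(412, 96)"
    if result is not None:
        result.save("overlay_result.png")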