Test commit
- Dockerfile +4 -3
- app.py +34 -41
- requirements.txt +1 -0
- s3.py +53 -0
Dockerfile
CHANGED

@@ -28,13 +28,14 @@ WORKDIR /app
 # Install runtime dependency: libopenblas.so.0 is provided by libopenblas-base.
 RUN apt-get update && apt-get install -y build-essential cmake libopenblas-dev liblapack-dev libopenblas-dev liblapack-dev
 
-RUN mkdir -p /tmp/model /tmp/huggingface /tmp/.cache /tmp/.gradio \
-    && chmod -R 777 /tmp/model /tmp/huggingface /tmp/.cache /tmp/.gradio
-
 ENV HF_HOME=/tmp/huggingface
 ENV HUGGINGFACE_HUB_CACHE=/tmp/huggingface
 ENV XDG_CACHE_HOME=/tmp/.cache
 ENV GRADIO_CACHE_DIR=/tmp/.gradio
+ENV GRADIO_EXAMPLES_CACHE=/tmp/.gradio/cached_examples
+
+RUN mkdir -p /tmp/model /tmp/huggingface /tmp/.cache /tmp/.gradio /tmp/.gradio/cached_examples \
+    && chmod -R 777 /tmp/model /tmp/huggingface /tmp/.cache /tmp/.gradio /tmp/.gradio/cached_examples
 
 COPY --from=builder /app/venv venv
 
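The reorder matters because the `RUN mkdir`/`chmod` now comes after every `ENV` that names a cache location, so the new examples cache directory is created and made world-writable along with the others. A minimal startup sanity check, as a sketch (the file name and check are hypothetical, not part of this commit), that the non-root user a Space container runs as can write to each directory:

# check_caches.py -- hypothetical startup sanity check, not part of this commit.
# Verifies each cache directory from the Dockerfile exists and is writable by
# the (non-root) user the container runs as.
import os

CACHE_DIRS = [
    "/tmp/model",
    "/tmp/huggingface",
    "/tmp/.cache",
    "/tmp/.gradio",
    "/tmp/.gradio/cached_examples",
]

for d in CACHE_DIRS:
    os.makedirs(d, exist_ok=True)  # no-op when the Dockerfile already created it
    if not os.access(d, os.W_OK):
        raise RuntimeError(f"{d} is not writable")
print("all cache directories are writable")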
app.py
CHANGED

@@ -1,5 +1,4 @@
 import os
-import pathlib
 import time
 import torch
 from models import UNet
@@ -9,36 +8,24 @@ import gradio as gr
 from gradio_client import Client, handle_file
 from huggingface_hub import hf_hub_download
 import tempfile
-import
+from dotenv import load_dotenv
 
-
-os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/huggingface"
-os.environ["XDG_CACHE_HOME"] = "/tmp/.cache"
+load_dotenv()
 
-
-os.environ["GRADIO_EXAMPLES_CACHE"] = "/tmp/.gradio/cached_examples"
-
-for d in (
-    "/tmp/huggingface",
-    "/tmp/.cache",
-    "/tmp/.gradio",
-    os.environ["GRADIO_EXAMPLES_CACHE"],
-):
-    pathlib.Path(d).mkdir(parents=True, exist_ok=True)
+from s3 import imagine
 
+# Model download & caching directory (created in Dockerfile)
 MODEL_DIR = "/tmp/model"
-
+os.makedirs(MODEL_DIR, exist_ok=True)
 MODEL_PATH = os.path.join(MODEL_DIR, "best_unet_model.pth")
 
-
-MODEL_PATH = os.path.join(MODEL_DIR, "best_unet_model.pth")
 def download_model():
     print("Starting model download at", time.strftime("%Y-%m-%d %H:%M:%S"))
     path = hf_hub_download(
         repo_id="Robys01/face-aging",
         filename="best_unet_model.pth",
         local_dir=MODEL_DIR,
-        cache_dir=os.environ
+        cache_dir=os.environ.get("HUGGINGFACE_HUB_CACHE"),
     )
     print(f"Model downloaded to {path}")
 
@@ -51,25 +38,30 @@ model.load_state_dict(torch.load(MODEL_PATH, map_location=torch.device("cpu"), weights_only=True))
 model.eval()
 
 
-def age_image(image: Image.Image, source_age: int, target_age: int) -> Image.Image:
+def age_image(image_path: str, source_age: int, target_age: int) -> Image.Image:
+    image = Image.open(image_path)
     if image.mode not in ["RGB", "L"]:
         print(f"Converting image from {image.mode} to RGB")
         image = image.convert("RGB")
     processed_image = process_image(model, image, source_age, target_age)
+    imagine(image_path, source_age)
     return processed_image
 
 
-def age_video(image: Image.Image, source_age: int, target_age: int, duration: int, fps: int) -> str:
+def age_video(image_path: str, source_age: int, target_age: int, duration: int, fps: int) -> str:
+    image = Image.open(image_path)
+
     orig_tmp = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False)
     orig_path = orig_tmp.name
     image.save(orig_path)
     orig_tmp.close()
 
-    aged_img = age_image(image, source_age, target_age)
+    aged_img = age_image(image_path, source_age, target_age)
    aged_tmp = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False)
     aged_path = aged_tmp.name
     aged_img.save(aged_path)
     aged_tmp.close()
+    imagine(image_path, source_age)
 
     client = Client("Robys01/Face-Morphing")
     try:
@@ -100,10 +92,12 @@ def age_video(image: Image.Image, source_age: int, target_age: int, duration: int, fps: int) -> str:
     return video_path
 
 
-def age_timelapse(image: Image.Image, source_age: int) -> str:
+def age_timelapse(image_path: str, source_age: int) -> str:
+    image = Image.open(image_path)
+
     target_ages = [10, 20, 30, 50, 70]
     # Filter out ages too close to source
-    filtered = [age for age in target_ages if abs(age - source_age) >=
+    filtered = [age for age in target_ages if abs(age - source_age) >= 4]
     # Combine with source and sort
     ages = sorted(set(filtered + [source_age]))
     temp_handles = []
@@ -112,12 +106,13 @@ def age_timelapse(image: Image.Image, source_age: int) -> str:
         if age == source_age:
             img = image
         else:
-            img = age_image(image, source_age, age)
+            img = age_image(image_path, source_age, age)
         tmp = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False)
         path = tmp.name
         img.save(path)
         tmp.close()
         temp_handles.append(handle_file(path))
+        imagine(image_path, source_age)
 
     client = Client("Robys01/Face-Morphing")
     try:
@@ -145,24 +140,19 @@ def age_timelapse(image: Image.Image, source_age: int) -> str:
     return video_path
 
 
-# Pre-load the example images as PIL objects
-example1 = Image.open("examples/girl.jpg")
-example2 = Image.open("examples/man.jpg")
-example3 = Image.open("examples/trump.jpg")
-
 demo_age_image = gr.Interface(
     fn=age_image,
     inputs=[
-        gr.Image(type="pil", label="Input Image"),
+        gr.Image(type="filepath", label="Input Image"),
         gr.Slider(10, 90, value=20, step=1, label="Current age", info="Choose the current age"),
         gr.Slider(10, 90, value=70, step=1, label="Target age", info="Choose the desired age")
     ],
     outputs=gr.Image(type="pil", label="Aged Image"),
     examples=[
-        [example1, 14, 50],
-        [example2, 45, 70],
-        [example2, 45, 20],
-        [example3, 74, 30],
+        ["examples/girl.jpg", 14, 50],
+        ["examples/man.jpg", 45, 70],
+        ["examples/man.jpg", 45, 20],
+        ["examples/trump.jpg", 74, 30],
     ],
     cache_examples=True,
     description="Upload an image along with a source age approximation and a target age to generate an aged version of the face."
@@ -171,7 +161,7 @@ demo_age_image = gr.Interface(
 demo_age_video = gr.Interface(
     fn=age_video,
     inputs=[
-        gr.Image(type="pil", label="Input Image"),
+        gr.Image(type="filepath", label="Input Image"),
         gr.Slider(10, 90, value=20, step=1, label="Current age", info="Choose the current age"),
         gr.Slider(10, 90, value=70, step=1, label="Target age", info="Choose the desired age"),
         gr.Slider(label="Duration (seconds)", minimum=1, maximum=10, step=1, value=3),
@@ -180,9 +170,9 @@ demo_age_video = gr.Interface(
     outputs=gr.Video(label="Aged Video", format="mp4"),
 
     examples=[
-        [example1, 14, 50, 3, 30],
-        [example2, 45, 70, 3, 30],
-        [example2, 45, 20, 3, 30],
+        ["examples/girl.jpg", 14, 50, 3, 30],
+        ["examples/man.jpg", 45, 70, 3, 30],
+        ["examples/man.jpg", 45, 20, 3, 30],
     ],
     cache_examples=True,
     description="Generate a video of the aging process."
@@ -190,9 +180,12 @@ demo_age_video = gr.Interface(
 
 demo_age_timelapse = gr.Interface(
     fn=age_timelapse,
-    inputs=[gr.Image(type="pil", label="Input Image"), gr.Slider(10, 90, value=20, step=1, label="Current age")],
+    inputs=[gr.Image(type="filepath", label="Input Image"), gr.Slider(10, 90, value=20, step=1, label="Current age")],
     outputs=[gr.Video(label="Aging Timelapse", format="mp4")],
-    examples=[[example1, 14], [example2, 45]],
+    examples=[
+        ["examples/girl.jpg", 14],
+        ["examples/man.jpg", 45],
+    ],
     cache_examples=True,
     description="Generate a timelapse video showing the aging process at different ages."
 )
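The common thread in the app.py changes: every `gr.Image` input switches from `type="pil"` to `type="filepath"`, so each handler now receives the path of the uploaded file, opens it itself, and can hand the untouched original to `imagine()` for the S3 upload. A local smoke test of the new signature, as a sketch (it assumes `AWS_DEFAULT_REGION` and `AWS_BUCKET_NAME` are set, since s3.py reads both at import time, and that the bundled examples/man.jpg is present):

# Sketch of a local smoke test for the filepath-based handlers. Importing app
# downloads the model weights on first run; imagine() catches its own
# exceptions, so missing AWS credentials only log an error.
from app import age_image

aged = age_image("examples/man.jpg", source_age=45, target_age=70)
aged.save("/tmp/man_aged_70.jpg")  # aged PIL image returned by the handler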
requirements.txt
CHANGED

@@ -64,3 +64,4 @@ uvicorn==0.34.0
 uvloop==0.21.0
 watchfiles==1.0.4
 websockets==14.2
+boto3==1.39.2
s3.py
ADDED

@@ -0,0 +1,53 @@
+# s3_utils.py
+
+import os
+import datetime
+import pathlib
+import boto3
+from pathlib import Path
+
+# ─── GLOBAL STATE ──────────────────────────────────────
+uploaded_files = set()
+last_reset = datetime.datetime.now()
+
+# boto3 picks up AWS_* from env
+s3 = boto3.client("s3", region_name=os.environ["AWS_DEFAULT_REGION"])
+BUCKET_NAME = os.environ["AWS_BUCKET_NAME"]
+# ──────────────────────────────────────────────────────────
+
+def imagine(local_path: str, age: int) -> str:
+    global last_reset, uploaded_files
+
+    try:
+        now = datetime.datetime.now()
+        # 1) reset once per hour
+        if now - last_reset >= datetime.timedelta(hours=1):
+            print("Resetting uploaded files set")
+            uploaded_files.clear()
+            last_reset = now
+
+        # 2) skip the three built-in examples by name
+        if Path(local_path).name in ("girl.jpg", "man.jpg", "trump.jpg"):
+            print(f"Skipping upload for built-in example: {local_path}")
+            return None
+
+        basename = pathlib.Path(local_path).name
+
+        if basename in uploaded_files:
+            print(f"Skipping upload for already seen file: {basename}")
+            return None
+
+        today = now.strftime("%Y-%m-%d")
+        ts = now.strftime("%H%M%S")
+        stem = pathlib.Path(local_path).stem
+        ext = pathlib.Path(local_path).suffix
+        key = f"{today}/{age}_{ts}_{stem}{ext}"
+
+        s3.upload_file(Filename=local_path, Bucket=BUCKET_NAME, Key=key)
+        uploaded_files.add(basename)
+        print(f"Uploaded {local_path} to s3://{BUCKET_NAME}/{key}")
+
+        return key
+    except Exception as e:
+        print(f"Error uploading: {e}")
+        return None