Upload folder using huggingface_hub
app.py
CHANGED
@@ -1,16 +1,14 @@
 import os
 import time
 from datetime import datetime, timezone, timedelta
-from tqdm import tqdm
 
 import spaces
 import torch
-import torch.optim as optim
-import torch.nn.functional as F
 import gradio as gr
 
 from utils import preprocess_img, preprocess_img_from_path, postprocess_img
 from vgg19 import VGG_19
+from inference import inference
 
 if torch.cuda.is_available(): device = 'cuda'
 elif torch.backends.mps.is_available(): device = 'mps'
@@ -42,63 +40,33 @@ for style_name, style_img_path in style_options.items():
     style_features = (model(style_img_512), model(style_img_1024))
     cached_style_features[style_name] = style_features
 
-def gram_matrix(feature):
-    batch_size, n_feature_maps, height, width = feature.size()
-    new_feature = feature.view(batch_size * n_feature_maps, height * width)
-    return torch.mm(new_feature, new_feature.t())
-
-def compute_loss(generated_features, content_features, style_features, alpha, beta):
-    content_loss = 0
-    style_loss = 0
-    w_l = 1 / len(generated_features)
-    for gf, cf, sf in zip(generated_features, content_features, style_features):
-        content_loss += F.mse_loss(gf, cf)
-        G = gram_matrix(gf)
-        A = gram_matrix(sf)
-        style_loss += w_l * F.mse_loss(G, A)
-    return alpha * content_loss + beta * style_loss
-
 @spaces.GPU(duration=6)
-def
-    yield None
-    print('-'*15)
-    print('DATETIME:', datetime.now(timezone.utc) - timedelta(hours=4))
-    print('STYLE:', style_name)
-
+def run(content_image, style_name, style_strength, output_quality, progress=gr.Progress(track_tqdm=True)):
     img_size = 1024 if output_quality else 512
     content_img, original_size = preprocess_img(content_image, img_size)
     content_img = content_img.to(device)
 
+    print('-'*15)
+    print('DATETIME:', datetime.now(timezone.utc) - timedelta(hours=4)) # est
+    print('STYLE:', style_name)
     print('CONTENT IMG SIZE:', original_size)
     print('STYLE STRENGTH:', style_strength)
     print('HIGH QUALITY:', output_quality)
 
-    iters = 35
-    lr = 0.001 + (0.099 / 99) * (style_strength - 1) # [0.001, 0.1]
-    alpha = 1
-    beta = 1
-
-    st = time.time()
-    generated_img = content_img.clone().requires_grad_(True)
-    optimizer = optim.AdamW([generated_img], lr=lr)
-
-    with torch.no_grad():
-        content_features = model(content_img)
     style_features = cached_style_features[style_name][0 if img_size == 512 else 1]
+    converted_lr = 0.001 + (0.099 / 99) * (style_strength - 1)
 
-    for _ in tqdm(range(iters)):
-        optimizer.zero_grad()
-
-        generated_features = model(generated_img)
-        total_loss = compute_loss(generated_features, content_features, style_features, alpha, beta)
-
-        total_loss.backward()
-        optimizer.step()
-
+    st = time.time()
+    generated_img = inference(
+        model=model,
+        content_image=content_img,
+        style_features=style_features,
+        lr=converted_lr
+    )
     et = time.time()
     print('TIME TAKEN:', et-st)
 
-    yield postprocess_img(generated_img, original_size)
+    return postprocess_img(generated_img, original_size)
 
 
 def set_slider(value):
@@ -139,7 +107,7 @@ with gr.Blocks(css=css) as demo:
         return filename
 
     submit_button.click(
-        fn=
+        fn=run,
         inputs=[content_and_output, style_dropdown, style_strength_slider, output_quality],
         outputs=[content_and_output]
     ).then(
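Note: this refactor moves the Gram-matrix losses and the optimization loop into the new inference.py, and run now returns the finished image instead of yielding. The converted_lr line preserves the old slider semantics, linearly mapping style strength 1-100 onto learning rates in [0.001, 0.1] (the range the removed # [0.001, 0.1] comment documented). A standalone sanity check of that mapping; slider_to_lr is a hypothetical name, not part of the app:

    def slider_to_lr(style_strength):
        # same arithmetic as converted_lr in app.py
        return 0.001 + (0.099 / 99) * (style_strength - 1)

    assert abs(slider_to_lr(1) - 0.001) < 1e-12    # weakest style -> smallest lr
    assert abs(slider_to_lr(100) - 0.1) < 1e-12    # strongest style -> largest lr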
inference.py
ADDED
@@ -0,0 +1,48 @@
+from tqdm import tqdm
+
+import torch
+import torch.optim as optim
+import torch.nn.functional as F
+
+def _gram_matrix(feature):
+    batch_size, n_feature_maps, height, width = feature.size()
+    new_feature = feature.view(batch_size * n_feature_maps, height * width)
+    return torch.mm(new_feature, new_feature.t())
+
+def _compute_loss(generated_features, content_features, style_features, alpha, beta):
+    content_loss = 0
+    style_loss = 0
+    w_l = 1 / len(generated_features)
+    for gf, cf, sf in zip(generated_features, content_features, style_features):
+        content_loss += F.mse_loss(gf, cf)
+        G = _gram_matrix(gf)
+        A = _gram_matrix(sf)
+        style_loss += w_l * F.mse_loss(G, A)
+    return alpha * content_loss + beta * style_loss
+
+def inference(
+    *,
+    model,
+    content_image,
+    style_features,
+    lr,
+    iterations=35,
+    alpha=1,
+    beta=1
+):
+    generated_image = content_image.clone().requires_grad_(True)
+    optimizer = optim.AdamW([generated_image], lr=lr)
+
+    with torch.no_grad():
+        content_features = model(content_image)
+
+    for _ in tqdm(range(iterations), desc='The magic is happening ✨'):
+        optimizer.zero_grad()
+
+        generated_features = model(generated_image)
+        total_loss = _compute_loss(generated_features, content_features, style_features, alpha, beta)
+
+        total_loss.backward()
+        optimizer.step()
+
+    return generated_image
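Note: a minimal usage sketch for the extracted helper, assuming (as app.py does) that the model's forward pass returns the list of intermediate feature maps consumed by _compute_loss; the random tensors are stand-ins for real preprocessed images:

    import torch
    from vgg19 import VGG_19
    from inference import inference

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = VGG_19().to(device).eval()

    content_img = torch.rand(1, 3, 512, 512, device=device)  # stand-in content image
    style_img = torch.rand(1, 3, 512, 512, device=device)    # stand-in style image
    with torch.no_grad():
        style_features = model(style_img)

    # every argument is keyword-only, per the * in the signature above
    styled = inference(
        model=model,
        content_image=content_img,
        style_features=style_features,
        lr=0.05,  # roughly mid-range style strength
    )

Making the arguments keyword-only is a sensible choice here: the call site in app.py stays readable, and the model, content, and style tensors cannot be swapped silently.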
vgg16.py
CHANGED
@@ -52,7 +52,7 @@ VGG(
 class VGG_16(nn.Module):
     def __init__(self):
         super(VGG_16, self).__init__()
-        self.model = models.vgg16(weights=models.VGG16_Weights).features[:30]
+        self.model = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).features[:30]
 
         for i, _ in enumerate(self.model):
             if i in [4, 9, 16, 23]:
vgg19.py
CHANGED
@@ -58,7 +58,7 @@ VGG(
 class VGG_19(nn.Module):
     def __init__(self):
         super(VGG_19, self).__init__()
-        self.model = models.vgg19(weights=models.VGG19_Weights).features[:30]
+        self.model = models.vgg19(weights=models.VGG19_Weights.IMAGENET1K_V1).features[:30]
 
         for i, _ in enumerate(self.model):
             if i in [4, 9, 18, 27]:
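Note: vgg16.py and vgg19.py get the same fix. weights=models.VGG16_Weights handed torchvision the whole weights enum class where the multi-weight API expects a specific member; pinning .IMAGENET1K_V1 makes the checkpoint explicit. For reference, both forms below should resolve to the same weights on torchvision >= 0.13 (the commit uses the enum-member form):

    from torchvision import models

    # explicit enum member, as in this commit
    vgg = models.vgg19(weights=models.VGG19_Weights.IMAGENET1K_V1)
    # equivalent string form accepted by the multi-weight API
    vgg = models.vgg19(weights='IMAGENET1K_V1')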