Spaces: Running on Zero
File size: 2,433 Bytes
import gradio as gr
import spaces
import torch
from PIL import Image
from RealESRGAN import RealESRGAN
import time
from datetime import timedelta as td
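# Use the GPU when available and preload Real-ESRGAN upscalers for 2x, 4x, and 8x.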
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model2 = RealESRGAN(device, scale=2)
model2.load_weights('weights/RealESRGAN_x2.pth', download=True)
model4 = RealESRGAN(device, scale=4)
model4.load_weights('weights/RealESRGAN_x4.pth', download=True)
model8 = RealESRGAN(device, scale=8)
model8.load_weights('weights/RealESRGAN_x8.pth', download=True)
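# ZeroGPU: the @spaces.GPU decorator below requests a GPU for each call;
# duration is a hint for the expected run time in seconds.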
@spaces.GPU(duration=13)
def inference(image, size):
    start_load = time.time()
    global model2
    global model4
    global model8
    if image is None:
        raise gr.Error("Image not uploaded")
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    if size == '2x':
        try:
            result = model2.predict(image.convert('RGB'))
        except torch.cuda.OutOfMemoryError as e:
            print(e)
            model2 = RealESRGAN(device, scale=2)
            model2.load_weights('weights/RealESRGAN_x2.pth', download=False)
            result = model2.predict(image.convert('RGB'))
    elif size == '4x':
        try:
            result = model4.predict(image.convert('RGB'))
        except torch.cuda.OutOfMemoryError as e:
            print(e)
            model4 = RealESRGAN(device, scale=4)
            model4.load_weights('weights/RealESRGAN_x4.pth', download=False)
            # Retry with the reloaded 4x model.
            result = model4.predict(image.convert('RGB'))
    else:
        try:
            result = model8.predict(image.convert('RGB'))
        except torch.cuda.OutOfMemoryError as e:
            print(e)
            model8 = RealESRGAN(device, scale=8)
            model8.load_weights('weights/RealESRGAN_x8.pth', download=False)
            # Retry with the reloaded 8x model.
            result = model8.predict(image.convert('RGB'))
print(f"Image size ({device}): {size}, time: {td(seconds=int(time.time() - start_load))} ... OK")
return result
title = ""
description = ""
article = ""
gr.Interface(inference,
             [gr.Image(type="pil"),
              gr.Radio(["2x", "4x", "8x"],
                       type="value",
                       value="4x",
                       label="Resolution model")],
             gr.Image(type="pil", label="Output"),
             title=title,
             description=description,
             article=article,
             examples=[],
             flagging_mode="never",
             cache_mode="lazy",
             ).queue(api_open=True).launch(show_error=True, show_api=True, mcp_server=False)
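# Usage sketch: with queue(api_open=True) and show_api=True, the endpoint can be
# called from Python via gradio_client. The Space id below is a placeholder, not
# the actual repository name.
#
# from gradio_client import Client, handle_file
# client = Client("user/real-esrgan-space")   # hypothetical Space id
# result = client.predict(
#     handle_file("input.jpg"),               # image to upscale
#     "4x",                                   # "2x", "4x", or "8x"
#     api_name="/predict",                    # default endpoint of gr.Interface
# )
# print(result)                               # path to the upscaled image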