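"""TyDemo: a Gradio Space for Marigold monocular depth estimation.

Loads the prs-eth/marigold-depth-v1-1 diffusers pipeline and serves it through
a DualVision side-by-side UI (input image vs. predicted depth).
"""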
import os

# Print the installed package versions at startup; useful when debugging the Space environment.
os.system("pip freeze")

import spaces  # noqa: F401 -- importing `spaces` early is required on ZeroGPU Spaces
import gradio as gr
import torch
from diffusers import MarigoldDepthPipeline, DDIMScheduler
from gradio_dualvision import DualVisionApp
from huggingface_hub import login
from PIL import Image
CHECKPOINT = "prs-eth/marigold-depth-v1-1"

# Log in to the Hub if the access token is provided as a Space secret.
if "Gty20030709" in os.environ:
    login(token=os.environ["Gty20030709"])

# Prefer GPU with bfloat16 for speed; fall back to CPU with full precision.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
pipe = MarigoldDepthPipeline.from_pretrained(CHECKPOINT)
# "trailing" timestep spacing lets DDIM produce good results in very few steps.
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
pipe = pipe.to(device=device, dtype=dtype)

# Enable memory-efficient attention when xformers is available; otherwise keep the default.
try:
    import xformers  # noqa: F401

    pipe.enable_xformers_memory_efficient_attention()
except Exception:
    pass
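
# For reference, the pipeline can also be called directly, outside the UI below.
# A minimal sketch ("input.png" is a hypothetical local file):
#
#   image = Image.open("input.png").convert("RGB")
#   depth = pipe(image, num_inference_steps=4)
#   vis = pipe.image_processor.visualize_depth(depth.prediction)[0]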
class MarigoldDepthApp(DualVisionApp):
    DEFAULT_SEED = 2024
    DEFAULT_ENSEMBLE_SIZE = 1
    DEFAULT_DENOISE_STEPS = 4
    DEFAULT_PROCESSING_RES = 768

    def make_header(self):
        gr.Markdown(
            """
            <h2><a href="https://huggingface.co/spaces/prs-eth/marigold" style="color: black;">Marigold Depth Estimation</a></h2>
            """
        )
        with gr.Row(elem_classes="remove-elements"):
            gr.Markdown()
    def build_user_components(self):
        with gr.Column():
            ensemble_size = gr.Slider(
                label="Ensemble size",
                minimum=1,
                maximum=10,
                step=1,
                value=self.DEFAULT_ENSEMBLE_SIZE,
            )
            denoise_steps = gr.Slider(
                label="Number of denoising steps",
                minimum=1,
                maximum=20,
                step=1,
                value=self.DEFAULT_DENOISE_STEPS,
            )
            processing_res = gr.Radio(
                [
                    ("Native", 0),
                    ("Recommended", 768),
                ],
                label="Processing resolution",
                value=self.DEFAULT_PROCESSING_RES,
            )
        return {
            "ensemble_size": ensemble_size,
            "denoise_steps": denoise_steps,
            "processing_res": processing_res,
        }
    def process(self, image_in: Image.Image, **kwargs):
        ensemble_size = kwargs.get("ensemble_size", self.DEFAULT_ENSEMBLE_SIZE)
        denoise_steps = kwargs.get("denoise_steps", self.DEFAULT_DENOISE_STEPS)
        processing_res = kwargs.get("processing_res", self.DEFAULT_PROCESSING_RES)

        # Fixed seed so repeated runs on the same image give identical predictions.
        generator = torch.Generator(device=device).manual_seed(self.DEFAULT_SEED)
        pipe_out = pipe(
            image_in,
            ensemble_size=ensemble_size,
            num_inference_steps=denoise_steps,
            processing_resolution=processing_res,
            # Native resolution (0) can be large, so keep the batch small to limit memory.
            batch_size=1 if processing_res == 0 else 2,
            # Uncertainty is estimated across ensemble members, so it needs at least 3 of them.
            output_uncertainty=ensemble_size >= 3,
            generator=generator,
        )
        depth_vis = pipe.image_processor.visualize_depth(pipe_out.prediction)[0]
        depth_16bit = pipe.image_processor.export_depth_to_16bit_png(pipe_out.prediction)[0]
        out_modalities = {
            "Depth Visualization": depth_vis,
            "Depth 16-bit": depth_16bit,
        }
        if ensemble_size >= 3:
            uncertainty = pipe.image_processor.visualize_uncertainty(pipe_out.uncertainty)[0]
            out_modalities["Uncertainty"] = uncertainty
        out_settings = {
            "ensemble_size": ensemble_size,
            "denoise_steps": denoise_steps,
            "processing_res": processing_res,
        }
        return out_modalities, out_settings
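
# Sketch of invoking the handler programmatically (hypothetical; `app` would be a
# constructed MarigoldDepthApp instance, bypassing the Gradio UI):
#   modalities, settings = app.process(Image.open("input.png"), ensemble_size=3)
#   modalities["Depth 16-bit"].save("depth_16bit.png")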
with MarigoldDepthApp(
    title="Marigold Depth",
    examples_path="files",
    examples_per_page=12,
    squeeze_canvas=True,
    spaces_zero_gpu_enabled=True,
) as demo:
    demo.queue(
        api_open=False,
    ).launch(
        server_name="0.0.0.0",
        server_port=7860,
    )