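# Gradio demo: one-step text-to-image and two-step image-to-image generation
# with SDXL-Turbo (stabilityai/sdxl-turbo) via the diffusers AutoPipelines.
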
import gradio as gr
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
from diffusers.utils import load_image
import torch
import os

# Reduce CUDA memory fragmentation. The allocator reads this setting when
# CUDA is first initialized, so set it before the models below are loaded.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'

# Release any previously cached GPU memory (a no-op without CUDA)
torch.cuda.empty_cache()

# Define custom CSS to style the Gradio interface
css_style = """
.container {
    max-width: 48rem; /* "3xl" is a Tailwind token, not a valid CSS length */
    padding: 2rem 1rem;
    margin: auto;
}
h1 {
    text-align: center;
    margin-bottom: 1.5rem;
    font-size: 1.875rem;
    font-weight: bold;
}
.form-textarea {
    width: 100%;
    margin-top: 0.25rem;
    height: 6rem;
}
.form-range {
    width: 100%;
}
.form-input {
    margin-top: 0.25rem;
    width: 9rem;
}
.form-checkbox {
    height: 1.25rem;
    width: 1.25rem;
    color: blue;
}
.button {
    background-color: blue;
    color: white;
    padding: 0.5rem 1rem;
    border-radius: 0.375rem;
    font-weight: bold;
}
.error {
    color: red;
    text-align: center;
}
"""

device = "cuda" if torch.cuda.is_available() else "cpu"
# fp16 weights are only practical on GPU; fall back to float32 on CPU
dtype = torch.float16 if device == "cuda" else torch.float32
variant = "fp16" if device == "cuda" else None

txt2img = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=dtype, variant=variant
).to(device)
pipes = {
    "txt2img": txt2img,
    # from_pipe() shares the already-loaded components instead of loading twice
    "img2img": AutoPipelineForImage2Image.from_pipe(txt2img),
}

# enable_model_cpu_offload() saves VRAM on CUDA systems by streaming weights
# to the GPU one module at a time; it replaces the .to(device) call above and
# fails on CPU-only machines. Uncomment if you hit out-of-memory on a small GPU:
# txt2img.enable_model_cpu_offload()

def run(prompt, image):
    try:
        print(f"prompt={prompt}, image={image}")
        if image is None:
            # SDXL-Turbo is distilled for single-step generation without
            # classifier-free guidance, hence steps=1 and guidance_scale=0.0
            return pipes["txt2img"](prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
        else:
            image = image.resize((512, 512))
            print(f"img2img image={image}")
            # img2img requires num_inference_steps * strength >= 1,
            # so use 2 steps at strength 0.5
            return pipes["img2img"](prompt, image=image, num_inference_steps=2, strength=0.5, guidance_scale=0.0).images[0]
    except RuntimeError as e:
        if "CUDA out of memory" in str(e):
            print("CUDA out of memory. Trying to clear cache.")
            torch.cuda.empty_cache()
            # Surface the failure in the UI instead of silently returning None
            raise gr.Error("GPU ran out of memory; please try again.") from e
        raise

demo = gr.Interface(
    run,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Image(type="pil")
    ],
    outputs=gr.Image(width=512, height=512),
    live=True,  # re-run generation whenever an input changes
    css=css_style  # apply the custom styles defined above
)

demo.launch()
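
# To run this demo (assumed invocation): `python app.py`, then open the local
# URL Gradio prints. Calling demo.launch(share=True) instead would also
# create a temporary public link.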