import gradio as gr
from diffusers import StableDiffusionPipeline
import torch
from PIL import Image
# Load a lightweight diffusion model that works on CPU
model_id = "OFA-Sys/small-stable-diffusion-v0" # A smaller model that works better on CPU
# Create pipeline (will use CPU by default when CUDA isn't available)
pipe = StableDiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to("cpu") # Explicitly move to CPU
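# Optional CPU tweak (a sketch, assuming a recent diffusers release): attention
# slicing trades a little speed for lower peak memory, which can help on small
# CPU instances.
# pipe.enable_attention_slicing()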
def generate_image(prompt):
"""Generate image from text prompt"""
try:
# Generate image
result = pipe(prompt)
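        # Note: pipe() also accepts standard Stable Diffusion parameters such as
        # num_inference_steps and guidance_scale; lowering the step count
        # (e.g. pipe(prompt, num_inference_steps=20)) is one way to shorten
        # CPU runs, at some cost in image quality.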
        # Get the image
        image = result.images[0]
        return image
    except Exception as e:
        # Log the error and return a red placeholder image if generation fails
        print(f"Image generation failed: {e}")
        error_img = Image.new('RGB', (300, 200), color='red')
        return error_img
# Create Gradio interface
with gr.Blocks(title="Text to Image Generator") as demo:
gr.Markdown("# 🎨 Text to Image Generator (CPU)")
gr.Markdown("Enter a text prompt to generate an image. This runs on CPU so it may be slow.")
with gr.Row():
with gr.Column():
prompt_input = gr.Textbox(
label="Enter your prompt",
placeholder="A beautiful sunset over mountains...",
lines=3
)
generate_btn = gr.Button("Generate Image", variant="primary")
with gr.Column():
output_image = gr.Image(label="Generated Image", type="pil")
# Examples
gr.Examples(
examples=[
["A cute cat wearing a hat"],
["A futuristic city at night"],
["A beautiful landscape with mountains and lake"]
],
inputs=prompt_input
)
# Connect button to function
generate_btn.click(
fn=generate_image,
inputs=prompt_input,
outputs=output_image
)
# For running directly as a script
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
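# Typical local usage (a sketch, assuming this file is saved as app.py and the
# dependencies gradio, diffusers, torch, and Pillow are installed):
#   python app.py
# then open http://localhost:7860 in a browser. The first run downloads the
# model weights from the Hugging Face Hub.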