import logging

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
from PIL import Image, ImageDraw

logger = logging.getLogger(__name__)

# Load a lightweight diffusion model that works on CPU.
model_id = "OFA-Sys/small-stable-diffusion-v0"  # a smaller model that works better on CPU

# Create pipeline (will use CPU by default when CUDA isn't available).
pipe = StableDiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to("cpu")  # explicitly move to CPU


def generate_image(prompt):
    """Generate a PIL image from a text *prompt* with the diffusion pipeline.

    Args:
        prompt: Free-form text description of the desired image.

    Returns:
        The generated ``PIL.Image.Image`` on success; on any failure, a red
        placeholder image with the error message drawn on it, so the UI
        stays responsive instead of crashing.
    """
    try:
        # Keep the try body minimal: only the inference call can fail here.
        result = pipe(prompt)
    except Exception as exc:  # UI boundary: log and show the error, keep serving
        logger.exception("Image generation failed for prompt %r", prompt)
        error_img = Image.new("RGB", (300, 200), color="red")
        # Actually render the error message on the placeholder (the original
        # comment promised a message but never drew one).
        ImageDraw.Draw(error_img).text((10, 10), f"Error: {exc}", fill="white")
        return error_img
    return result.images[0]


# Create Gradio interface.
with gr.Blocks(title="Text to Image Generator") as demo:
    gr.Markdown("# 🎨 Text to Image Generator (CPU)")
    gr.Markdown("Enter a text prompt to generate an image. This runs on CPU so it may be slow.")

    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(
                label="Enter your prompt",
                placeholder="A beautiful sunset over mountains...",
                lines=3,
            )
            generate_btn = gr.Button("Generate Image", variant="primary")

        with gr.Column():
            output_image = gr.Image(label="Generated Image", type="pil")

    # Clickable example prompts.
    gr.Examples(
        examples=[
            ["A cute cat wearing a hat"],
            ["A futuristic city at night"],
            ["A beautiful landscape with mountains and lake"],
        ],
        inputs=prompt_input,
    )

    # Connect button to the generation function.
    generate_btn.click(
        fn=generate_image,
        inputs=prompt_input,
        outputs=output_image,
    )

# For running directly as a script.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)