|
import gradio as gr |
|
import torch |
|
from diffusers import StableDiffusionPipeline |
|
import imageio |
|
import numpy as np |
|
from PIL import Image |
|
import tempfile |
|
import random |
|
import os |
|
|
|
|
|
# Model identifier on the Hugging Face hub; loaded once at startup.
MODEL_ID = "runwayml/stable-diffusion-v1-5"

# Prefer the GPU when one is available; everything below follows this choice.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Download (on first run) and load the Stable Diffusion weights, then move
# the whole pipeline onto the selected device.
pipe = StableDiffusionPipeline.from_pretrained(MODEL_ID)
pipe.to(device)
print("Model loaded!")
|
|
|
def generate_image(prompt, num_images=1):
    """Generate image(s) from a text prompt with a fresh random seed.

    Args:
        prompt: Text prompt fed to the diffusion pipeline.
        num_images: Number of images to sample for this prompt.

    Returns:
        A single PIL image when ``num_images == 1`` (the original,
        backward-compatible behavior), otherwise the list of images.
    """
    # A fresh random seed per call so repeated prompts yield varied outputs
    # (this is what makes each video frame different downstream).
    generator = torch.Generator(device=device).manual_seed(random.randint(0, 2**32 - 1))
    # Bug fix: ``num_images`` was previously accepted but silently ignored —
    # forward it to the pipeline so the parameter actually does something.
    images = pipe(
        prompt=prompt,
        num_inference_steps=25,
        generator=generator,
        num_images_per_prompt=num_images,
    ).images
    return images[0] if num_images == 1 else images
|
|
|
def generate_video(prompt, duration=2, fps=5):
    """Render a short MP4 by generating one diffusion image per frame.

    Each frame is sampled independently with its own random seed, so the
    result is a slideshow-style clip rather than a temporally coherent video.

    Args:
        prompt: Text prompt used for every frame.
        duration: Clip length in seconds.
        fps: Frames per second; total frames = int(duration * fps).

    Returns:
        Filesystem path to the written MP4 inside a fresh temp directory.
    """
    frame_count = int(duration * fps)
    temp_dir = tempfile.mkdtemp()
    video_path = os.path.join(temp_dir, f"output_{random.randint(10000,99999)}.mp4")

    writer = imageio.get_writer(video_path, fps=fps)
    try:
        for _ in range(frame_count):
            img = generate_image(prompt)
            # imageio expects an H x W x C uint8 ndarray, not a PIL image.
            writer.append_data(np.array(img))
    finally:
        # Bug fix: the writer previously leaked (stayed open, partial file
        # never finalized) if any frame generation raised; always close it.
        writer.close()
    return video_path
|
|
|
# UI definition: component creation order below is also the on-page layout
# order (title, prompt, duration slider, video output, button).
with gr.Blocks() as demo:

    gr.Markdown("# Simple SD Video Generator")

    prompt_input = gr.Textbox(label="Prompt", value="A majestic dragon flying over a castle")

    # Slider value is passed straight through as the `duration` argument of
    # generate_video; fps stays at that function's default.
    duration_input = gr.Slider(label="Duration (seconds)", minimum=1, maximum=10, value=2, step=1)

    output_video = gr.Video(label="Generated Video")



    gen_button = gr.Button("Generate Video")

    # Wire the button: inputs map positionally onto generate_video(prompt, duration).
    gen_button.click(fn=generate_video, inputs=[prompt_input, duration_input], outputs=[output_video])
|
|
|
if __name__ == "__main__":
    # NOTE(review): binding to 0.0.0.0 exposes the app on all network
    # interfaces with no authentication, and debug=True is on — confirm
    # this is intended outside of local development.
    demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
|
|