import gradio as gr
from diffusers import DiffusionPipeline
import torch
import numpy as np
from PIL import Image
from moviepy.editor import ImageSequenceClip

# Load the model.
pipe = DiffusionPipeline.from_pretrained(
    "Suparious/FP-image-to-video-FLUX.1-HV-bf16",
    torch_dtype=torch.bfloat16,
    variant="bf16",
)
# Offload submodules to the CPU while idle to reduce VRAM usage.
# Note: do not also call .to("cuda") -- offloading manages device placement itself.
pipe.enable_model_cpu_offload()


def animate_image(image: Image.Image):
    # Generate video frames from the input image.
    result = pipe(image, decode_chunk_size=8)
    # Video pipelines typically return frames per batch element; take the first video.
    frames = result.frames[0]
    # moviepy expects numpy arrays rather than PIL images.
    frames = [np.array(frame) for frame in frames]
    clip = ImageSequenceClip(frames, fps=8)
    output_path = "output.mp4"
    clip.write_videofile(output_path, codec="libx264", audio=False)
    return output_path


demo = gr.Interface(
    fn=animate_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Video(),
    title="🌀 FP Image to Video Animation",
    description="Upload an image and the FLUX.1 model will turn it into a short animated video.",
)

demo.launch()