# --- HuggingFace Space metadata (from page scrape) ---
# Space status: Runtime error
# File size: 1,603 bytes; commit: 51160c7
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch
# Load the model and tokenizer.
# NOTE(review): "Skywork/SkyReels-V2-DF-1.3B-540P" is a video-generation
# checkpoint; loading it via AutoModelForCausalLM may fail — confirm the
# repo actually exposes a causal-LM head (likely cause of the Space's
# runtime error).
model_name = "Skywork/SkyReels-V2-DF-1.3B-540P"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Set device: prefer GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
# Inference-only demo: put the model in eval mode so dropout and other
# training-mode layers are disabled during generation. The original left
# the model in training mode.
model.eval()
def generate_video(prompt, max_length=512, temperature=0.7, top_k=50, top_p=0.95):
    """
    Sample text from the model for the given video prompt.

    Args:
        prompt: Text prompt describing the desired video.
        max_length: Upper bound on total token count (prompt + generation).
        temperature: Softmax temperature for sampling.
        top_k: Keep only the k most probable next tokens.
        top_p: Nucleus-sampling probability mass cutoff.

    Returns:
        The decoded generated text. (In a real implementation this would
        be processed into a video; for demo purposes the raw text is
        returned as-is.)
    """
    # Encode the prompt and move the tensors onto the model's device.
    encoded = tokenizer(prompt, return_tensors="pt").to(device)

    # Sample without tracking gradients — inference only.
    with torch.no_grad():
        sampled = model.generate(
            **encoded,
            max_length=max_length,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            do_sample=True,
        )

    # Decode the first (and only) returned sequence back into a string.
    return tokenizer.decode(sampled[0], skip_special_tokens=True)
# Build the Gradio UI: a single prompt textbox feeding generate_video,
# with the raw generated text shown as the output.
prompt_input = gr.Textbox(lines=2, placeholder="Enter your video prompt here...")

iface = gr.Interface(
    fn=generate_video,
    inputs=prompt_input,
    outputs="text",
    title="SkyReels Video Generation",
    description="Generate video content using Skywork/SkyReels-V2-DF-1.3B-540P model",
    examples=[
        ["A sunny day at the beach with waves crashing"],
        ["A futuristic cityscape at night with flying cars"],
    ],
)

# Start the web app (blocks until the server is stopped).
iface.launch()