import os
import base64
import random
import threading
import time
import uuid
from functools import lru_cache
from pathlib import Path

import gradio as gr
import requests
import torch
from dotenv import load_dotenv
from PIL import Image, ImageDraw, ImageFont
from transformers import AutoTokenizer, AutoModelForSequenceClassification

load_dotenv()

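# Prompt-safety classifier: class ids returned by the NSFW text-detection model map to display labels.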
MODEL_URL = "TostAI/nsfw-text-detection-large"
CLASS_NAMES = {
    0: "✅ SAFE",
    1: "⚠️ QUESTIONABLE",
    2: "🚫 UNSAFE"
}

tokenizer = AutoTokenizer.from_pretrained(MODEL_URL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_URL)


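# In-memory store of per-session counters and history, keyed by the UUID held in gr.State.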
class SessionManager:
    _instances = {}
    _lock = threading.Lock()

    @classmethod
    def get_session(cls, session_id):
        with cls._lock:
            if session_id not in cls._instances:
                cls._instances[session_id] = {
                    'count': 0,
                    'history': [],
                    'last_active': time.time()
                }
            return cls._instances[session_id]

    @classmethod
    def cleanup_sessions(cls):
        with cls._lock:
            now = time.time()
            expired = [k for k, v in cls._instances.items() if now - v['last_active'] > 3600]
            for k in expired:
                del cls._instances[k]


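# Simple fixed-window rate limiter: at most 8 requests per client per hour.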
class RateLimiter:
    def __init__(self):
        self.clients = {}
        self.lock = threading.Lock()

    def check(self, client_id):
        with self.lock:
            now = time.time()
            if client_id not in self.clients:
                self.clients[client_id] = {'count': 1, 'reset': now + 3600}
                return True

            if now > self.clients[client_id]['reset']:
                self.clients[client_id] = {'count': 1, 'reset': now + 3600}
                return True

            if self.clients[client_id]['count'] >= 8:
                return False

            self.clients[client_id]['count'] += 1
            return True


session_manager = SessionManager()
rate_limiter = RateLimiter()


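# Read a local image file and encode it as a data URI for the WaveSpeed API payload.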
def image_to_base64(file_path):
    try:
        with open(file_path, "rb") as f:
            ext = Path(file_path).suffix.lower()[1:]
            mime_map = {'jpg': 'jpeg', 'jpeg': 'jpeg', 'png': 'png', 'webp': 'webp', 'gif': 'gif'}
            mime = mime_map.get(ext, 'jpeg')

            # base64.b64encode already returns correctly padded output, so no extra padding is needed.
            encoded = base64.b64encode(f.read())
            return f"data:image/{mime};base64,{encoded.decode()}"
    except Exception as e:
        raise ValueError(f"Base64 Error: {str(e)}")


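# Render an error message onto a placeholder image and return the saved file path.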
def create_error_image(message):
    img = Image.new("RGB", (832, 480), "#ffdddd")
    try:
        font = ImageFont.truetype("arial.ttf", 24)
    except OSError:
        # Fall back to the default bitmap font if Arial is not available.
        font = ImageFont.load_default()

    draw = ImageDraw.Draw(img)
    text = f"Error: {message[:60]}..." if len(message) > 60 else f"Error: {message}"
    draw.text((50, 200), text, fill="#ff0000", font=font)
    img.save("error.jpg")
    return "error.jpg"


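# Classify a prompt with the NSFW text-detection model; results are memoized per prompt string.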
@lru_cache(maxsize=100)
def classify_prompt(prompt):
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
    return torch.argmax(outputs.logits).item()


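# Submit an image-to-video job to WaveSpeedAI and stream status updates back to the UI.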
def generate_video(
    image,
    prompt,
    duration,
    enable_safety,
    flow_shift,
    guidance,
    negative_prompt,
    steps,
    seed,
    size,
    session_id
):
    # Screen the prompt locally before anything is sent to the API.
    safety_level = classify_prompt(prompt)
    if safety_level != 0:
        error_img = create_error_image(CLASS_NAMES[safety_level])
        yield f"❌ Blocked: {CLASS_NAMES[safety_level]}", error_img
        return

    if not rate_limiter.check(session_id):
        error_img = create_error_image("Hourly limit exceeded (8 requests)")
        yield "❌ Too many requests. Please try again later.", error_img
        return

    session = session_manager.get_session(session_id)
    session['last_active'] = time.time()
    session['count'] += 1

    try:
        api_key = os.getenv("WAVESPEED_API_KEY")
        if not api_key:
            raise ValueError("API key missing")

        base64_img = image_to_base64(image)
        headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}

        payload = {
            "image": base64_img,
            "enable_safety_checker": True,  # the UI checkbox is locked on
            "prompt": prompt,
            "duration": duration,
            "flow_shift": flow_shift,
            "guidance_scale": guidance,
            "negative_prompt": negative_prompt,
            "num_inference_steps": steps,
            "seed": seed if seed != -1 else random.randint(0, 999999),
            "size": size
        }

        response = requests.post(
            "https://api.wavespeed.ai/api/v2/wavespeed-ai/wan-2.1/i2v-480p-ultra-fast",
            headers=headers,
            json=payload
        )

        if response.status_code != 200:
            raise Exception(f"API Error {response.status_code}: {response.text}")

        request_id = response.json()["data"]["id"]
        yield f"✅ Task submitted (ID: {request_id})", None

    except Exception as e:
        error_img = create_error_image(str(e))
        yield f"❌ Submission failed: {str(e)}", error_img
        return

    # Poll the prediction result endpoint until the job completes or fails.
    result_url = f"https://api.wavespeed.ai/api/v2/predictions/{request_id}/result"
    start_time = time.time()

    while True:
        time.sleep(1)
        try:
            resp = requests.get(result_url, headers=headers)
            if resp.status_code != 200:
                raise Exception(f"Status query failed: {resp.text}")

            data = resp.json()["data"]
            status = data["status"]

            if status == "completed":
                elapsed = time.time() - start_time
                video_url = data["outputs"][0]
                session["history"].append(video_url)
                yield f"🎉 Generation succeeded in {elapsed:.1f}s", video_url
                return

            elif status == "failed":
                raise Exception(data.get("error", "Unknown error"))

            else:
                yield f"⏳ Current status: {status.capitalize()}...", None

        except Exception as e:
            error_img = create_error_image(str(e))
            yield f"❌ Generation failed: {str(e)}", error_img
            return


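# Background maintenance loop: purge idle sessions once per hour.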
def cleanup_task():
    while True:
        session_manager.cleanup_sessions()
        time.sleep(3600)


with gr.Blocks(
    theme=gr.themes.Soft(),
    css="""
    .video-preview { max-width: 600px !important; }
    .status-box { padding: 10px; border-radius: 5px; margin: 5px; }
    .safe { background: #e8f5e9; border: 1px solid #a5d6a7; }
    .warning { background: #fff3e0; border: 1px solid #ffcc80; }
    .error { background: #ffebee; border: 1px solid #ef9a9a; }
    """
) as app:

    # One UUID per browser session, used for rate limiting and history tracking.
    session_id = gr.State(str(uuid.uuid4()))

    gr.Markdown("# 🌊 Wan-2.1-i2v-480p-Ultra-Fast Run On WaveSpeedAI")
    gr.Markdown("""
    [WaveSpeedAI](https://wavespeed.ai/) is the global pioneer in accelerating AI-powered video and image generation.
    Our in-house inference accelerator provides lossless speedups for image and video generation, built on a rich inference-optimization software stack that includes our in-house inference compiler, CUDA kernel libraries, and parallel-computing libraries.
    """)
    gr.Markdown("""
    The Wan2.1 14B model is an advanced image-to-video model offering accelerated inference, enabling high-resolution video generation with high visual quality and motion diversity.
    """)

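    # Left column: input image, prompts, and generation parameters; right column: video output and status.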
    with gr.Row():
        with gr.Column(scale=1):
            img_input = gr.Image(type="filepath", label="Upload Image")
            prompt = gr.Textbox(label="Prompt", lines=3, placeholder="Prompt...")
            negative_prompt = gr.Textbox(label="Negative Prompt", lines=2)

            with gr.Row():
                size = gr.Dropdown(["832*480", "480*832"], value="832*480", interactive=True, label="Resolution")
                steps = gr.Slider(1, 50, value=30, label="Inference Steps")
            with gr.Row():
                duration = gr.Slider(1, 10, value=5, step=1, label="Duration (seconds)")
                guidance = gr.Slider(1, 20, value=7, label="Guidance Scale")
            with gr.Row():
                seed = gr.Number(-1, label="Seed")
                random_seed_btn = gr.Button("Random🎲Seed", variant="secondary")
            with gr.Row():
                enable_safety = gr.Checkbox(label="🔒 Enable Safety Checker", value=True, interactive=False)
                flow_shift = gr.Number(3, label="Flow Shift", interactive=False)

        with gr.Column(scale=1):
            video_output = gr.Video(label="Generated Video", format="mp4", elem_classes=["video-preview"])
            status_output = gr.Textbox(label="System Status", interactive=False, lines=4)
            generate_btn = gr.Button("Generate", variant="primary")

    with gr.Accordion("Safety Status", open=True):
        gr.Markdown("""
        <div class="status-box safe">
        ✅ Content safety check passed
        </div>
        """)

    gr.Examples(
        examples=[
            [
                "Victorian era, 19th-century gentleman wearing a black top hat and tuxedo, standing on a cobblestone street, dim gaslight lamps, passersby in vintage clothing, gentle breeze moving his coat, slow cinematic pan around him, nostalgic retro film style, realistic textures",
                "https://d2g64w682n9w0w.cloudfront.net/media/images/1745725874603980753_95mFCAxu.jpg"
            ],
            [
                "A cyberpunk female warrior with short silver hair and glowing green eyes, wearing a futuristic armored suit, standing in a neon-lit rainy city street, camera slowly circling around her, raindrops falling in slow motion, neon reflections on wet pavement, cinematic atmosphere, highly detailed, ultra realistic, 4K",
                "https://d2g64w682n9w0w.cloudfront.net/media/images/1745726299175719855_pFO0WSRM.jpg"
            ],
            [
                "Wide shot of a brave medieval female knight in shining silver armor and a red cape, standing on a castle rooftop at sunset, slowly drawing a large ornate sword from its scabbard, seen from a distance with the vast castle and surrounding landscape in the background, golden light bathing the scene, hair and cape flowing gently in the wind, cinematic epic atmosphere, dynamic motion, majestic clouds drifting, ultra realistic, high fantasy world, 4K ultra-detailed",
                "https://d2g64w682n9w0w.cloudfront.net/media/images/1745727436576834405_rtsokheb.jpg"
            ]
        ],
        inputs=[prompt, img_input],
        label="Example Inputs",
        examples_per_page=3
    )

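    # Wire up UI events: seed randomization and the main generation handler.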
    random_seed_btn.click(
        fn=lambda: random.randint(0, 999999),
        outputs=seed
    )

    generate_btn.click(
        generate_video,
        inputs=[
            img_input,
            prompt,
            duration,
            enable_safety,
            flow_shift,
            guidance,
            negative_prompt,
            steps,
            seed,
            size,
            session_id
        ],
        outputs=[
            status_output,
            video_output
        ]
    )


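# Launch the app with a small request queue and the hourly session-cleanup thread.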
if __name__ == "__main__":
    threading.Thread(target=cleanup_task, daemon=True).start()
    app.queue(max_size=2).launch(
        server_name="0.0.0.0",
        max_threads=4,
        share=False
    )