# whatsappbot / app.py — Hugging Face Space: FLUX.1-schnell text-to-image demo
# (commit b9f8cc2)
import gradio as gr
import random
import numpy as np
import os
from huggingface_hub import InferenceClient
# 1. Initialize the Inference API client.
# Running inside a Hugging Face Space, the token is read from
# 'Settings > Variables and Secrets' (HF_TOKEN). We pass it explicitly
# rather than relying on implicit discovery; if the variable is unset,
# `token=None` falls back to the client's own auto-detection.
client = InferenceClient(
    "black-forest-labs/FLUX.1-schnell",
    token=os.environ.get("HF_TOKEN"),
)

# Largest value accepted for a reproducible seed (fits a signed 32-bit int).
MAX_SEED = np.iinfo(np.int32).max
def infer(prompt, seed, randomize_seed, width, height):
    """Generate an image from *prompt* via the FLUX.1-schnell Inference API.

    Args:
        prompt: Text description of the desired image.
        seed: RNG seed to use; ignored when *randomize_seed* is true.
        randomize_seed: When true, draw a fresh seed in [0, MAX_SEED].
        width: Output width in pixels (slider-supplied; may arrive as float).
        height: Output height in pixels (slider-supplied; may arrive as float).

    Returns:
        tuple: (generated image, seed actually used) — the seed is returned
        so the UI can display and reuse it.

    Raises:
        gr.Error: wraps any failure from the remote call so the Gradio
        frontend shows a user-facing error message.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Gradio sliders can deliver floats; the Inference API expects integers.
    seed = int(seed)
    try:
        # FLUX.1-schnell is distilled for 4 steps and ignores guidance
        # (or prefers 0.0). The Inference Client handles the generator /
        # torch logic server-side.
        image = client.text_to_image(
            prompt,
            width=int(width),
            height=int(height),
            num_inference_steps=4,
            guidance_scale=0.0,
            seed=seed,
        )
        return image, seed
    except Exception as e:
        # Boundary handler: surface the remote failure to the UI,
        # preserving the original traceback via exception chaining.
        raise gr.Error(f"Generation failed: {e}") from e
# ---- UI layout ---------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🚀 CodeIgnite FLUX Engine")
    gr.Markdown("Using `FLUX.1-schnell` via Inference API for lightning-fast results.")

    with gr.Column():
        # Prompt box and trigger button share one row.
        with gr.Row():
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="A futuristic cyberpunk city...",
                scale=4,
            )
            run_button = gr.Button("Generate", variant="primary")

        result = gr.Image(label="Result")

        # Collapsed by default; most users only need the prompt.
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            # FLUX works best at 1024x1024, but 512-1024 is safe for API
            width = gr.Slider(label="Width", minimum=256, maximum=1024, step=32, value=1024)
            height = gr.Slider(label="Height", minimum=256, maximum=1024, step=32, value=1024)

    # Wire the button to the generator; seed is both input and output so a
    # randomized seed is reflected back into the slider.
    run_button.click(
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height],
        outputs=[result, seed],
        api_name="predict",
    )

if __name__ == "__main__":
    demo.launch()