# Spaces: Sleeping — Hugging Face Spaces page header captured during export; not part of the app code.
# app.py
import gradio as gr
from handler import generate_text
def infer(prompt, max_tokens, temperature, top_p, top_k, repetition_penalty, trim_output):
    """Adapt the Gradio widget values to the text-generation backend.

    Thin pass-through: collects the sampling controls into keyword
    arguments and delegates to ``handler.generate_text``, returning
    whatever the handler produces (the generated text shown in the UI).
    """
    sampling_options = {
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "repetition_penalty": repetition_penalty,
        "trim_output": trim_output,
    }
    return generate_text(prompt, **sampling_options)
# Build the UI. Widget order below must match the parameter order of `infer`.
_prompt_box = gr.Textbox(label="Prompt", placeholder="Type your prompt here...", lines=4)
_sampling_controls = [
    gr.Slider(50, 512, step=10, value=250, label="Max Tokens"),
    gr.Slider(0.1, 1.5, step=0.1, value=0.7, label="Temperature"),
    gr.Slider(0.1, 1.0, step=0.05, value=0.95, label="Top-p (nucleus sampling)"),
    gr.Slider(0, 100, step=1, value=50, label="Top-k"),
    gr.Slider(0.5, 2.0, step=0.1, value=1.2, label="Repetition Penalty"),
    gr.Checkbox(label="Trim Prompt From Output", value=True),
]

iface = gr.Interface(
    fn=infer,
    inputs=[_prompt_box, *_sampling_controls],
    outputs=gr.Textbox(label="Generated Text", lines=10),
    title="🧠 Your Custom AI Text Generator",
    description="Using Hugging Face inference API with your own model (no token required)",
)
# Start the Gradio server only when executed as a script, not when imported.
if __name__ == "__main__":
    iface.launch()