# app.py
import gradio as gr
from handler import generate_text
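# Collect the UI inputs and forward them to the model handler's generate_text().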
def infer(prompt, max_tokens, temperature, top_p, top_k, repetition_penalty, trim_output):
    return generate_text(
        prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        trim_output=trim_output
    )
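# Build the Gradio UI: a prompt box, sampling controls, and a text output box.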
iface = gr.Interface(
    fn=infer,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Type your prompt here...", lines=4),
        gr.Slider(50, 512, step=10, value=250, label="Max Tokens"),
        gr.Slider(0.1, 1.5, step=0.1, value=0.7, label="Temperature"),
        gr.Slider(0.1, 1.0, step=0.05, value=0.95, label="Top-p (nucleus sampling)"),
        gr.Slider(0, 100, step=1, value=50, label="Top-k"),
        gr.Slider(0.5, 2.0, step=0.1, value=1.2, label="Repetition Penalty"),
        gr.Checkbox(label="Trim Prompt From Output", value=True)
    ],
    outputs=gr.Textbox(label="Generated Text", lines=10),
    title="🧠 Your Custom AI Text Generator",
    description="Using the Hugging Face Inference API with your own model (no token required)"
)
if __name__ == "__main__":
    iface.launch()
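
# ---------------------------------------------------------------------------
# Hypothetical sketch of handler.py (the real file is not shown on this page).
# It assumes generate_text() calls the free Hugging Face Inference API via
# huggingface_hub.InferenceClient; MODEL_ID is a placeholder, not the author's
# actual model, and the parameter defaults simply mirror the sliders above.
# ---------------------------------------------------------------------------
# from huggingface_hub import InferenceClient
#
# MODEL_ID = "your-username/your-model"      # placeholder model id (assumption)
# client = InferenceClient(model=MODEL_ID)   # public models work without a token
#
# def generate_text(prompt, max_tokens=250, temperature=0.7, top_p=0.95,
#                   top_k=50, repetition_penalty=1.2, trim_output=True):
#     # Forward the UI's sampling settings to the hosted text-generation endpoint.
#     return client.text_generation(
#         prompt,
#         max_new_tokens=int(max_tokens),
#         temperature=temperature,
#         top_p=top_p,
#         top_k=top_k,
#         repetition_penalty=repetition_penalty,
#         return_full_text=not trim_output,  # drop the echoed prompt when trimming
#     )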