from transformers import pipeline, TextIteratorStreamer
import torch
from threading import Thread
import gradio as gr
import spaces
import re

model_id = "openai/gpt-oss-20b"

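# Load the model once at startup; torch_dtype="auto" uses the checkpoint's
# native precision and device_map="auto" places weights on the available GPU(s).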
pipe = pipeline(
    "text-generation",
    model=model_id,
    torch_dtype="auto",
    device_map="auto",
)

def format_conversation_history(chat_history):
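    # Normalize Gradio "messages"-style history into plain {"role", "content"}
    # dicts; multimodal entries can arrive as lists of content parts, so pull
    # out the text field when present.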
    messages = []
    for item in chat_history:
        role = item["role"]
        content = item["content"]
        if isinstance(content, list):
            content = content[0]["text"] if content and "text" in content[0] else str(content)
        messages.append({"role": role, "content": content})
    return messages
    
@spaces.GPU()  # allocate a ZeroGPU device for the duration of each call
def generate_response(input_data, chat_history, max_new_tokens, system_prompt, temperature, top_p, top_k, repetition_penalty):
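    # Streams the reply: yields progressively longer markdown with the model's
    # "analysis" (thinking) channel in a collapsible block, then the final answer.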
    new_message = {"role": "user", "content": input_data}
    system_message = [{"role": "system", "content": system_prompt}] if system_prompt else []
    processed_history = format_conversation_history(chat_history)
    messages = system_message + processed_history + [new_message]
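    # TextIteratorStreamer exposes generation as an iterator of decoded text
    # chunks; skip_prompt=True keeps the echoed input out of the stream.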
    streamer = TextIteratorStreamer(pipe.tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = {
        "max_new_tokens": max_new_tokens,
        "do_sample": True,
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "repetition_penalty": repetition_penalty,
        "streamer": streamer
    }
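    # Run generation on a background thread; this (main) thread consumes the
    # streamer so the UI can update as tokens arrive.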
    thread = Thread(target=pipe, args=(messages,), kwargs=generation_kwargs)
    thread.start()
    # Parse the stream manually rather than with the openai-harmony parser:
    # this demo uses no tools, and harmony caused problems on HF Spaces.
    thinking = ""
    final = ""
    started_final = False
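    # Text before the "assistantfinal" marker is the thinking channel; text
    # after it is the answer. Caveat: the marker could in principle be split
    # across two chunks; this simple check assumes it arrives in one piece.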
    for chunk in streamer:
        if not started_final:
            if "assistantfinal" in chunk.lower():
                split_parts = re.split(r'assistantfinal', chunk, maxsplit=1, flags=re.IGNORECASE)
                thinking += split_parts[0]
                final += split_parts[1]
                started_final = True
            else:
                thinking += chunk
        else:
            final += chunk
        clean_thinking = re.sub(r'^analysis\s*', '', thinking).strip()  # drop the leading "analysis" channel tag
        clean_final = final.strip()
        formatted = f"<details open><summary>Click to view Thinking Process</summary>\n\n{clean_thinking}\n\n</details>\n\n{clean_final}"
        yield formatted

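# Chat UI: wires the streaming generator above to Gradio with sampling controls.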
demo = gr.ChatInterface(
    fn=generate_response,
    additional_inputs=[
        gr.Slider(label="Max new tokens", minimum=64, maximum=4096, step=1, value=2048),
        gr.Textbox(
            label="System Prompt",
            value="You are a helpful assistant. Reasoning: medium",
            lines=4,
            placeholder="Change system prompt"
        ),
        gr.Slider(label="Temperature", minimum=0.1, maximum=2.0, step=0.1, value=0.7),
        gr.Slider(label="Top-p", minimum=0.05, maximum=1.0, step=0.05, value=0.9),
        gr.Slider(label="Top-k", minimum=1, maximum=100, step=1, value=50),
        gr.Slider(label="Repetition Penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.0)
    ],
    examples=[
        [{"text": "Explain Newton laws clearly and concisely"}],
        [{"text": "Write a Python function to calculate the Fibonacci sequence"}],
        [{"text": "What are the benefits of open weight AI models"}],
    ],
    cache_examples=False,
    type="messages",
    description="""# gpt-oss-20b Demo
Give it a couple of seconds to start. You can adjust reasoning level in the system prompt like "Reasoning: high." Click to view thinking process (default is on).""",
    fill_height=True,
    textbox=gr.Textbox(
        label="Query Input",
        placeholder="Type your prompt"
    ),
    stop_btn="Stop Generation",
    multimodal=False,
    theme=gr.themes.Soft()
)

if __name__ == "__main__":
    demo.launch(share=True)