import os
import gradio as gr
from huggingface_hub import InferenceClient
# Read the access token from the environment (created in the Space's Secrets as HF_TOKEN)
HF_TOKEN = os.getenv("HF_TOKEN")
# Inference client for the GPT-OSS-120B model
client = InferenceClient(
model="openai/gpt-oss-120b",
token=HF_TOKEN,
)
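# Streaming chat handler for gr.ChatInterface: it rebuilds the full message list
# (system prompt, prior turns, new user message) and yields the accumulated
# reply as tokens stream back from the Inference API.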
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
messages = [{"role": "system", "content": system_message}]
for user_msg, bot_msg in history:
if user_msg:
messages.append({"role": "user", "content": user_msg})
if bot_msg:
messages.append({"role": "assistant", "content": bot_msg})
messages.append({"role": "user", "content": message})
response = ""
for response_chunk in client.chat_completion(
messages=messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
delta = response_chunk.choices[0].delta.content
if delta:
response += delta
yield response
# Gradio UI: the additional inputs below map to respond()'s extra parameters
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=10240, value=4096, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
title="GPT-OSS-120B на сайте GPT-ChatBot.ru",
description="powered by openai/gpt-oss-120b"
)
if __name__ == "__main__":
    demo.launch()
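# Assumed Space setup (not part of this file): requirements.txt is expected to
# list gradio and huggingface_hub, and HF_TOKEN must be added under the Space's
# Settings -> Secrets so os.getenv("HF_TOKEN") returns a valid token.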