from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch
# Load the model and tokenizer
model_name = "radlab/polish-gpt2-small-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
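# Optional (not in the original script): run inference on GPU when one is
# available, e.g.:
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model = model.to(device)
# The encoded inputs below would then also need .to(device) before generate().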
# Chat function
def chatbot(prompt, history):
    # Rebuild the conversation as plain text. Note: this assumes the
    # (user, bot) tuple history format used by older Gradio versions;
    # newer releases may pass a list of message dicts instead.
    history_text = ""
    for user, bot in history:
        history_text += f"Użytkownik: {user}\nAI: {bot}\n"
    history_text += f"Użytkownik: {prompt}\nAI:"
    inputs = tokenizer.encode(history_text, return_tensors="pt", truncation=True, max_length=1024)
    with torch.no_grad():  # inference only, no gradients needed
        outputs = model.generate(
            inputs,
            max_length=inputs.shape[1] + 80,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the newly generated answer, cutting at the next user turn
    answer = decoded[len(history_text):].split("Użytkownik:")[0].strip()
    # gr.ChatInterface manages the history itself, so return only the reply
    return answer
# Gradio UI
gr.ChatInterface(fn=chatbot, title="🤖 Polski Chatbot AI", description="Model: radlab/polish-gpt2-small-v2").launch()
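# To run this app locally (assumption: these are the script's only
# third-party dependencies):
#   pip install gradio transformers torch
#   python app.py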