from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch

# Load the model and tokenizer
model_name = "radlab/polish-gpt2-small-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.truncation_side = "left"  # keep the newest turns when the context overflows
model = AutoModelForCausalLM.from_pretrained(model_name)
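
# Run on GPU when one is available. Free Spaces are typically CPU-only, so the
# CPU fallback is the expected path here (a hedged convenience, not something
# the original Space configures).
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)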

# Chat function: gr.ChatInterface calls it with the latest message and the
# conversation history as (user, bot) pairs, and expects a string back.
def chatbot(prompt, history):
    # Rebuild the dialogue as the plain-text prompt format the model sees.
    # The Polish labels ("Użytkownik:"/"AI:") are kept: the model is Polish,
    # and the answer extraction below splits on them.
    history_text = ""
    for user, bot in history:
        history_text += f"Użytkownik: {user}\nAI: {bot}\n"
    history_text += f"Użytkownik: {prompt}\nAI:"
    # Tokenise; truncation drops the oldest turns first (truncation_side="left" above).
    inputs = tokenizer.encode(
        history_text, return_tensors="pt", truncation=True, max_length=1024
    ).to(model.device)
    # Sample a continuation; no_grad avoids building a useless autograd graph.
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_new_tokens=80,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens; slicing by token position is
    # robust even when the prompt was truncated.
    answer = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
    # Cut off anything where the model starts inventing the next user turn.
    answer = answer.split("Użytkownik:")[0].strip()
    # gr.ChatInterface manages the history itself, so return just the reply.
    return answer
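
# Quick sanity check without the UI (hypothetical prompt; uncomment to try it
# from a plain Python session before launching the Space):
# print(chatbot("Jak masz na imię?", []))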

# Gradio UI
gr.ChatInterface(
    fn=chatbot,
    title="🤖 Polski Chatbot AI",
    description="Model: radlab/polish-gpt2-small-v2",
).launch()
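
# For reference, a Space like this needs a requirements.txt along these lines
# (a sketch; the original does not pin versions):
#   gradio
#   torch
#   transformers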