|
import gradio as gr |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
|
|
|
# Hugging Face Hub id of the Sam chatbot checkpoint.
model_name = "Smilyai-labs/Sam-large-v1-speacil"

# Load tokenizer and causal-LM weights once at import time so every
# respond() call reuses them (first run downloads from the Hub).
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
|
|
def respond(message, history):
    """Generate Sam's reply to *message* given the running conversation.

    Args:
        message: The user's latest message (str).
        history: List of (user, bot) string pairs from earlier turns, or
            None on the very first call.

    Returns:
        A (history, history) pair — the updated conversation duplicated so
        gr.Interface can route one copy to the Chatbot display and one to
        the gr.State that is threaded back into the next call.
    """
    history = history or []

    # Rebuild the full prompt from the conversation so far.
    turns = [f"User: {user}\nSam: {bot}\n" for user, bot in history]
    chat_prompt = "".join(turns) + f"User: {message}\nSam:"

    inputs = tokenizer(chat_prompt, return_tensors="pt")

    # Use max_new_tokens (not max_length): max_length counts the prompt
    # tokens too, so the reply budget would shrink — and eventually hit
    # zero — as the conversation grows. Pass the attention mask and an
    # explicit pad token id; many causal LMs define no pad token and
    # generate() warns or misbehaves without one.
    pad_id = tokenizer.pad_token_id
    if pad_id is None:
        pad_id = tokenizer.eos_token_id
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=100,
        num_return_sequences=1,
        pad_token_id=pad_id,
    )

    # Strip the prompt by *token* count, not character count: decoding the
    # prompt tokens is not guaranteed to round-trip to the exact prompt
    # string, so response[len(chat_prompt):] could clip the reply or leak
    # prompt text. Keep only the first line, as before.
    prompt_len = inputs["input_ids"].shape[1]
    generated = outputs[0][prompt_len:]
    reply = tokenizer.decode(generated, skip_special_tokens=True)
    reply = reply.split("\n")[0].strip()

    history.append((message, reply))
    return history, history
|
|
|
|
|
# Chat transcript widget; rendered as the first output of the Interface.
chatbot = gr.Chatbot()

demo = gr.Interface(

    fn=respond,

    # The second input/output is gr.State: it carries the (user, bot)
    # history between calls without rendering anything in the UI.
    inputs=[gr.Textbox(label="Your message"), gr.State()],

    outputs=[chatbot, gr.State()],

    title="🧠 Sam Chatbot",

    description="A demo of Sam-large-v1-speacil: chaotic, aligned, and fun."

)

# Start the Gradio server (blocks until the app is stopped).
demo.launch()
|
|