Create app.py
app.py
ADDED
@@ -0,0 +1,45 @@
+import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+# Load model and tokenizer
+model_name = "Smilyai-labs/Sam-large-v1-speacil"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
+
+# Chat function
+def respond(message, history):
+    history = history or []
+    chat_prompt = ""
+    for user, bot in history:
+        chat_prompt += f"User: {user}\nSam: {bot}\n"
+    chat_prompt += f"User: {message}\nSam:"
+
+    inputs = tokenizer(chat_prompt, return_tensors="pt").to(device)
+    outputs = model.generate(
+        **inputs,
+        max_new_tokens=128,
+        temperature=0.7,
+        top_p=0.9,
+        do_sample=True,
+        pad_token_id=tokenizer.eos_token_id
+    )
+    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]  # keep only the newly generated tokens
+    reply = tokenizer.decode(new_tokens, skip_special_tokens=True).split("\n")[0].strip()
+
+    history.append((message, reply))
+    return history, history
+
+# Gradio interface
+chatbot = gr.Chatbot()
+demo = gr.Interface(
+    fn=respond,
+    inputs=[gr.Textbox(label="Your message"), gr.State()],
+    outputs=[chatbot, gr.State()],
+    title="🧠 Sam Chatbot",
+    description="A demo of Sam-large-v1-speacil: chaotic, aligned, and fun."
+)
+
+demo.launch()
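
A minimal smoke test of the respond function above, assuming the dependencies install and the model weights download; it uses only names defined in the diff itself and is an illustrative sketch, not part of the committed file:

history = []
history, _ = respond("Hello, Sam!", history)  # respond returns (updated history, state)
print(history[-1][1])  # Sam's reply to the first message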