boning123 committed
Commit c64f60a · verified · 1 Parent(s): 18183af

Update app.py

Files changed (1)
  1. app.py +4 -12
app.py CHANGED
@@ -1,13 +1,10 @@
  import gradio as gr
  from transformers import AutoTokenizer, AutoModelForCausalLM
- import torch
 
  # Load model and tokenizer
  model_name = "Smilyai-labs/Sam-large-v1-speacil"
  tokenizer = AutoTokenizer.from_pretrained(model_name)
  model = AutoModelForCausalLM.from_pretrained(model_name)
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- model.to(device)
 
  # Chat function
  def respond(message, history):
@@ -17,15 +14,10 @@ def respond(message, history):
          chat_prompt += f"User: {user}\nSam: {bot}\n"
      chat_prompt += f"User: {message}\nSam:"
 
-     inputs = tokenizer(chat_prompt, return_tensors="pt").to(device)
-     outputs = model.generate(
-         **inputs,
-         max_new_tokens=128,
-         temperature=0.7,
-         top_p=0.9,
-         do_sample=True,
-         pad_token_id=tokenizer.eos_token_id
-     )
+     # Tokenize input and generate a response
+     inputs = tokenizer(chat_prompt, return_tensors="pt")
+     outputs = model.generate(inputs["input_ids"], max_length=200, num_return_sequences=1)
+
      response = tokenizer.decode(outputs[0], skip_special_tokens=True)
      reply = response[len(chat_prompt):].split("\n")[0].strip()
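For context on what this change does (a hedged note, not part of the commit): dropping `torch` and the `model.to(device)` call means the model now stays wherever `from_pretrained` loads it, i.e. on CPU by default, and removing `do_sample=True` switches decoding from top-p sampling to greedy search. Also, in transformers `max_length` caps prompt and completion together, whereas the removed `max_new_tokens=128` capped only the completion. A minimal, self-contained sketch of how the two budgets relate, reusing the model and tokenizer from app.py (the short test prompt is illustrative only):

    # Hedged sketch contrasting the old and new token budgets; not part of the commit.
    from transformers import AutoTokenizer, AutoModelForCausalLM

    model_name = "Smilyai-labs/Sam-large-v1-speacil"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    inputs = tokenizer("User: hi\nSam:", return_tensors="pt")
    prompt_len = inputs["input_ids"].shape[1]

    # New code: greedy decoding; prompt + completion share one 200-token cap.
    outputs_new = model.generate(inputs["input_ids"], max_length=200, num_return_sequences=1)

    # Old code's budget expressed via max_length: the completion alone gets 128
    # tokens, however long the prompt is (the removed sampling flags omitted here).
    outputs_old_budget = model.generate(inputs["input_ids"], max_length=prompt_len + 128)

Since respond() folds the whole chat history into chat_prompt, the prompt grows every turn, so under max_length=200 the reply budget shrinks toward zero as the conversation gets longer; max_new_tokens is usually the safer knob for chat loops.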