phanerozoic committed
Commit 72242ed · verified · 1 Parent(s): 46ffd61

Update app.py

Files changed (1): app.py (+9 -40)
app.py CHANGED

@@ -1,11 +1,3 @@
-"""
-SchoolSpirit AI – Llama‑3 3 B public chatbot Space
--------------------------------------------------
-• Loads Meta Llama‑3.2‑3B‑Instruct.
-• Keeps only last 6 turns to fit context.
-• Handles model‑load or generation failures gracefully.
-"""
-
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 from transformers.utils import logging as hf_logging
@@ -23,7 +15,6 @@ SYSTEM_MSG = (
     "personal data."
 )
 
-# ---------------------------------------------------------------------------
 try:
     tok = AutoTokenizer.from_pretrained(MODEL_ID)
     model = AutoModelForCausalLM.from_pretrained(
@@ -40,54 +31,32 @@ try:
     model_error = None
 except Exception as exc:  # noqa: BLE001
     model_error = f"Model load error: {exc}"
-    gen = None  # ensure var exists
-# ---------------------------------------------------------------------------
-
+    gen = None
 
 def chat(history, user_msg):
-    """Gradio ChatInterface callback using new 'messages' format."""
     if model_error:
-        history.append(
-            {
-                "role": "assistant",
-                "content": model_error,
-            }
-        )
-        return history
+        return history + [(user_msg, model_error)], ""
 
-    # Trim to last MAX_TURNS messages (role+assistant pairs)
-    if len(history) > MAX_TURNS * 2:
-        history = history[-MAX_TURNS * 2 :]
+    if len(history) > MAX_TURNS:
+        history = history[-MAX_TURNS:]
 
-    # Build prompt
     prompt = SYSTEM_MSG + "\n"
-    for msg in history:
-        role = "User" if msg["role"] == "user" else "AI"
-        prompt += f"{role}: {msg['content']}\n"
+    for u, a in history:
+        prompt += f"User: {u}\nAI: {a}\n"
     prompt += f"User: {user_msg}\nAI:"
 
     try:
         completion = gen(prompt)[0]["generated_text"]
         reply = completion.split("AI:", 1)[-1].strip()
     except Exception as err:  # noqa: BLE001
-        reply = (
-            "Sorry, something went wrong on my end. "
-            "Please try again in a few seconds."
-        )
+        reply = "Sorry, an internal error occurred. Please try again later."
         hf_logging.get_logger("SchoolSpirit").error(str(err))
 
-    history.extend(
-        [
-            {"role": "user", "content": user_msg},
-            {"role": "assistant", "content": reply},
-        ]
-    )
-    return history
-
+    history.append((user_msg, reply))
+    return history, ""
 
 gr.ChatInterface(
     chat,
     title="SchoolSpirit AI Chat",
     theme=gr.themes.Soft(primary_hue="blue"),
-    type="messages",  # avoids “tuples” deprecation warning
 ).launch()
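Note on the trim logic: the old "messages" history held two dicts per exchange, so it kept the last MAX_TURNS * 2 entries; the new tuple history holds one (user, assistant) pair per exchange, so keeping the last MAX_TURNS entries preserves the same number of exchanges. A standalone sketch of that arithmetic (illustrative only, not part of app.py; MAX_TURNS = 6 per the deleted docstring):

MAX_TURNS = 6

# Old "messages" format: two dicts per exchange -> trim keeps MAX_TURNS * 2 entries.
messages = []
for i in range(10):
    messages.append({"role": "user", "content": f"question {i}"})
    messages.append({"role": "assistant", "content": f"answer {i}"})
trimmed_messages = messages[-MAX_TURNS * 2:]

# New tuple format: one pair per exchange -> trim keeps MAX_TURNS entries.
pairs = [(f"question {i}", f"answer {i}") for i in range(10)]
trimmed_pairs = pairs[-MAX_TURNS:]

# Both retain the same six most recent exchanges.
assert len(trimmed_messages) == 2 * len(trimmed_pairs) == 12
assert trimmed_messages[0]["content"] == trimmed_pairs[0][0] == "question 4"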
 
 
 
 
 
 
 
 
 
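For reference, the rewritten loop flattens the tuple history into a plain-text transcript before generation; chat() then takes everything after the first "AI:" in the generated text as the reply. A minimal sketch of the prompt shape (the SYSTEM_MSG value here is a stand-in, since the Space's full system message is not shown in this diff):

SYSTEM_MSG = "You are SchoolSpirit AI."  # stand-in; the real value is defined earlier in app.py
history = [("Hi!", "Hello! How can I help you today?")]
user_msg = "When does the school office open?"

prompt = SYSTEM_MSG + "\n"
for u, a in history:
    prompt += f"User: {u}\nAI: {a}\n"
prompt += f"User: {user_msg}\nAI:"

print(prompt)
# You are SchoolSpirit AI.
# User: Hi!
# AI: Hello! How can I help you today?
# User: When does the school office open?
# AI: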