aika42 committed (verified)
Commit a7f2414 · Parent(s): fc7991c

Update app.py

maybe works without the sidebar

Files changed (1):
  1. app.py +36 -73
app.py CHANGED
@@ -7,44 +7,25 @@ import plotly.graph_objects as go
 
 def extract_scores_and_verdict(result_text):
     scores = {}
-
-    # Match categories based on keywords (not emojis)
     patterns = {
         "Clarity": r"[-–‒\s]*[^\w]?Clarity:\s*(\d)/5",
         "Context": r"[-–‒\s]*[^\w]?Context:\s*(\d)/5",
         "Specificity": r"[-–‒\s]*[^\w]?Specificity:\s*(\d)/5",
         "Intent Alignment": r"[-–‒\s]*[^\w]?Intent Alignment:\s*(\d)/5"
     }
-
     for key, pattern in patterns.items():
         match = re.search(pattern, result_text)
-        if match:
-            scores[key] = int(match.group(1))
-        else:
-            scores[key] = 0
-
-    # Match verdict based on emoji OR fallback to keyword
-
-    #verdict_match = re.search(r"Verdict: (✅|⚠️|🚫)", result_text)
+        scores[key] = int(match.group(1)) if match else 0
     verdict_match = re.search(r"Verdict:\s*(✅|⚠️|🚫)", result_text)
     verdict_emoji = verdict_match.group(1) if verdict_match else "✅"
-
     return scores, verdict_emoji
 
-
 def generate_radar_plot(scores, verdict_emoji):
     categories = list(scores.keys())
     values = list(scores.values())
-    values.append(values[0])  # close the loop for radar
-
-    colors = {
-        "✅": "green",
-        "⚠️": "orange",
-        "🚫": "red"
-    }
-
+    values.append(values[0])  # loop
+    colors = { "✅": "green", "⚠️": "orange", "🚫": "red" }
     fig = go.Figure()
-
     fig.add_trace(go.Scatterpolar(
         r=values,
         theta=categories + [categories[0]],
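
The tightened parser behaves the same as the old if/else: each category falls back to 0 when its line is missing. A minimal sketch of the regex against a reply shaped like the evaluation template (the sample text below is illustrative, not real model output):

```python
import re

# Illustrative reply following the "Category: n/5 ... Verdict: emoji" shape the template asks for.
sample = """- Clarity: 4/5
- Context: 3/5
- Specificity: 5/5
- Intent Alignment: 4/5
Verdict: ⚠️"""

pattern = r"[-–‒\s]*[^\w]?Clarity:\s*(\d)/5"
match = re.search(pattern, sample)
score = int(match.group(1)) if match else 0  # the new one-liner: 4 here, 0 if the line were absent
verdict = re.search(r"Verdict:\s*(✅|⚠️|🚫)", sample)
print(score, verdict.group(1) if verdict else "✅")  # 4 ⚠️
```
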
@@ -53,17 +34,13 @@ def generate_radar_plot(scores, verdict_emoji):
         line=dict(color=colors.get(verdict_emoji, "gray")),
         marker=dict(size=8)
     ))
-
     fig.update_layout(
-        polar=dict(
-            radialaxis=dict(visible=True, range=[0, 5]),
-        ),
+        polar=dict(radialaxis=dict(visible=True, range=[0, 5])),
         showlegend=False,
         title="🔎 Prompt Evaluation Radar"
     )
     return fig
 
-# Environment Setup
 HF_API_URL = "https://router.huggingface.co/novita/v3/openai/chat/completions"
 HF_TOKEN = os.environ.get("HF_PROJECT_TOKEN")
 HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
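
The layout tweak only inlines the polar axis configuration; the figure itself is unchanged. A standalone sketch of the same radar call with placeholder scores, for anyone who wants to eyeball it outside Streamlit:

```python
import plotly.graph_objects as go

# Placeholder scores; in the app these come from extract_scores_and_verdict().
categories = ["Clarity", "Context", "Specificity", "Intent Alignment"]
values = [4, 3, 5, 4]
values.append(values[0])  # close the loop so the polygon wraps around

fig = go.Figure(go.Scatterpolar(
    r=values,
    theta=categories + [categories[0]],
    line=dict(color="orange"),
    marker=dict(size=8),
))
fig.update_layout(
    polar=dict(radialaxis=dict(visible=True, range=[0, 5])),
    showlegend=False,
    title="🔎 Prompt Evaluation Radar",
)
fig.show()
```
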
@@ -102,16 +79,14 @@ Improved Prompt:
 "[Rewritten version of the user prompt]"
 """
 
-# Function to query DeepSeek
+
 @st.cache_data(show_spinner=False)
 def evaluate_prompt(user_prompt):
     payload = {
-        "messages": [
-            {"role": "user", "content": PROMPT_TEMPLATE.format(user_prompt=user_prompt)}
-        ],
+        "messages": [{"role": "user", "content": PROMPT_TEMPLATE.format(user_prompt=user_prompt)}],
         "model": "deepseek/deepseek-r1-turbo",
-        "temperature": 0.7,  # Required for HF's OpenAI-compatible endpoint
-        "stream": False  # Explicitly set streaming
+        "temperature": 0.7,
+        "stream": False
     }
     response = requests.post(HF_API_URL, headers=HEADERS, json=payload)
     if response.status_code == 200:
@@ -119,9 +94,10 @@ def evaluate_prompt(user_prompt):
     else:
         return f"Error: {response.status_code} - {response.text}"
 
-
-# App UI
-st.set_page_config("PromptPolice", page_icon="🚓", layout="centered")
+# ----------- UI STARTS HERE -------------
+st.set_page_config("PromptPolice", page_icon="🚓", layout="wide")
+
+# Custom styling
 st.markdown("""
 <style>
 .main {background-color: #0f1117; color: #f0f0f0; font-family: 'Segoe UI', sans-serif;}
@@ -134,52 +110,39 @@ st.markdown("""
 st.title("🚓 PromptPolice")
 st.caption("Evaluate your prompts like a seasoned detective.")
 
-# Sidebar
-with st.sidebar:
-    st.header("🛠 Prompt Tools")
-    use_example = st.toggle("Load Example Prompt")
-    st.markdown("---")
-    st.info("Paste a natural language prompt and get an instant evaluation. No fluff.", icon="📌")
-
-# Main Input
-if use_example:
-    user_input = st.text_area("Paste your prompt here:",
-                              "Generate a short story about a robot in a post-apocalyptic world.",
-                              height=200)
-else:
-    user_input = st.text_area("Paste your prompt here:", height=200)
-
-# Evaluate Button
-if st.button(":mag_right: Evaluate Prompt"):
-    if not HF_TOKEN:
-        st.error("Missing Hugging Face token. Please set HF_PROJECT_TOKEN as environment variable.")
-    elif user_input.strip() == "":
-        st.warning("Please enter a prompt to evaluate.")
-    else:
-        with st.spinner("Evaluating prompt with PromptPolice..."):
-            result = evaluate_prompt(user_input)
+col1, col2 = st.columns([1, 2], gap="large")
 
-        st.markdown("---")
-        st.subheader(":clipboard: Evaluation Result")
-        scores, verdict_emoji = extract_scores_and_verdict(result)
-        radar_fig = generate_radar_plot(scores, verdict_emoji)
-        st.plotly_chart(radar_fig, use_container_width=True)
+with col1:
+    user_input = st.text_area("Paste your prompt here:", height=300, placeholder="e.g. Generate a short story about a robot in a post-apocalyptic world.")
 
+    if st.button(":mag_right: Evaluate Prompt"):
+        if not HF_TOKEN:
+            st.error("Missing Hugging Face token. Please set HF_PROJECT_TOKEN as environment variable.")
+        elif user_input.strip() == "":
+            st.warning("Please enter a prompt to evaluate.")
+        else:
+            with st.spinner("Evaluating prompt with PromptPolice..."):
+                result = evaluate_prompt(user_input)
+                st.session_state["result"] = result
+                st.session_state["scores"], st.session_state["verdict"] = extract_scores_and_verdict(result)
+
+# Right column — show result only if evaluated
+if "result" in st.session_state:
+    with col2:
+        st.subheader(":bar_chart: Radar Score")
+        radar_fig = generate_radar_plot(st.session_state["scores"], st.session_state["verdict"])
+        st.plotly_chart(radar_fig, use_container_width=True)
 
-        # Add scroll anchor
         st.markdown("<a name='result'></a>", unsafe_allow_html=True)
+        st.subheader(":scroll: Evaluation Result")
 
-        # Render result with dark theme-friendly style
         st.markdown(f"""
         <div style='background-color:#1e1e1e; color:#f0f0f0; padding:20px; border-radius:10px; border: 1px solid #444; font-family: monospace; font-size: 15px;'>
-        <pre style='white-space:pre-wrap; color:#f0f0f0;'>{result}</pre>
+        <pre style='white-space:pre-wrap; color:#f0f0f0;'>{st.session_state["result"]}</pre>
         </div>
         """, unsafe_allow_html=True)
 
-        # Download button
-        st.download_button("Download Evaluation", result, file_name="evaluation.txt")
-
-        # Toast + Balloons + Auto-scroll
+        st.download_button("Download Evaluation", st.session_state["result"], file_name="evaluation.txt")
         st.toast("✅ Evaluation complete!", icon="🤖")
         st.balloons()
         st.markdown("""
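
This is the hunk that actually removes the sidebar: input moves into col1, and the evaluation result is stashed in st.session_state instead of a local variable. That matters because Streamlit reruns the whole script on every widget interaction, so only state stored in st.session_state survives to the next run and lets col2 keep rendering the last evaluation. A stripped-down sketch of the pattern (the labels and fake result are placeholders):

```python
import streamlit as st

col1, col2 = st.columns([1, 2], gap="large")

with col1:
    text = st.text_area("Paste your prompt here:")
    if st.button("Evaluate"):
        # Persist the result so it survives the rerun triggered by the next interaction.
        st.session_state["result"] = f"echo: {text}"  # placeholder for evaluate_prompt(text)

if "result" in st.session_state:
    with col2:
        st.write(st.session_state["result"])
```
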
@@ -200,4 +163,4 @@ st.markdown("""
 Made with ❤️ by <b>Penguins</b> · Powered by <code>DeepSeek R1 Turbo</code><br>
 No data stored · No nonsense · Just prompt justice ⚖️
 </center>
-""", unsafe_allow_html=True)
+""", unsafe_allow_html=True)
 