app.py CHANGED
@@ -28,7 +28,7 @@ from src.display.utils import (
28
  Precision
29
  )
30
  from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
31
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
32
  from src.submission.submit import add_new_eval
33
  import base64
34
 
@@ -60,11 +60,8 @@ def make_rate_chart(df: pd.DataFrame):
60
  return fig
61
 
62
  def make_leaderboard_plot(df: pd.DataFrame, col: str, title: str, bar_color: str):
63
- """
64
- Return a horizontal bar chart sorted ascending by `col`.
65
- Lowest value (best) at the top.
66
- """
67
- df_sorted = df.sort_values(col, ascending=False) # best → worst
68
  fig = px.bar(
69
  df_sorted,
70
  x=col,
@@ -87,6 +84,100 @@ def make_leaderboard_plot(df: pd.DataFrame, col: str, title: str, bar_color: str
87
  return fig
88
 
89
90
  def color_scale(s, cmap):
91
  """
92
  Return background-colour styles for a numeric Series (lower = greener,
@@ -120,14 +211,9 @@ except Exception:
120
  print(f"[WARN] Skipping RESULTS sync: {Exception}")
121
 
122
 
123
- # LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
124
  LEADERBOARD_DF = get_leaderboard_df("leaderboard/data/leaderboard.csv")
 
125
 
126
- # (
127
- # finished_eval_queue_df,
128
- # running_eval_queue_df,
129
- # pending_eval_queue_df,
130
- # ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
131
 
132
  def init_leaderboard(df: pd.DataFrame):
133
  if df is None or df.empty:
@@ -198,7 +284,7 @@ with demo:
198
  ),
199
  show_label=False,
200
  )
201
- gr.Markdown("*HaluEval-QA benchmark (RAG): The model receives a question plus supporting context. We report the % of answers that introduce facts not found in that context β€” lower is better. See the **Details** tab for more information.*", elem_classes="plot-caption")
202
  with gr.Column():
203
  gr.Plot(
204
  make_leaderboard_plot(
@@ -209,25 +295,13 @@ with demo:
209
  ),
210
  show_label=False,
211
  )
212
- gr.Markdown("*UltraChat benchmark (~11 k prompts, non-RAG): Evaluates open-domain answers when only the question is given. Score is the % of hallucinated responses β€” lower is better. See the **Details** tab for more information.*", elem_classes="plot-caption")
213
 
214
  # ---------- Leaderboard ----------
215
  leaderboard = init_leaderboard(LEADERBOARD_DF)
216
 
217
  # ---------- Get Started with Verify ----------
218
- verify_markdown = textwrap.dedent(
219
- """
220
- ## Get started with Verify by kluster.ai
221
-
222
- Verify is an intelligent agent that validates LLM outputs in real-time.
223
-
224
- - **Blog post:** [Introducing Verify by kluster.ai](https://www.kluster.ai/blog/introducing-verify-by-kluster-ai-the-missing-trust-layer-in-your-ai-stack)
225
- - **Documentation:** [Verify overview & API reference](https://docs.kluster.ai/get-started/verify/overview/)
226
- - **Try it out in your browser:** [kluster.ai platform](https://platform.kluster.ai/verify)
227
-
228
- ### Quick API examples
229
- """
230
- )
231
 
232
  gr.Markdown(verify_markdown, elem_classes="markdown-text")
233
 
@@ -259,108 +333,75 @@ with demo:
259
 
260
  gr.Code(code_example_chat, language="shell")
261
 
262
- with gr.TabItem("📝 Details", elem_id="llm-benchmark-tab-table", id=2):
263
- gr.Markdown((Path(__file__).parent / "docs.md").read_text(), elem_classes="markdown-text")
264
 
265
- with gr.TabItem("🚀 Submit Here! ", elem_id="llm-benchmark-tab-table", id=3):
266
- gr.Markdown((Path(__file__).parent / "submit.md").read_text(), elem_classes="markdown-text")
267
 
268
- # with gr.Column():
269
- # with gr.Row():
270
- # gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
271
-
272
- # with gr.Column():
273
- # with gr.Accordion(
274
- # f"βœ… Finished Evaluations ({len(finished_eval_queue_df)})",
275
- # open=False,
276
- # ):
277
- # with gr.Row():
278
- # finished_eval_table = gr.components.Dataframe(
279
- # value=finished_eval_queue_df,
280
- # headers=EVAL_COLS,
281
- # datatype=EVAL_TYPES,
282
- # row_count=5,
283
- # )
284
- # with gr.Accordion(
285
- # f"πŸ”„ Running Evaluation Queue ({len(running_eval_queue_df)})",
286
- # open=False,
287
- # ):
288
- # with gr.Row():
289
- # running_eval_table = gr.components.Dataframe(
290
- # value=running_eval_queue_df,
291
- # headers=EVAL_COLS,
292
- # datatype=EVAL_TYPES,
293
- # row_count=5,
294
- # )
295
-
296
- # with gr.Accordion(
297
- # f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
298
- # open=False,
299
- # ):
300
- # with gr.Row():
301
- # pending_eval_table = gr.components.Dataframe(
302
- # value=pending_eval_queue_df,
303
- # headers=EVAL_COLS,
304
- # datatype=EVAL_TYPES,
305
- # row_count=5,
306
- # )
307
- # with gr.Row():
308
- # gr.Markdown("# βœ‰οΈβœ¨ Submit your model here!", elem_classes="markdown-text")
309
-
310
- # with gr.Row():
311
- # with gr.Column():
312
- # model_name_textbox = gr.Textbox(label="Model name")
313
- # revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
314
- # model_type = gr.Dropdown(
315
- # choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
316
- # label="Model type",
317
- # multiselect=False,
318
- # value=None,
319
- # interactive=True,
320
- # )
321
-
322
- # with gr.Column():
323
- # precision = gr.Dropdown(
324
- # choices=[i.value.name for i in Precision if i != Precision.Unknown],
325
- # label="Precision",
326
- # multiselect=False,
327
- # value="float16",
328
- # interactive=True,
329
- # )
330
- # weight_type = gr.Dropdown(
331
- # choices=[i.value.name for i in WeightType],
332
- # label="Weights type",
333
- # multiselect=False,
334
- # value="Original",
335
- # interactive=True,
336
- # )
337
- # base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
338
-
339
- # submit_button = gr.Button("Submit Eval")
340
- # submission_result = gr.Markdown()
341
- # submit_button.click(
342
- # add_new_eval,
343
- # [
344
- # model_name_textbox,
345
- # base_model_name_textbox,
346
- # revision_name_textbox,
347
- # precision,
348
- # weight_type,
349
- # model_type,
350
- # ],
351
- # submission_result,
352
- # )
353
-
354
- # with gr.Row():
355
- # with gr.Accordion("📙 Citation", open=False):
356
- # citation_button = gr.Textbox(
357
- # value=CITATION_BUTTON_TEXT,
358
- # label=CITATION_BUTTON_LABEL,
359
- # lines=20,
360
- # elem_id="citation-button",
361
- # show_copy_button=True,
362
- # )
363
 
 
364
  scheduler = BackgroundScheduler()
365
  scheduler.add_job(restart_space, "interval", seconds=1800)
366
  scheduler.start()
 
28
  Precision
29
  )
30
  from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
31
+ from src.populate import get_evaluation_queue_df, get_leaderboard_df, get_rag_leaderboard_df
32
  from src.submission.submit import add_new_eval
33
  import base64
34
 
 
60
  return fig
61
 
62
  def make_leaderboard_plot(df: pd.DataFrame, col: str, title: str, bar_color: str):
63
+
64
+ df_sorted = df.sort_values(col, ascending=False)  # descending sort puts the lowest (best) value at the top of the horizontal bars
65
  fig = px.bar(
66
  df_sorted,
67
  x=col,
 
84
  return fig
85
 
86
 
87
+ def make_rag_average_plot(df: pd.DataFrame, col: str, title: str, bar_color: str):
88
+ rag_cols = [
89
+ "Context in System Prompt (%)",
90
+ "Context and Question Single-Turn (%)",
91
+ "Context and Question Two-Turns (%)",
92
+ ]
93
+
94
+ df_plot = df.copy()
95
+ if col not in df_plot.columns:
96
+ df_plot[col] = df_plot[rag_cols].mean(axis=1, skipna=True).round(2)
97
+
98
+ df_plot["Std Dev"] = df_plot[rag_cols].std(axis=1, skipna=True).round(2)
99
+
100
+ df_sorted = df_plot.sort_values(col, ascending=False)
101
+
102
+ fig = px.bar(
103
+ df_sorted,
104
+ x=col,
105
+ y="Models",
106
+ orientation="h",
107
+ title=title,
108
+ height=400,
109
+ color_discrete_sequence=[bar_color],
110
+ error_x="Std Dev",
111
+ )
112
+ fig.update_traces(
113
+ texttemplate="%{x:.2f}",
114
+ textposition="inside",
115
+ insidetextanchor="middle",
116
+ cliponaxis=False,
117
+ )
118
+ fig.update_layout(
119
+ xaxis_title="Hallucination Rate (%)",
120
+ yaxis_title="",
121
+ yaxis=dict(dtick=1),
122
+ margin=dict(l=140, r=60, t=60, b=40),
123
+ )
124
+ return fig
125
+
126
+
127
+ def make_rag_method_average_plot(df: pd.DataFrame, title: str, bar_color: str):
128
+ method_cols = [
129
+ "Context in System Prompt (%)",
130
+ "Context and Question Single-Turn (%)",
131
+ "Context and Question Two-Turns (%)",
132
+ ]
133
+
134
+ averages = df[method_cols].mean().round(2)
135
+ stds = df[method_cols].std().round(2)
136
+
137
+ avg_df = pd.DataFrame(
138
+ {
139
+ "RAG Method": averages.index,
140
+ "Average Hallucination Rate (%)": averages.values,
141
+ "Std Dev": stds.values,
142
+ }
143
+ )
144
+
145
+ fig = px.bar(
146
+ avg_df,
147
+ x="RAG Method",
148
+ y="Average Hallucination Rate (%)",
149
+ error_y="Std Dev",
150
+ title=title,
151
+ height=400,
152
+ color_discrete_sequence=[bar_color],
153
+ )
154
+ fig.update_traces(
155
+ texttemplate="%{y:.2f}",  # bars are vertical, so the label comes from the y value
156
+ textposition="inside",
157
+ insidetextanchor="start",
158
+ cliponaxis=False,
159
+ textfont_color="white",
160
+ )
161
+ labels_map = {
162
+ "Context in System Prompt (%)": "Context in<br>System Prompt",
163
+ "Context and Question Single-Turn (%)": "Context & Question<br>Single-Turn",
164
+ "Context and Question Two-Turns (%)": "Context & Question<br>Two-Turns",
165
+ }
166
+ fig.update_xaxes(
167
+ tickmode="array",
168
+ tickvals=list(labels_map.keys()),
169
+ ticktext=list(labels_map.values()),
170
+ tickangle=0,
171
+ automargin=True,
172
+ )
173
+ fig.update_layout(
174
+ xaxis_title="",
175
+ yaxis_title="Hallucination Rate (%)",
176
+ margin=dict(l=40, r=100, t=60, b=120),
177
+ )
178
+ return fig
179
+
180
+
181
  def color_scale(s, cmap):
182
  """
183
  Return background-colour styles for a numeric Series (lower = greener,
 
211
  print(f"[WARN] Skipping RESULTS sync: {Exception}")
212
 
213
 
 
214
  LEADERBOARD_DF = get_leaderboard_df("leaderboard/data/leaderboard.csv")
215
+ RAG_DF = get_rag_leaderboard_df("leaderboard/data/rag_methods_compare.csv")
216
 
217
 
218
  def init_leaderboard(df: pd.DataFrame):
219
  if df is None or df.empty:
 
284
  ),
285
  show_label=False,
286
  )
287
+ gr.Markdown("*HaluEval-QA benchmark (RAG): The model receives a question plus supporting context. We report the % of answers that introduce facts not found in that context β€” lower is better. See the **Methodology** section below for more information.*", elem_classes="plot-caption")
288
  with gr.Column():
289
  gr.Plot(
290
  make_leaderboard_plot(
 
295
  ),
296
  show_label=False,
297
  )
298
+ gr.Markdown("*UltraChat benchmark (~11 k prompts, non-RAG): Evaluates open-domain answers when only the question is given. Score is the % of hallucinated responses β€” lower is better. See the **Methodology** section below for more information.*", elem_classes="plot-caption")
299
 
300
  # ---------- Leaderboard ----------
301
  leaderboard = init_leaderboard(LEADERBOARD_DF)
302
 
303
  # ---------- Get Started with Verify ----------
304
+ verify_markdown = (Path(__file__).parent / "verify.md").read_text()
305
 
306
  gr.Markdown(verify_markdown, elem_classes="markdown-text")
307
 
 
333
 
334
  gr.Code(code_example_chat, language="shell")
335
 
336
+ with gr.Accordion("📄 Methodology & Benchmark Details", open=True):
337
+ gr.Markdown((Path(__file__).parent / "docs.md").read_text(), elem_classes="markdown-text")
338
 
339
+
340
+ with gr.TabItem("🧪 RAG Techniques and Hallucinations", elem_id="llm-benchmark-tab-table", id=2):
341
+ rag_intro_markdown = (Path(__file__).parent / "rag_techniques_intro.md").read_text()
342
+ rag_details_markdown = (Path(__file__).parent / "rag_techniques_details.md").read_text()
343
+
344
+ gr.Markdown(rag_intro_markdown, elem_classes="markdown-text")
345
 
346
+ with gr.Row():
347
+ with gr.Column():
348
+ gr.Plot(
349
+ make_rag_method_average_plot(
350
+ RAG_DF,
351
+ "Average Hallucination Rate by RAG Method (lower is better)",
352
+ bar_color="#4CAF50",
353
+ ),
354
+ show_label=False,
355
+ )
356
+ gr.Markdown(
357
+ "*Mean hallucination rate for each RAG prompting strategy across all models on the HaluEval-QA benchmark. Error bars represent Β±1 SD; lower is better.*",
358
+ elem_classes="plot-caption",
359
+ )
360
+ with gr.Column():
361
+ gr.Plot(
362
+ make_rag_average_plot(
363
+ RAG_DF,
364
+ "Average Hallucination Rate (%)",
365
+ "Average Hallucination Rate per Model (lower is better)",
366
+ bar_color="#2196F3",
367
+ ),
368
+ show_label=False,
369
+ )
370
+ gr.Markdown(
371
+ "*Mean hallucination rate across the three RAG prompting settings for each individual model. Error bars show Β±1 SD across the three strategies; lower is better.*",
372
+ elem_classes="plot-caption",
373
+ )
374
+
375
+
376
+ rag_leaderboard = Leaderboard(
377
+ value=RAG_DF,
378
+ datatype=["markdown", "number", "number", "number"],
379
+ select_columns=SelectColumns(
380
+ default_selection=[
381
+ "Models",
382
+ "Context in System Prompt (%)",
383
+ "Context and Question Single-Turn (%)",
384
+ "Context and Question Two-Turns (%)",
385
+ ],
386
+ cant_deselect=["Models"],
387
+ label="Select RAG Method Columns:",
388
+ ),
389
+ search_columns=["Models"],
390
+ bool_checkboxgroup_label=None,
391
+ interactive=False,
392
+ height=700
393
+ )
394
+
395
+ with gr.Accordion("📄 RAG Techniques & Benchmark Details", open=True):
396
+ gr.Markdown(rag_details_markdown, elem_classes="markdown-text")
397
+
398
+
399
+
400
+
401
+ with gr.TabItem("🚀 Submit Here! ", elem_id="llm-benchmark-tab-table", id=4):
402
+ gr.Markdown((Path(__file__).parent / "submit.md").read_text(), elem_classes="markdown-text")
403
 
404
+
405
  scheduler = BackgroundScheduler()
406
  scheduler.add_job(restart_space, "interval", seconds=1800)
407
  scheduler.start()
docs.md CHANGED
@@ -1,3 +1,7 @@
1
  # About
2
 
3
  As large language models (LLMs) continue to improve, evaluating how well they avoid hallucinations (producing information that is unfaithful or factually incorrect) has become increasingly important. While many models claim to be reliable, their factual grounding can vary significantly across tasks and settings.
 
1
+ <!--
2
+ keywords: hallucination detection documentation, LLM hallucination benchmark, RAG evaluation guide, Verify API, kluster.ai, retrieval-augmented generation evaluation, large language model accuracy
3
+ -->
4
+
5
  # About
6
 
7
  As large language models (LLMs) continue to improve, evaluating how well they avoid hallucinations (producing information that is unfaithful or factually incorrect) has become increasingly important. While many models claim to be reliable, their factual grounding can vary significantly across tasks and settings.
introduction.md ADDED
@@ -0,0 +1,27 @@
1
+ <!--
2
+ keywords: LLM hallucination detection, hallucination leaderboard, RAG hallucination benchmark, UltraChat hallucination rate, Verify API, kluster.ai, factual accuracy of language models, large language model evaluation
3
+ -->
4
+
5
+ The **LLM Hallucination Detection Leaderboard** is a public, continuously updated comparison of how well popular Large Language Models (LLMs) avoid *hallucinations*: responses that are factually incorrect, fabricated, or unsupported by evidence. By surfacing transparent metrics across tasks, we help practitioners choose models they can trust in production.
6
+
7
+ ### Why does hallucination detection matter?
8
+
9
+ * **User Trust & Safety**: Hallucinations undermine confidence and can damage reputation.
10
+ * **Retrieval-Augmented Generation (RAG) Quality**: In enterprise workflows, LLMs must remain faithful to supplied context. Measuring hallucinations highlights which models respect that constraint.
11
+ * **Regulatory & Compliance Pressure**: Upcoming AI regulations require demonstrable accuracy standards. Reliable hallucination metrics can help you meet these requirements.
12
+
13
+ ### How we measure hallucinations
14
+
15
+ We evaluate each model on two complementary benchmarks and compute a *hallucination rate* (lower = better):
16
+
17
+ 1. **HaluEval-QA (RAG setting)**: Given a question *and* a supporting document, the model must answer *only* using the provided context.
18
+ 2. **UltraChat Filtered (Non-RAG setting)**: Open-domain questions with **no** extra context test the model's internal knowledge.
19
+
20
+ Outputs are automatically verified by [Verify](https://platform.kluster.ai/verify) from [kluster.ai](https://kluster.ai/), which cross-checks claims against the source document or web results.
21
+
22
+ > **Note:** Full experiment details, including prompt templates, dataset description, and evaluation methodology, are provided at the end of this page for reference.
23
+
24
+ ---
25
+
26
+ Stay informed as we add new models and tasks, and follow us on [X](https://x.com/klusterai) or join Discord [here](https://discord.com/invite/klusterai) for the latest updates on trustworthy LLMs.
27
+
leaderboard/data/leaderboard.csv CHANGED
@@ -1,16 +1,16 @@
1
  Models,ha_rag_rate,ha_non_rag_rate
2
- klusterai/Meta-Llama-3.1-8B-Instruct-Turbo,8.1,12.5
3
- Qwen/Qwen2.5-VL-7B-Instruct,9.35,4.55
4
- mistralai/Mistral-Nemo-Instruct-2407,10.63,8.74
5
- meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8,3.34,0.69
6
- meta-llama/Llama-4-Scout-17B-16E-Instruct,4.23,2.48
7
- mistralai/Mistral-Small-24B-Instruct-2501,4.74,7.85
8
- mistralai/Magistral-Small-2506,8.62,28.07
9
- google/gemma-3-27b-it,3.71,0.48
10
- klusterai/Meta-Llama-3.3-70B-Instruct-Turbo,2.12,1.09
11
- deepseek-ai/DeepSeek-V3-0324,4.66,0.91
12
- Qwen/Qwen3-235B-A22B-FP8,5.04,0.88
13
- deepseek-ai/DeepSeek-R1-0528,2.26,0.78
14
- openai/gpt-4o,6.05,0.64
15
- anthropic/claude-sonnet-4,2.21,0.6
16
- google/gemini-2.5-pro,1.57,0.36
 
1
  Models,ha_rag_rate,ha_non_rag_rate
2
+ klusterai-Meta-Llama-3.1-8B-Instruct-Turbo,8.1,12.5
3
+ Qwen2.5-VL-7B-Instruct,9.35,4.55
4
+ Mistral-Nemo-Instruct-2407,10.63,8.74
5
+ Llama-4-Maverick-17B-128E-Instruct-FP8,3.34,0.69
6
+ Llama-4-Scout-17B-16E-Instruct,4.23,2.48
7
+ Mistral-Small-24B-Instruct-2501,4.74,7.85
8
+ Magistral-Small-2506,8.62,28.07
9
+ gemma-3-27b-it,3.71,0.48
10
+ klusterai-Meta-Llama-3.3-70B-Instruct-Turbo,2.12,1.09
11
+ DeepSeek-V3-0324,4.66,0.91
12
+ Qwen3-235B-A22B-FP8,5.04,0.88
13
+ DeepSeek-R1-0528,2.26,0.78
14
+ gpt-4o,6.05,0.64
15
+ claude-sonnet-4,2.21,0.6
16
+ gemini-2.5-pro,1.57,0.36
leaderboard/data/rag_methods_compare.csv ADDED
@@ -0,0 +1,13 @@
1
+ Models,rag1,rag2,rag3
2
+ klusterai-Meta-Llama-3.1-8B-Instruct-Turbo,13.09,8.1,11.92
3
+ Qwen2.5-VL-7B-Instruct,13.39,9.35,13.24
4
+ Mistral-Nemo-Instruct-2407,13.99,10.63,14.42
5
+ Llama-4-Maverick-17B-128E-Instruct-FP8,6.27,3.34,5.72
6
+ Llama-4-Scout-17B-16E-Instruct,7.17,4.23,6.98
7
+ Mistral-Small-24B-Instruct-2501,7.5,4.74,7.09
8
+ Magistral-Small-2506,12.09,8.62,11.87
9
+ gemma-3-27b-it,6.09,3.71,-
10
+ klusterai-Meta-Llama-3.3-70B-Instruct-Turbo,4.63,2.12,4.65
11
+ DeepSeek-V3-0324,7.71,4.66,7.09
12
+ Qwen3-235B-A22B-FP8,6.8,5.04,6.63
13
+ DeepSeek-R1-0528,3.52,2.26,3.58
rag_techniques.md ADDED
@@ -0,0 +1,86 @@
1
+ <!--
2
+ keywords: RAG techniques, Retrieval-Augmented Generation prompt engineering, hallucination detection, LLM hallucination rate, kluster.ai, Verify API, prompt design comparison, large language model evaluation
3
+ -->
4
+ # Comparison of Retrieval-Augmented Generation (RAG) Prompting Techniques and Hallucinations
5
+
6
+ LLMs can generate fluent answers but still hallucinate facts, especially in Retrieval-Augmented Generation (RAG) workflows. This leaderboard aims to understand how different prompt engineering strategies impact hallucination rates across models. In other words: Which prompt format is most reliable? Which models are more sensitive to prompt structure? The goal is to inform better design of RAG pipelines so you can reduce factual errors in downstream applications.
7
+
8
+ We present hallucination rates for various LLMs under three RAG request strategies. Each method delivers the same document context and question, but differs in how the information is structured during the request.
9
+
10
+ ## Overview
11
+
12
+ - **What we measure**: Hallucination rate (%) across three RAG request patterns.
13
+ - **RAG patterns compared**:
14
+ 1) **System Prompt**: context is placed in the system message; user sends only the question.
15
+ 2) **Single-Turn**: one user message that includes both the context *and* the question.
16
+ 3) **Two-Turn**: first user message provides the context, a second user message provides the question.
17
+ - **Why it matters**: Request structure can change reliability significantly. Knowing the safest default helps you ship trustworthy RAG systems faster.
18
+ - **Detect & reduce hallucinations**: The same [Verify](https://platform.kluster.ai/verify) API used for these evaluations can be plugged into your pipeline to flag and filter ungrounded answers in real time.
19
+ - **How to read the charts**: Lower bars = fewer hallucinations. Error bars show ±1 SD across models.
20
+ - **Experiment summary**: 10,000 HaluEval-QA examples, temperature 0, judged with [Verify](https://docs.kluster.ai/get-started/verify/overview/).
21
+
22
+ ### RAG Techniques Evaluated
23
+
24
+ **1. RAG with Context in System Prompt**
25
+ The document is embedded inside the system prompt, and the user sends only the question:
26
+ ```text
27
+ [System]: You are an assistant for question-answering tasks.
28
+ Given the QUESTION and DOCUMENT you must answer the QUESTION using the information in the DOCUMENT.
29
+ You must not offer new information beyond the context provided in the DOCUMENT. Do not add any external knowledge.
30
+ The ANSWER also must not contradict information provided in the DOCUMENT.
31
+ If the DOCUMENT does not contain the facts to answer the QUESTION or you do not know the answer, you truthfully say that you do not know.
32
+ You have access to information provided by the user as DOCUMENT to answer the QUESTION, and nothing else.
33
+ Use three sentences maximum and keep the answer concise.
34
+ DOCUMENT: <context>
35
+
36
+ [User]: <prompt>
37
+ ```
38
+
39
+ **2. RAG with Context and Question in Single-Turn**
40
+ Both the document and question are concatenated in a single user message:
41
+ ```text
42
+ [System]: You are an assistant for question-answering tasks.
43
+ Given the QUESTION and DOCUMENT you must answer the QUESTION using the information in the DOCUMENT.
44
+ You must not offer new information beyond the context provided in the DOCUMENT. Do not add any external knowledge.
45
+ The ANSWER also must not contradict information provided in the DOCUMENT.
46
+ If the DOCUMENT does not contain the facts to answer the QUESTION or you do not know the answer, you truthfully say that you do not know.
47
+ You have access to information provided by the user as DOCUMENT to answer the QUESTION, and nothing else.
48
+ Use three sentences maximum and keep the answer concise.
49
+
50
+ [User]:
51
+ DOCUMENT: <context>
52
+ QUESTION: <prompt>
53
+
54
+ ```
55
+
56
+ **3. RAG with Context and Question in Two-Turns**
57
+ The document and question are sent in separate user messages:
58
+ ```text
59
+ [System]: You are an assistant for question-answering tasks.
60
+ Given the QUESTION and DOCUMENT you must answer the QUESTION using the information in the DOCUMENT.
61
+ You must not offer new information beyond the context provided in the DOCUMENT. Do not add any external knowledge.
62
+ The ANSWER also must not contradict information provided in the DOCUMENT.
63
+ If the DOCUMENT does not contain the facts to answer the QUESTION or you do not know the answer, you truthfully say that you do not know.
64
+ You have access to information provided by the user as DOCUMENT to answer the QUESTION, and nothing else.
65
+ Use three sentences maximum and keep the answer concise.
66
+
67
+ [User]: DOCUMENT: <context>
68
+ [User]: QUESTION: <prompt>
69
+ ```
70
+ *Note: This method did **not** work on Gemma 3 27B with the default chat template due to its restriction on consecutive user messages without an intervening assistant response.*
71
+
72
+ ### Dataset
73
+ We evaluate all three prompting strategies on the **HaluEval QA** benchmark, a large-scale collection of RAG question-answer examples.
74
+ - **Source**: [HaluEval QA](https://huggingface.co/datasets/pminervini/HaluEval/viewer/qa?views%5B%5D=qa)
75
+ - **Size**: 10,000 question-document pairs
76
+ - **Content**: Each example contains a short passage (extracted primarily from Wikipedia-style articles) and an accompanying question that can be answered **only** from that passage.
77
+ - **Use case**: Designed to measure whether an LLM can remain faithful to supplied context without inventing new facts.
78
+
79
+ All prompts are generated with *temperature = 0* to remove randomness so that differences in hallucination rate stem solely from the prompt format.
80
+
81
+ ### Metric
82
+
83
+ The values in the table indicate the **hallucination rate (%)**: the share of answers deemed factually incorrect or ungrounded given the provided context.
84
+
85
+ Hallucination rates are automatically computed using **[Verify](https://platform.kluster.ai/verify)** by [kluster.ai](https://kluster.ai/), the [leading](https://www.kluster.ai/blog/introducing-verify-by-kluster-ai-the-missing-trust-layer-in-your-ai-stack) AI-powered hallucination detection API that cross-checks model claims against the source document.
86
+
rag_techniques_details.md ADDED
@@ -0,0 +1,64 @@
1
+ ### RAG Techniques Evaluated
2
+
3
+ **1. RAG with Context in System Prompt**
4
+ The document is embedded inside the system prompt, and the user sends only the question:
5
+ ```text
6
+ [System]: You are an assistant for question-answering tasks.
7
+ Given the QUESTION and DOCUMENT you must answer the QUESTION using the information in the DOCUMENT.
8
+ You must not offer new information beyond the context provided in the DOCUMENT. Do not add any external knowledge.
9
+ The ANSWER also must not contradict information provided in the DOCUMENT.
10
+ If the DOCUMENT does not contain the facts to answer the QUESTION or you do not know the answer, you truthfully say that you do not know.
11
+ You have access to information provided by the user as DOCUMENT to answer the QUESTION, and nothing else.
12
+ Use three sentences maximum and keep the answer concise.
13
+ DOCUMENT: <context>
14
+
15
+ [User]: <prompt>
16
+ ```
17
+
18
+ **2. RAG with Context and Question in Single-Turn**
19
+ Both the document and question are concatenated in a single user message:
20
+ ```text
21
+ [System]: You are an assistant for question-answering tasks.
22
+ Given the QUESTION and DOCUMENT you must answer the QUESTION using the information in the DOCUMENT.
23
+ You must not offer new information beyond the context provided in the DOCUMENT. Do not add any external knowledge.
24
+ The ANSWER also must not contradict information provided in the DOCUMENT.
25
+ If the DOCUMENT does not contain the facts to answer the QUESTION or you do not know the answer, you truthfully say that you do not know.
26
+ You have access to information provided by the user as DOCUMENT to answer the QUESTION, and nothing else.
27
+ Use three sentences maximum and keep the answer concise.
28
+
29
+ [User]:
30
+ DOCUMENT: <context>
31
+ QUESTION: <prompt>
32
+
33
+ ```
34
+
35
+ **3. RAG with Context and Question in Two-Turns**
36
+ The document and question are sent in separate user messages:
37
+ ```text
38
+ [System]: You are an assistant for question-answering tasks.
39
+ Given the QUESTION and DOCUMENT you must answer the QUESTION using the information in the DOCUMENT.
40
+ You must not offer new information beyond the context provided in the DOCUMENT. Do not add any external knowledge.
41
+ The ANSWER also must not contradict information provided in the DOCUMENT.
42
+ If the DOCUMENT does not contain the facts to answer the QUESTION or you do not know the answer, you truthfully say that you do not know.
43
+ You have access to information provided by the user as DOCUMENT to answer the QUESTION, and nothing else.
44
+ Use three sentences maximum and keep the answer concise.
45
+
46
+ [User]: DOCUMENT: <context>
47
+ [User]: QUESTION: <prompt>
48
+ ```
49
+ *Note: This method did **not** work on Gemma 3 27B with the default chat template due to its restriction on consecutive user messages without an intervening assistant response.*
50
+
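To make the three request shapes concrete, here is a minimal Python sketch of how they map onto OpenAI-style chat messages. It is an illustration rather than the evaluation harness: the helper name `build_messages`, the shortened `SYSTEM_INSTRUCTIONS` constant, and the commented-out client call (including the kluster.ai base URL and placeholder model id) are assumptions.

```python
# Illustrative only: how the three RAG request strategies translate into
# OpenAI-style chat messages. Not the code used to run the benchmark.

SYSTEM_INSTRUCTIONS = (
    "You are an assistant for question-answering tasks. "
    # Shortened here; the full instruction text is shown in the templates above.
    "Answer the QUESTION using only the information in the DOCUMENT. "
    "Use three sentences maximum and keep the answer concise."
)

def build_messages(strategy: str, context: str, question: str) -> list[dict]:
    """Return the chat messages for one of the three RAG strategies."""
    if strategy == "system_prompt":
        # 1) Context lives in the system message; the user sends only the question.
        return [
            {"role": "system", "content": f"{SYSTEM_INSTRUCTIONS}\nDOCUMENT: {context}"},
            {"role": "user", "content": question},
        ]
    if strategy == "single_turn":
        # 2) One user message carries both the document and the question.
        return [
            {"role": "system", "content": SYSTEM_INSTRUCTIONS},
            {"role": "user", "content": f"DOCUMENT: {context}\nQUESTION: {question}"},
        ]
    if strategy == "two_turns":
        # 3) Two consecutive user messages: document first, then the question.
        #    (Rejected by Gemma 3 27B's default chat template, as noted above.)
        return [
            {"role": "system", "content": SYSTEM_INSTRUCTIONS},
            {"role": "user", "content": f"DOCUMENT: {context}"},
            {"role": "user", "content": f"QUESTION: {question}"},
        ]
    raise ValueError(f"Unknown strategy: {strategy}")

# The request itself can then go through any OpenAI-compatible client, e.g.
# (assumed setup, not part of this repository):
# from openai import OpenAI
# client = OpenAI(base_url="https://api.kluster.ai/v1", api_key="...")
# client.chat.completions.create(
#     model="<model-id>", temperature=0,
#     messages=build_messages("single_turn", document, question),
# )
```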
51
+ ### Dataset
52
+ We evaluate all three prompting strategies on the **HaluEval QA** benchmark, a large-scale collection of RAG question-answer examples.
53
+ - **Source**: [HaluEval QA](https://huggingface.co/datasets/pminervini/HaluEval/viewer/qa?views%5B%5D=qa)
54
+ - **Size**: 10,000 question-document pairs
55
+ - **Content**: Each example contains a short passage (extracted primarily from Wikipedia-style articles) and an accompanying question that can be answered **only** from that passage.
56
+ - **Use case**: Designed to measure whether an LLM can remain faithful to supplied context without inventing new facts.
57
+
58
+ All prompts are generated with *temperature = 0* to remove randomness so that differences in hallucination rate stem solely from the prompt format.
59
+
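For a local look at the benchmark, the split can be pulled with the Hugging Face `datasets` library. A minimal sketch, assuming the `pminervini/HaluEval` repository and `qa` config from the link above; the split and column names are printed rather than assumed:

```python
# Minimal sketch: download the HaluEval QA config referenced above and inspect it.
from datasets import load_dataset

halueval = load_dataset("pminervini/HaluEval", "qa")   # DatasetDict keyed by split name
split_name = list(halueval.keys())[0]                  # take whichever split the config exposes
qa = halueval[split_name]

print(split_name, len(qa))   # expect roughly 10,000 examples
print(qa.column_names)       # passage/question fields used to build the prompts
```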
60
+ ### Metric
61
+
62
+ The values in the table indicate the **hallucination rate (%)**: the share of answers deemed factually incorrect or ungrounded given the provided context.
63
+
64
+ Hallucination rates are automatically computed using **[Verify](https://platform.kluster.ai/verify)** by [kluster.ai](https://kluster.ai/), the [leading](https://www.kluster.ai/blog/introducing-verify-by-kluster-ai-the-missing-trust-layer-in-your-ai-stack) AI-powered hallucination detection API that cross-checks model claims against the source document.
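In other words, each cell is simply the share of judged-hallucinated answers for a given model and prompting method, times 100. A toy pandas sketch of that aggregation; the column names here are illustrative assumptions, not Verify's response schema:

```python
# Toy aggregation: per-model, per-method hallucination rate (%) from boolean verdicts.
import pandas as pd

verdicts = pd.DataFrame(
    {
        "model": ["m1", "m1", "m1", "m1"],
        "method": ["single_turn", "single_turn", "single_turn", "single_turn"],
        "is_hallucination": [False, True, False, False],
    }
)

rates = (
    verdicts.groupby(["model", "method"])["is_hallucination"]
    .mean()                      # fraction of hallucinated answers
    .mul(100)
    .round(2)
    .rename("hallucination_rate_pct")
)
print(rates)  # 25.0 for the toy data above
```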
rag_techniques_intro.md ADDED
@@ -0,0 +1,23 @@
1
+ <!--
2
+ keywords: RAG techniques, Retrieval-Augmented Generation prompt engineering, hallucination detection, LLM hallucination rate, kluster.ai, Verify API, prompt design comparison, large language model evaluation
3
+ -->
4
+ # Comparison of Retrieval-Augmented Generation (RAG) Prompting Techniques and Hallucinations
5
+
6
+ LLMs can generate fluent answers but still hallucinate facts, especially in Retrieval-Augmented Generation (RAG) workflows. This leaderboard aims to understand how different prompt engineering strategies impact hallucination rates across models. In other words: Which prompt format is most reliable? Which models are more sensitive to prompt structure? The goal is to inform better design of RAG pipelines so you can reduce factual errors in downstream applications.
7
+
8
+ We present hallucination rates for various LLMs under three RAG request strategies. Each method delivers the same document context and question, but differs in how the information is structured during the request.
9
+
10
+ ## Overview
11
+
12
+ - **What we measure**: Hallucination rate (%) across three RAG request patterns.
13
+ - **RAG patterns compared**:
14
+ 1) **System Prompt**: context is placed in the system message; user sends only the question.
15
+ 2) **Single-Turn**: one user message that includes both the context *and* the question.
16
+ 3) **Two-Turn**: first user message provides the context, a second user message provides the question.
17
+ - **Why it matters**: Request structure can change reliability significantly. Knowing the safest default helps you ship trustworthy RAG systems faster.
18
+ - **Detect & reduce hallucinations**: The same [Verify](https://platform.kluster.ai/verify) API used for these evaluations can be plugged into your pipeline to flag and filter ungrounded answers in real time.
19
+ - **How to read the charts**: Lower bars = fewer hallucinations. Error bars show Β±1 SD across models.
20
+ - **Experiment summary**: 10,000 HaluEval-QA examples, temperature 0, judged with [Verify](https://docs.kluster.ai/get-started/verify/overview/).
21
+
22
+ > **Note:** Full experiment details, including prompt templates, dataset description, and evaluation methodology, are provided at the end of this page for reference.
23
+ <br>
src/about.py CHANGED
@@ -1,5 +1,6 @@
1
  from dataclasses import dataclass
2
  from enum import Enum
 
3
 
4
  @dataclass
5
  class Task:
@@ -23,32 +24,7 @@ NUM_FEWSHOT = 0 # Change with your few shot
23
  TITLE = """<h1 align="center" id="space-title">LLM Hallucination Detection Leaderboard</h1>"""
24
 
25
  # What does your leaderboard evaluate?
26
- INTRODUCTION_TEXT = """
27
- <!--
28
- keywords: LLM hallucination detection, hallucination leaderboard, RAG hallucination benchmark, UltraChat hallucination rate, Verify API, kluster.ai, factual accuracy of language models, large language model evaluation
29
- -->
30
-
31
- The **LLM Hallucination Detection Leaderboard** is a public, continuously updated comparison of how well popular Large Language Models (LLMs) avoid *hallucinations*, responses that are factually incorrect, fabricated, or unsupported by evidence. By surfacing transparent metrics across tasks, we help practitioners choose models that they can trust in production.
32
-
33
- ### Why does hallucination detection matter?
34
-
35
- * **User Trust & Safety** – Hallucinations undermine confidence and can damage reputation.
36
- * **Retrieval-Augmented Generation (RAG) Quality** – In enterprise workflows, LLMs must remain faithful to supplied context. Measuring hallucinations highlights which models respect that constraint.
37
- * **Regulatory & Compliance Pressure** – Upcoming AI regulations require demonstrable accuracy standards. Reliable hallucination metrics can help you meet these requirements.
38
-
39
- ### How we measure hallucinations
40
-
41
- We evaluate each model on two complementary benchmarks and compute a *hallucination rate* (lower = better):
42
-
43
- 1. **HaluEval-QA (RAG setting)** – Given a question *and* a supporting document, the model must answer *only* using the provided context.
44
- 2. **UltraChat Filtered (Non-RAG setting)** – Open-domain questions with **no** extra context test the model's internal knowledge.
45
-
46
- Outputs are automatically verified by [Verify](https://platform.kluster.ai/verify) from [kluster.ai](https://kluster.ai/), which cross-checks claims against the source document or web results.
47
-
48
- ---
49
-
50
- Stay informed as we add new models and tasks, and follow us on [X](https://x.com/klusterai) or join Discord [here](https://discord.com/invite/klusterai) for the latest updates on trustworthy LLMs.
51
- """
52
 
53
  LLM_BENCHMARKS_TEXT = f"""
54
  ## How it works
 
1
  from dataclasses import dataclass
2
  from enum import Enum
3
+ from pathlib import Path
4
 
5
  @dataclass
6
  class Task:
 
24
  TITLE = """<h1 align="center" id="space-title">LLM Hallucination Detection Leaderboard</h1>"""
25
 
26
  # What does your leaderboard evaluate?
27
+ INTRODUCTION_TEXT = (Path(__file__).resolve().parent.parent / "introduction.md").read_text()
28
 
29
  LLM_BENCHMARKS_TEXT = f"""
30
  ## How it works
src/populate.py CHANGED
@@ -4,9 +4,30 @@ import os
4
  import pandas as pd
5
 
6
  from src.display.formatting import has_no_nan_values, make_clickable_model
7
- from src.display.utils import AutoEvalColumn, EvalQueueColumn
8
- from src.leaderboard.read_evals import get_raw_eval_results
9
 
10
 
11
  def get_leaderboard_df(results_path):
12
  df = pd.read_csv(results_path)
 
4
  import pandas as pd
5
 
6
  from src.display.formatting import has_no_nan_values, make_clickable_model
7
+ from src.display.utils import EvalQueueColumn
8
+ # from src.leaderboard.read_evals import get_raw_eval_results
9
 
10
+ def get_rag_leaderboard_df(csv_path):
11
+ df = pd.read_csv(csv_path)
12
+
13
+ for col in ["rag1", "rag2", "rag3"]:
14
+ df[col] = pd.to_numeric(df[col], errors="coerce").round(2)
15
+
16
+ pretty = {
17
+ "Models": "Models",
18
+ "rag1": "Context in System Prompt (%)",
19
+ "rag2": "Context and Question Single-Turn (%)",
20
+ "rag3": "Context and Question Two-Turns (%)",
21
+ }
22
+ df = df.rename(columns=pretty)
23
+
24
+ # sort so the lowest Single-Turn hallucination rate appears first
25
+ df = (
26
+ df.sort_values("Context and Question Single-Turn (%)", ascending=True)
27
+ .reset_index(drop=True)
28
+ )
29
+
30
+ return df
31
 
32
  def get_leaderboard_df(results_path):
33
  df = pd.read_csv(results_path)
submit.md CHANGED
@@ -1,3 +1,7 @@
1
  # LLM Hallucination Detection Leaderboard Submission Guidelines
2
 
3
  Thank you for your interest in contributing to the **LLM Hallucination Detection Leaderboard**! We welcome submissions from researchers and practitioners who have built or finetuned language models that can be evaluated on our hallucination benchmarks.
@@ -14,15 +18,15 @@ Please email **ryan@kluster.ai** with the subject line:
14
 
15
  Attach **one ZIP file** that contains **all of the following**:
16
 
17
- 1. **`model_card.md`** – A short Markdown file describing your model:
18
  • Name and version
19
  • Architecture / base model
20
  • Training or finetuning procedure
21
  • License
22
  • Intended use & known limitations
23
  • Contact information
24
- 2. **`results.csv`** – A CSV file with **one row per prompt** and **one column per field** (see schema below).
25
- 3. (Optional) **`extra_notes.md`** – Anything else you would like us to know (e.g., additional analysis).
26
 
27
  ---
28
 
 
1
+ <!--
2
+ keywords: LLM hallucination leaderboard submission, Verify leaderboard guidelines, kluster.ai, hallucination benchmark contributions, large language model evaluation submission
3
+ -->
4
+
5
  # LLM Hallucination Detection Leaderboard Submission Guidelines
6
 
7
  Thank you for your interest in contributing to the **LLM Hallucination Detection Leaderboard**! We welcome submissions from researchers and practitioners who have built or finetuned language models that can be evaluated on our hallucination benchmarks.
 
18
 
19
  Attach **one ZIP file** that contains **all of the following**:
20
 
21
+ 1. **`model_card.md`**: A short Markdown file describing your model:
22
  • Name and version
23
  • Architecture / base model
24
  • Training or finetuning procedure
25
  • License
26
  • Intended use & known limitations
27
  • Contact information
28
+ 2. **`results.csv`**: A CSV file with **one row per prompt** and **one column per field** (see schema below).
29
+ 3. (Optional) **`extra_notes.md`**: Anything else you would like us to know (e.g., additional analysis).
30
 
31
  ---
32
 
verify.md ADDED
@@ -0,0 +1,13 @@
1
+ <!--
2
+ keywords: Verify by kluster.ai, LLM verification API, hallucination detection tool, factual accuracy checker, AI output validation, large language model trust layer
3
+ -->
4
+
5
+ ## Get started with Verify by kluster.ai
6
+
7
+ Verify is an intelligent agent that validates LLM outputs in real-time.
8
+
9
+ - **Blog post:** [Introducing Verify by kluster.ai](https://www.kluster.ai/blog/introducing-verify-by-kluster-ai-the-missing-trust-layer-in-your-ai-stack)
10
+ - **Documentation:** [Verify overview & API reference](https://docs.kluster.ai/get-started/verify/overview/)
11
+ - **Try it out in your browser:** [kluster.ai platform](https://platform.kluster.ai/verify)
12
+
13
+ ### Quick API examples