Update interface
Browse files
app.py
CHANGED
@@ -78,16 +78,17 @@ def highlight_tokens(token_ids, answer_start, changed_indices, color):
|
|
78 |
highlighted.append(tok_str)
|
79 |
return "".join(highlighted)
|
80 |
|
81 |
-
def diffusion_chat(question, noising,
|
82 |
|
83 |
sharpness = 3.0
|
84 |
noise_start = 0.5
|
85 |
top_p = 1.0
|
86 |
top_k = 10
|
87 |
clustering = False
|
|
|
88 |
|
89 |
if question.strip() == "":
|
90 |
-
question = "What do you know about
|
91 |
|
92 |
prompt = format_chat_prompt(question)
|
93 |
input_ids = tokenizer.encode(prompt, add_special_tokens=False)
|
@@ -200,15 +201,20 @@ assistant_marker_ids = tokenizer.encode("<|start_header_id|>assistant<|end_heade
|
|
200 |
demo = gr.Interface(
|
201 |
fn=diffusion_chat,
|
202 |
inputs=[
|
203 |
-
gr.Textbox(
|
204 |
-
|
205 |
-
|
206 |
-
|
|
|
|
|
|
|
|
|
207 |
],
|
208 |
-
outputs=
|
209 |
-
title="
|
210 |
-
|
211 |
-
|
|
|
212 |
)
|
213 |
|
214 |
demo.launch(share=True, allowed_paths=["."], ssr_mode=False)
|
|
|
78 |
highlighted.append(tok_str)
|
79 |
return "".join(highlighted)
|
80 |
|
81 |
+
def diffusion_chat(question, noising, enable_pause, max_it):
|
82 |
|
83 |
sharpness = 3.0
|
84 |
noise_start = 0.5
|
85 |
top_p = 1.0
|
86 |
top_k = 10
|
87 |
clustering = False
|
88 |
+
pause_length = 1.0 if enable_pause else 0.0
|
89 |
|
90 |
if question.strip() == "":
|
91 |
+
question = "What do you know about Amsterdam?"
|
92 |
|
93 |
prompt = format_chat_prompt(question)
|
94 |
input_ids = tokenizer.encode(prompt, add_special_tokens=False)
|
|
|
# Gradio UI for the diffusion chat demo.
#
# Wires diffusion_chat (defined above) to a simple form: a free-text
# question plus three generation controls. The defaults mirror the
# fallbacks inside diffusion_chat (noising on, pausing off, 64 iterations).
_question_input = gr.Textbox(
    label="User Question",
    lines=2,
    placeholder="What do you know about Amsterdam?",
)
_noising_input = gr.Checkbox(label="Enable intermediate noising", value=True)
_pause_input = gr.Checkbox(label="Pause between iterations", value=False)
_iterations_input = gr.Slider(
    1,
    512,
    value=64,
    step=1,
    label="Increase the maximum number of iterations.",
)

demo = gr.Interface(
    fn=diffusion_chat,
    inputs=[_question_input, _noising_input, _pause_input, _iterations_input],
    outputs=gr.HTML(label="Diffusion Output"),
    title="LAD Chat",
    description="This interface runs a diffusion-based language model to generate answers progressively.",
    allow_flagging="never",
    live=False,  # ensures the Stop button appears properly
)
|
219 |
|
220 |
# Start the app: create a public share link, allow Gradio to serve files
# from the current directory, and disable server-side rendering.
demo.launch(share=True, allowed_paths=["."], ssr_mode=False)
|