Spaces:
Ruurd
/
Running on Zero

Ruurd committed on
Commit
fa10798
·
1 Parent(s): 77f8336

Update interface

Browse files
Files changed (1) hide show
  1. app.py +16 -10
app.py CHANGED
@@ -78,16 +78,17 @@ def highlight_tokens(token_ids, answer_start, changed_indices, color):
78
  highlighted.append(tok_str)
79
  return "".join(highlighted)
80
 
81
- def diffusion_chat(question, noising, max_it, pause_length):
82
 
83
  sharpness = 3.0
84
  noise_start = 0.5
85
  top_p = 1.0
86
  top_k = 10
87
  clustering = False
 
88
 
89
  if question.strip() == "":
90
- question = "What do you know about the city of Amsterdam?"
91
 
92
  prompt = format_chat_prompt(question)
93
  input_ids = tokenizer.encode(prompt, add_special_tokens=False)
@@ -200,15 +201,20 @@ assistant_marker_ids = tokenizer.encode("<|start_header_id|>assistant<|end_heade
200
  demo = gr.Interface(
201
  fn=diffusion_chat,
202
  inputs=[
203
- gr.Textbox(label="User Question", lines=2, placeholder="What do you know about the city of Amsterdam?"),
204
- gr.Checkbox(label="Enable noising", value=True, info="If disabled, the model will not apply any intermediate noise."),
205
- gr.Slider(1, 512, value=64, step=1, label="Increase the maximum number of iterations to run."),
206
- gr.Slider(0, 5, value=0, step=0.01, label="Increase the pause between iterations to visualize the process.")
 
 
 
 
207
  ],
208
- outputs=[gr.HTML(label="Diffusion Output")],
209
- title="Diffusion Language Model Chat",
210
- theme="default",
211
- description="This interface runs a diffusion-based language model to generate answers progressively."
 
212
  )
213
 
214
  demo.launch(share=True, allowed_paths=["."], ssr_mode=False)
 
78
  highlighted.append(tok_str)
79
  return "".join(highlighted)
80
 
81
+ def diffusion_chat(question, noising, enable_pause, max_it):
82
 
83
  sharpness = 3.0
84
  noise_start = 0.5
85
  top_p = 1.0
86
  top_k = 10
87
  clustering = False
88
+ pause_length = 1.0 if enable_pause else 0.0
89
 
90
  if question.strip() == "":
91
+ question = "What do you know about Amsterdam?"
92
 
93
  prompt = format_chat_prompt(question)
94
  input_ids = tokenizer.encode(prompt, add_special_tokens=False)
 
201
  demo = gr.Interface(
202
  fn=diffusion_chat,
203
  inputs=[
204
+ gr.Textbox(
205
+ label="User Question",
206
+ lines=2,
207
+ placeholder="What do you know about Amsterdam?",
208
+ ),
209
+ gr.Checkbox(label="Enable intermediate noising", value=True),
210
+ gr.Checkbox(label="Pause between iterations", value=False),
211
+ gr.Slider(1, 512, value=64, step=1, label="Increase the maximum number of iterations."),
212
  ],
213
+ outputs=gr.HTML(label="Diffusion Output"),
214
+ title="LAD Chat",
215
+ description="This interface runs a diffusion-based language model to generate answers progressively.",
216
+ allow_flagging="never",
217
+ live=False # ensures the Stop button appears properly
218
  )
219
 
220
  demo.launch(share=True, allowed_paths=["."], ssr_mode=False)