Spaces:
Sleeping
Sleeping
Alexandra Zapko-Willmes
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,60 +1,22 @@
|
|
1 |
import gradio as gr
|
2 |
-
|
3 |
-
import io
|
4 |
-
from transformers import pipeline
|
5 |
|
6 |
-
# Available zero-shot classification models
|
7 |
models = {
|
8 |
-
"
|
9 |
-
"
|
10 |
-
"MULTI: xlm-roberta-xnli": "joeddav/xlm-roberta-large-xnli"
|
11 |
}
|
12 |
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
result = classifier(question, labels, multi_label=False)
|
28 |
-
row = {"Item #": i, "Item": question}
|
29 |
-
output_lines.append(f"{i}. {question}")
|
30 |
-
for label, score in zip(result["labels"], result["scores"]):
|
31 |
-
row[label] = round(score, 3)
|
32 |
-
output_lines.append(f"→ {label}: {round(score, 3)}")
|
33 |
-
output_lines.append("")
|
34 |
-
response_table.append(row)
|
35 |
-
|
36 |
-
return "\n".join(output_lines), None
|
37 |
-
|
38 |
-
def download_csv():
|
39 |
-
df = pd.DataFrame(response_table)
|
40 |
-
buffer = io.StringIO()
|
41 |
-
df.to_csv(buffer, index=False)
|
42 |
-
return buffer.getvalue()
|
43 |
-
|
44 |
-
# Gradio interface
|
45 |
-
with gr.Blocks() as demo:
|
46 |
-
gr.Markdown("## 🧠 Zero-Shot Classification with Model Selection")
|
47 |
-
gr.Markdown("Students can enter multiple questionnaire items and define their own response labels. The selected model will classify each item and provide probabilities.")
|
48 |
-
|
49 |
-
model_dropdown = gr.Dropdown(choices=list(models.keys()), label="Choose a model")
|
50 |
-
labels_input = gr.Textbox(label="Response Options (comma-separated)", placeholder="e.g., Strongly disagree, Disagree, Neutral, Agree, Strongly agree")
|
51 |
-
questions_input = gr.Textbox(label="Questionnaire Items (one per line)", lines=10)
|
52 |
-
output_box = gr.Textbox(label="Model Output", lines=20)
|
53 |
-
submit_btn = gr.Button("Classify")
|
54 |
-
download_btn = gr.Button("📥 Download CSV")
|
55 |
-
file_output = gr.File(label="Download CSV", visible=False)
|
56 |
-
|
57 |
-
submit_btn.click(fn=classify_items, inputs=[questions_input, labels_input, model_dropdown], outputs=[output_box, file_output])
|
58 |
-
download_btn.click(fn=download_csv, inputs=[], outputs=file_output)
|
59 |
-
|
60 |
-
demo.launch()
|
|
|
1 |
import gradio as gr
|
2 |
+
from huggingface_hub import InferenceClient
|
|
|
|
|
3 |
|
|
|
4 |
# Hosted inference models selectable in the UI.
# Keys are human-readable display names shown in the dropdown;
# values are the Hugging Face Hub repo IDs passed to InferenceClient.
models = {
    "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
    "Falcon-7B": "tiiuae/falcon-7b-instruct"
}
|
8 |
|
9 |
+
def ask_model(question, model_choice):
    """Send one questionnaire item to the chosen hosted model and return its answer.

    Args:
        question: The questionnaire item text entered by the user.
        model_choice: Display name key into ``models`` (e.g. ``"Mistral-7B"``).

    Returns:
        The model's generated text, or a short prompt-the-user message when
        the input is empty or whitespace-only (avoids a pointless remote call).
    """
    # Normalize input; Gradio may hand us None or a blank string.
    cleaned = question.strip() if question else ""
    if not cleaned:
        return "Please enter a questionnaire item."

    # One client per call keeps the function stateless; the client itself
    # performs the remote HTTP request to the Hub inference endpoint.
    client = InferenceClient(models[model_choice])
    prompt = f"Answer this questionnaire item: {cleaned} (Strongly disagree - Strongly agree)"
    return client.text_generation(prompt=prompt)
|
13 |
+
|
14 |
+
# Build the Gradio UI: a single questionnaire item plus a model selector,
# wired to ask_model, with the model's free-text answer as output.
demo = gr.Interface(
    fn=ask_model,
    inputs=[
        gr.Textbox(label="Questionnaire Item"),
        gr.Dropdown(list(models.keys()), label="Choose Model"),
    ],
    outputs="text",
    title="LLM-Powered Questionnaire",
)

if __name__ == "__main__":
    # Guard the launch so importing this module (e.g. in tests or tooling)
    # does not start the web server as a side effect.
    demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|