Alexandra Zapko-Willmes committed on
Commit
842372c
·
verified ·
1 Parent(s): cc03b28

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -55
app.py CHANGED
@@ -1,60 +1,22 @@
1
  import gradio as gr
2
- import pandas as pd
3
- import io
4
- from transformers import pipeline
5
 
6
- # Available zero-shot classification models
7
  models = {
8
- "EN: deberta-v3-large-zeroshot": "MoritzLaurer/deberta-v3-large-zeroshot-v2.0",
9
- "MULTI: mDeBERTa-v3-xnli": "MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7",
10
- "MULTI: xlm-roberta-xnli": "joeddav/xlm-roberta-large-xnli"
11
  }
12
 
13
- response_table = []
14
-
15
- def classify_items(questions_text, labels_text, model_choice):
16
- labels = [l.strip() for l in labels_text.split(",") if l.strip()]
17
- questions = [q.strip() for q in questions_text.strip().split("\n") if q.strip()]
18
- if not labels or not questions:
19
- return "Please enter both questionnaire items and response labels.", None
20
-
21
- classifier = pipeline("zero-shot-classification", model=models[model_choice])
22
- global response_table
23
- response_table = []
24
- output_lines = []
25
-
26
- for i, question in enumerate(questions, 1):
27
- result = classifier(question, labels, multi_label=False)
28
- row = {"Item #": i, "Item": question}
29
- output_lines.append(f"{i}. {question}")
30
- for label, score in zip(result["labels"], result["scores"]):
31
- row[label] = round(score, 3)
32
- output_lines.append(f"→ {label}: {round(score, 3)}")
33
- output_lines.append("")
34
- response_table.append(row)
35
-
36
- return "\n".join(output_lines), None
37
-
38
- def download_csv():
39
- df = pd.DataFrame(response_table)
40
- buffer = io.StringIO()
41
- df.to_csv(buffer, index=False)
42
- return buffer.getvalue()
43
-
44
- # Gradio interface
45
- with gr.Blocks() as demo:
46
- gr.Markdown("## 🧠 Zero-Shot Classification with Model Selection")
47
- gr.Markdown("Students can enter multiple questionnaire items and define their own response labels. The selected model will classify each item and provide probabilities.")
48
-
49
- model_dropdown = gr.Dropdown(choices=list(models.keys()), label="Choose a model")
50
- labels_input = gr.Textbox(label="Response Options (comma-separated)", placeholder="e.g., Strongly disagree, Disagree, Neutral, Agree, Strongly agree")
51
- questions_input = gr.Textbox(label="Questionnaire Items (one per line)", lines=10)
52
- output_box = gr.Textbox(label="Model Output", lines=20)
53
- submit_btn = gr.Button("Classify")
54
- download_btn = gr.Button("📥 Download CSV")
55
- file_output = gr.File(label="Download CSV", visible=False)
56
-
57
- submit_btn.click(fn=classify_items, inputs=[questions_input, labels_input, model_dropdown], outputs=[output_box, file_output])
58
- download_btn.click(fn=download_csv, inputs=[], outputs=file_output)
59
-
60
- demo.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
 
 
3
 
 
4
# Hosted instruction-tuned models selectable in the UI.
# Keys are the short labels shown in the dropdown; values are the
# Hugging Face Hub repo ids passed to InferenceClient.
models = {
    "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
    "Falcon-7B": "tiiuae/falcon-7b-instruct",
}
8
 
9
def ask_model(question, model_choice, max_new_tokens=256):
    """Send a questionnaire item to the selected hosted model and return its answer.

    Parameters
    ----------
    question : str
        The questionnaire item to present to the model.
    model_choice : str
        Key into the module-level ``models`` dict (e.g. ``"Mistral-7B"``).
    max_new_tokens : int, optional
        Generation budget forwarded to the Inference API. The service
        default is small, so without this the answer can come back
        truncated; 256 keeps prior callers working while allowing a
        full response.

    Returns
    -------
    str
        The text generated by the model.

    Raises
    ------
    KeyError
        If ``model_choice`` is not one of the keys in ``models``.
    """
    # A fresh client per call is acceptable here: the remote HTTP request
    # dominates the cost and no connection state needs to be reused.
    client = InferenceClient(models[model_choice])
    prompt = f"Answer this questionnaire item: {question} (Strongly disagree - Strongly agree)"
    return client.text_generation(prompt=prompt, max_new_tokens=max_new_tokens)
13
+
14
# Build and serve the single-question UI: a text box for the questionnaire
# item, a dropdown to pick the backing model, and plain-text output
# produced by ask_model.
item_box = gr.Textbox(label="Questionnaire Item")
model_dropdown = gr.Dropdown(list(models.keys()), label="Choose Model")

demo = gr.Interface(
    fn=ask_model,
    inputs=[item_box, model_dropdown],
    outputs="text",
    title="LLM-Powered Questionnaire",
)
demo.launch()