"""
File managing the scenarios and objectivity assessment functionality for the application.
"""
import os
import json
import random
import gradio as gr
import torch
import torch.nn.functional as F
from models import device, bbq_model, bbq_tokenizer
# Candidate discussion topics offered to the user for offline scenarios.
# get_scenario() matches a selected topic (case-insensitively, whitespace-
# stripped) against each loaded scenario's "topic" field, so entries here
# are expected to mirror the topics present in scenarios.json.
TOPICS = [
"AI in Healthcare", "Climate Change", "Universal Basic Income", "Social Media's Role in Elections",
"Government Surveillance and Privacy", "Genetic Engineering", "Gender Pay Gap",
"Police Use of Facial Recognition", "Space Exploration and Government Funding",
"Affirmative Action in Universities", "Renewable Energy Advances", "Mental Health Awareness",
"Online Privacy and Data Security", "Impact of Automation on Employment",
"Electric Vehicles Adoption", "Work From Home Culture", "Food Security and GMOs",
"Cryptocurrency Volatility", "Artificial Intelligence in Education", "Cultural Diversity in Media",
"Urbanization and Infrastructure", "Healthcare Reform", "Taxation Policies",
"Global Trade and Tariffs", "Environmental Conservation", "Social Justice Movements",
"Digital Transformation in Business", "Public Transportation Funding", "Immigration Reform",
"Aging Population Challenges", "Mental Health in the Workplace", "Internet Censorship",
"Political Polarization", "Cybersecurity in the Digital Age", "Privacy vs. Security",
"Sustainable Agriculture", "Future of Work", "Tech Monopolies",
"Education Reform", "Climate Policy and Economics", "Renewable Energy Storage",
"Water Scarcity", "Urban Green Spaces", "Automation in Manufacturing",
"Renewable Energy Subsidies", "Universal Healthcare", "Workplace Automation",
"Cultural Heritage Preservation", "Biotechnology in Agriculture", "Media Bias",
"Renewable Energy Policy", "Artificial Intelligence Ethics", "Space Colonization",
"Social Media Regulation", "Virtual Reality in Education", "Blockchain in Supply Chain",
"Data-Driven Policymaking", "Gig Economy", "Climate Adaptation Strategies",
"Economic Inequality", "Sustainable Urban Development", "Media Regulation"
]
print(f"Offline topics loaded. Total: {len(TOPICS)}")
# Module-level scenario cache read by get_scenario(). Declared empty here so
# the name exists for the function definitions below; it is populated by the
# load_scenarios() call at the bottom of this module.
scenarios = []
def load_scenarios(file_path: str = "scenarios.json"):
    """
    Read the scenario list from *file_path*, returning [] when the file is absent.

    Args:
        file_path (str) : Path to the scenarios file.
    Returns:
        list : List of scenarios (empty when no file exists).
    Raises:
        RuntimeError : When the file contains invalid JSON or any other
            unexpected failure occurs while reading it.
    """
    # Guard clause: a missing file is an expected, non-fatal condition.
    if not os.path.exists(file_path):
        print(f"No scenarios file found at {file_path}.")
        return []
    try:
        with open(file_path, "r", encoding="utf-8") as handle:
            loaded = json.load(handle)
        if not isinstance(loaded, list):
            raise ValueError(f"Invalid scenarios format: expected a list but got {type(loaded).__name__}")
    except FileNotFoundError:
        # The file vanished between the existence check and the open call.
        print(f"No scenarios file found at {file_path}")
        return []
    except json.JSONDecodeError as exc:
        message = f"Error decoding scenarios JSON in {file_path}: {str(exc)}"
        print(message)
        raise RuntimeError(message) from exc
    except Exception as exc:
        # Covers the non-list ValueError above and any other read failure.
        message = f"Error loading scenarios from {file_path}: {str(exc)}"
        print(message)
        raise RuntimeError(message) from exc
    print(f"Scenarios loaded from {file_path}. Total: {len(loaded)}")
    return loaded
def get_scenario(topic: str):
    """
    Pick a random scenario whose topic matches *topic* (case-insensitive).

    Args:
        topic (str) : Selected topic to find a scenario for.
    Returns:
        dict : Scenario that matches the selected topic.
        None : If no matching scenario is found.
    Raises:
        ValueError : If *topic* is not a non-empty string.
        RuntimeError : On any other unexpected failure.
    """
    try:
        if not topic or not isinstance(topic, str):
            raise ValueError(f"Invalid topic: expected a non-empty string but got {type(topic).__name__}")
        if not scenarios:
            print(f"No scenarios available to match with topic '{topic}'")
            return None
        # Normalize once; later log messages intentionally show this form.
        topic = topic.lower().strip()
        candidates = [entry for entry in scenarios if entry.get("topic", "").lower().strip() == topic]
        if not candidates:
            print(f"No scenario found for topic '{topic}'")
            return None
        chosen = random.choice(candidates)
        print(f"[CHECKPOINT] Scenario found for topic '{topic}': {chosen}")
        return chosen
    except ValueError as exc:
        print(f"Invalid topic parameter: {str(exc)}")
        raise
    except Exception as exc:
        message = f"Error getting scenario for topic '{topic}': {str(exc)}"
        print(message)
        raise RuntimeError(message) from exc
def on_load_scenario(topic: str):
    """
    Build the UI payload for an offline scenario matching *topic*.

    Args:
        topic (str) : Selected topic to find a scenario for.
    Returns:
        tuple: Context, question, the three answers, and a gr.update for
            the answer radio group. On failure every text slot carries the
            same placeholder/error string and the radio is cleared.
    """
    try:
        scenario = get_scenario(topic)
        # Truthiness check on purpose: None and an empty dict both count
        # as "nothing usable came back".
        if not scenario:
            placeholder = "[No offline scenario found]"
            return (placeholder, placeholder, placeholder, placeholder,
                    placeholder, gr.update(choices=[], value=None))
        choice_texts = [
            scenario.get("answer0", ""),
            scenario.get("answer1", ""),
            scenario.get("answer2", ""),
        ]
        return (
            scenario.get("context", "[No context]"),
            scenario.get("question", "[No question]"),
            scenario.get("answer0", "[No answer0]"),
            scenario.get("answer1", "[No answer1]"),
            scenario.get("answer2", "[No answer2]"),
            gr.update(choices=choice_texts, value=None),
        )
    except ValueError as exc:
        message = f"Invalid topic parameter: {str(exc)}"
        print(message)
        return (message, message, message, message, message, gr.update(choices=[], value=None))
    except RuntimeError as exc:
        message = f"Error loading scenario for topic '{topic}': {str(exc)}"
        print(message)
        return (message, message, message, message, message, gr.update(choices=[], value=None))
    except Exception as exc:
        message = f"Error on load scenario for topic '{topic}' : {str(exc)}"
        print(message)
        return (message, message, message, message, message, gr.update(choices=[], value=None))
def classify_multiple_choice(context: str, question: str, ans0: str, ans1: str, ans2: str):
    """
    Classifies MCQ answers using the BBQ model to determine the most objective answer.

    Args:
        context (str) : Context for the question.
        question (str) : Question to answer.
        ans0 (str) : Answer choice 0.
        ans1 (str) : Answer choice 1.
        ans2 (str) : Answer choice 2.
    Returns:
        tuple (predicted_answer, prob_dict) : Predicted answer and per-answer
            probabilities. On failure, an error string and an empty dict.
    """
    try:
        # Validate every text argument before touching the tokenizer/model.
        named_params = {
            "context": context,
            "question": question,
            "ans0": ans0,
            "ans1": ans1,
            "ans2": ans2,
        }
        for param_name, param_value in named_params.items():
            if not isinstance(param_value, str):
                raise ValueError(f"Parameter '{param_name}' must be a string, got {type(param_value).__name__}")
            if not param_value.strip():
                raise ValueError(f"Parameter '{param_name}' cannot be empty.")
        print("[Checkpoint] Starting multiple-choice classification...")
        answers = [ans0, ans1, ans2]
        encoded = bbq_tokenizer(
            [f"{question} {ans}" for ans in answers],
            [context, context, context],
            return_tensors="pt",
            truncation=True,
            padding="max_length",
            max_length=128
        ).to(device)
        # NOTE(review): unsqueeze adds a leading batch axis — presumably the
        # multiple-choice head wants (batch, num_choices, seq_len); confirm
        # against the bbq_model architecture.
        batched = {key: tensor.unsqueeze(0) for key, tensor in encoded.items()}
        print("[Checkpoint] Tokenization complete. Running model...")
        bbq_model.eval()
        with torch.no_grad():
            logits = bbq_model(**batched).logits[0]
        probs = F.softmax(logits, dim=-1)
        best = torch.argmax(logits).item()
        prob_dict = {answer: float(p.item()) for answer, p in zip(answers, probs)}
        predicted_answer = answers[best]
        print(f"[Checkpoint] Multiple Choice complete. Predicted answer: {predicted_answer}")
        return predicted_answer, prob_dict
    except ValueError as exc:
        return f"Configuration error: {str(exc)}", {}
    except RuntimeError as exc:
        return f"Model error: {str(exc)}", {}
    except Exception as exc:
        return f"Error analyzing text: {str(exc)}", {}
def assess_objectivity(context: str, question: str, ans0: str, ans1: str, ans2: str, user_choice: str):
    """
    Evaluates the objectivity of a user's choice by comparing it with the prediction of the BBQ model.

    Args:
        context (str) : Context for the question.
        question (str) : Question to answer.
        ans0 (str) : Answer choice 0.
        ans1 (str) : Answer choice 1.
        ans2 (str) : Answer choice 2.
        user_choice (str) : User's selected answer.
    Returns:
        assessment (str) : Assessment message (or an error message on failure).
        prob_dict (dict) : Probabilities for each answer choice ({} on failure).
    """
    try:
        if not user_choice:
            return "Please select one of the generated answers.", {}
        print("[Checkpoint] Starting objectivity assessment...")
        predicted_answer, prob_dict = classify_multiple_choice(context, question, ans0, ans1, ans2)
        # classify_multiple_choice reports failures by RETURNING an error
        # string with an empty prob dict rather than raising. Previously that
        # error string was compared against user_choice, producing a
        # misleading "does not match the model's prediction ('Model error:
        # ...')" message; surface the error to the caller directly instead.
        if not prob_dict:
            print(predicted_answer)
            return predicted_answer, {}
        if user_choice == predicted_answer:
            assessment = f"✅ Your choice corresponds to the model's prediction ('{predicted_answer}').\nThis indicates an objective response."
        else:
            assessment = (
                f"⚠️ Your choice ('{user_choice}') does not match the model's prediction ('{predicted_answer}').\nThis suggests a deviation from the objective norm."
            )
        print("[Checkpoint] Objectivity assessment complete.")
        return assessment, prob_dict
    except ValueError as e:
        error = f"Invalid input for assessment: {str(e)}"
        print(error)
        return error, {}
    except RuntimeError as e:
        error = f"Model error during assessment: {str(e)}"
        print(error)
        return error, {}
    except Exception as e:
        error = f"Error during objectivity assessment: {str(e)}"
        print(error)
        return error, {}
# Initialize scenarios on module load. (A stray trailing "|" artifact from
# the original source export was removed here — it was a syntax error.)
scenarios = load_scenarios()