""" | |
File managing the scenarios and objectivity assessment functionality for the application. | |
""" | |
import os | |
import json | |
import random | |
import gradio as gr | |
import torch | |
import torch.nn.functional as F | |
from models import device, bbq_model, bbq_tokenizer | |

# Topics for the scenarios
TOPICS = [
    "AI in Healthcare", "Climate Change", "Universal Basic Income", "Social Media's Role in Elections",
    "Government Surveillance and Privacy", "Genetic Engineering", "Gender Pay Gap",
    "Police Use of Facial Recognition", "Space Exploration and Government Funding",
    "Affirmative Action in Universities", "Renewable Energy Advances", "Mental Health Awareness",
    "Online Privacy and Data Security", "Impact of Automation on Employment",
    "Electric Vehicles Adoption", "Work From Home Culture", "Food Security and GMOs",
    "Cryptocurrency Volatility", "Artificial Intelligence in Education", "Cultural Diversity in Media",
    "Urbanization and Infrastructure", "Healthcare Reform", "Taxation Policies",
    "Global Trade and Tariffs", "Environmental Conservation", "Social Justice Movements",
    "Digital Transformation in Business", "Public Transportation Funding", "Immigration Reform",
    "Aging Population Challenges", "Mental Health in the Workplace", "Internet Censorship",
    "Political Polarization", "Cybersecurity in the Digital Age", "Privacy vs. Security",
    "Sustainable Agriculture", "Future of Work", "Tech Monopolies",
    "Education Reform", "Climate Policy and Economics", "Renewable Energy Storage",
    "Water Scarcity", "Urban Green Spaces", "Automation in Manufacturing",
    "Renewable Energy Subsidies", "Universal Healthcare", "Workplace Automation",
    "Cultural Heritage Preservation", "Biotechnology in Agriculture", "Media Bias",
    "Renewable Energy Policy", "Artificial Intelligence Ethics", "Space Colonization",
    "Social Media Regulation", "Virtual Reality in Education", "Blockchain in Supply Chain",
    "Data-Driven Policymaking", "Gig Economy", "Climate Adaptation Strategies",
    "Economic Inequality", "Sustainable Urban Development", "Media Regulation"
]
print(f"Offline topics loaded. Total: {len(TOPICS)}")

# Initial scenarios (populated by load_scenarios() at the bottom of this module)
scenarios = []


def load_scenarios(file_path: str = "scenarios.json"):
    """
    Load scenarios from a JSON file if it exists.

    Args:
        file_path (str): Path to the scenarios file.

    Returns:
        list: List of scenario dictionaries, or an empty list if the file is missing.
    """
    try:
        if not os.path.exists(file_path):
            print(f"No scenarios file found at {file_path}.")
            return []
        with open(file_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        if not isinstance(data, list):
            raise ValueError(f"Invalid scenarios format: expected a list but got {type(data).__name__}")
        print(f"Scenarios loaded from {file_path}. Total: {len(data)}")
        return data
    except FileNotFoundError:
        print(f"No scenarios file found at {file_path}")
        return []
    except json.JSONDecodeError as e:
        error = f"Error decoding scenarios JSON in {file_path}: {str(e)}"
        print(error)
        raise RuntimeError(error) from e
    except Exception as e:
        error = f"Error loading scenarios from {file_path}: {str(e)}"
        print(error)
        raise RuntimeError(error) from e
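
# A minimal sketch of the scenarios.json layout this module assumes, inferred from
# the keys read elsewhere in this file ("topic", "context", "question", "answer0"-"answer2").
# The example values are illustrative assumptions, not a confirmed schema:
#
# [
#   {
#     "topic": "AI in Healthcare",
#     "context": "A hospital is piloting an AI triage assistant alongside its nursing staff...",
#     "question": "Which statement is best supported by the context?",
#     "answer0": "The pilot's outcomes are still being evaluated.",
#     "answer1": "AI will certainly replace all triage nurses.",
#     "answer2": "The hospital has abandoned human oversight."
#   }
# ]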


def get_scenario(topic: str):
    """
    Find a random scenario that matches the selected topic.

    Args:
        topic (str): Selected topic to find a scenario for.

    Returns:
        dict: Scenario that matches the selected topic, or None if no match is found.
    """
    try:
        if not topic or not isinstance(topic, str):
            raise ValueError(f"Invalid topic: expected a non-empty string but got {type(topic).__name__}")
        if not scenarios:
            print(f"No scenarios available to match with topic '{topic}'")
            return None
        topic = topic.lower().strip()
        matches = [s for s in scenarios if s.get("topic", "").lower().strip() == topic]
        if matches:
            scenario = random.choice(matches)
            print(f"[Checkpoint] Scenario found for topic '{topic}': {scenario}")
            return scenario
        print(f"No scenario found for topic '{topic}'")
        return None
    except ValueError as e:
        error = f"Invalid topic parameter: {str(e)}"
        print(error)
        raise
    except Exception as e:
        error = f"Error getting scenario for topic '{topic}': {str(e)}"
        print(error)
        raise RuntimeError(error) from e


def on_load_scenario(topic: str):
    """
    Load an offline scenario if one matching the selected topic is found.

    Args:
        topic (str): Selected topic to find a scenario for.

    Returns:
        tuple: Context, question, the three answers, and a Gradio update for the answer choices.
    """
    try:
        scenario = get_scenario(topic)
        if scenario:
            return (
                scenario.get("context", "[No context]"),
                scenario.get("question", "[No question]"),
                scenario.get("answer0", "[No answer0]"),
                scenario.get("answer1", "[No answer1]"),
                scenario.get("answer2", "[No answer2]"),
                gr.update(
                    choices=[
                        scenario.get("answer0", ""),
                        scenario.get("answer1", ""),
                        scenario.get("answer2", "")
                    ],
                    value=None
                )
            )
        return ("[No offline scenario found]", "[No offline scenario found]",
                "[No offline scenario found]", "[No offline scenario found]",
                "[No offline scenario found]",
                gr.update(choices=[], value=None))
    except ValueError as e:
        error = f"Invalid topic parameter: {str(e)}"
        print(error)
        return (error, error, error, error, error, gr.update(choices=[], value=None))
    except RuntimeError as e:
        error = f"Error loading scenario for topic '{topic}': {str(e)}"
        print(error)
        return (error, error, error, error, error, gr.update(choices=[], value=None))
    except Exception as e:
        error = f"Error on load scenario for topic '{topic}': {str(e)}"
        print(error)
        return (error, error, error, error, error, gr.update(choices=[], value=None))
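
# A hedged sketch of how this callback might be wired into a Gradio Blocks UI.
# The component names (topic_dd, context_tb, etc.) are illustrative assumptions;
# the actual UI lives elsewhere in the application and may differ:
#
# with gr.Blocks() as demo:
#     topic_dd = gr.Dropdown(choices=TOPICS, label="Topic")
#     load_btn = gr.Button("Load scenario")
#     context_tb = gr.Textbox(label="Context")
#     question_tb = gr.Textbox(label="Question")
#     ans0_tb, ans1_tb, ans2_tb = gr.Textbox(), gr.Textbox(), gr.Textbox()
#     choice_radio = gr.Radio(label="Your answer")
#     load_btn.click(
#         on_load_scenario,
#         inputs=topic_dd,
#         outputs=[context_tb, question_tb, ans0_tb, ans1_tb, ans2_tb, choice_radio],
#     )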


def classify_multiple_choice(context: str, question: str, ans0: str, ans1: str, ans2: str):
    """
    Classify MCQ answers with the BBQ model to determine the most objective answer.

    Args:
        context (str): Context for the question.
        question (str): Question to answer.
        ans0 (str): Answer choice 0.
        ans1 (str): Answer choice 1.
        ans2 (str): Answer choice 2.

    Returns:
        tuple: (predicted_answer, prob_dict) with the predicted answer and per-answer probabilities.
    """
    try:
        for param_name, param_value in [
            ("context", context),
            ("question", question),
            ("ans0", ans0),
            ("ans1", ans1),
            ("ans2", ans2)
        ]:
            if not isinstance(param_value, str):
                raise ValueError(f"Parameter '{param_name}' must be a string, got {type(param_value).__name__}")
            if not param_value.strip():
                raise ValueError(f"Parameter '{param_name}' cannot be empty.")
        print("[Checkpoint] Starting multiple-choice classification...")
        # One (question + answer, context) pair per answer choice.
        qa_pairs = [f"{question} {ans}" for ans in (ans0, ans1, ans2)]
        contexts = [context] * 3
        encoded = bbq_tokenizer(
            qa_pairs,
            contexts,
            return_tensors="pt",
            truncation=True,
            padding="max_length",
            max_length=128
        ).to(device)
        # Multiple-choice models expect (batch_size, num_choices, seq_len),
        # so add a batch dimension in front of the 3 encoded choices.
        inputs = {k: v.unsqueeze(0) for k, v in encoded.items()}
        print("[Checkpoint] Tokenization complete. Running model...")
        bbq_model.eval()
        with torch.no_grad():
            outputs = bbq_model(**inputs)
        # Logits have shape (batch_size, num_choices); take the single batch row.
        logits = outputs.logits[0]
        probs = F.softmax(logits, dim=-1)
        pred_idx = torch.argmax(logits).item()
        all_answers = [ans0, ans1, ans2]
        prob_dict = {all_answers[i]: float(probs[i].item()) for i in range(3)}
        predicted_answer = all_answers[pred_idx]
        print(f"[Checkpoint] Multiple-choice classification complete. Predicted answer: {predicted_answer}")
        return predicted_answer, prob_dict
    except ValueError as e:
        return f"Configuration error: {str(e)}", {}
    except RuntimeError as e:
        return f"Model error: {str(e)}", {}
    except Exception as e:
        return f"Error analyzing text: {str(e)}", {}


def assess_objectivity(context: str, question: str, ans0: str, ans1: str, ans2: str, user_choice: str):
    """
    Evaluate the objectivity of a user's choice by comparing it with the BBQ model's prediction.

    Args:
        context (str): Context for the question.
        question (str): Question to answer.
        ans0 (str): Answer choice 0.
        ans1 (str): Answer choice 1.
        ans2 (str): Answer choice 2.
        user_choice (str): User's selected answer.

    Returns:
        assessment (str): Assessment message.
        prob_dict (dict): Probabilities for each answer choice.
    """
    try:
        if not user_choice:
            return "Please select one of the generated answers.", {}
        print("[Checkpoint] Starting objectivity assessment...")
        predicted_answer, prob_dict = classify_multiple_choice(context, question, ans0, ans1, ans2)
        if not prob_dict:
            # classify_multiple_choice returns an error message and an empty dict on failure;
            # pass that message through instead of comparing it against the user's choice.
            return predicted_answer, {}
        if user_choice == predicted_answer:
            assessment = (
                f"✅ Your choice corresponds to the model's prediction ('{predicted_answer}').\n"
                "This indicates an objective response."
            )
        else:
            assessment = (
                f"⚠️ Your choice ('{user_choice}') does not match the model's prediction ('{predicted_answer}').\n"
                "This suggests a deviation from the objective norm."
            )
        print("[Checkpoint] Objectivity assessment complete.")
        return assessment, prob_dict
    except ValueError as e:
        error = f"Invalid input for assessment: {str(e)}"
        print(error)
        return error, {}
    except RuntimeError as e:
        error = f"Model error during assessment: {str(e)}"
        print(error)
        return error, {}
    except Exception as e:
        error = f"Error during objectivity assessment: {str(e)}"
        print(error)
        return error, {}


# Initialize scenarios on module load
scenarios = load_scenarios()
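

# A minimal, hedged smoke test for local runs; it only executes when this module is
# run directly, not when imported by the Gradio app. The topic and fallback texts
# are illustrative assumptions.
if __name__ == "__main__":
    demo_scenario = get_scenario("Media Bias") or {}
    demo_answer0 = demo_scenario.get("answer0", "The headlines framed the event differently.")
    assessment, probabilities = assess_objectivity(
        demo_scenario.get("context", "A newspaper ran two very different headlines for the same event."),
        demo_scenario.get("question", "Which statement is best supported by the context?"),
        demo_answer0,
        demo_scenario.get("answer1", "The event never happened."),
        demo_scenario.get("answer2", "Both headlines were identical."),
        demo_answer0,  # pretend the user picked answer0
    )
    print(assessment)
    print(probabilities)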