import streamlit as st
from transformers import pipeline
import google.generativeai as genai
import json
import random
import os
from dotenv import load_dotenv
import langdetect
from langdetect import detect, DetectorFactory
from langdetect.lang_detect_exception import LangDetectException

# Set seed for consistent language detection
DetectorFactory.seed = 0

# Load environment variables
load_dotenv()

# Load language configurations from JSON
with open('languages_config.json', 'r', encoding='utf-8') as f:
    LANGUAGES = json.load(f)['LANGUAGES']

# Load the JSON data for emotion templates (utf-8 so multilingual entries read correctly)
with open('emotion_templates.json', 'r', encoding='utf-8') as f:
    data = json.load(f)
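# The two JSON files are expected to look roughly like the sketches below
# (illustrative values only; the real files ship alongside this script, and the
# keys shown are the ones accessed later in the code):
#
#   languages_config.json:
#     {"LANGUAGES": {"English": {"title": "...", "analyze_subtitle": "...",
#                                "input_placeholder": "...", "emotions_header": "...",
#                                "response_header": "..."}, ...}}
#
#   emotion_templates.json:
#     {"emotion_templates": {"joy": ["I'm thrilled about [topic]!", ...], ...},
#      "emotional_enhancers": {"en": {"positive": [...], "negative": [...], "neutral": [...]}, ...}}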
# Configure Gemini API
gemini_api_key = os.getenv("GEMINI_API_KEY")
if not gemini_api_key:
    st.error("GEMINI_API_KEY not found in environment variables. Please set it in your .env file.")
    st.stop()

genai.configure(api_key=gemini_api_key)
model = genai.GenerativeModel('gemini-2.0-flash')

# Configure Hugging Face API (optional, for private models or rate limiting)
hf_token = os.getenv("HUGGINGFACE_TOKEN")
if hf_token:
    os.environ["HUGGINGFACE_HUB_TOKEN"] = hf_token
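# A minimal .env for local runs might look like this (placeholder values):
#
#   GEMINI_API_KEY=your-gemini-api-key
#   HUGGINGFACE_TOKEN=hf_your_token   # optional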
# Available emotion detection models
EMOTION_MODELS = {
    "AnasAlokla/multilingual_go_emotions": "Multilingual Go Emotions (Original)",
    "AnasAlokla/multilingual_go_emotions_V1.1": "Multilingual Go Emotions (V1.1)",
    "AnasAlokla/multilingual_go_emotions_V1.2": "Multilingual Go Emotions (V1.2)"
}

# Language mapping for detection
SUPPORTED_LANGUAGES = {
    'en': 'English',
    'ar': 'Arabic',
    'fr': 'French',
    'es': 'Spanish',
    'nl': 'Dutch',
    'tr': 'Turkish'
}
def normalize_emotion_predictions(predictions):
    """
    Normalize emotion predictions to ensure a consistent format.
    Handles the different return formats of Hugging Face pipelines.
    """
    try:
        # If predictions is a list of lists (multiple inputs)
        if isinstance(predictions, list) and len(predictions) > 0:
            if isinstance(predictions[0], list):
                # Take the first prediction set
                predictions = predictions[0]

            # Ensure each prediction has the required keys
            normalized = []
            for pred in predictions:
                if isinstance(pred, dict):
                    # Handle different possible key names
                    label = pred.get('label') or pred.get('LABEL') or pred.get('emotion', 'unknown')
                    score = pred.get('score') or pred.get('SCORE') or pred.get('confidence', 0.0)
                    normalized.append({
                        'label': str(label).lower(),
                        'score': float(score)
                    })
                else:
                    # Handle unexpected format
                    st.warning(f"Unexpected prediction format: {pred}")
                    continue

            return normalized if normalized else [{'label': 'neutral', 'score': 1.0}]
        else:
            # Handle case where predictions is not in the expected format
            st.warning(f"Unexpected predictions format: {type(predictions)}")
            return [{'label': 'neutral', 'score': 1.0}]
    except Exception as e:
        st.error(f"Error normalizing predictions: {str(e)}")
        return [{'label': 'neutral', 'score': 1.0}]
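# For reference: a text-classification pipeline built with top_k=None typically returns
# one list of {'label', 'score'} dicts per input, e.g. (illustrative numbers only):
#
#   [[{'label': 'joy', 'score': 0.91}, {'label': 'optimism', 'score': 0.04}, ...]]
#
# normalize_emotion_predictions() unwraps the outer list and guarantees lowercase string
# labels and float scores, falling back to [{'label': 'neutral', 'score': 1.0}] on failure.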
def detect_language(text):
    """Detect the language of the input text."""
    try:
        detected_lang = detect(text)
        if detected_lang in SUPPORTED_LANGUAGES:
            return detected_lang
        else:
            return 'en'  # Default to English if language not supported
    except LangDetectException:
        return 'en'  # Default to English if detection fails
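# langdetect is probabilistic (hence the fixed seed above); for example,
# detect("Bonjour tout le monde") usually returns 'fr', and any code outside
# SUPPORTED_LANGUAGES falls back to English.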
def get_language_name(lang_code):
    """Get the full language name from a language code."""
    return SUPPORTED_LANGUAGES.get(lang_code, 'English')

def categorize_emotion(emotion):
    """Categorize an emotion as positive, negative, or neutral."""
    positive_emotions = ['admiration', 'amusement', 'approval', 'caring', 'curiosity',
                         'desire', 'excitement', 'gratitude', 'joy', 'love', 'optimism',
                         'pride', 'relief']
    negative_emotions = ['anger', 'annoyance', 'confusion', 'disappointment', 'disapproval',
                         'disgust', 'embarrassment', 'fear', 'grief', 'nervousness',
                         'remorse', 'sadness']

    if emotion in positive_emotions:
        return 'positive'
    elif emotion in negative_emotions:
        return 'negative'
    else:
        return 'neutral'
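# The remaining GoEmotions labels (realization, surprise, neutral) are not listed above
# and therefore fall into the 'neutral' category.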
def generate_text(prompt, context=""):
    """
    Generates text using the Gemini model.
    """
    try:
        response = model.generate_content(prompt)
        return response.text
    except Exception as e:
        print(f"Error generating text: {e}")
        return "I am sorry, I encountered an error while generating the text."

def create_enhanced_prompt(emotion, topic, detected_language, emotion_score):
    """
    Creates an enhanced emotional prompt based on the detected language and emotion intensity.
    """
    # Get base template from emotion_templates.json
    templates = data["emotion_templates"][emotion]
    base_prompt = random.choice(templates)

    # Replace placeholders
    if topic:
        placeholders = ["[topic/person]", "[topic]", "[person]", "[object]", "[outcome]"]
        for placeholder in placeholders:
            base_prompt = base_prompt.replace(placeholder, topic)

    # Get language name
    language_name = get_language_name(detected_language)

    # Get emotion category
    emotion_category = categorize_emotion(emotion)

    # Get emotional enhancers from JSON file
    emotional_enhancers = data.get("emotional_enhancers", {})
    language_enhancers = emotional_enhancers.get(detected_language, emotional_enhancers.get('en', {}))

    emotion_enhancer = ""
    if language_enhancers and emotion_category in language_enhancers:
        emotion_enhancer = random.choice(language_enhancers[emotion_category])

    # Calculate emotion intensity
    intensity = "high" if emotion_score > 0.7 else "moderate" if emotion_score > 0.4 else "low"

    # Create enhanced prompt
    enhanced_prompt = f"""
    You are an emotionally intelligent AI assistant. Respond with genuine {emotion} emotion at {intensity} intensity.

    Language Instructions:
    - Respond ONLY in {language_name}
    - Use natural, native-speaker expressions
    - Match the emotional tone of a {language_name} speaker

    Emotional Guidelines:
    - The detected emotion is: {emotion} (confidence: {emotion_score:.2f})
    - Emotion category: {emotion_category}
    - Use emotionally resonant words like: {emotion_enhancer}
    - Express {emotion} authentically and appropriately
    - Make your response feel genuinely {emotion_category}

    Context: {base_prompt}
    Topic to respond about: {topic}

    Requirements:
    - Keep response concise but emotionally expressive (2-4 sentences)
    - Use appropriate emotional language for {emotion}
    - Sound natural in {language_name}
    - Show empathy and understanding
    - Match the emotional intensity of the user's input
    """

    return enhanced_prompt
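# Example (hypothetical values): create_enhanced_prompt('joy', 'my new job', 'en', 0.85)
# picks a random joy template, substitutes 'my new job' for placeholders such as "[topic]",
# and builds a prompt asking Gemini for a high-intensity, English-only joyful reply.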
# Cache the pipeline across Streamlit reruns so the model is only downloaded and loaded once
@st.cache_resource
def load_emotion_classifier(model_name):
    """Load and cache the emotion classifier model."""
    try:
        # Use the HF token if available for authentication
        if hf_token:
            return pipeline("text-classification", model=model_name, use_auth_token=hf_token, top_k=None)
        else:
            return pipeline("text-classification", model=model_name, top_k=None)
    except Exception as e:
        st.error(f"Error loading model {model_name}: {str(e)}")
        return None

def get_ai_response(user_input, emotion_predictions, detected_language):
    """Generates an AI response based on user input, detected emotions, and language."""
    try:
        # Ensure predictions are normalized
        normalized_predictions = normalize_emotion_predictions(emotion_predictions)

        dominant_emotion = None
        max_score = 0
        for prediction in normalized_predictions:
            if prediction['score'] > max_score:
                max_score = prediction['score']
                dominant_emotion = prediction['label']

        if dominant_emotion is None:
            return "Error: No emotion detected for response generation."

        # Create enhanced prompt with language and emotion context
        prompt_text = create_enhanced_prompt(dominant_emotion, user_input, detected_language, max_score)
        response = generate_text(prompt_text)
        return response
    except Exception as e:
        st.error(f"Error generating AI response: {str(e)}")
        return "I'm sorry, I encountered an error while generating a response."

def display_top_predictions(emotion_predictions, selected_language, num_predictions=3):
    """Display top emotion predictions in the sidebar."""
    try:
        # Normalize predictions first
        normalized_predictions = normalize_emotion_predictions(emotion_predictions)

        # Sort predictions by score in descending order
        sorted_predictions = sorted(normalized_predictions, key=lambda x: x['score'], reverse=True)

        # Take top N predictions
        top_predictions = sorted_predictions[:num_predictions]

        # Display in sidebar
        st.sidebar.markdown("---")
        st.sidebar.subheader("🎯 Top Emotion Predictions")

        for i, prediction in enumerate(top_predictions, 1):
            emotion = prediction['label']
            score = prediction['score']
            percentage = score * 100

            # Create a progress bar for visual representation
            st.sidebar.markdown(f"**{i}. {emotion.title()}**")
            st.sidebar.progress(score)
            st.sidebar.markdown(f"Score: {percentage:.1f}%")
            st.sidebar.markdown("---")
    except Exception as e:
        st.sidebar.error(f"Error displaying predictions: {str(e)}")

def display_language_info(detected_language, confidence_scores=None):
    """Display detected language information."""
    language_name = get_language_name(detected_language)

    st.sidebar.markdown("---")
    st.sidebar.subheader("🌍 Language Detection")
    st.sidebar.success(f"**Detected:** {language_name} ({detected_language.upper()})")

    if confidence_scores:
        st.sidebar.markdown("**Detection Confidence:**")
        for lang, score in confidence_scores.items():
            if lang in SUPPORTED_LANGUAGES:
                lang_name = SUPPORTED_LANGUAGES[lang]
                st.sidebar.markdown(f"• {lang_name}: {score:.2f}")

def main():
    # Sidebar configurations
    st.sidebar.header("⚙️ Configuration")

    # Language Selection
    selected_language = st.sidebar.selectbox(
        "🌐 Select Interface Language",
        list(LANGUAGES.keys()),
        index=0  # Default to English
    )

    # Model Selection
    selected_model_key = st.sidebar.selectbox(
        "🤖 Select Emotion Detection Model",
        list(EMOTION_MODELS.keys()),
        format_func=lambda x: EMOTION_MODELS[x],
        index=0  # Default to first model
    )

    # Number of predictions to show in sidebar
    num_predictions = st.sidebar.slider(
        "📊 Number of predictions to show",
        min_value=1,
        max_value=6,
        value=3,
        step=1
    )

    # Language detection settings
    auto_detect = True

    # Load the selected emotion classifier
    emotion_classifier = load_emotion_classifier(selected_model_key)

    # Check if model loaded successfully
    if emotion_classifier is None:
        st.error("Failed to load the selected emotion detection model. Please try again or select a different model.")
        return

    # Display selected model info
    st.sidebar.success(f"✅ Current Model: {EMOTION_MODELS[selected_model_key]}")

    # Display Image
    if os.path.exists('chatBot_image.jpg'):
        st.image('chatBot_image.jpg', channels='RGB')

    # Set page title and header based on selected language
    st.title(LANGUAGES[selected_language]['title'])
    st.markdown(f"### 💬 {LANGUAGES[selected_language]['analyze_subtitle']}")

    # Add language support info
    st.info("🌐 **Supported Languages:** English, Arabic, French, Spanish, Dutch, Turkish")

    # Input Text Box
    user_input = st.text_area(
        LANGUAGES[selected_language]['input_placeholder'],
        "",
        height=100,
        help="Type your message here to analyze emotions and get an emotionally appropriate response"
    )
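    # Processing flow: detect the input language, classify emotions with the selected
    # model, show the top predictions, then ask Gemini for an emotion- and
    # language-matched reply.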
    if user_input:
        try:
            # Language Detection
            if auto_detect:
                detected_language = detect_language(user_input)

            # Display language detection results
            display_language_info(detected_language)

            # Emotion Detection
            with st.spinner("Analyzing emotions..."):
                emotion_predictions = emotion_classifier(user_input)

            # Normalize predictions
            normalized_predictions = normalize_emotion_predictions(emotion_predictions)

            # Display top predictions in sidebar
            display_top_predictions(emotion_predictions, selected_language, num_predictions)

            # Display Emotions in main area (top 5)
            st.subheader(LANGUAGES[selected_language]['emotions_header'])
            top_5_emotions = sorted(normalized_predictions, key=lambda x: x['score'], reverse=True)[:5]

            # Create columns for better display
            col1, col2 = st.columns(2)
            for i, prediction in enumerate(top_5_emotions):
                emotion = prediction['label']
                score = prediction['score']
                percentage = score * 100

                # Add emotion category indicator
                emotion_category = categorize_emotion(emotion)
                category_emoji = "😊" if emotion_category == "positive" else "😞" if emotion_category == "negative" else "😐"

                if i % 2 == 0:
                    with col1:
                        st.metric(
                            label=f"{category_emoji} {emotion.title()}",
                            value=f"{percentage:.1f}%",
                            delta=None
                        )
                else:
                    with col2:
                        st.metric(
                            label=f"{category_emoji} {emotion.title()}",
                            value=f"{percentage:.1f}%",
                            delta=None
                        )

            # Get AI Response with enhanced emotional intelligence
            with st.spinner("Generating emotionally intelligent response..."):
                ai_response = get_ai_response(user_input, emotion_predictions, detected_language)

            # Display AI Response
            st.subheader(f"🤖 {LANGUAGES[selected_language]['response_header']}")

            # Show dominant emotion and response language
            dominant_emotion = max(normalized_predictions, key=lambda x: x['score'])
            language_name = get_language_name(detected_language)

            # Display the response in a nice container
            with st.container():
                st.write(ai_response)

                # Add emotion intensity indicator
                emotion_score = dominant_emotion['score']
                intensity = "High" if emotion_score > 0.7 else "Moderate" if emotion_score > 0.4 else "Low"
                st.caption(f"Emotion Intensity: {intensity} ({emotion_score:.2f})")
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
            st.error("Please try again with different input or check your configuration.")

if __name__ == "__main__": | |
main() |
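# To run locally, install the dependencies and launch with Streamlit, e.g.:
#   streamlit run app.py
# (file name assumed; use whatever name this script is saved under)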