import streamlit as st
import os
import json
import pandas as pd
import random
from os.path import join
from datetime import datetime
from src import (
preprocess_and_load_df,
load_agent,
ask_agent,
decorate_with_code,
show_response,
get_from_user,
load_smart_df,
ask_question,
)
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_google_genai import ChatGoogleGenerativeAI
from streamlit_feedback import streamlit_feedback
from huggingface_hub import HfApi
from datasets import load_dataset, get_dataset_config_info, Dataset
from PIL import Image
import time
import uuid
# Page config with beautiful theme
st.set_page_config(
page_title="VayuChat - AI Air Quality Assistant",
    page_icon="🌬️",
layout="wide",
initial_sidebar_state="expanded"
)
# Custom CSS for beautiful styling
st.markdown("""
""", unsafe_allow_html=True)
# Auto-scroll JavaScript
st.markdown("""
""", unsafe_allow_html=True)
# FORCE reload environment variables
load_dotenv(override=True)
# Get API keys
Groq_Token = os.getenv("GROQ_API_KEY")
hf_token = os.getenv("HF_TOKEN")
gemini_token = os.getenv("GEMINI_TOKEN")
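# Any key left unset resolves to None; the sidebar below uses these values to
# decide which model families (Groq-hosted vs. Gemini) are selectable.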
models = {
"gpt-oss-20b": "openai/gpt-oss-20b",
"gpt-oss-120b": "openai/gpt-oss-120b",
"llama3.1": "llama-3.1-8b-instant",
"llama3.3": "llama-3.3-70b-versatile",
"deepseek-R1": "deepseek-r1-distill-llama-70b",
"llama4 maverik":"meta-llama/llama-4-maverick-17b-128e-instruct",
"llama4 scout":"meta-llama/llama-4-scout-17b-16e-instruct",
"gemini-pro": "gemini-1.5-pro"
}
self_path = os.path.dirname(os.path.abspath(__file__))
# Initialize session ID for this session
if "session_id" not in st.session_state:
st.session_state.session_id = str(uuid.uuid4())
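# st.session_state survives Streamlit's script reruns, so this UUID identifies
# one browser session across interactions; "Clear Chat" below regenerates it.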
def upload_feedback(feedback, error, output, last_prompt, code, status):
"""Enhanced feedback upload function with better logging and error handling"""
try:
if not hf_token or hf_token.strip() == "":
st.warning("โ ๏ธ Cannot upload feedback - HF_TOKEN not available")
return False
# Create comprehensive feedback data
feedback_data = {
"timestamp": datetime.now().isoformat(),
"session_id": st.session_state.session_id,
"feedback_score": feedback.get("score", ""),
"feedback_comment": feedback.get("text", ""),
"user_prompt": last_prompt,
"ai_output": str(output),
"generated_code": code or "",
"error_message": error or "",
"is_image_output": status.get("is_image", False),
"success": not bool(error)
}
# Create unique folder name with timestamp
timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
random_id = str(uuid.uuid4())[:8]
folder_name = f"feedback_{timestamp_str}_{random_id}"
# Create markdown feedback file
markdown_content = f"""# VayuChat Feedback Report
## Session Information
- **Timestamp**: {feedback_data['timestamp']}
- **Session ID**: {feedback_data['session_id']}
## User Interaction
**Prompt**: {feedback_data['user_prompt']}
## AI Response
**Output**: {feedback_data['ai_output']}
## Generated Code
```python
{feedback_data['generated_code']}
```
## Technical Details
- **Error Message**: {feedback_data['error_message']}
- **Is Image Output**: {feedback_data['is_image_output']}
- **Success**: {feedback_data['success']}
## User Feedback
- **Score**: {feedback_data['feedback_score']}
- **Comments**: {feedback_data['feedback_comment']}
"""
# Save markdown file locally
markdown_filename = f"{folder_name}.md"
markdown_local_path = f"/tmp/{markdown_filename}"
with open(markdown_local_path, "w", encoding="utf-8") as f:
f.write(markdown_content)
# Upload to Hugging Face
api = HfApi(token=hf_token)
# Upload markdown feedback
api.upload_file(
path_or_fileobj=markdown_local_path,
path_in_repo=f"data/{markdown_filename}",
repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
repo_type="dataset",
)
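        # upload_file commits straight to the dataset repo, so the token must have
        # write access to SustainabilityLabIITGN/VayuChat_Feedback.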
# Upload image if it exists and is an image output
if status.get("is_image", False) and isinstance(output, str) and os.path.exists(output):
try:
image_filename = f"{folder_name}_plot.png"
api.upload_file(
path_or_fileobj=output,
path_in_repo=f"data/{image_filename}",
repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
repo_type="dataset",
)
except Exception as img_error:
print(f"Error uploading image: {img_error}")
# Clean up local files
if os.path.exists(markdown_local_path):
os.remove(markdown_local_path)
st.success("๐ Feedback uploaded successfully!")
return True
except Exception as e:
st.error(f"โ Error uploading feedback: {e}")
print(f"Feedback upload error: {e}")
return False
# Beautiful header
st.markdown("
๐ฌ๏ธ VayuChat
", unsafe_allow_html=True)
st.markdown("""
AI-Powered Air Quality Insights
Simplifying pollution analysis using conversational AI.
""", unsafe_allow_html=True)
st.markdown("""
How to Use:
Select a model from the sidebar and ask questions directly in the chat. Use quick prompts below for common queries.
""", unsafe_allow_html=True)
os.environ["PANDASAI_API_KEY"] = "$2a$10$gbmqKotzJOnqa7iYOun8eO50TxMD/6Zw1pLI2JEoqncwsNx4XeBS2"
# Load data with error handling
try:
df = preprocess_and_load_df(join(self_path, "Data.csv"))
st.success("โ
Data loaded successfully!")
except Exception as e:
st.error(f"โ Error loading data: {e}")
st.stop()
inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
image_path = "IITGN_Logo.png"
# Beautiful sidebar
with st.sidebar:
# Logo and title
col1, col2, col3 = st.columns([1, 2, 1])
with col2:
if os.path.exists(image_path):
st.image(image_path, use_column_width=True)
# Session info
st.markdown(f"**Session ID**: `{st.session_state.session_id[:8]}...`")
# Model selection
st.markdown("### ๐ค AI Model Selection")
# Filter available models
available_models = []
model_names = list(models.keys())
groq_models = []
gemini_models = []
for model_name in model_names:
if "gemini" not in model_name:
groq_models.append(model_name)
else:
gemini_models.append(model_name)
if Groq_Token and Groq_Token.strip():
available_models.extend(groq_models)
if gemini_token and gemini_token.strip():
available_models.extend(gemini_models)
if not available_models:
st.error("โ No API keys available! Please set up your API keys in the .env file")
st.stop()
model_name = st.selectbox(
"Choose your AI assistant:",
available_models,
help="Different models have different strengths. Try them all!"
)
# Model descriptions
    model_descriptions = {
        "llama3.1": "🦙 Fast and efficient for general queries",
        "llama3.3": "🦙 Most advanced LLaMA model for complex reasoning",
        "gemini-pro": "🧠 Google's most powerful model",
        "gpt-oss-20b": "🔓 OpenAI's compact open-weight GPT for everyday tasks",
        "gpt-oss-120b": "🚀 OpenAI's massive open-weight GPT for nuanced responses",
        "deepseek-R1": "🔍 DeepSeek's distilled LLaMA model for efficient reasoning",
        "llama4 maverick": "🚀 Meta's LLaMA 4 Maverick - high-performance instruction model",
        "llama4 scout": "🛰️ Meta's LLaMA 4 Scout - optimized for adaptive reasoning"
    }
if model_name in model_descriptions:
st.info(model_descriptions[model_name])
st.markdown("---")
# Logging status
st.markdown("### ๐ Logging Status")
if hf_token and hf_token.strip():
st.success("โ
Logging enabled")
st.caption("Interactions are being logged to HuggingFace")
else:
st.warning("โ ๏ธ Logging disabled")
st.caption("HF_TOKEN not available")
st.markdown("---")
# Clear Chat Button
if st.button("๐งน Clear Chat"):
st.session_state.responses = []
st.session_state.processing = False
# Generate new session ID for new chat
st.session_state.session_id = str(uuid.uuid4())
try:
st.rerun()
except AttributeError:
st.experimental_rerun()
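        # st.rerun() replaced st.experimental_rerun() in newer Streamlit releases;
        # the AttributeError fallback keeps older installations working.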
st.markdown("---")
# Chat History in Sidebar
with st.expander("๐ Chat History"):
for i, response in enumerate(st.session_state.get("responses", [])):
if response.get("role") == "user":
st.markdown(f"**You:** {response.get('content', '')[:50]}...")
elif response.get("role") == "assistant":
content = response.get('content', '')
if isinstance(content, str) and len(content) > 50:
st.markdown(f"**VayuChat:** {content[:50]}...")
else:
st.markdown(f"**VayuChat:** {str(content)[:50]}...")
st.markdown("---")
# Load quick prompts
questions = []
questions_file = join(self_path, "questions.txt")
if os.path.exists(questions_file):
try:
with open(questions_file, 'r', encoding='utf-8') as f:
content = f.read()
questions = [q.strip() for q in content.split("\n") if q.strip()]
print(f"Loaded {len(questions)} quick prompts") # Debug
except Exception as e:
st.error(f"Error loading questions: {e}")
questions = []
# Add some default prompts if file doesn't exist or is empty
if not questions:
questions = [
"What is the average PM2.5 level in the dataset?",
"Show me the air quality trend over time",
"Which pollutant has the highest concentration?",
"Create a correlation plot between different pollutants",
"What are the peak pollution hours?",
"Compare weekday vs weekend pollution levels"
]
# Quick prompts section (horizontal)
st.markdown("### ๐ญ Quick Prompts")
# Create columns for horizontal layout
cols_per_row = 2 # Reduced to 2 for better fit
rows = [questions[i:i + cols_per_row] for i in range(0, len(questions), cols_per_row)]
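# Slicing in steps of cols_per_row chunks the flat question list into rows of
# two, which st.columns then renders side by side.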
selected_prompt = None
for row_idx, row in enumerate(rows):
cols = st.columns(len(row))
for col_idx, question in enumerate(row):
with cols[col_idx]:
# Create unique key using row and column indices
unique_key = f"prompt_btn_{row_idx}_{col_idx}"
button_text = f"๐ {question[:35]}{'...' if len(question) > 35 else ''}"
if st.button(button_text,
key=unique_key,
help=question,
use_container_width=True):
selected_prompt = question
st.markdown("---")
# Initialize chat history and processing state
if "responses" not in st.session_state:
st.session_state.responses = []
if "processing" not in st.session_state:
st.session_state.processing = False
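# Both keys are created once per session; widget clicks re-execute the whole
# script, so these guards prevent reruns from wiping the chat history.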
def show_custom_response(response):
"""Custom response display function"""
role = response.get("role", "assistant")
content = response.get("content", "")
if role == "user":
st.markdown(f"""
""", unsafe_allow_html=True)
elif role == "assistant":
st.markdown(f"""
🤖 VayuChat
{content if isinstance(content, str) else str(content)}
""", unsafe_allow_html=True)
# Show generated code if available
if response.get("gen_code"):
with st.expander("๐ View Generated Code"):
st.code(response["gen_code"], language="python")
# Try to display image if content is a file path
try:
            if isinstance(content, str) and content.endswith(('.png', '.jpg')):
if os.path.exists(content):
st.image(content)
return {"is_image": True}
        except Exception:
pass
return {"is_image": False}
def show_processing_indicator(model_name, question):
"""Show processing indicator"""
st.markdown(f"""
๐ค VayuChat โข Processing with {model_name}
Question: {question}
๐ Generating response...
""", unsafe_allow_html=True)
# Main chat container
chat_container = st.container()
with chat_container:
# Display chat history
for response_id, response in enumerate(st.session_state.responses):
status = show_custom_response(response)
# Show feedback section for assistant responses
if response["role"] == "assistant":
feedback_key = f"feedback_{int(response_id/2)}"
error = response.get("error", "")
output = response.get("content", "")
last_prompt = response.get("last_prompt", "")
code = response.get("gen_code", "")
if "feedback" in st.session_state.responses[response_id]:
feedback_data = st.session_state.responses[response_id]["feedback"]
st.markdown(f"""
📝 Your Feedback: {feedback_data.get('score', '')}
{f"- {feedback_data.get('text', '')}" if feedback_data.get('text') else ""}
""", unsafe_allow_html=True)
else:
# Beautiful feedback section
st.markdown("---")
st.markdown("**How was this response?**")
col1, col2 = st.columns(2)
with col1:
thumbs_up = st.button("๐ Helpful", key=f"{feedback_key}_up", use_container_width=True)
with col2:
thumbs_down = st.button("๐ Not Helpful", key=f"{feedback_key}_down", use_container_width=True)
                if thumbs_up or thumbs_down:
                    # Persist the choice: the button only returns True on the run it was
                    # clicked, and the comment box below triggers further reruns.
                    st.session_state[f"{feedback_key}_choice"] = "👍 Helpful" if thumbs_up else "👎 Not Helpful"
                if f"{feedback_key}_choice" in st.session_state:
                    thumbs = st.session_state[f"{feedback_key}_choice"]
                    comments = st.text_area(
                        "💬 Tell us more (optional):",
                        key=f"{feedback_key}_comments",
                        placeholder="What could be improved? Any suggestions?",
                        max_chars=500
                    )
                    if st.button("🚀 Submit Feedback", key=f"{feedback_key}_submit"):
                        feedback = {"score": thumbs, "text": comments}
                        # Upload feedback with enhanced error handling
                        if upload_feedback(feedback, error, output, last_prompt, code, status or {}):
                            st.session_state.responses[response_id]["feedback"] = feedback
                            st.session_state.pop(f"{feedback_key}_choice", None)  # collapse the form
                            time.sleep(1)  # Give user time to see success message
                            st.rerun()
                        else:
                            st.error("Failed to submit feedback. Please try again.")
# Show processing indicator if processing
if st.session_state.get("processing"):
show_processing_indicator(
st.session_state.get("current_model", "Unknown"),
st.session_state.get("current_question", "Processing...")
)
# Chat input (always visible at bottom)
prompt = st.chat_input("๐ฌ Ask me anything about air quality!", key="main_chat")
# Handle selected prompt from quick prompts
if selected_prompt:
prompt = selected_prompt
# Handle new queries
if prompt and not st.session_state.get("processing"):
# Prevent duplicate processing
if "last_prompt" in st.session_state:
last_prompt = st.session_state["last_prompt"]
last_model_name = st.session_state.get("last_model_name", "")
if (prompt == last_prompt) and (model_name == last_model_name):
prompt = None
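        # Comparing against the last processed (prompt, model) pair prevents a
        # rerun from answering the same submission twice.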
if prompt:
# Add user input to chat history
user_response = get_from_user(prompt)
st.session_state.responses.append(user_response)
# Set processing state
st.session_state.processing = True
st.session_state.current_model = model_name
st.session_state.current_question = prompt
# Rerun to show processing indicator
st.rerun()
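# Two-phase flow: the rerun above redraws the UI with the processing indicator,
# and this second pass performs the actual (slow) model call.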
# Process the question if we're in processing state
if st.session_state.get("processing"):
prompt = st.session_state.get("current_question")
model_name = st.session_state.get("current_model")
try:
response = ask_question(model_name=model_name, question=prompt)
if not isinstance(response, dict):
response = {
"role": "assistant",
"content": "โ Error: Invalid response format",
"gen_code": "",
"ex_code": "",
"last_prompt": prompt,
"error": "Invalid response format"
}
response.setdefault("role", "assistant")
response.setdefault("content", "No content generated")
response.setdefault("gen_code", "")
response.setdefault("ex_code", "")
response.setdefault("last_prompt", prompt)
response.setdefault("error", None)
except Exception as e:
response = {
"role": "assistant",
"content": f"Sorry, I encountered an error: {str(e)}",
"gen_code": "",
"ex_code": "",
"last_prompt": prompt,
"error": str(e)
}
st.session_state.responses.append(response)
st.session_state["last_prompt"] = prompt
st.session_state["last_model_name"] = model_name
st.session_state.processing = False
# Clear processing state
if "current_model" in st.session_state:
del st.session_state.current_model
if "current_question" in st.session_state:
del st.session_state.current_question
st.rerun()
# Auto-scroll to bottom
if st.session_state.responses:
st.markdown("", unsafe_allow_html=True)
# Beautiful sidebar footer
# Statistics (if logging is enabled)
if hf_token and hf_token.strip():
st.markdown("### ๐ Session Stats")
total_interactions = len([r for r in st.session_state.get("responses", []) if r.get("role") == "assistant"])
st.metric("Interactions", total_interactions)
feedbacks_given = len([r for r in st.session_state.get("responses", []) if r.get("role") == "assistant" and "feedback" in r])
st.metric("Feedbacks Given", feedbacks_given)
# Footer
st.markdown("""
๐ Together for Cleaner Air
VayuChat - Empowering environmental awareness through AI
ยฉ 2024 IIT Gandhinagar Sustainability Lab
""", unsafe_allow_html=True)