import atexit
import logging
import sys
import time
from pathlib import Path

import streamlit as st

# Make the app's sibling packages (utils, core, src, services) importable
# regardless of the directory the app is launched from.
sys.path.append(str(Path(__file__).parent))

from utils.config import config
from core.session import session_manager
from core.memory import check_redis_health
from core.personality import personality
from src.analytics.user_logger import user_logger
from src.analytics.session_analytics import session_analytics
# chat_handler is used below but was never imported; this module path is an
# assumption -- adjust it to wherever chat_handler actually lives.
from core.chat_handler import chat_handler

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
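# Streamlit re-executes this script from top to bottom on every interaction,
# so per-user defaults are seeded into st.session_state below; keys that
# already exist are left untouched on later reruns.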
st.set_page_config(page_title="CosmicCat AI Assistant", page_icon="🐱", layout="wide")

if "messages" not in st.session_state:
    st.session_state.messages = []
if "is_processing" not in st.session_state:
    st.session_state.is_processing = False
if "ngrok_url_temp" not in st.session_state:
    st.session_state.ngrok_url_temp = st.session_state.get("ngrok_url", "https://7bcc180dffd1.ngrok-free.app")
if "cosmic_mode" not in st.session_state:
    st.session_state.cosmic_mode = True
if "show_welcome" not in st.session_state:
    st.session_state.show_welcome = True
if "last_processed_message" not in st.session_state:
    st.session_state.last_processed_message = ""
if "session_id" not in st.session_state:
    st.session_state.session_id = f"sess_{int(time.time())}_{abs(hash(str(time.time()))) % 10000}"
if "selected_model_value" not in st.session_state:
    st.session_state.selected_model_value = "auto"
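# Analytics calls are best-effort: a tracking failure is logged but must not
# block the UI.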
try:
    session_analytics.start_session_tracking("default_user", st.session_state.session_id)
except Exception as e:
    logger.warning(f"Analytics session tracking failed: {e}")

session_analytics.track_interaction("default_user", st.session_state.session_id, "page_view", {
    "page": "main_chat"
})
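# Sidebar: provider selection, connection settings, system status, feedback,
# and debug info.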
with st.sidebar:
    st.title("🐱 CosmicCat AI Assistant")
    st.markdown("Your personal AI-powered assistant with a cosmic twist.")

    model_options = {
        "Auto Select": "auto",
        "🦙 Ollama (Local)": "ollama",
        "🤗 HF Endpoint": "huggingface",
    }

    selected_model_key = st.selectbox(
        "Select Provider",
        options=list(model_options.keys()),
        index=0,
    )
    st.session_state.selected_model_value = model_options[selected_model_key]
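    # Resolve which backend "Auto Select" would use right now: prefer the HF
    # endpoint when a token is configured and the endpoint reports available,
    # otherwise fall back to local Ollama.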
    actual_provider = "Unknown"
    if st.session_state.selected_model_value == "auto":
        try:
            from src.services.hf_endpoint_monitor import hf_monitor
            if config.hf_token:
                status = hf_monitor.get_endpoint_status()
                if status["available"]:
                    actual_provider = "🤗 HF Endpoint"
                elif config.ollama_host:
                    actual_provider = "🦙 Ollama"
            elif config.ollama_host:
                # No HF token configured; Ollama is the only candidate.
                actual_provider = "🦙 Ollama"
        except Exception:
            if config.ollama_host:
                actual_provider = "🦙 Ollama"
    else:
        actual_provider = "🤗 HF Endpoint" if st.session_state.selected_model_value == "huggingface" else "🦙 Ollama"

    st.info(f"**Using Provider:** {actual_provider}")

    session_analytics.track_interaction("default_user", st.session_state.session_id, "model_selection", {
        "selected_model": st.session_state.selected_model_value,
        "actual_provider": actual_provider,
    })
    # Note: the on_change callback fires before this assignment runs on the
    # next rerun, so the tracked value is the state from before the toggle.
    st.session_state.cosmic_mode = st.checkbox(
        "Enable Cosmic Mode",
        value=st.session_state.cosmic_mode,
        on_change=lambda: session_analytics.track_interaction(
            "default_user", st.session_state.session_id, "cosmic_mode_toggle",
            {"enabled": st.session_state.cosmic_mode},
        ),
    )

    st.divider()

    st.subheader("⚙️ Configuration")
    # The on_change lambda compares against the previous run's value of
    # ngrok_url_input.
    ngrok_url_input = st.text_input(
        "Ollama Server URL",
        value=st.session_state.ngrok_url_temp,
        help="Enter your ngrok URL",
        on_change=lambda: session_analytics.track_interaction(
            "default_user", st.session_state.session_id, "url_update",
            {"url_changed": ngrok_url_input != st.session_state.ngrok_url_temp},
        ),
    )

    if ngrok_url_input != st.session_state.ngrok_url_temp:
        st.session_state.ngrok_url_temp = ngrok_url_input
        st.success("✅ URL updated!")
        session_analytics.track_interaction("default_user", st.session_state.session_id, "url_updated", {
            "new_url": ngrok_url_input
        })
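    # Connection test: instantiate the Ollama provider for the selected model
    # and validate it, recording latency for success and failure alike.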
    if st.button("📡 Test Connection"):
        start_time = time.time()
        session_analytics.track_interaction("default_user", st.session_state.session_id, "test_connection_click")
        try:
            from core.providers.ollama import OllamaProvider
            ollama_provider = OllamaProvider(st.session_state.selected_model_value)
            is_valid = ollama_provider.validate_model()
            end_time = time.time()

            if is_valid:
                st.success("✅ Connection successful!")
                session_analytics.track_interaction("default_user", st.session_state.session_id, "connection_success", {
                    "response_time": end_time - start_time
                })
                user_logger.log_performance_metric("default_user", "connection_test", end_time - start_time)
            else:
                st.error("❌ Model validation failed")
                session_analytics.track_interaction("default_user", st.session_state.session_id, "connection_failed", {
                    "error": "model_validation_failed"
                })
        except Exception as e:
            end_time = time.time()
            st.error(f"❌ Error: {str(e)[:50]}...")
            session_analytics.track_interaction("default_user", st.session_state.session_id, "connection_error", {
                "error": str(e)[:100],
                "response_time": end_time - start_time
            })
            user_logger.log_error("default_user", "connection_test", str(e))
    if st.button("🗑️ Clear History"):
        session_analytics.track_interaction("default_user", st.session_state.session_id, "clear_history_click")
        st.session_state.messages = []
        st.session_state.last_processed_message = ""

        session_manager.clear_session("default_user")
        st.success("History cleared!")
        session_analytics.track_interaction("default_user", st.session_state.session_id, "history_cleared")

    st.divider()
    with st.expander("📊 System Status", expanded=True):
        st.subheader("📊 Status")

        try:
            from services.ollama_monitor import check_ollama_status
            ollama_status = check_ollama_status()
            if ollama_status.get("running"):
                st.success("🦙 Ollama: Running")
            else:
                st.warning("🦙 Ollama: Not running")
        except Exception:
            st.info("🦙 Ollama: Unknown")
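        # HF endpoint health: the monitor returns a human-readable status
        # string (with a severity emoji) plus an optional cold-start progress
        # message; both are surfaced here.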
        try:
            from src.services.hf_endpoint_monitor import hf_monitor
            status_message = hf_monitor.get_human_readable_status()

            # Map the severity emoji embedded in the status onto the matching
            # Streamlit alert style.
            if "🟢" in status_message:
                st.success(status_message)
            elif "🟡" in status_message:
                st.warning(status_message)
            elif "🔴" in status_message or "❌" in status_message:
                st.error(status_message)
            elif "⏳" in status_message:
                st.info(status_message)
            else:
                st.info(status_message)

            init_progress = hf_monitor.get_initialization_progress()
            if init_progress:
                st.info(init_progress)

            # Offer a manual wake-up when the endpoint has scaled to zero or
            # is still initializing.
            if "scaled to zero" in status_message.lower() or "initializing" in status_message.lower():
                if st.button("⚡ Wake Up HF Endpoint", key="wake_up_hf"):
                    session_analytics.track_interaction("default_user", st.session_state.session_id, "wake_up_hf_click")
                    with st.spinner("Attempting to wake up HF endpoint... This may take 2-4 minutes during initialization..."):
                        start_time = time.time()
                        if hf_monitor.attempt_wake_up():
                            end_time = time.time()
                            st.success("✅ Wake-up request sent! The endpoint should be initializing now. Try your request again in a moment.")
                            session_analytics.track_interaction("default_user", st.session_state.session_id, "hf_wake_up_success", {
                                "response_time": end_time - start_time
                            })
                            user_logger.log_performance_metric("default_user", "hf_wake_up", end_time - start_time)
                            time.sleep(3)
                            st.rerun()
                        else:
                            end_time = time.time()
                            st.error("❌ Failed to send wake-up request. Please try again or wait for initialization to complete.")
                            session_analytics.track_interaction("default_user", st.session_state.session_id, "hf_wake_up_failed", {
                                "response_time": end_time - start_time
                            })
                            user_logger.log_error("default_user", "hf_wake_up", "Failed to wake up HF endpoint")

        except Exception as e:
            st.info(f"🤗 HF Endpoint: Error checking status - {str(e)}")
            session_analytics.track_interaction("default_user", st.session_state.session_id, "hf_status_error", {
                "error": str(e)
            })
            user_logger.log_error("default_user", "hf_status_check", str(e))
        try:
            if check_redis_health():
                st.success("💾 Redis: Connected")
            else:
                st.error("💾 Redis: Disconnected")
        except Exception:
            st.info("💾 Redis: Unknown")

    st.divider()
    st.subheader("⭐ Feedback")
    rating = st.radio("How would you rate your experience?", [1, 2, 3, 4, 5], horizontal=True)
    feedback_comment = st.text_area("Additional comments (optional):")
    if st.button("Submit Feedback"):
        user_logger.log_feedback("default_user", rating, feedback_comment)
        session_analytics.track_interaction("default_user", st.session_state.session_id, "feedback_submitted", {
            "rating": rating,
            "has_comment": bool(feedback_comment)
        })
        st.success("Thank you for your feedback! 🙏")

    st.divider()
    st.subheader("🔍 Debug Info")
    st.markdown(f"**Environment:** {'HF Space' if config.is_hf_space else 'Local'}")
    st.markdown(f"**Model:** {st.session_state.selected_model_value}")
    st.markdown(f"**Session ID:** {st.session_state.session_id}")
st.title("🐱 CosmicCat AI Assistant")
st.markdown("Ask me anything!")

# Show the greeting once per session, then disable it.
if st.session_state.show_welcome:
    with st.chat_message("assistant"):
        greeting = personality.get_greeting(cosmic_mode=st.session_state.cosmic_mode)
        st.markdown(greeting)
    st.session_state.show_welcome = False
# Replay the stored conversation so it survives reruns.
for message in st.session_state.get("messages", []):
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "timestamp" in message:
            provider_info = f" (via {message.get('provider', 'ollama')})" if message["role"] == "assistant" else ""
            st.caption(f"🕒 {message['timestamp']}{provider_info}")
user_input = st.chat_input("Type your message here...", key="chat_input")

if user_input and user_input.strip():
    # Ignore new input while a previous request is still in flight.
    if not st.session_state.get("is_processing", False):
        chat_handler.process_user_message(user_input, st.session_state.selected_model_value)
    else:
        st.warning("Still processing your previous request...")

# Second phase: once a user message has been recorded and is_processing set,
# generate the assistant's reply on this rerun.
if st.session_state.get("is_processing", False) and st.session_state.get("last_processed_message"):
    chat_handler.process_ai_response(
        st.session_state.last_processed_message,
        st.session_state.selected_model_value,
    )
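# Assumed chat_handler contract (hypothetical, inferred from the flags above):
# process_user_message() appends the user turn to st.session_state.messages,
# stores it in last_processed_message, and sets is_processing = True;
# process_ai_response() generates the reply, appends the assistant turn, and
# clears is_processing.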
st.divider()
tab1, = st.tabs(["ℹ️ About"])

with tab1:
    st.header("ℹ️ About CosmicCat AI Assistant")
    st.markdown("""
    The CosmicCat AI Assistant is a sophisticated conversational AI with a cosmic theme.

    ### 🔧 Core Features
    - **Local AI processing** with Ollama models
    - **Persistent memory** using Redis
    - **Space-themed personality** for fun interactions
    - **HF Endpoint integration** for advanced capabilities

    ### 🌌 Cosmic Mode
    When enabled, the AI responds with space-themed language and metaphors.

    ### 🛠️ Technical Architecture
    - **Primary model**: HF Endpoint (advanced processing)
    - **Fallback model**: Ollama (local processing)
    - **Memory system**: Redis-based session management
    """)

    session_analytics.track_interaction("default_user", st.session_state.session_id, "about_page_view")
def on_session_end():
    session_analytics.end_session_tracking("default_user", st.session_state.session_id)


# Best-effort cleanup: atexit fires when the Streamlit process exits, not when
# an individual browser session ends, so this is only a coarse fallback.
atexit.register(on_session_end)