|
import streamlit as st |
|
import time |
|
import os |
|
import sys |
|
import json |
|
import asyncio |
|
from datetime import datetime |
|
from pathlib import Path |
|
sys.path.append(str(Path(__file__).parent)) |
|
|
|
from utils.config import config |
|
from core.llm import send_to_ollama, send_to_hf |
|
from core.session import session_manager |
|
from core.memory import check_redis_health |
|
from core.coordinator import coordinator |
|
import logging |
|
|
|
|
|
# Configure root logging once at import time; basicConfig is a no-op if the
# root logger already has handlers.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Page chrome must be set before any other Streamlit call renders output.
st.set_page_config(
    page_title="AI Life Coach",
    page_icon="π§ ",
    layout="wide",
)
|
|
|
|
|
# One-time initialisation of per-browser-session state keys.
_SESSION_DEFAULTS = {
    "messages": [],                # chat transcript rendered in the UI
    "last_error": "",              # most recent error message, if any
    "is_processing": False,        # guards against concurrent submissions
    "hf_expert_requested": False,  # set when the user asks for HF analysis
}
for _key, _value in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _value

# Editable Ollama URL: fall back to a previously stored value, then a default.
if "ngrok_url_temp" not in st.session_state:
    st.session_state.ngrok_url_temp = st.session_state.get(
        "ngrok_url", "https://7bcc180dffd1.ngrok-free.app"
    )
|
|
|
|
|
# Sidebar: model selection, Ollama endpoint configuration, and history tools.
with st.sidebar:
    st.title("AI Life Coach π§ ")
    st.markdown("Your personal AI-powered life development assistant")

    # Display name -> Ollama model tag.
    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest"
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0
    )
    st.session_state.selected_model = model_options[selected_model_name]

    st.subheader("Ollama Configuration")
    ngrok_url_input = st.text_input(
        "Ollama Server URL",
        value=st.session_state.ngrok_url_temp,
        help="Enter your ngrok URL",
        key="ngrok_url_input"
    )

    # Persist edits to the URL field into session state.
    if ngrok_url_input != st.session_state.ngrok_url_temp:
        st.session_state.ngrok_url_temp = ngrok_url_input
        # FIX: this success message was a string literal broken across two
        # lines (mojibake) — a syntax error; restored as one line.
        st.success("✅ URL updated!")

    if st.button("π‘ Test Connection"):
        try:
            import requests  # local import: only needed for this button
            # ngrok's free tier interposes a browser-warning page unless this
            # header is sent.
            headers = {
                "ngrok-skip-browser-warning": "true",
                "User-Agent": "AI-Life-Coach-Test"
            }
            with st.spinner("Testing connection..."):
                response = requests.get(
                    f"{ngrok_url_input}/api/tags",
                    headers=headers,
                    timeout=15
                )
            if response.status_code == 200:
                # FIX: restored a second line-broken success literal.
                st.success("✅ Connection successful!")
            else:
                st.error(f"β Failed: {response.status_code}")
        except Exception as e:
            st.error(f"β Error: {str(e)[:50]}...")

    st.subheader("Conversation History")
    if st.button("ποΈ Clear History"):
        st.session_state.messages = []
        st.success("History cleared!")

    # Quick per-role message count for the current transcript.
    if st.session_state.messages:
        user_msgs = len([m for m in st.session_state.messages if m["role"] == "user"])
        ai_msgs = len([m for m in st.session_state.messages if m["role"] == "assistant"])
        st.caption(f"π¬ {user_msgs} user, {ai_msgs} AI messages")
|
|
|
|
|
# Read-only status panel for the three backing services.
with st.expander("π System Monitor", expanded=False):
    st.subheader("π Status")

    # Ollama status — the monitor module is optional, so degrade gracefully.
    try:
        from services.ollama_monitor import check_ollama_status
        ollama_status = check_ollama_status()
        if ollama_status.get("running"):
            st.success("π¦ Ollama: Running")
        else:
            st.warning("π¦ Ollama: Not running")
    # FIX: was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt.
    except Exception:
        st.info("π¦ Ollama: Unknown")

    # Hugging Face endpoint status — same optional-dependency pattern.
    try:
        from services.hf_endpoint_monitor import hf_monitor
        hf_status = hf_monitor.check_endpoint_status()
        if hf_status['available']:
            st.success("π€ HF: Available")
        else:
            st.warning("π€ HF: Not available")
    # FIX: narrowed from a bare `except:` for the same reason as above.
    except Exception:
        st.info("π€ HF: Unknown")

    # Redis health comes from core.memory and does not raise.
    if check_redis_health():
        st.success("πΎ Redis: Connected")
    else:
        st.error("πΎ Redis: Disconnected")
|
|
|
|
|
st.title("π§ AI Life Coach")
st.markdown("Ask me anything about personal development, goal setting, or life advice!")

# Replay the chat transcript stored in session state.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        if message.get("source") == "hf_expert":
            # HF-expert replies get a heading so they stand out from normal turns.
            st.markdown("### π€ HF Expert Analysis")
            st.markdown(message["content"])
        else:
            st.markdown(message["content"])
        if "timestamp" in message:
            st.caption(f"π {message['timestamp']}")

# FIX: `messages and len(messages) > 0` was redundant — a non-empty list is
# already truthy.
if st.session_state.messages:
    st.divider()
|
|
|
|
|
# Controls for triggering a one-shot "expert" analysis of the conversation.
with st.expander("π€ HF Expert Analysis", expanded=False):
    st.subheader("Deep Conversation Analysis")

    info_col, button_col = st.columns([3, 1])

    with info_col:
        st.markdown("""
        **HF Expert Features:**
        - Analyzes entire conversation history
        - Performs web research when needed
        - Provides deep insights and recommendations
        - Acts as expert consultant in your conversation
        """)

    with button_col:
        # The click result is bound to a name for readability; the button is
        # disabled while a previous request is still being processed.
        activate_clicked = st.button(
            "π§ Activate HF Expert",
            key="activate_hf_expert",
            help="Send conversation to HF endpoint for deep analysis",
            use_container_width=True,
            disabled=st.session_state.is_processing,
        )
        if activate_clicked:
            st.session_state.hf_expert_requested = True
|
|
|
|
|
# Run the HF expert analysis once the flag has been set by the panel above.
if st.session_state.get("hf_expert_requested", False):
    with st.spinner("π§ HF Expert analyzing conversation..."):
        try:
            # Server-side conversation history (not the UI transcript).
            user_session = session_manager.get_session("default_user")
            conversation_history = user_session.get("conversation", [])

            # Show the operator what is being sent: last 10 turns, truncated
            # to 100 characters each.
            with st.expander("π HF Expert Input", expanded=False):
                st.markdown("**Conversation History Sent to HF Expert:**")
                for i, msg in enumerate(conversation_history[-10:]):
                    st.markdown(f"**{msg['role'].capitalize()}:** {msg['content'][:100]}{'...' if len(msg['content']) > 100 else ''}")

            hf_analysis = coordinator.manual_hf_analysis(
                "default_user",
                conversation_history
            )

            if hf_analysis:
                with st.chat_message("assistant"):
                    st.markdown("### π€ HF Expert Analysis")
                    st.markdown(hf_analysis)

                    # Ask the coordinator whether follow-up web research is
                    # warranted for this conversation.
                    research_needs = coordinator.determine_web_search_needs(conversation_history)
                    if research_needs["needs_search"]:
                        st.info(f"π **Research Needed:** {research_needs['reasoning']}")
                        if st.button("π Perform Web Research", key="web_research_button"):
                            with st.spinner("π Searching for current information..."):
                                # FIX: this success message was a string
                                # literal broken across two lines (syntax
                                # error); restored as one line.
                                st.success("✅ Web research completed!")

                # Persist the expert reply into the UI transcript.
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": hf_analysis,
                    "timestamp": datetime.now().strftime("%H:%M:%S"),
                    "source": "hf_expert",
                    "research_needs": research_needs
                })
        except Exception as e:
            st.error(f"β HF Expert analysis failed: {str(e)}")
        finally:
            # FIX: the flag was reset separately in both the success and the
            # error paths; `finally` guarantees it exactly once either way.
            st.session_state.hf_expert_requested = False
|
|
|
|
|
# Main chat loop: take one user turn, try Ollama, fall back to HF, persist.
user_input = st.chat_input("Type your message here...", disabled=st.session_state.is_processing)

if user_input and not st.session_state.is_processing:
    st.session_state.is_processing = True

    # Echo the user's turn and record it in the UI transcript.
    with st.chat_message("user"):
        st.markdown(user_input)
    st.session_state.messages.append({
        "role": "user",
        "content": user_input,
        "timestamp": datetime.now().strftime("%H:%M:%S")
    })

    with st.chat_message("assistant"):
        response_placeholder = st.empty()
        status_placeholder = st.empty()

        try:
            # Server-side history: last 5 turns as model context. Slicing
            # copies, so appending below leaves `conversation` untouched.
            user_session = session_manager.get_session("default_user")
            conversation = user_session.get("conversation", [])
            conversation_history = conversation[-5:]
            conversation_history.append({"role": "user", "content": user_input})

            status_placeholder.info("π¦ Contacting Ollama...")
            ai_response = None

            try:
                ai_response = send_to_ollama(
                    user_input,
                    conversation_history,
                    st.session_state.ngrok_url_temp,
                    st.session_state.selected_model
                )
                if ai_response:
                    response_placeholder.markdown(ai_response)
                    # FIX: restored a success literal that was broken across
                    # two lines (syntax error).
                    status_placeholder.success("✅ Response received!")
                else:
                    status_placeholder.warning("β οΈ Empty response from Ollama")
            except Exception as ollama_error:
                status_placeholder.error(f"β Ollama error: {str(ollama_error)[:50]}...")

            # FIX: fallback is now explicitly guarded so HF is only consulted
            # when Ollama produced no usable answer (exception OR empty reply),
            # never after a successful Ollama response.
            if not ai_response and config.hf_token:
                status_placeholder.info("π Trying Hugging Face...")
                try:
                    ai_response = send_to_hf(user_input, conversation_history)
                    if ai_response:
                        response_placeholder.markdown(ai_response)
                        # FIX: second line-broken success literal restored.
                        status_placeholder.success("✅ HF response received!")
                    else:
                        status_placeholder.error("β No response from HF")
                except Exception as hf_error:
                    status_placeholder.error(f"β HF also failed: {str(hf_error)[:50]}...")

            if ai_response:
                # Persist the exchange to the server-side session store.
                conversation.append({"role": "user", "content": user_input})
                conversation.append({"role": "assistant", "content": ai_response})
                user_session["conversation"] = conversation
                session_manager.update_session("default_user", user_session)

                st.session_state.messages.append({
                    "role": "assistant",
                    "content": ai_response,
                    "timestamp": datetime.now().strftime("%H:%M:%S")
                })
            else:
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": "Sorry, I couldn't process your request. Please try again.",
                    "timestamp": datetime.now().strftime("%H:%M:%S")
                })

        except Exception as e:
            error_msg = f"System error: {str(e)}"
            response_placeholder.error(error_msg)
            st.session_state.messages.append({
                "role": "assistant",
                "content": error_msg,
                "timestamp": datetime.now().strftime("%H:%M:%S")
            })
        finally:
            st.session_state.is_processing = False
            time.sleep(0.5)  # brief pause so the final status is visible
            # FIX: st.experimental_rerun() is deprecated/removed in the modern
            # Streamlit this file targets (it already uses st.chat_input);
            # st.rerun() is the supported replacement.
            st.rerun()
|
|