import streamlit as st
import datetime
import os
import json
import random
import gspread
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from oauth2client.service_account import ServiceAccountCredentials
# --- Google Sheets Setup ---
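# Credentials are read from environment variables (secrets): TYPE, PROJECT_ID,
# PRIVATE_KEY_ID, PRIVATE_KEY, CLIENT_EMAIL, CLIENT_ID, AUTH_URI, TOKEN_URI,
# AUTH_PROVIDER_X509_CERT_URL, CLIENT_X509_CERT_URL.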
def init_gsheet():
    scope = ['https://spreadsheets.google.com/feeds',
             'https://www.googleapis.com/auth/drive']
    # Get the service account info from environment variables (secrets)
    service_account_info = {
        "type": os.environ.get("TYPE"),
        "project_id": os.environ.get("PROJECT_ID"),
        "private_key_id": os.environ.get("PRIVATE_KEY_ID"),
        # Secrets store the key with literal "\n" sequences; restore real newlines
        "private_key": os.environ.get("PRIVATE_KEY", "").replace('\\n', '\n'),
        "client_email": os.environ.get("CLIENT_EMAIL"),
        "client_id": os.environ.get("CLIENT_ID"),
        "auth_uri": os.environ.get("AUTH_URI"),
        "token_uri": os.environ.get("TOKEN_URI"),
        "auth_provider_x509_cert_url": os.environ.get("AUTH_PROVIDER_X509_CERT_URL"),
        "client_x509_cert_url": os.environ.get("CLIENT_X509_CERT_URL")
    }
    try:
        creds = ServiceAccountCredentials.from_json_keyfile_dict(service_account_info, scope)
        client = gspread.authorize(creds)
        # The "mood_log" spreadsheet must be shared with CLIENT_EMAIL for open() to succeed
        sheet = client.open("mood_log").sheet1
        return sheet
    except Exception as e:
        st.error(f"Google Sheets Error: {e}")
        return None
# --- Load Hugging Face DialoGPT ---
MODEL_NAME = "microsoft/DialoGPT-medium"
@st.cache_resource  # cache the tokenizer/model so Streamlit doesn't reload them on every rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_auth_token=False)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, use_auth_token=False)
    return tokenizer, model
tokenizer, model = load_model()
# --- Streamlit UI ---
st.set_page_config(page_title="Mental Health Bot", page_icon="🧠")
st.title("🧠 Mental Health Buddy")
st.markdown("Talk to me about how you're feeling. I'm here to listen πŸ’™")
# Initialize session state
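# st.session_state persists across Streamlit reruns within a user session, so the
# chat-history tensor, turn counter, and transcript survive between messages.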
if "chat_history_ids" not in st.session_state:
st.session_state.chat_history_ids = None
st.session_state.step = 0
st.session_state.conversation = []
user_input = st.text_input("How are you feeling today?", "")
if user_input:
    # Encode the new user message, ending with the EOS token
    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
    # Combine with the previous conversation if one exists
    bot_input_ids = (
        torch.cat([st.session_state.chat_history_ids, input_ids], dim=-1)
        if st.session_state.step > 0 else input_ids
    )
    # Generate a response
    chat_history_ids = model.generate(
        bot_input_ids,
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=100,
        top_p=0.7,
        temperature=0.9,  # Slightly higher temperature for more varied responses
    )
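    # Decode only the newly generated tokens (everything after the prompt)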
    response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
    # Post-process to make responses more empathetic
    empathetic_responses = [
        "I hear you. Would you like to talk more about that?",
        "That sounds difficult. I'm here to listen.",
        "I understand. Feelings like this can be challenging.",
        "Thank you for sharing that with me. How can I support you?",
        "I appreciate you opening up about this. Would you like to explore this feeling more?"
    ]
    # On every other turn, fall back to a canned empathetic reply when the model's answer is very short
    if st.session_state.step % 2 == 0 and len(response.split()) < 10:
        response = random.choice(empathetic_responses)
    # Display conversation
    st.markdown(f"**You:** {user_input}")
    st.success(f"**Buddy:** {response}")
    # Log to Google Sheet
    sheet = init_gsheet()
    if sheet:
        try:
            now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            sheet.append_row([now, user_input, response])
        except Exception as e:
            st.warning(f"⚠️ Could not save to Google Sheet: {e}")
    # Update session
    st.session_state.chat_history_ids = chat_history_ids
    st.session_state.step += 1
    st.session_state.conversation.append((user_input, response))
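# Optional: a minimal sketch for showing the running transcript that the app already
# stores in st.session_state.conversation as (user_text, bot_text) tuples.
if st.session_state.conversation:
    st.markdown("---")
    st.markdown("**Conversation so far:**")
    for past_user, past_bot in st.session_state.conversation:
        st.markdown(f"- **You:** {past_user}")
        st.markdown(f"- **Buddy:** {past_bot}")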