# qwen-psychology / app.py
import gradio as gr
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import warnings
import traceback
import json
# Import spaces if available (for HuggingFace Spaces GPU optimization)
try:
import spaces
HAS_SPACES = True
except ImportError:
HAS_SPACES = False
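# On HuggingFace ZeroGPU Spaces the `spaces` package provides the spaces.GPU
# decorator; when available, it is applied to the router function further below.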
warnings.filterwarnings('ignore')
# Global model variables
model = None
tokenizer = None
device = None
def load_model():
"""Load the psychology-tuned model"""
global model, tokenizer, device
try:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"πŸš€ Loading psychology model on {device}...")
# Load tokenizer
print("πŸ“ Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
print("βœ… Tokenizer loaded")
# Load base model
print("🧠 Loading base model...")
base_model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-0.5B",
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
device_map="auto" if torch.cuda.is_available() else None,
trust_remote_code=True
)
print("βœ… Base model loaded")
# Load PEFT adapter
print("πŸ”§ Loading PEFT adapter...")
model = PeftModel.from_pretrained(base_model, "phxdev/psychology-qwen-0.5b")
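        # Merging the LoRA weights into the base model removes the PEFT wrapper,
        # so inference below runs on a single plain transformer.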
model = model.merge_and_unload()
print("βœ… PEFT adapter merged")
if not torch.cuda.is_available():
model = model.to(device)
print("βœ… Psychology model ready!")
return True
except Exception as e:
print(f"❌ Error loading model: {e}")
traceback.print_exc()
return False
def _generate_response(formatted_prompt: str, max_new_tokens: int = 200) -> str:
"""Core generation function optimized for friend-mode conversations"""
if model is None:
return "⚠️ Model not loaded. Please refresh and try again."
try:
inputs = tokenizer(
formatted_prompt,
return_tensors="pt",
truncation=True,
max_length=800
).to(device)
# Friend-mode generation settings - conversational and supportive
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=max_new_tokens,
temperature=0.7, # Balanced for natural conversation
top_p=0.9,
do_sample=True,
repetition_penalty=1.1,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.eos_token_id,
)
full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
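        # Strip the echoed prompt by character length; this assumes the decoded
        # prefix matches the original prompt string closely enough.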
generated_text = full_response[len(formatted_prompt):].strip()
# Clean up for natural conversation
if generated_text.startswith('"') and generated_text.endswith('"'):
generated_text = generated_text[1:-1]
stop_phrases = [
"Their response", "Your response", "They said", "You said",
"And their response", "And your response", "Person:", "Response:"
]
for phrase in stop_phrases:
if phrase in generated_text:
generated_text = generated_text.split(phrase)[0].strip()
if not generated_text:
return "I'm here to help. Can you tell me more about what's going on?"
return generated_text
except Exception as e:
return f"⚠️ Generation error: {str(e)}"
# PRIMARY FRIEND MODE FUNCTIONS
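# The friend-mode wrappers below all follow the same pattern: wrap the user's
# message in a role-framing prompt and delegate to _generate_response. A
# simplified, illustrative call (assumes the model loaded successfully):
#   _generate_response('Your friend says: "Rough week." As their supportive friend, you respond:', max_new_tokens=60)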
def friend_support_internal(message: str) -> str:
"""Main friend support function - natural conversation"""
if not message.strip():
return "Hey, what's on your mind?"
formatted_prompt = f"You're talking to a close friend who has psychology training. Your friend says: \"{message}\"\n\nAs their supportive friend, you respond:"
return _generate_response(formatted_prompt, max_new_tokens=180)
def friend_trauma_support_internal(message: str) -> str:
"""Friend helping with trauma - conversational support"""
if not message.strip():
return "I'm here for you. What's been going on?"
formatted_prompt = f"Your friend who's been through trauma tells you: \"{message}\"\n\nAs someone who cares about them and has some psychology knowledge, you respond with understanding and support:"
return _generate_response(formatted_prompt, max_new_tokens=200)
def friend_emotional_support_internal(message: str) -> str:
"""Friend helping process emotions"""
if not message.strip():
return "What are you feeling right now? I'm here to listen."
formatted_prompt = f"Your friend is struggling emotionally and tells you: \"{message}\"\n\nAs their friend who understands emotions, you help them process what they're going through:"
return _generate_response(formatted_prompt, max_new_tokens=170)
def friend_situation_analysis_internal(message: str) -> str:
"""Friend helping analyze a difficult situation"""
if not message.strip():
return "Tell me what's happening. Let's figure this out together."
formatted_prompt = f"Your friend describes a difficult situation: \"{message}\"\n\nAs their friend who's good at seeing patterns and understanding people, you help them make sense of what's going on:"
return _generate_response(formatted_prompt, max_new_tokens=190)
def friend_relationship_help_internal(message: str) -> str:
"""Friend giving relationship perspective"""
if not message.strip():
return "What's going on with your relationships? I'm here to help you think through it."
formatted_prompt = f"Your friend tells you about a relationship issue: \"{message}\"\n\nAs their friend who understands relationship dynamics, you offer perspective and support:"
return _generate_response(formatted_prompt, max_new_tokens=180)
def friend_coping_help_internal(situation: str) -> str:
"""Friend suggesting coping strategies"""
if not situation.strip():
return "What are you dealing with? Let's brainstorm some ways to handle it."
formatted_prompt = f"Your friend is dealing with: \"{situation}\"\n\nAs their supportive friend with knowledge about coping strategies, you offer practical help:"
return _generate_response(formatted_prompt, max_new_tokens=200)
# STATIC SUPPORT RESOURCES
def crisis_resources_internal() -> str:
"""Get immediate crisis intervention resources"""
return """πŸ†˜ **IMMEDIATE CRISIS RESOURCES**
**Emergency:** Call 911 or your local emergency number
**24/7 Crisis Support:**
β€’ **988 Suicide & Crisis Lifeline**: Call or text 988 (US)
β€’ **Crisis Text Line**: Text HOME to 741741 (US)
β€’ **International**: https://iasp.info/resources/Crisis_Centres/
**Specialized Support:**
β€’ National Domestic Violence Hotline: 1-800-799-7233
β€’ SAMHSA National Helpline: 1-800-662-4357
β€’ Trans Lifeline: 877-565-8860
β€’ LGBT National Hotline: 1-888-843-4564
β€’ Veterans Crisis Line: 1-800-273-8255
You are not alone. Help is available 24/7."""
def mindfulness_exercise_internal() -> str:
"""Guided mindfulness exercise"""
return """🧘 **5-MINUTE GROUNDING EXERCISE**
**Right now, notice:**
β€’ **5 things you can SEE** (colors, shapes, objects)
β€’ **4 things you can TOUCH** (chair, table, clothes, temperature)
β€’ **3 things you can HEAR** (sounds around you)
β€’ **2 things you can SMELL** (coffee, soap, air)
β€’ **1 thing you can TASTE** (gum, coffee, or just your mouth)
**Box Breathing:**
β€’ Inhale for 4 counts β†’ Hold for 4 β†’ Exhale for 4 β†’ Hold for 4
β€’ Repeat 4-6 times
**Grounding Phrase:** "I am here. I am safe. This moment will pass."
You've got this. One breath at a time."""
# BONUS BUSINESS ANALYSIS FUNCTIONS
def business_manipulation_analysis_internal(content: str) -> str:
"""Analyze business/marketing manipulation tactics - BONUS FEATURE"""
if not content.strip():
return "Provide some marketing content or website copy to analyze."
formatted_prompt = f"As a friend with psychology knowledge, analyze what this business content is trying to do psychologically: \"{content}\"\n\nExplain the psychological tactics, manipulation techniques, and emotional triggers being used:"
return _generate_response(formatted_prompt, max_new_tokens=250)
def website_psychology_analysis_internal(content: str) -> str:
"""Analyze website psychological tactics - BONUS FEATURE"""
if not content.strip():
return "Describe the website content or user experience to analyze."
formatted_prompt = f"As a friend who understands web psychology, help me understand what's happening with this website experience: \"{content}\"\n\nWhat psychological techniques are they using? How should someone protect themselves?"
return _generate_response(formatted_prompt, max_new_tokens=280)
def competitive_psychology_analysis_internal(content: str) -> str:
"""Analyze competitor psychological tactics - BONUS FEATURE"""
if not content.strip():
return "Describe the competitor's content or approach to analyze."
formatted_prompt = f"As a friend helping with business strategy, analyze the psychological approach in this competitor content: \"{content}\"\n\nWhat persuasion techniques are they using? What cognitive biases are they exploiting? How effective is this approach?"
return _generate_response(formatted_prompt, max_new_tokens=300)
def psychology_mcp_router(function_name: str, parameters: str) -> str:
"""
Friend-Mode Psychology Router with Business Analysis Bonus
Args:
function_name (str): Function to call
parameters (str): JSON string with function parameters
Returns:
str: Response from the psychology-informed friend system
"""
try:
# Parse parameters if provided
if parameters.strip():
params = json.loads(parameters)
else:
params = {}
# PRIMARY FRIEND MODE FUNCTIONS
if function_name == "friend_support":
return friend_support_internal(params.get("message", ""))
elif function_name == "friend_trauma_support":
return friend_trauma_support_internal(params.get("message", ""))
elif function_name == "friend_emotional_support":
return friend_emotional_support_internal(params.get("message", ""))
elif function_name == "friend_situation_analysis":
return friend_situation_analysis_internal(params.get("message", ""))
elif function_name == "friend_relationship_help":
return friend_relationship_help_internal(params.get("message", ""))
elif function_name == "friend_coping_help":
return friend_coping_help_internal(params.get("situation", ""))
elif function_name == "crisis_resources":
return crisis_resources_internal()
elif function_name == "mindfulness_exercise":
return mindfulness_exercise_internal()
# BONUS BUSINESS ANALYSIS FUNCTIONS
elif function_name == "business_manipulation_analysis":
return business_manipulation_analysis_internal(params.get("content", ""))
elif function_name == "website_psychology_analysis":
return website_psychology_analysis_internal(params.get("content", ""))
elif function_name == "competitive_psychology_analysis":
return competitive_psychology_analysis_internal(params.get("content", ""))
elif function_name == "list_functions":
return json.dumps({
"primary_friend_functions": {
"functions": [
"friend_support", "friend_trauma_support", "friend_emotional_support",
"friend_situation_analysis", "friend_relationship_help", "friend_coping_help",
"crisis_resources", "mindfulness_exercise"
],
"parameters": {"message": "text"} or {"situation": "text"},
"purpose": "Psychology-informed friend for personal support and understanding"
},
"bonus_business_functions": {
"functions": [
"business_manipulation_analysis", "website_psychology_analysis",
"competitive_psychology_analysis"
],
"parameters": {"content": "text"},
"purpose": "Analyze business/marketing psychological tactics (bonus feature)"
},
"usage_examples": {
"friend_mode": '{"message": "I feel overwhelmed by online shopping pressure"}',
"business_bonus": '{"content": "Buy now! Limited time offer! Only 3 left!"}'
}
})
else:
return json.dumps({
"error": f"Unknown function: {function_name}",
"primary_functions": [
"friend_support", "friend_trauma_support", "friend_emotional_support",
"friend_situation_analysis", "friend_relationship_help", "friend_coping_help",
"crisis_resources", "mindfulness_exercise"
],
"bonus_functions": [
"business_manipulation_analysis", "website_psychology_analysis",
"competitive_psychology_analysis"
]
})
except json.JSONDecodeError:
return json.dumps({
"error": "Invalid JSON in parameters",
"friend_example": '{"message": "your situation here"}',
"business_example": '{"content": "marketing content to analyze"}'
})
except Exception as e:
return json.dumps({
"error": f"Friend psychology system error: {str(e)}"
})
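# Example router calls (illustrative only; actual output depends on the loaded model):
#   psychology_mcp_router("friend_support", '{"message": "I had a rough day at work"}')
#   psychology_mcp_router("friend_coping_help", '{"situation": "exam stress"}')
#   psychology_mcp_router("list_functions", "")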
# Apply GPU optimization if available
if HAS_SPACES:
psychology_mcp_router = spaces.GPU(psychology_mcp_router)
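# Wrapping here, before the Gradio Interface below captures the function,
# ensures the GPU-decorated version is the one Gradio actually calls.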
# Initialize model
print("πŸš€ Starting Psychology-Informed Friend Agent...")
model_loaded = load_model()
if model_loaded:
print("βœ… Creating friend-mode MCP interface...")
# Create the friend-focused MCP interface
demo = gr.Interface(
fn=psychology_mcp_router,
inputs=[
gr.Textbox(
label="Function Name",
placeholder="friend_support",
info="FRIEND MODE: friend_support, friend_trauma_support, friend_emotional_support, friend_situation_analysis, friend_relationship_help, friend_coping_help | BONUS: business_manipulation_analysis, website_psychology_analysis"
),
gr.Textbox(
label="Parameters (JSON)",
placeholder='{"message": "I feel overwhelmed by shopping websites"}',
lines=4,
info='Friend Mode: {"message": "what\'s happening"} | Business Bonus: {"content": "marketing content to analyze"}'
)
],
outputs=gr.Textbox(
label="Friend Response",
lines=12
),
title="🀝 Psychology-Informed Friend - Personal Support + Business Insights",
description="""**⚠️ SAFETY NOTICE: This is NOT medical advice. For crisis situations, call 911 or 988.**
*This model may not be perfect, it may be clinical when we need a friend. But the alternative is silence. And that helps no one.*
**PRIMARY PURPOSE: Your Psychology-Informed Friend**
A supportive friend with psychology training who helps you understand what's happening in your life, process emotions, and navigate difficult situations.
**BONUS FEATURE: Business Psychology Analysis**
Also happens to be great at analyzing marketing manipulation, website psychology tactics, and competitive persuasion techniques.
**How to use**: Talk to it like a friend who understands psychology. Frame your questions conversationally.""",
examples=[
["friend_support", '{"message": "I feel manipulated by shopping websites with their countdown timers"}'],
["friend_trauma_support", '{"message": "I have PTSD from medical situations and need someone to understand"}'],
["friend_emotional_support", '{"message": "I feel angry and helpless about things I cannot control"}'],
["friend_situation_analysis", '{"message": "I keep getting pressured by sales tactics and want to understand why"}'],
["business_manipulation_analysis", '{"content": "Try Walmart+ for FREE! Order within 11 hr 59 min. Only 5 left!"}'],
["website_psychology_analysis", '{"content": "Countdown timers, scarcity messages, and subscription pressure at checkout"}'],
["crisis_resources", "{}"],
["list_functions", "{}"]
]
)
if __name__ == "__main__":
demo.launch(
share=True,
server_name="0.0.0.0",
server_port=7860,
show_error=True,
mcp_server=True
)
else:
print("❌ Failed to load psychology model")
def error_response(function_name, parameters):
return "⚠️ Model loading error. Please check console logs and try restarting."
demo = gr.Interface(
fn=error_response,
inputs=["text", "text"],
outputs="text",
title="⚠️ Personal Agent - Loading Error"
)
if __name__ == "__main__":
demo.launch(share=True, mcp_server=True)