Spaces:
Running
on
Zero
Running
on
Zero
File size: 17,920 Bytes
d47a43d c5ed134 2252617 fa808e9 a578794 76eb3c8 c5ed134 a578794 c5ed134 a578794 c5ed134 fe49dd8 c5ed134 2252617 c5ed134 2252617 c5ed134 2252617 c5ed134 2252617 c5ed134 2252617 c5ed134 2252617 c5ed134 2252617 c5ed134 2252617 c5ed134 2252617 c5ed134 76eb3c8 9e416ee 2252617 9940952 c5ed134 9940952 c5ed134 5507602 c5ed134 9e416ee c5ed134 2252617 9e416ee c5ed134 2252617 9e416ee c5ed134 d20aee2 5507602 d20aee2 2252617 9e416ee 2252617 c5ed134 9e416ee 5507602 9e416ee 2252617 9e416ee 9940952 9e416ee ce583c7 9e416ee 2252617 9e416ee ce583c7 9e416ee 2252617 9e416ee 2252617 9e416ee ce583c7 9e416ee 76eb3c8 9e416ee 2252617 9e416ee fe49dd8 9e416ee fe49dd8 9e416ee 8934605 9e416ee 8934605 9e416ee 8934605 9e416ee 8934605 9e416ee 8934605 9e416ee 8934605 9e416ee fa808e9 8934605 2252617 fe49dd8 2252617 fe49dd8 2252617 fe49dd8 fa808e9 8934605 2252617 fe49dd8 2252617 c5ed134 fe49dd8 2252617 fe49dd8 2252617 88eac55 2252617 88eac55 9e416ee 9afc028 9e416ee 9afc028 9e416ee 9afc028 9e416ee 9afc028 9e416ee 9afc028 9e416ee 5507602 9e416ee 9afc028 9e416ee 9afc028 9e416ee 9afc028 fa808e9 9e416ee fa808e9 9e416ee fa808e9 9e416ee fa808e9 9e416ee fa808e9 9e416ee fa808e9 9e416ee fa808e9 9e416ee fa808e9 9e416ee fa808e9 9e416ee fa808e9 9e416ee 9afc028 9e416ee 9afc028 9e416ee 9afc028 fa808e9 9e416ee 5507602 9e416ee 5507602 9e416ee 5507602 9e416ee 5507602 9e416ee 5507602 9e416ee 5507602 9e416ee 5507602 fa808e9 9e416ee fa808e9 9e416ee fa808e9 9e416ee fa808e9 2252617 ce583c7 fa808e9 ce583c7 fe49dd8 9e416ee c5ed134 9e416ee ea6ba10 9e416ee fa808e9 9e416ee fa808e9 9e416ee 9afc028 9e416ee 8934605 fa808e9 9e416ee fa808e9 9e416ee fa808e9 7ee7b29 9e416ee 9afc028 9e416ee fa808e9 9e416ee fa808e9 c5ed134 84e36d5 fe49dd8 c5ed134 29f2127 c5ed134 fe49dd8 c5ed134 fe49dd8 2252617 fa808e9 2252617 fe49dd8 2252617 fa808e9 fe49dd8 c5ed134 fa808e9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 |
import gradio as gr
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import warnings
import traceback
import json
# Import spaces if available (for HuggingFace Spaces GPU optimization)
try:
    import spaces
    HAS_SPACES = True  # ZeroGPU decorator will be applied to the router below
except ImportError:
    HAS_SPACES = False  # running outside HF Spaces (e.g. locally); no GPU wrapper
warnings.filterwarnings('ignore')
# Global model variables — populated by load_model(), read by _generate_response()
model = None      # merged (base + PEFT) causal LM, or None until load succeeds
tokenizer = None  # HF tokenizer for Qwen/Qwen2.5-0.5B
device = None     # torch.device chosen at load time (cuda if available, else cpu)
def load_model():
    """Load the psychology-tuned model (Qwen2.5-0.5B base + PEFT adapter).

    Populates the module-level ``model``, ``tokenizer`` and ``device`` globals,
    merging the adapter into the base weights so inference needs no PEFT hooks.

    Returns:
        bool: True on success, False on any failure (details printed to console).
    """
    global model, tokenizer, device
    try:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"π Loading psychology model on {device}...")
        # Load tokenizer
        print("π Loading tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")
        if tokenizer.pad_token is None:
            # Qwen tokenizers ship without a pad token; reuse EOS for padding.
            tokenizer.pad_token = tokenizer.eos_token
        # BUG FIX: the four status strings below were split across two source
        # lines (mojibake-mangled emoji) — a syntax error in plain strings.
        # Joined onto single lines; glyphs kept as-is pending re-encoding.
        print("β Tokenizer loaded")
        # Load base model (fp16 on GPU for memory; fp32 on CPU for correctness)
        print("π§ Loading base model...")
        base_model = AutoModelForCausalLM.from_pretrained(
            "Qwen/Qwen2.5-0.5B",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None,
            trust_remote_code=True
        )
        print("β Base model loaded")
        # Load PEFT adapter and fold it into the base weights
        print("π§ Loading PEFT adapter...")
        model = PeftModel.from_pretrained(base_model, "phxdev/psychology-qwen-0.5b")
        model = model.merge_and_unload()
        print("β PEFT adapter merged")
        if not torch.cuda.is_available():
            # device_map was None on CPU, so move the merged model explicitly.
            model = model.to(device)
        print("β Psychology model ready!")
        return True
    except Exception as e:
        print(f"β Error loading model: {e}")
        traceback.print_exc()
        return False
def _generate_response(formatted_prompt: str, max_new_tokens: int = 200) -> str:
    """Run the merged psychology model on a formatted prompt and return cleaned text.

    Args:
        formatted_prompt: Fully framed prompt (friend-mode wrapper included).
        max_new_tokens: Generation budget for the reply.

    Returns:
        The model's continuation, stripped of the prompt echo and common
        role-marker artifacts; an error/fallback message on failure.
    """
    if model is None:
        return "β οΈ Model not loaded. Please refresh and try again."
    try:
        # Tokenize, capping the prompt so generation fits the context window.
        encoded = tokenizer(
            formatted_prompt,
            return_tensors="pt",
            truncation=True,
            max_length=800,
        ).to(device)
        # Friend-mode sampling settings — conversational and supportive.
        with torch.no_grad():
            generated = model.generate(
                **encoded,
                max_new_tokens=max_new_tokens,
                temperature=0.7,  # Balanced for natural conversation
                top_p=0.9,
                do_sample=True,
                repetition_penalty=1.1,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )
        decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
        # The decode includes the prompt; keep only the new continuation.
        reply = decoded[len(formatted_prompt):].strip()
        # Drop a wrapping pair of quotes the model sometimes adds.
        if reply.startswith('"') and reply.endswith('"'):
            reply = reply[1:-1]
        # Truncate at any hallucinated role/turn marker.
        markers = (
            "Their response", "Your response", "They said", "You said",
            "And their response", "And your response", "Person:", "Response:",
        )
        for marker in markers:
            if marker in reply:
                reply = reply.split(marker)[0].strip()
        if not reply:
            return "I'm here to help. Can you tell me more about what's going on?"
        return reply
    except Exception as e:
        return f"β οΈ Generation error: {str(e)}"
# PRIMARY FRIEND MODE FUNCTIONS
def friend_support_internal(message: str) -> str:
    """Main friend support function - natural conversation."""
    if not message.strip():
        # Nothing shared yet — invite the user to open up.
        return "Hey, what's on your mind?"
    prompt = (
        "You're talking to a close friend who has psychology training. "
        f"Your friend says: \"{message}\"\n\n"
        "As their supportive friend, you respond:"
    )
    return _generate_response(prompt, max_new_tokens=180)
def friend_trauma_support_internal(message: str) -> str:
    """Friend helping with trauma - conversational support."""
    if not message.strip():
        return "I'm here for you. What's been going on?"
    prompt = (
        f"Your friend who's been through trauma tells you: \"{message}\"\n\n"
        "As someone who cares about them and has some psychology knowledge, "
        "you respond with understanding and support:"
    )
    return _generate_response(prompt, max_new_tokens=200)
def friend_emotional_support_internal(message: str) -> str:
    """Friend helping process emotions."""
    if not message.strip():
        return "What are you feeling right now? I'm here to listen."
    prompt = (
        f"Your friend is struggling emotionally and tells you: \"{message}\"\n\n"
        "As their friend who understands emotions, you help them process "
        "what they're going through:"
    )
    return _generate_response(prompt, max_new_tokens=170)
def friend_situation_analysis_internal(message: str) -> str:
    """Friend helping analyze a difficult situation."""
    if not message.strip():
        return "Tell me what's happening. Let's figure this out together."
    prompt = (
        f"Your friend describes a difficult situation: \"{message}\"\n\n"
        "As their friend who's good at seeing patterns and understanding people, "
        "you help them make sense of what's going on:"
    )
    return _generate_response(prompt, max_new_tokens=190)
def friend_relationship_help_internal(message: str) -> str:
    """Friend giving relationship perspective."""
    if not message.strip():
        return "What's going on with your relationships? I'm here to help you think through it."
    prompt = (
        f"Your friend tells you about a relationship issue: \"{message}\"\n\n"
        "As their friend who understands relationship dynamics, "
        "you offer perspective and support:"
    )
    return _generate_response(prompt, max_new_tokens=180)
def friend_coping_help_internal(situation: str) -> str:
    """Friend suggesting coping strategies."""
    if not situation.strip():
        return "What are you dealing with? Let's brainstorm some ways to handle it."
    prompt = (
        f"Your friend is dealing with: \"{situation}\"\n\n"
        "As their supportive friend with knowledge about coping strategies, "
        "you offer practical help:"
    )
    return _generate_response(prompt, max_new_tokens=200)
# STATIC SUPPORT RESOURCES
def crisis_resources_internal() -> str:
    """Get immediate crisis intervention resources"""
    # Static directory of emergency/crisis hotlines (US-centric plus one
    # international link). Deliberately a constant — no model call, so it is
    # available even when the model fails to load.
    # NOTE(review): leading glyphs (π, β’) appear mojibake-mangled — likely
    # emoji/bullets in the original encoding; confirm against the deployed app.
    return """π **IMMEDIATE CRISIS RESOURCES**
**Emergency:** Call 911 or your local emergency number
**24/7 Crisis Support:**
β’ **988 Suicide & Crisis Lifeline**: Call or text 988 (US)
β’ **Crisis Text Line**: Text HOME to 741741 (US)
β’ **International**: https://iasp.info/resources/Crisis_Centres/
**Specialized Support:**
β’ National Domestic Violence Hotline: 1-800-799-7233
β’ SAMHSA National Helpline: 1-800-662-4357
β’ Trans Lifeline: 877-565-8860
β’ LGBT National Hotline: 1-888-843-4564
β’ Veterans Crisis Line: 1-800-273-8255
You are not alone. Help is available 24/7."""
def mindfulness_exercise_internal() -> str:
    """Guided mindfulness exercise"""
    # Static 5-4-3-2-1 grounding script plus box breathing — a constant so it
    # works without the model, like crisis_resources_internal().
    # NOTE(review): leading glyphs (π§, β’, β) appear mojibake-mangled emoji.
    return """π§ **5-MINUTE GROUNDING EXERCISE**
**Right now, notice:**
β’ **5 things you can SEE** (colors, shapes, objects)
β’ **4 things you can TOUCH** (chair, table, clothes, temperature)
β’ **3 things you can HEAR** (sounds around you)
β’ **2 things you can SMELL** (coffee, soap, air)
β’ **1 thing you can TASTE** (gum, coffee, or just your mouth)
**Box Breathing:**
β’ Inhale for 4 counts β Hold for 4 β Exhale for 4 β Hold for 4
β’ Repeat 4-6 times
**Grounding Phrase:** "I am here. I am safe. This moment will pass."
You've got this. One breath at a time."""
# BONUS BUSINESS ANALYSIS FUNCTIONS
def business_manipulation_analysis_internal(content: str) -> str:
    """Analyze business/marketing manipulation tactics - BONUS FEATURE."""
    if not content.strip():
        # Need something concrete to analyze.
        return "Provide some marketing content or website copy to analyze."
    prompt = (
        "As a friend with psychology knowledge, analyze what this business "
        f"content is trying to do psychologically: \"{content}\"\n\n"
        "Explain the psychological tactics, manipulation techniques, "
        "and emotional triggers being used:"
    )
    return _generate_response(prompt, max_new_tokens=250)
def website_psychology_analysis_internal(content: str) -> str:
    """Analyze website psychological tactics - BONUS FEATURE."""
    if not content.strip():
        return "Describe the website content or user experience to analyze."
    prompt = (
        "As a friend who understands web psychology, help me understand "
        f"what's happening with this website experience: \"{content}\"\n\n"
        "What psychological techniques are they using? "
        "How should someone protect themselves?"
    )
    return _generate_response(prompt, max_new_tokens=280)
def competitive_psychology_analysis_internal(content: str) -> str:
    """Analyze competitor psychological tactics - BONUS FEATURE."""
    if not content.strip():
        return "Describe the competitor's content or approach to analyze."
    prompt = (
        "As a friend helping with business strategy, analyze the psychological "
        f"approach in this competitor content: \"{content}\"\n\n"
        "What persuasion techniques are they using? What cognitive biases are "
        "they exploiting? How effective is this approach?"
    )
    return _generate_response(prompt, max_new_tokens=300)
def psychology_mcp_router(function_name: str, parameters: str) -> str:
    """
    Friend-Mode Psychology Router with Business Analysis Bonus

    Dispatches an MCP-style call to one of the friend-support or business
    analysis helpers in this module.

    Args:
        function_name (str): Function to call
        parameters (str): JSON string with function parameters

    Returns:
        str: Response from the psychology-informed friend system, or a JSON
        error payload for an unknown function / malformed parameters.
    """
    try:
        # Parse parameters if provided; empty/blank input means "no parameters".
        if parameters.strip():
            params = json.loads(parameters)
        else:
            params = {}
        # PRIMARY FRIEND MODE FUNCTIONS
        if function_name == "friend_support":
            return friend_support_internal(params.get("message", ""))
        elif function_name == "friend_trauma_support":
            return friend_trauma_support_internal(params.get("message", ""))
        elif function_name == "friend_emotional_support":
            return friend_emotional_support_internal(params.get("message", ""))
        elif function_name == "friend_situation_analysis":
            return friend_situation_analysis_internal(params.get("message", ""))
        elif function_name == "friend_relationship_help":
            return friend_relationship_help_internal(params.get("message", ""))
        elif function_name == "friend_coping_help":
            # Note: this one takes "situation", not "message".
            return friend_coping_help_internal(params.get("situation", ""))
        elif function_name == "crisis_resources":
            return crisis_resources_internal()
        elif function_name == "mindfulness_exercise":
            return mindfulness_exercise_internal()
        # BONUS BUSINESS ANALYSIS FUNCTIONS
        elif function_name == "business_manipulation_analysis":
            return business_manipulation_analysis_internal(params.get("content", ""))
        elif function_name == "website_psychology_analysis":
            return website_psychology_analysis_internal(params.get("content", ""))
        elif function_name == "competitive_psychology_analysis":
            return competitive_psychology_analysis_internal(params.get("content", ""))
        elif function_name == "list_functions":
            # Self-describing catalogue of every routable function.
            return json.dumps({
                "primary_friend_functions": {
                    "functions": [
                        "friend_support", "friend_trauma_support", "friend_emotional_support",
                        "friend_situation_analysis", "friend_relationship_help", "friend_coping_help",
                        "crisis_resources", "mindfulness_exercise"
                    ],
                    # BUG FIX: was `{"message": "text"} or {"situation": "text"}`,
                    # which always evaluates to the first (truthy) dict and
                    # silently dropped the "situation" alternative from the
                    # advertised schema.
                    "parameters": {"message": "text", "situation": "text"},
                    "purpose": "Psychology-informed friend for personal support and understanding"
                },
                "bonus_business_functions": {
                    "functions": [
                        "business_manipulation_analysis", "website_psychology_analysis",
                        "competitive_psychology_analysis"
                    ],
                    "parameters": {"content": "text"},
                    "purpose": "Analyze business/marketing psychological tactics (bonus feature)"
                },
                "usage_examples": {
                    "friend_mode": '{"message": "I feel overwhelmed by online shopping pressure"}',
                    "business_bonus": '{"content": "Buy now! Limited time offer! Only 3 left!"}'
                }
            })
        else:
            # Unknown function: echo the full catalogue so callers can recover.
            return json.dumps({
                "error": f"Unknown function: {function_name}",
                "primary_functions": [
                    "friend_support", "friend_trauma_support", "friend_emotional_support",
                    "friend_situation_analysis", "friend_relationship_help", "friend_coping_help",
                    "crisis_resources", "mindfulness_exercise"
                ],
                "bonus_functions": [
                    "business_manipulation_analysis", "website_psychology_analysis",
                    "competitive_psychology_analysis"
                ]
            })
    except json.JSONDecodeError:
        # Malformed `parameters` string — show valid examples instead of crashing.
        return json.dumps({
            "error": "Invalid JSON in parameters",
            "friend_example": '{"message": "your situation here"}',
            "business_example": '{"content": "marketing content to analyze"}'
        })
    except Exception as e:
        # Last-resort guard: always return JSON, never raise to the UI.
        return json.dumps({
            "error": f"Friend psychology system error: {str(e)}"
        })
# Apply GPU optimization if available
if HAS_SPACES:
    # Wrap the router so HF Spaces (ZeroGPU) allocates a GPU per call.
    psychology_mcp_router = spaces.GPU(psychology_mcp_router)
# Initialize model at import time so the Space is ready when the UI comes up.
print("π Starting Psychology-Informed Friend Agent...")
model_loaded = load_model()
if model_loaded:
    # BUG FIX: this status string was split across two source lines
    # (mojibake-mangled emoji) — a syntax error in a plain string literal.
    print("β Creating friend-mode MCP interface...")
    # Create the friend-focused MCP interface
    demo = gr.Interface(
        fn=psychology_mcp_router,
        inputs=[
            gr.Textbox(
                label="Function Name",
                placeholder="friend_support",
                info="FRIEND MODE: friend_support, friend_trauma_support, friend_emotional_support, friend_situation_analysis, friend_relationship_help, friend_coping_help | BONUS: business_manipulation_analysis, website_psychology_analysis"
            ),
            gr.Textbox(
                label="Parameters (JSON)",
                placeholder='{"message": "I feel overwhelmed by shopping websites"}',
                lines=4,
                info='Friend Mode: {"message": "what\'s happening"} | Business Bonus: {"content": "marketing content to analyze"}'
            )
        ],
        outputs=gr.Textbox(
            label="Friend Response",
            lines=12
        ),
        title="π€ Psychology-Informed Friend - Personal Support + Business Insights",
        description="""**β οΈ SAFETY NOTICE: This is NOT medical advice. For crisis situations, call 911 or 988.**
*This model may not be perfect, it may be clinical when we need a friend. But the alternative is silence. And that helps no one.*
**PRIMARY PURPOSE: Your Psychology-Informed Friend**
A supportive friend with psychology training who helps you understand what's happening in your life, process emotions, and navigate difficult situations.
**BONUS FEATURE: Business Psychology Analysis**
Also happens to be great at analyzing marketing manipulation, website psychology tactics, and competitive persuasion techniques.
**How to use**: Talk to it like a friend who understands psychology. Frame your questions conversationally.""",
        examples=[
            ["friend_support", '{"message": "I feel manipulated by shopping websites with their countdown timers"}'],
            ["friend_trauma_support", '{"message": "I have PTSD from medical situations and need someone to understand"}'],
            ["friend_emotional_support", '{"message": "I feel angry and helpless about things I cannot control"}'],
            ["friend_situation_analysis", '{"message": "I keep getting pressured by sales tactics and want to understand why"}'],
            ["business_manipulation_analysis", '{"content": "Try Walmart+ for FREE! Order within 11 hr 59 min. Only 5 left!"}'],
            ["website_psychology_analysis", '{"content": "Countdown timers, scarcity messages, and subscription pressure at checkout"}'],
            ["crisis_resources", "{}"],
            ["list_functions", "{}"]
        ]
    )
    if __name__ == "__main__":
        # mcp_server=True exposes the interface as an MCP tool endpoint.
        demo.launch(
            share=True,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
            mcp_server=True
        )
else:
    print("β Failed to load psychology model")
    def error_response(function_name, parameters):
        # Fallback handler shown when model loading failed at startup.
        return "β οΈ Model loading error. Please check console logs and try restarting."
    demo = gr.Interface(
        fn=error_response,
        inputs=["text", "text"],
        outputs="text",
        title="β οΈ Personal Agent - Loading Error"
    )
    if __name__ == "__main__":
        demo.launch(share=True, mcp_server=True)