# geethaAICoach12 / app.py
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from simple_salesforce import Salesforce
import os
from datetime import datetime
import logging
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Salesforce credentials (read from environment variables; never hard-code credential values)
SF_USERNAME = os.getenv('SF_USERNAME')
SF_PASSWORD = os.getenv('SF_PASSWORD')
SF_SECURITY_TOKEN = os.getenv('SF_SECURITY_TOKEN')
SF_DOMAIN = os.getenv('SF_DOMAIN', 'login') # Default to 'login' if not set
HUGGINGFACE_API_KEY = os.getenv('HUGGINGFACE_API_KEY')
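# These values are typically supplied as deployment secrets (e.g. Hugging Face Space secrets)
# or exported in the shell before launch, for example:
#   export SF_USERNAME="user@example.com"
#   export SF_PASSWORD="********"
#   export SF_SECURITY_TOKEN="********"
#   export HUGGINGFACE_API_KEY="hf_********"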
# Validate that environment variables are set
if not all([SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN, HUGGINGFACE_API_KEY]):
logger.error("Missing required environment variables (SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN, or HUGGINGFACE_API_KEY)")
raise ValueError("Missing required environment variables. Please set SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN, and HUGGINGFACE_API_KEY.")
# Initialize Salesforce connection
try:
sf = Salesforce(
username=SF_USERNAME,
password=SF_PASSWORD,
security_token=SF_SECURITY_TOKEN,
domain=SF_DOMAIN
)
logger.info("Successfully connected to Salesforce")
except Exception as e:
logger.error(f"Failed to connect to Salesforce: {e}")
sf = None
# Initialize model and tokenizer
model_name = "distilgpt2"
try:
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir="./model_cache")
model = AutoModelForCausalLM.from_pretrained(model_name, cache_dir="./model_cache")
except Exception as e:
logger.error(f"Error loading model: {e}")
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Set pad_token to eos_token if not already set
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id
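# Note: GPT-2-family tokenizers such as distilgpt2 ship without a dedicated pad token, so the
# EOS token doubles as padding; without this, tokenizing with padding=True raises an error and
# generate() warns about a missing pad_token_id.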
# Simplified prompt template
PROMPT_TEMPLATE = """Role: {role}
Project: {project_id}
Milestones:
- {milestones_list}
Reflection: {reflection}
Generate:
Checklist:
- {milestones_list}
Suggestions:
{suggestions_list}
Quote:
{your_motivational_quote}"""
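# Example of a rendered prompt, with illustrative values borrowed from the defaults and
# placeholders used elsewhere in this file:
#   Role: Supervisor
#   Project: PROJ-123
#   Milestones:
#   - Foundation complete
#   - Framing started
#   Reflection: Weather: Sunny, 25°C
#   Generate:
#   Checklist:
#   - Foundation complete
#   - Framing started
#   Suggestions:
#   Ensure rain gear availability.
#   - Monitor weather updates.
#   Quote:
#   Your motivational quote here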
def fetch_salesforce_data_for_role(role):
"""Fetch Project__c records from Salesforce based on the selected role."""
if sf is None:
logger.error("Salesforce connection not initialized")
return None, None, None, "Error: Salesforce connection not initialized"
try:
# Query Project__c records where the Supervisor's role matches the selected role
query = f"""
SELECT Id, Name, Project_Name__c, Milestones__c, Weather_Log__c,
Supervisor__r.Role__c, Supervisor__r.Name, Supervisor__c
FROM Project__c
WHERE Supervisor__r.Role__c = '{role}' AND Milestones__c != null
LIMIT 1
"""
projects = sf.query(query)['records']
if not projects:
logger.info(f"No projects found for role: {role}")
return None, None, None, f"No projects found for role: {role}"
project = projects[0] # Take the first project
project_id = project['Name']
milestones = project['Milestones__c'] or 'No milestones'
reflection = f"Weather: {project['Weather_Log__c'] or 'Sunny, 25°C'}"
logger.info(f"Fetched project {project_id} for role {role}")
return project, project_id, milestones, reflection
except Exception as e:
logger.error(f"Error fetching Salesforce data: {e}")
return None, None, None, f"Error fetching Salesforce data: {e}"
def generate_outputs(role, project_id, milestones, reflection):
"""Generate checklist, suggestions, and quote using distilgpt2."""
# Input validation
if not all([role, project_id, milestones, reflection]):
logger.error("All fields are required")
return "Error: All fields are required.", "", ""
# Process milestones
milestones_list = "\n- ".join([m.strip() for m in milestones.split(",") if m.strip()])
if not milestones_list:
logger.error("At least one valid milestone is required")
return "Error: At least one valid milestone is required.", "", ""
# Generate suggestions based on reflection
suggestions_list = []
reflection_lower = reflection.lower()
if "delays" in reflection_lower:
suggestions_list.extend(["Adjust timelines for delays.", "Communicate with stakeholders."])
if "weather" in reflection_lower:
suggestions_list.extend(["Ensure rain gear availability.", "Monitor weather updates."])
if "equipment" in reflection_lower:
suggestions_list.extend(["Inspect equipment.", "Schedule maintenance."])
suggestions_list = "\n- ".join(suggestions_list) if suggestions_list else "No specific suggestions."
# Format prompt
prompt = PROMPT_TEMPLATE.format(
role=role,
project_id=project_id,
        milestones_list=milestones_list,
reflection=reflection,
suggestions_list=suggestions_list,
your_motivational_quote="Your motivational quote here"
)
# Tokenize input
try:
inputs = tokenizer(
prompt,
return_tensors="pt",
max_length=512,
truncation=True,
padding=True
)
except Exception as e:
logger.error(f"Error in tokenization: {e}")
return f"Error in tokenization: {e}", "", ""
# Generate output
try:
with torch.no_grad():
outputs = model.generate(
input_ids=inputs['input_ids'],
attention_mask=inputs['attention_mask'],
max_length=600,
num_return_sequences=1,
no_repeat_ngram_size=2,
top_k=50,
top_p=0.95,
temperature=0.7,
pad_token_id=tokenizer.eos_token_id,
do_sample=True
)
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
except Exception as e:
logger.error(f"Error in generation: {e}")
return f"Error in generation: {e}", "", ""
# Extract sections
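    # Note: for decoder-only models, generate() returns the prompt tokens followed by the new
    # continuation, so generated_text still contains the prompt template; the searches below
    # therefore match the template's own "Checklist:" / "Suggestions:" / "Quote:" headers first.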
checklist = suggestions = quote = "Not generated."
try:
if "Checklist:" in generated_text:
checklist_start = generated_text.find("Checklist:") + 10
suggestions_start = generated_text.find("Suggestions:", checklist_start)
if suggestions_start == -1:
suggestions_start = len(generated_text)
checklist = generated_text[checklist_start:suggestions_start].strip()
if "Suggestions:" in generated_text:
suggestions_start = generated_text.find("Suggestions:") + 12
quote_start = generated_text.find("Quote:", suggestions_start)
if quote_start == -1:
quote_start = len(generated_text)
suggestions = generated_text[suggestions_start:quote_start].strip()
if "Quote:" in generated_text:
quote_start = generated_text.find("Quote:") + 7
quote = generated_text[quote_start:].strip()
except Exception as e:
logger.error(f"Error parsing output: {e}")
return f"Error parsing output: {e}", "", ""
return checklist, suggestions, quote
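# Example (illustrative): generate_outputs can also be exercised directly, e.g. from a REPL,
# using the same sample values as the UI placeholders below:
#   checklist, suggestions, quote = generate_outputs(
#       "Supervisor",
#       "PROJ-123",
#       "Foundation complete, Framing started",
#       "Facing delays due to weather and equipment issues.",
#   )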
def store_in_salesforce(project, role, project_id, milestones, reflection, checklist, suggestions, quote):
"""Store the generated outputs in Salesforce Supervisor_AI_Coaching__c."""
if sf is None:
logger.error("Salesforce connection not initialized")
return "Error: Salesforce connection not initialized"
try:
# Update or create Supervisor_AI_Coaching__c record
coaching_query = f"""
SELECT Id
FROM Supervisor_AI_Coaching__c
WHERE Project_ID__c = '{project['Id']}'
LIMIT 1
"""
coaching_records = sf.query(coaching_query)['records']
coaching_data = {
'Project_ID__c': project['Id'],
'Supervisor_ID__c': project['Supervisor__c'],
'Daily_Checklist__c': checklist,
'Suggested_Tips__c': suggestions,
'Reflection_Log__c': reflection,
'Last_Refresh_Date__c': datetime.utcnow().isoformat(),
'Engagement_Score__c': 50 if checklist != 'Not generated.' else 0,
'KPI_Flag__c': 'delay' in suggestions.lower() or 'issue' in suggestions.lower()
}
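        # Engagement_Score__c and KPI_Flag__c above are simple heuristics: a flat score whenever
        # a checklist was generated, and a flag when the suggestions mention a delay or issue.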
if coaching_records:
sf.Supervisor_AI_Coaching__c.update(coaching_records[0]['Id'], coaching_data)
logger.info(f"Updated Supervisor_AI_Coaching__c for Project {project_id}")
else:
sf.Supervisor_AI_Coaching__c.create(coaching_data)
logger.info(f"Created Supervisor_AI_Coaching__c for Project {project_id}")
return f"Successfully stored data for Project {project_id} in Salesforce"
except Exception as e:
logger.error(f"Error storing data in Salesforce: {e}")
return f"Error storing data in Salesforce: {e}"
def fetch_and_process_salesforce_data():
"""Fetch Project__c records from Salesforce and generate AI outputs."""
if sf is None:
logger.error("Salesforce connection not initialized")
return "Error: Salesforce connection not initialized"
try:
# Query Project__c records
query = """
SELECT Id, Name, Project_Name__c, Milestones__c, Weather_Log__c,
Supervisor__r.Role__c, Supervisor__r.Name, Supervisor__c
FROM Project__c
WHERE Milestones__c != null
LIMIT 10
"""
projects = sf.query(query)['records']
logger.info(f"Fetched {len(projects)} Project__c records")
for project in projects:
role = project['Supervisor__r']['Role__c'] if project['Supervisor__r'] is not None else 'Supervisor'
project_id = project['Name']
milestones = project['Milestones__c'] or 'No milestones'
reflection = f"Weather: {project['Weather_Log__c'] or 'Sunny, 25°C'}"
# Generate AI outputs
checklist, suggestions, quote = generate_outputs(role, project_id, milestones, reflection)
logger.info(f"Generated outputs for Project {project_id}")
# Store in Salesforce
store_in_salesforce(project, role, project_id, milestones, reflection, checklist, suggestions, quote)
return f"Processed {len(projects)} projects successfully"
except Exception as e:
logger.error(f"Error processing Salesforce data: {e}")
return f"Error processing Salesforce data: {e}"
def on_role_change(role):
"""Handle role selection change, fetch Salesforce data, generate outputs, and store in Salesforce."""
# Fetch data from Salesforce based on the selected role
project, project_id, milestones, reflection = fetch_salesforce_data_for_role(role)
    if project is None:
        # On failure the last returned value holds an error message; surface it in the result box
        return "", "", "", reflection, "", "", ""
# Generate outputs
checklist, suggestions, quote = generate_outputs(role, project_id, milestones, reflection)
# Store the generated outputs in Salesforce
store_result = store_in_salesforce(project, role, project_id, milestones, reflection, checklist, suggestions, quote)
return checklist, suggestions, quote, store_result, project_id, milestones, reflection
def create_interface():
"""Create Gradio interface for manual testing."""
with gr.Blocks() as demo:
gr.Markdown("### Construction Supervisor AI Coach")
with gr.Row():
role = gr.Dropdown(choices=["Supervisor", "Foreman", "Project Manager"], label="Role", value="Supervisor")
project_id = gr.Textbox(label="Project ID", placeholder="e.g., PROJ-123")
milestones = gr.Textbox(
label="Milestones (comma-separated)",
placeholder="e.g., Foundation complete, Framing started, Roof installed"
)
reflection = gr.Textbox(
label="Reflection",
lines=3,
placeholder="e.g., Facing delays due to weather and equipment issues."
)
with gr.Row():
submit = gr.Button("Generate")
clear = gr.Button("Clear")
sf_button = gr.Button("Process Salesforce Data")
checklist_output = gr.Textbox(label="Checklist", lines=4)
suggestions_output = gr.Textbox(label="Suggestions", lines=4)
quote_output = gr.Textbox(label="Quote", lines=2)
sf_output = gr.Textbox(label="Salesforce Processing Result", lines=2)
# Fetch data, generate outputs, and store in Salesforce when role changes
role.change(
fn=on_role_change,
inputs=role,
outputs=[checklist_output, suggestions_output, quote_output, sf_output, project_id, milestones, reflection]
)
submit.click(
fn=generate_outputs,
inputs=[role, project_id, milestones, reflection],
outputs=[checklist_output, suggestions_output, quote_output]
)
clear.click(
fn=lambda: ("Supervisor", "", "", ""),
inputs=None,
outputs=[role, project_id, milestones, reflection]
)
sf_button.click(
fn=fetch_and_process_salesforce_data,
inputs=None,
outputs=sf_output
)
return demo
if __name__ == "__main__":
try:
demo = create_interface()
demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
except Exception as e:
logger.error(f"Error launching Gradio interface: {e}")