# SnehaLeela's career chatbot — Gradio app for a HuggingFace Space.
# Standard library
import csv
import json
import os

# Third-party
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from pypdf import PdfReader  # NOTE(review): unused in this file — confirm before removing

# Load environment variables from a local .env file, overriding any already set.
load_dotenv(override=True)

# Gemini exposes an OpenAI-compatible endpoint, so the OpenAI SDK can drive it.
GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
google_api_key = os.getenv("GOOGLE_API_KEY")  # may be None if unset; the client then fails on first call
gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
# CSV files used as lightweight persistent logs.
USER_CSV = "user_details.csv"
UNKNOWN_CSV = "unknown_questions.csv"

# Create each log file with its header row on first run.
# ("file" renamed to _log_file to avoid shadowing-style confusion.)
for _log_file, _headers in [
    (USER_CSV, ["email", "name", "notes"]),
    (UNKNOWN_CSV, ["question"]),
]:
    if not os.path.exists(_log_file):
        with open(_log_file, "w", newline="", encoding="utf-8") as f:
            csv.writer(f).writerow(_headers)


def record_user_details(email, name="Name not provided", notes="not provided"):
    """Append a visitor's contact details to USER_CSV.

    Invoked as an LLM tool when the user shares an email address.
    Returns a small status dict the model can read back.
    """
    with open(USER_CSV, "a", newline="", encoding="utf-8") as f:
        csv.writer(f).writerow([email, name, notes])
    return {"recorded": "ok"}


def record_unknown_question(question):
    """Append a question the bot could not answer to UNKNOWN_CSV.

    Returns a small status dict the model can read back.
    """
    with open(UNKNOWN_CSV, "a", newline="", encoding="utf-8") as f:
        csv.writer(f).writerow([question])
    return {"recorded": "ok"}
# --- Tool (function-calling) schemas advertised to the LLM ---

# Log a visitor's contact details; only the email is required.
record_user_details_json = {
    "name": "record_user_details",
    "description": "Record user info when they provide email",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {"type": "string", "description": "The user's email"},
            "name": {"type": "string", "description": "User's name"},
            "notes": {"type": "string", "description": "Extra info"},
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}

# Log a question the bot could not answer.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Record any unanswered question",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {"type": "string", "description": "The question not answered"},
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}

# Tool list in the OpenAI chat-completions format.
tools = [
    {"type": "function", "function": record_user_details_json},
    {"type": "function", "function": record_unknown_question_json},
]
class Me:
    """Chatbot persona that answers as SnehaLeela, grounded in profile.json."""

    def __init__(self):
        self.openai = gemini  # OpenAI-compatible client pointed at Gemini
        self.name = "SnehaLeela"
        # Load the structured profile that grounds the system prompt.
        with open("profile.json", "r", encoding="utf-8") as f:
            self.profile = json.load(f)
        # Convenience attributes for the individual profile sections.
        self.personal_info = self.profile.get("personal_info", {})
        self.expertise = self.profile.get("expertise", [])
        self.experience = self.profile.get("experience", [])
        self.education = self.profile.get("education", [])
        self.friends = self.profile.get("friends", [])

    def handle_tool_call(self, tool_calls):
        """Execute each requested tool and return tool-role result messages.

        Tool names are resolved against module globals (record_user_details /
        record_unknown_question); an unknown tool name yields an empty result
        rather than raising.
        """
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            tool = globals().get(tool_name)
            result = tool(**arguments) if tool else {}
            results.append({
                "role": "tool",
                "content": json.dumps(result),
                "tool_call_id": tool_call.id,
            })
        return results

    def system_prompt(self):
        """Build the persona system prompt from the loaded profile sections."""
        # Flatten the experience entries into an indented text outline.
        experience_text = ""
        for company in self.experience:
            experience_text += f"{company['company']}"
            if 'location' in company:
                experience_text += f" ({company['location']})"
            for role in company.get('roles', []):
                experience_text += f"\n- {role['title']} ({role.get('years', '')})"
                for hl in role.get('highlights', []):
                    experience_text += f"\n  • {hl}"
            experience_text += "\n"

        expertise_text = ", ".join(self.expertise)

        # Only the first education entry's highest degree is summarized.
        education_text = ""
        if hasattr(self, 'education') and self.education:
            highest = self.education[0].get("highest_degree", {})
            education_text = f"{highest.get('degree','')} in {highest.get('field_of_study','')} from {highest.get('university','')} ({highest.get('start_year','')}–{highest.get('end_year','')})"

        # Optional: prepare friends text for fun.
        friends_text = ""
        if hasattr(self, 'friends') and self.friends:
            friends_list = []
            for fr in self.friends:
                friends_list.append(f"{fr.get('Name','')} ({fr.get('Company','')}): {fr.get('Description','')}")
            friends_text = "\n".join(friends_list)

        system_prompt = (
            f"You are acting as {self.personal_info['name']} (aka {self.personal_info.get('nickname','')}). "
            f"Answer questions about {self.personal_info['name']}'s career, background, skills, and experience. "
            f"Represent {self.personal_info['name']} faithfully. "
            f"If you don't know an answer, use record_unknown_question tool. "
            f"If the user engages in discussion, try to steer them towards providing their email using record_user_details tool.\n\n"
            f"## Summary:\n{self.personal_info['summary']}\n\n"
            f"## Interests:\n{', '.join(self.personal_info.get('personal_interests', []))}\n\n"
            f"## Travel History:\n{', '.join(self.personal_info.get('travel_history', []))}\n\n"
            f"## Education:\n{education_text}\n\n"
            f"## Expertise:\n{expertise_text}\n\n"
            f"## Experience:\n{experience_text}\n\n"
            f"## Friends (for fun):\n{friends_text}\n\n"
            f"## LinkedIn Profile:\nhttps://www.linkedin.com/in/sneha-leela-0a450349/\n\n"
            f"Chat with the user staying in character as {self.personal_info['name']}."
        )
        return system_prompt

    def chat(self, message, history):
        """Run one chat turn, looping while the model requests tool calls.

        `history` is Gradio's tuple-style history: a list of
        [user_msg, bot_msg] pairs — converted here to role/content dicts.
        Returns the assistant's final text reply.
        """
        formatted_history = []
        for user_msg, bot_msg in history:
            formatted_history.append({"role": "user", "content": user_msg})
            formatted_history.append({"role": "assistant", "content": bot_msg})

        messages = (
            [{"role": "system", "content": self.system_prompt()}]
            + formatted_history
            + [{"role": "user", "content": message}]
        )

        while True:
            response = self.openai.chat.completions.create(
                model="gemini-2.5-flash-preview-05-20",
                messages=messages,
                tools=tools,
            )
            choice = response.choices[0]
            if choice.finish_reason != "tool_calls":
                return choice.message.content
            # Model requested tools: record its request, run the tools,
            # append their results, then ask again.
            # (Fix: original rebound the `message` parameter here, clobbering
            # the user's text — use a distinct name.)
            assistant_message = choice.message
            messages.append(assistant_message)
            messages.extend(self.handle_tool_call(assistant_message.tool_calls))
# Custom CSS giving the app a background image and translucent chat bubbles.
# NOTE(review): currently not passed to ChatInterface (css= is commented out
# at launch); "file/..." URL resolution depends on the Gradio version — verify.
css_code = """
div {
    background-image: url("file/Gemini_Generated.png"); /* Your local image */
    background-size: cover;
    background-position: center;
    background-repeat: no-repeat;
}
.gradio-container {
    background-color: rgba(255, 255, 255, 0.6); /* Optional overlay for readability */
}
.chat-message.user {
    background-color: rgba(208, 230, 255, 0.8);
}
.chat-message.bot {
    background-color: rgba(224, 255, 224, 0.8);
}
"""
# Launch the Gradio chat UI (share=True exposes a public temporary URL).
if __name__ == "__main__":
    me = Me()
    gr.ChatInterface(
        me.chat,
        theme="NoCrypt/miku",
        title="SnehaLeela's Careerbot",
        # css=css_code,  # enable to apply the custom background styling above
    ).launch(share=True)