Spaces:
Running
Running
File size: 8,009 Bytes
e9907ee 08f1855 e9907ee d04dd91 e9907ee d04dd91 e9907ee abfee73 e04988e 079c35d abfee73 3bd3796 abfee73 08a0e60 079c35d 08a0e60 abfee73 079c35d abfee73 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 |
from dotenv import load_dotenv
from openai import OpenAI
import json
import os
from pypdf import PdfReader
import gradio as gr
import csv
# Load environment variables from a local .env file; override=True lets .env values
# win over anything already set in the process environment.
load_dotenv(override=True)
# Gemini's OpenAI-compatible endpoint, driven through the standard OpenAI client.
GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
# NOTE(review): no guard if GOOGLE_API_KEY is unset — client construction still
# succeeds with api_key=None and only fails later at request time; confirm intended.
google_api_key = os.getenv("GOOGLE_API_KEY")
gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
# CSV log files: captured leads, and questions the bot could not answer.
USER_CSV = "user_details.csv"
UNKNOWN_CSV = "unknown_questions.csv"
# First-run bootstrap: create each log file with its header row if it is missing.
_CSV_HEADERS = {
    USER_CSV: ["email", "name", "notes"],
    UNKNOWN_CSV: ["question"],
}
for _path, _header in _CSV_HEADERS.items():
    if os.path.exists(_path):
        continue  # never clobber an existing log
    with open(_path, "w", newline="", encoding="utf-8") as _fh:
        csv.writer(_fh).writerow(_header)
# Functions to log user details and unknown questions
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Append a captured lead (email plus optional name and notes) to the user CSV log."""
    row = [email, name, notes]
    with open(USER_CSV, "a", newline="", encoding="utf-8") as fh:
        csv.writer(fh).writerow(row)
    # Tool-call convention: return a small JSON-serializable acknowledgement.
    return {"recorded": "ok"}
def record_unknown_question(question):
    """Append a question the bot could not answer to the unknown-questions CSV log."""
    with open(UNKNOWN_CSV, "a", newline="", encoding="utf-8") as fh:
        csv.writer(fh).writerow([question])
    # Tool-call convention: return a small JSON-serializable acknowledgement.
    return {"recorded": "ok"}
# OpenAI function-calling schemas for the two logging tools above.
# The "name" fields must match the Python function names exactly —
# Me.handle_tool_call resolves them via globals().get(name).
record_user_details_json = {
    "name": "record_user_details",
    "description": "Record user info when they provide email",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {"type": "string", "description": "The user's email"},
            "name": {"type": "string", "description": "User's name"},
            "notes": {"type": "string", "description": "Extra info"}
        },
        # Only email is mandatory; name/notes fall back to the Python defaults.
        "required": ["email"],
        "additionalProperties": False
    }
}
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Record any unanswered question",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {"type": "string", "description": "The question not answered"},
        },
        "required": ["question"],
        "additionalProperties": False
    }
}
# Tool list in the shape the chat.completions API expects.
tools = [
    {"type": "function", "function": record_user_details_json},
    {"type": "function", "function": record_unknown_question_json}
]
class Me:
    """Chatbot persona that answers questions as the profile owner.

    Loads a ``profile.json`` file, renders it into a system prompt, and runs
    a tool-calling chat loop against Gemini's OpenAI-compatible endpoint.
    """

    def __init__(self):
        self.openai = gemini
        self.name = "SnehaLeela"
        # Load the structured profile that drives the system prompt.
        with open("profile.json", "r", encoding="utf-8") as f:
            self.profile = json.load(f)
        # Convenience attributes; every section defaults to empty if absent.
        self.personal_info = self.profile.get("personal_info", {})
        self.expertise = self.profile.get("expertise", [])
        self.experience = self.profile.get("experience", [])
        self.education = self.profile.get("education", [])
        self.friends = self.profile.get("friends", [])

    def handle_tool_call(self, tool_calls):
        """Execute each tool the model requested and build tool-role replies.

        Args:
            tool_calls: tool_call objects from the API response (each with
                ``.id`` and ``.function.name`` / ``.function.arguments``).

        Returns:
            A list of ``{"role": "tool", ...}`` messages to feed back to the model.
        """
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            # Tool names in the schemas match module-level function names.
            tool = globals().get(tool_name)
            # An unknown tool name yields an empty result instead of crashing the chat.
            result = tool(**arguments) if tool else {}
            results.append({
                "role": "tool",
                "content": json.dumps(result),
                "tool_call_id": tool_call.id,
            })
        return results

    def system_prompt(self):
        """Render the loaded profile into the persona system prompt."""
        # Flatten the nested experience structure into indented plain text.
        experience_text = ""
        for company in self.experience:
            experience_text += f"{company['company']}"
            if 'location' in company:
                experience_text += f" ({company['location']})"
            for role in company.get('roles', []):
                experience_text += f"\n- {role['title']} ({role.get('years', '')})"
                for hl in role.get('highlights', []):
                    experience_text += f"\n • {hl}"
            experience_text += "\n"
        expertise_text = ", ".join(self.expertise)
        # Only the first education entry (the highest degree) is rendered.
        education_text = ""
        if hasattr(self, 'education') and self.education:
            highest = self.education[0].get("highest_degree", {})
            education_text = f"{highest.get('degree','')} in {highest.get('field_of_study','')} from {highest.get('university','')} ({highest.get('start_year','')}–{highest.get('end_year','')})"
        # Optional: prepare friends text for fun
        friends_text = ""
        if hasattr(self, 'friends') and self.friends:
            friends_list = []
            for f in self.friends:
                friends_list.append(f"{f.get('Name','')} ({f.get('Company','')}): {f.get('Description','')}")
            friends_text = "\n".join(friends_list)
        # NOTE: personal_info must contain 'name' and 'summary'; other keys default to empty.
        system_prompt = (
            f"You are acting as {self.personal_info['name']} (aka {self.personal_info.get('nickname','')}). "
            f"Answer questions about {self.personal_info['name']}'s career, background, skills, and experience. "
            f"Represent {self.personal_info['name']} faithfully. "
            f"If you don't know an answer, use record_unknown_question tool. "
            f"If the user engages in discussion, try to steer them towards providing their email using record_user_details tool.\n\n"
            f"## Summary:\n{self.personal_info['summary']}\n\n"
            f"## Interests:\n{', '.join(self.personal_info.get('personal_interests', []))}\n\n"
            f"## Travel History:\n{', '.join(self.personal_info.get('travel_history', []))}\n\n"
            f"## Education:\n{education_text}\n\n"
            f"## Expertise:\n{expertise_text}\n\n"
            f"## Experience:\n{experience_text}\n\n"
            f"## Friends (for fun):\n{friends_text}\n\n"
            f"## LinkedIn Profile:\nhttps://www.linkedin.com/in/sneha-leela-0a450349/\n\n"
            f"Chat with the user staying in character as {self.personal_info['name']}."
        )
        return system_prompt

    def chat(self, message, history):
        """Run one chat turn, looping through any tool calls the model requests.

        Args:
            message: The latest user message text.
            history: Prior turns from Gradio — either [user, assistant] pairs
                (tuples format) or {"role", "content"} dicts (messages format).
                Both shapes are normalized to OpenAI-style message dicts.

        Returns:
            The assistant's final text reply.
        """
        formatted_history = []
        for turn in history:
            if isinstance(turn, dict):
                # Gradio "messages" format is already role/content shaped.
                formatted_history.append({"role": turn["role"], "content": turn["content"]})
            else:
                # Gradio "tuples" format: one (user, assistant) pair per turn.
                user_msg, bot_msg = turn
                formatted_history.append({"role": "user", "content": user_msg})
                formatted_history.append({"role": "assistant", "content": bot_msg})
        messages = (
            [{"role": "system", "content": self.system_prompt()}]
            + formatted_history
            + [{"role": "user", "content": message}]
        )
        # Keep calling the model until it produces a final (non-tool) reply.
        while True:
            response = self.openai.chat.completions.create(
                model="gemini-2.5-flash-preview-05-20",
                messages=messages,
                tools=tools,
            )
            choice = response.choices[0]
            if choice.finish_reason != "tool_calls":
                return choice.message.content
            # Execute the requested tools and feed their results back to the model.
            assistant_message = choice.message
            messages.append(assistant_message)
            messages.extend(self.handle_tool_call(assistant_message.tool_calls))
# Custom CSS for the chat UI: a local background image plus translucent overlays
# for readability. Enable by passing css=css_code to gr.ChatInterface.
css_code = """
div {
background-image: url("file/Gemini_Generated.png"); /* Your local image */
background-size: cover;
background-position: center;
background-repeat: no-repeat;
}
.gradio-container {
background-color: rgba(255, 255, 255, 0.6); /* Optional overlay for readability */
}
.chat-message.user {
background-color: rgba(208, 230, 255, 0.8);
}
.chat-message.bot {
background-color: rgba(224, 255, 224, 0.8);
}
"""
# Script entry point: build the persona and serve it through a Gradio chat UI.
if __name__ == "__main__":
    bot = Me()
    interface = gr.ChatInterface(
        bot.chat,
        theme="NoCrypt/miku",
        title="SnehaLeela's Careerbot",
    )
    # share=True publishes a temporary public URL via Gradio's tunnel service.
    interface.launch(share=True)
|