import gradio as gr
from huggingface_hub import InferenceClient

theme = gr.themes.Monochrome(
    primary_hue=gr.themes.Color(c100="#f5f5f3", c200="#fbfaf8", c300="#f9f9f9", c400="#eee9ee", c50="rgba(255, 255, 255, 1)", c500="rgba(0, 0, 0, 1)", c600="rgba(26.934374999999992, 26.934374999999992, 26.934374999999992, 1)", c700="rgba(10.943750000000012, 10.943750000000012, 10.943750000000012, 1)", c800="rgba(17.053125000000005, 17.053125000000005, 17.053125000000005, 1)", c900="#fffefe", c950="#fffefe"),
    secondary_hue=gr.themes.Color(c100="#f29c74", c200="#f4b7a8", c300="#fffefe", c400="#fffefe", c50="#e46e45", c500="#fffefe", c600="#fffefe", c700="#fffefe", c800="#fffefe", c900="#fffefe", c950="#fffefe"),
    neutral_hue=gr.themes.Color(c100="#6b8ea8", c200="#a4c5e8", c300="rgba(0, 0, 0, 1)", c400="rgba(0, 0, 0, 1)", c50="#284566", c500="rgba(0, 0, 0, 1)", c600="rgba(0, 0, 0, 1)", c700="rgba(0, 0, 0, 1)", c800="rgba(0, 0, 0, 1)", c900="rgba(0, 0, 0, 1)", c950="rgba(0, 0, 0, 1)"),
    font=[gr.themes.GoogleFont('open sans'), 'ui-sans-serif', 'system-ui', 'sans-serif'],
).set(
    body_background_fill='*primary_50',
    body_background_fill_dark='*primary_700',
    body_text_color='*primary_500',
    body_text_color_dark='*primary_400',
    body_text_color_subdued='*neutral_50',
    body_text_color_subdued_dark='*primary_300',
    background_fill_primary='*primary_400',
    background_fill_primary_dark='*neutral_100',
    background_fill_secondary='*primary_400',
    background_fill_secondary_dark='*neutral_50',
    border_color_accent_dark='*secondary_50',
    border_color_accent_subdued_dark='*primary_50',
    border_color_primary='*neutral_50',
    border_color_primary_dark='*neutral_100',
    color_accent='*primary_50',
    color_accent_soft_dark='*neutral_100',
    block_background_fill_dark='*neutral_50',
    checkbox_background_color='*primary_400',
    checkbox_background_color_selected='*primary_700',
    checkbox_background_color_selected_dark='*neutral_50',
    checkbox_label_border_color_dark='*neutral_50',
    input_background_fill='*primary_50',
    input_background_fill_dark='*neutral_100',
    input_border_color='*neutral_50',
    input_border_color_dark='*neutral_50',
    input_border_width='2px',
    table_odd_background_fill='*neutral_100',
    button_primary_background_fill='*neutral_50',
    button_primary_background_fill_dark='*neutral_100',
    button_primary_background_fill_hover='*neutral_100'
)
# STEP 1 FROM SEMANTIC SEARCH
from sentence_transformers import SentenceTransformer
import torch

# STEP 2 FROM SEMANTIC SEARCH
# Open each knowledge-base file in read mode with UTF-8 encoding
# and read its entire contents into a variable
with open("weather.txt", "r", encoding="utf-8") as file:
    weather_text = file.read()
with open("luggage.txt", "r", encoding="utf-8") as file:
    luggage_text = file.read()
with open("attractions.txt", "r", encoding="utf-8") as file:
    attraction_text = file.read()
with open("food.txt", "r", encoding="utf-8") as file:
    food_text = file.read()
# STEP 3 FROM SEMANTIC SEARCH
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned text on the "***" separator used between sections
    chunks = cleaned_text.split("***")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Strip each chunk and keep only the non-empty ones
    for chunk in chunks:
        chunk = chunk.strip()
        if chunk != "":
            cleaned_chunks.append(chunk)
    return cleaned_chunks
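# Quick sanity check (illustrative only, not part of the app flow): for a string such as
# "Pack layers.***Fog is common.***", preprocess_text should return
# ["Pack layers.", "Fog is common."], with empty pieces after the final "***" dropped.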
# Call the preprocess_text function on each file's text and store the resulting chunk lists
cleaned_chunks_weather = preprocess_text(weather_text)
cleaned_chunks_luggage = preprocess_text(luggage_text)
cleaned_chunks_attraction = preprocess_text(attraction_text)
cleaned_chunks_food = preprocess_text(food_text)
# STEP 4 FROM SEMANTIC SEARCH
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')

def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store the result as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    return chunk_embeddings

# Call the create_embeddings function on each chunk list and store the embeddings
chunk_embeddings_weather = create_embeddings(cleaned_chunks_weather)
chunk_embeddings_luggage = create_embeddings(cleaned_chunks_luggage)
chunk_embeddings_attraction = create_embeddings(cleaned_chunks_attraction)
chunk_embeddings_food = create_embeddings(cleaned_chunks_food)
# STEP 5 FROM SEMANTIC SEARCH
# Define a function to find the most relevant text chunks for a given query
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between the query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    #print(similarities)  # uncomment to inspect the similarity scores
    # Find the indices of the 3 chunks with the highest similarity scores
    top_indices = torch.topk(similarities, k=3).indices
    #print(top_indices)  # uncomment to inspect the top indices
    # Create an empty list to store the most relevant chunks
    top_chunks = []
    # Loop through the top indices and retrieve the corresponding text chunks
    for top_index in top_indices:
        top_chunks.append(text_chunks[top_index])
    # Return the list of most relevant chunks
    return top_chunks
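# Illustrative usage (hypothetical query, using the embeddings built above):
# get_top_chunks("Will I need an umbrella?", chunk_embeddings_weather, cleaned_chunks_weather)
# returns the 3 weather chunks whose embeddings have the highest cosine similarity to the query,
# ordered from most to least similar.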
# STEP 6 FROM SEMANTIC SEARCH
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")

def respond(message, history, language, chatbot_mode, destinations, trip_length, trip_unit, trip_season, luggage_types, luggage_size, food_prefs, activity):
    # Fall back to sensible defaults for any sidebar inputs the user left empty
    destinations = destinations or []
    trip_length = trip_length or "Not specified"
    trip_unit = trip_unit or ""
    trip_season = trip_season or "Not specified"
    luggage_types = luggage_types or []
    luggage_size = luggage_size or "Not specified"
    food_prefs = food_prefs or []
    activity = activity or []
    language = language or "English"

    # Conduct a semantic search over each of our files
    top_weather = get_top_chunks(message, chunk_embeddings_weather, cleaned_chunks_weather)
    top_luggage = get_top_chunks(message, chunk_embeddings_luggage, cleaned_chunks_luggage)
    top_attraction = get_top_chunks(message, chunk_embeddings_attraction, cleaned_chunks_attraction)
    top_food = get_top_chunks(message, chunk_embeddings_food, cleaned_chunks_food)
    str_top_weather = "\n".join(top_weather)
    str_top_luggage = "\n".join(top_luggage)
    str_top_attraction = "\n".join(top_attraction)
    str_top_food = "\n".join(top_food)

    # Collect the input data from the sidebar elements into a context string
    ctx = (
        f"Language: {language}\n"
        f"Destination: {', '.join(destinations) if destinations else 'Not specified'}\n"
        f"Trip Length: {trip_length} {trip_unit}\n"
        f"Trip Season: {trip_season}\n"
        f"Luggage: {', '.join(luggage_types) if luggage_types else 'Not specified'}, "
        f"Size: {luggage_size}L\n"
        f"Food Preferences: {', '.join(food_prefs) if food_prefs else 'Not specified'}\n"
        f"Activity Preferences: {', '.join(activity) if activity else 'Not specified'}\n"
    )
    # Build the system prompt for the selected chatbot mode
    if chatbot_mode == "Packing":
        messages = [{
            "role": "system",
            "content": (
                f"You are a friendly and Gen Z travel chatbot helping with packing advice.\n\n"
                f"{ctx}\n"
                f"Relevant context:\n{str_top_weather}\n{str_top_luggage}\n\n"
                f"Please respond in {language}"
            )
        }]
    elif chatbot_mode == "Food/Attractions":
        messages = [{
            "role": "system",
            "content": (
                f"You are a friendly and Gen Z travel chatbot recommending food and attractions.\n\n"
                f"{ctx}\n"
                f"Relevant context:\n{str_top_food}\n{str_top_attraction}\n\n"
                f"Please respond in {language}"
            )
        }]
    else:
        messages = [{
            "role": "system",
            "content": (
                f"You are a friendly and Gen Z travel chatbot helping travelers plan trips to San Francisco and/or Los Angeles.\n\n"
                f"{ctx}\n"
                f"Use relevant context:\n{str_top_weather}\n{str_top_luggage}\n{str_top_food}\n{str_top_attraction}\n\n"
                f"Please respond in {language}"
            )
        }]
    # Replay the prior conversation so the model sees the full chat history
    if history:
        for user_msg, bot_reply in history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": bot_reply})
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        messages,
        max_tokens=2000,
        temperature=1
    )
    # chat_completion normally returns a ChatCompletionOutput object; handle a dict-style response too
    if isinstance(response, dict):
        reply = response['choices'][0]['message']['content'].strip()
    else:
        reply = response.choices[0].message.content.strip()

    history.append((message, reply))
    return history, ""
def reset_inputs():
    return (
        None,  # language
        None,  # chatbot_mode
        [],    # destinations
        None,  # trip_length
        None,  # trip_unit
        None,  # trip_season
        [],    # luggage_types
        20,    # luggage_size (default)
        [],    # food_prefs
        []     # activity
    )

def update_visibility(chatbot_mode):
    if chatbot_mode == "Packing":
        return gr.update(visible=True), gr.update(visible=False)
    elif chatbot_mode == "Food/Attractions":
        return gr.update(visible=False), gr.update(visible=True)
    else:
        return gr.update(visible=True), gr.update(visible=True)
with gr.Blocks(theme=theme) as demo:
    with gr.Row():
        # ─── left column: your controls ───
        with gr.Column(scale=1):
            gr.Markdown("### Chatbot Settings")
            language = gr.Radio(
                choices=["English", "Español", "Italiano", "Français", "日本語", "中文"],
                label="What language would you like to use?"
            )
            chatbot_mode = gr.Radio(
                choices=["Packing", "Food/Attractions"],
                label="What do you need help with?"
            )
            gr.Markdown("### Destination")
            destinations = gr.CheckboxGroup(
                choices=["San Francisco", "Los Angeles"],
                label="Where are you going?"
            )
            gr.Markdown("### Trip Length")
            trip_length = gr.Number(label="How long is your trip?", precision=0)
            trip_unit = gr.Radio(
                choices=["day", "week", "month"],
                label="Trip length unit:"
            )
            trip_season = gr.Radio(
                choices=["Warm/Dry (May to October)", "Cool/Wet (November to April)"],
                label="Season:"
            )
            with gr.Group(visible=True) as packing_group:
                #gr.Markdown("### Luggage")
                luggage_types = gr.CheckboxGroup(
                    choices=["Carry-on", "Checked"],
                    label="What is your luggage type?"
                )
                luggage_size = gr.Slider(
                    minimum=10, maximum=100, step=10, value=20,
                    label="What is the size of your luggage (liters)?"
                )
            with gr.Group(visible=True) as food_group:
                #gr.Markdown("### Food")
                food_prefs = gr.Dropdown(
                    choices=["Italian", "Thai", "Mexican", "Japanese", "Vegan", "Seafood"],
                    multiselect=True,
                    label="What are your food preferences?"
                )
                #gr.Markdown("### Activities")
                activity = gr.Dropdown(
                    choices=["Outdoor & Nature", "Indoor", "Museums", "Shopping", "Relaxation"],
                    multiselect=True,
                    label="What are your activity preferences?"
                )
            chatbot_mode.change(
                fn=update_visibility,
                inputs=[chatbot_mode],
                outputs=[packing_group, food_group]
            )
            reset_btn = gr.Button("Reset All", variant="secondary")
            reset_btn.click(
                fn=reset_inputs,
                inputs=[],
                outputs=[
                    language,
                    chatbot_mode,
                    destinations,
                    trip_length,
                    trip_unit,
                    trip_season,
                    luggage_types,
                    luggage_size,
                    food_prefs,
                    activity
                ]
            )
        with gr.Column(scale=3):
            gr.Image(value="Go Buddy2.png", interactive=False, show_label=False)
            # 1) the chat history panel
            chat_box = gr.Chatbot(height=900)
            # 2) where the user types
            msg = gr.Textbox(
                placeholder="Ask me anything…",
                lines=1,
                label="How can we help you?"
            )
            # 3) send button
            send_btn = gr.Button("Send")
            # 4) hook up the click event
            send_btn.click(
                fn=respond,
                inputs=[
                    msg,
                    chat_box,
                    language,
                    chatbot_mode,
                    destinations,
                    trip_length,
                    trip_unit,
                    trip_season,
                    luggage_types,
                    luggage_size,
                    food_prefs,
                    activity
                ],
                outputs=[chat_box, msg]
            )
            msg.submit(
                fn=respond,
                inputs=[
                    msg,
                    chat_box,
                    language,
                    chatbot_mode,
                    destinations,
                    trip_length,
                    trip_unit,
                    trip_season,
                    luggage_types,
                    luggage_size,
                    food_prefs,
                    activity
                ],
                outputs=[chat_box, msg]
            )

demo.launch(debug=True)