import os
import re
import uuid

import gradio as gr
import pandas as pd
from groq import Groq

from amazon_apparel_recommender import price_quality_recommendations

# Load product metadata once at startup. Casting the searchable columns to
# str makes the keyword filter below safe against NaN / mixed-type values.
metadata = pd.read_csv("assets/cleaned_metadata.csv")
for _col in ("title", "color", "brand", "product_type_name"):
    metadata[_col] = metadata[_col].astype(str)

# Groq API client; the key comes from the environment, never hard-coded.
client = Groq(api_key=os.getenv("groqkey"))

# Initial system prompt (runtime value identical to the original single literal).
system_prompt = (
    "You are an Amazon fashion assistant. Users describe the kind of clothing "
    "they're looking for, and you recommend products based on metadata like "
    "brand, color, product type, and price. Keep responses short and clear."
)


def filter_metadata(query):
    """Return up to 3 catalogue rows matching keywords in *query*.

    Narrows the frame one keyword at a time across title/color/brand/type,
    stopping early once fewer than 3 candidates remain so an over-specific
    query still yields something to show.

    Returns a list of dicts with keys title, brand, price, review_score.
    """
    keywords = re.findall(r'\w+', query.lower())
    filtered = metadata
    for kw in keywords:
        if len(filtered) < 3:
            break
        mask = (
            filtered['title'].str.lower().str.contains(kw)
            | filtered['color'].str.lower().str.contains(kw)
            | filtered['brand'].str.lower().str.contains(kw)
            | filtered['product_type_name'].str.lower().str.contains(kw)
        )
        filtered = filtered[mask]
    return filtered[['title', 'brand', 'price', 'review_score']].head(3).to_dict(orient='records')


def get_chat_response(query, history):
    """Handle one chat turn.

    Mutates *history* (OpenAI-style message dicts) in place and returns
    ``(history, chat_display, "")`` — the trailing empty string clears the
    input textbox; *chat_display* is the (user, assistant) tuple list the
    gr.Chatbot component renders.
    """
    if not history or history[0]["role"] != "system":
        history.insert(0, {"role": "system", "content": system_prompt})
    history.append({"role": "user", "content": query})

    # Ground the model with concrete catalogue matches before calling it.
    product_suggestions = filter_metadata(query)
    if product_suggestions:
        product_context = "\nHere are some matching products:\n"
        for p in product_suggestions:
            product_context += f"- {p['title']} by {p['brand']} (${p['price']}, score: {p['review_score']})\n"
    else:
        product_context = "\nSorry, I couldn't find matching items for that query.\n"
    history.append({"role": "assistant", "content": product_context})

    completion = client.chat.completions.create(
        model="deepseek-r1-distill-llama-70b",
        messages=history,
        temperature=0.4,
        top_p=0.95,
        stream=True,
    )
    response = ""
    for chunk in completion:
        if chunk.choices[0].delta.content:
            response += chunk.choices[0].delta.content
    history.append({"role": "assistant", "content": response})

    # Render (user, assistant) pairs for the Chatbot widget.
    # BUG FIX: every user turn is followed by TWO assistant messages
    # (product context, then the model reply). The original paired each user
    # message only with history[i+1], silently dropping the model's answer.
    # Here all consecutive assistant messages after a user turn are joined.
    chat_display = []
    i = 0
    while i < len(history):
        if history[i]["role"] == "user":
            user_msg = history[i]["content"]
            parts = []
            j = i + 1
            while j < len(history) and history[j]["role"] == "assistant":
                parts.append(history[j]["content"])
                j += 1
            chat_display.append((user_msg, "".join(parts) if parts else "(no response)"))
            i = j
        else:
            i += 1
    return history, chat_display, ""


def clear_chat():
    """Reset the conversation state, the chat window, and the input box."""
    return [], [], ""


# NOTE: the original also created `conversation_state = gr.State([])` at module
# level — outside any Blocks context and never wired to an event. It was dead
# (the UI uses `demo_state`) and has been removed.
with gr.Blocks(title="๐๏ธ Amazon Chat Recommender") as demo:
    gr.HTML("""
Ask for clothing recommendations and get chat-based responses.
""")
    chatbot = gr.Chatbot(label="๐งต Apparel Chat", min_height=500)
    user_query = gr.Textbox(label="Ask for a recommendation", placeholder="e.g. black hoodie or summer dress")
    submit = gr.Button("Send")
    clear = gr.Button("Clear")
    demo_state = gr.State([])

    submit.click(fn=get_chat_response, inputs=[user_query, demo_state], outputs=[demo_state, chatbot, user_query])
    user_query.submit(fn=get_chat_response, inputs=[user_query, demo_state], outputs=[demo_state, chatbot, user_query])
    clear.click(fn=clear_chat, outputs=[demo_state, chatbot, user_query])

# Guard the launch so importing this module (e.g. for tests) does not start
# the web server.
if __name__ == "__main__":
    demo.launch()