# Promptly / app.py
# Source: Hugging Face Space by reddmann007 (commit e2e02d1, verified)
import streamlit as st
import os
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_core.prompts import PromptTemplate, FewShotPromptTemplate
from langchain_core.messages import HumanMessage, SystemMessage
# --- 1. UI Setup ---
# Intro copy shown under the title; explains what the demo is about.
APP_INTRO = """
This app demonstrates how different **Prompt Engineering** techniques affect AI performance
in a Restaurant Management context.
"""

# set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_icon="🍴", page_title="FlavorFeedback AI")
st.title("🍴 FlavorFeedback: Prompting Lab")
st.markdown(APP_INTRO)
# --- 2. Model Setup ---
api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not api_token:
    st.error("Please add your HUGGINGFACEHUB_API_TOKEN to the Space Secrets.")
    st.stop()

# Using Qwen 2.5 7B - Excellent at reasoning and ungated (no request needed)
repo_id = "Qwen/Qwen2.5-7B-Instruct"


@st.cache_resource(show_spinner=False)
def _load_chat_model(model_id: str, token: str) -> ChatHuggingFace:
    """Build the HF endpoint client and its chat wrapper exactly once.

    Streamlit re-runs this whole script on every widget interaction; without
    ``st.cache_resource`` a fresh endpoint client would be constructed on each
    rerun. The cache key is (model_id, token), so changing either rebuilds.
    """
    endpoint = HuggingFaceEndpoint(
        repo_id=model_id,
        task="text-generation",
        temperature=0.7,
        huggingfacehub_api_token=token,
    )
    return ChatHuggingFace(llm=endpoint)


chat_model = _load_chat_model(repo_id, api_token)
llm = chat_model.llm  # kept at module level for backward compatibility
# --- 3. Sidebar & Logic Selection ---
# The four prompting strategies this lab demonstrates, in display order.
TECHNIQUES = ("Zero-Shot", "Single-Shot", "Few-Shot", "Chain of Thought")

st.sidebar.header("Configuration")
option = st.sidebar.selectbox("Choose Technique", TECHNIQUES)
# --- 4. Define Defaults for the Use Case ---
# One representative sample input per technique, used to pre-fill the text area.
SAMPLE_INPUTS = {
    "Zero-Shot": "The pasta was okay, but the service was incredibly slow and the waiter forgot our drinks twice.",
    "Single-Shot": "The staff was so friendly and the steak was cooked to perfection, though the decor felt a bit dated.",
    "Few-Shot": "The music was way too loud and we couldn't hear each other at the table.",
    "Chain of Thought": "Issue: Undercooked Salmon. Bill Total: $72. Resolution: Waiter apologized but kept the item on the bill.",
}

user_query = st.text_area("Input Data / Review:", value=SAMPLE_INPUTS[option], height=150)
# --- 5. Execution Logic ---
if st.button("Generate Response"):
    # Defaults; the Chain-of-Thought branch overrides the system persona.
    system_instruction = "You are a professional Restaurant Operations Assistant."
    formatted_prompt = ""

    if option == "Zero-Shot":
        # Plain classification task: no examples, just the instruction.
        formatted_prompt = (
            "Classify the following restaurant review as 'Positive', 'Negative', or 'Neutral':\n\n"
            f"Review: {user_query}\n\nSentiment:"
        )
    elif option == "Single-Shot":
        # One worked example demonstrates the expected output format.
        formatted_prompt = (
            "Extract key ratings from the review.\n\nExample:\n"
            "Input: 'The pizza was amazing, but it was too loud in there.'\n"
            "Output: Food: 5/5 | Service: N/A | Atmosphere: 2/5\n\n"
            f"Input: '{user_query}'\nOutput:"
        )
    elif option == "Few-Shot":
        # Two examples (one positive, one negative) set the manager's tone.
        formatted_prompt = (
            "As the Manager, write a brief response to this feedback.\n\n"
            "Example 1:\nFeedback: 'Best tacos in town!'\n"
            "Response: Thank you so much! We're thrilled you enjoyed the tacos.\n\n"
            "Example 2:\nFeedback: 'Wait time was too long.'\n"
            "Response: We apologize for the delay. We are working on our speed.\n\n"
            f"Feedback: {user_query}\nResponse:"
        )
    elif option == "Chain of Thought":
        # Rule-based decision; the step-by-step cue elicits explicit reasoning.
        system_instruction = "You are a senior restaurant manager who follows strict logic rules."
        formatted_prompt = (
            "Rule 1: Complaint must involve Food Quality or Billing.\n"
            "Rule 2: Total spend must be over $50.\n"
            "Rule 3: Issue was not resolved on the spot.\n\n"
            "Determine if this customer gets a 15% discount based on the feedback below.\n"
            f"Feedback: {user_query}\n\nLet's think step-by-step:"
        )

    with st.spinner("Analyzing..."):
        try:
            conversation = [
                SystemMessage(content=system_instruction),
                HumanMessage(content=formatted_prompt),
            ]
            reply = chat_model.invoke(conversation)

            st.subheader(f"Results: {option}")
            st.success(reply.content)

            # Show the "Internal Logic" for the portfolio
            with st.expander("View the raw prompt sent to AI"):
                st.code(formatted_prompt)
        except Exception as e:
            st.error(f"Error: {e}")
# --- 6. Footer ---
sidebar = st.sidebar
sidebar.markdown("---")  # divider above the credit line
sidebar.info("Built with LangChain & Hugging Face")