# Hugging Face Spaces app — Streamlit chatbot (status banner "Spaces: Running" was UI residue, not code)
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# ------------------------
# Streamlit UI
# ------------------------
st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")
st.header("MHRV Chatbot")

# ------------------------
# Session memory
# ------------------------
# One dict per turn: {"role": "user" | "assistant", "content": str}
if "conversation" not in st.session_state:
    st.session_state.conversation = []

# ------------------------
# Load model and tokenizer
# ------------------------
model_name = "bigscience/bloom-560m"  # CPU-friendly


@st.cache_resource(show_spinner="Loading model…")
def _load_generator(name: str):
    """Build the text-generation pipeline exactly once per process.

    Streamlit re-executes the whole script on every widget interaction;
    without st.cache_resource the ~560M-parameter model would be
    re-downloaded/re-instantiated on every button click.
    """
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForCausalLM.from_pretrained(name)
    return pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        device=-1,  # CPU
        max_new_tokens=256,
        temperature=0.3,
    )


# Module-level handle used by load_answer() below.
generator = _load_generator(model_name)
# ------------------------
# Helper functions
# ------------------------
def load_answer(user_input):
    """Append *user_input* to the session conversation, generate a reply
    with the BLOOM pipeline, store it, and return it.

    The prompt is a fixed system instruction followed by the last three
    conversation turns, ending with a bare "Assistant:" cue for the model
    to complete.
    """
    history = st.session_state.conversation
    history.append({"role": "user", "content": user_input})

    system_instruction = (
        "You are a helpful, professional customer support assistant. "
        "Answer questions clearly, politely, and accurately. "
        "If the question is mathematical or factual, provide the correct answer. "
        "Do not repeat the user's message."
    )

    # Last 3 messages (or fewer) keep the prompt short enough for a small model.
    role_labels = {"user": "User", "assistant": "Assistant"}
    lines = [system_instruction]
    for turn in history[-3:]:
        label = role_labels.get(turn["role"])
        if label is not None:
            lines.append(f"{label}: {turn['content']}")
    lines.append("Assistant:")
    prompt = "\n".join(lines)

    # Greedy decoding (do_sample=False) for reproducible answers.
    completion = generator(prompt, max_new_tokens=128, do_sample=False)
    # The pipeline echoes the prompt; keep only the newly generated tail.
    answer = completion[0]["generated_text"][len(prompt):].strip()

    history.append({"role": "assistant", "content": answer})
    return answer
# ------------------------
# Streamlit input
# ------------------------
user_input = st.text_input("You: ", key="input")
submit = st.button("Generate")

# Only generate when the button was pressed AND there is non-empty input.
if submit and user_input:
    st.subheader("Answer:")
    st.write(load_answer(user_input))

# Optional: show conversation history
if st.checkbox("Show conversation history"):
    for turn in st.session_state.conversation:
        speaker = "You" if turn["role"] == "user" else "Bot"
        st.write(f"**{speaker}:** {turn['content']}")