import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
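# NOTE: assumes streamlit, transformers, and a torch backend are installed
# (on a Hugging Face Space these would typically be pinned in requirements.txt).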
# ------------------------
# Streamlit UI
# ------------------------
st.set_page_config(page_title="MHRV Chatbot", page_icon=":robot:")
st.header("MHRV Chatbot")
# ------------------------
# Session memory
# ------------------------
if "conversation" not in st.session_state:
st.session_state.conversation = []
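# st.session_state persists across Streamlit reruns, so the conversation
# history survives each interaction within a browser session.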
# ------------------------
# Load model and tokenizer
# ------------------------
model_name = "bigscience/bloom-560m"  # small enough for CPU inference

# Cache the model so Streamlit loads it once, not on every rerun of the script
@st.cache_resource
def load_generator():
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    # Text-generation pipeline; generation settings (max_new_tokens, sampling)
    # are passed per call in load_answer below
    return pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        device=-1,  # CPU
    )

generator = load_generator()
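# The pipeline above is pinned to the CPU via device=-1; on a machine with a
# CUDA GPU, passing device=0 instead would run generation on the first GPU.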
# ------------------------
# Helper functions
# ------------------------
def load_answer(user_input):
    # Add the user's message to the session history
    st.session_state.conversation.append({"role": "user", "content": user_input})

    # Build the prompt for BLOOM
    system_instruction = (
        "You are a helpful, professional customer support assistant. "
        "Answer questions clearly, politely, and accurately. "
        "If the question is mathematical or factual, provide the correct answer. "
        "Do not repeat the user's message."
    )

    # Use the last 3 messages (or fewer, early in the chat) to maintain context
    prompt = system_instruction + "\n"
    for msg in st.session_state.conversation[-3:]:
        if msg["role"] == "user":
            prompt += f"User: {msg['content']}\n"
        elif msg["role"] == "assistant":
            prompt += f"Assistant: {msg['content']}\n"
    prompt += "Assistant:"

    # Generate the answer; do_sample=False means deterministic greedy decoding
    output = generator(prompt, max_new_tokens=128, do_sample=False)
    answer = output[0]["generated_text"][len(prompt):].strip()

    # BLOOM is not chat-tuned and may continue with an invented "User:" turn;
    # keep only the text before any such turn
    answer = answer.split("User:")[0].strip()

    # Save the answer in the session history
    st.session_state.conversation.append({"role": "assistant", "content": answer})
    return answer
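# For illustration, a first-turn prompt assembled by load_answer looks like:
#
#   You are a helpful, professional customer support assistant. ...
#   User: What is 2 + 2?
#   Assistant:
#
# The model's continuation after "Assistant:" is taken as the reply.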
# ------------------------
# Streamlit input
# ------------------------
user_input = st.text_input("You: ", key="input")
submit = st.button("Generate")
if submit and user_input:
    response = load_answer(user_input)
    st.subheader("Answer:")
    st.write(response)
# Optional: show conversation history
if st.checkbox("Show conversation history"):
    for msg in st.session_state.conversation:
        role = "You" if msg["role"] == "user" else "Bot"
        st.write(f"**{role}:** {msg['content']}")
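# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py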