"""Streamlit chat UI for the CJHauser/PrisimAI-chat causal language model.

Renders a single text input; on submit, generates a sampled completion and
displays it below the prompt box.
"""

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "CJHauser/PrisimAI-chat"


@st.cache_resource
def _load_model():
    """Load the tokenizer and model once per server process.

    Streamlit re-executes this whole script on every user interaction;
    without caching, the model would be re-instantiated on each rerun.
    """
    tok = AutoTokenizer.from_pretrained(model_name, use_fast=True)
    mdl = AutoModelForCausalLM.from_pretrained(model_name)
    return tok, mdl


tokenizer, model = _load_model()

# Streamlit App
st.set_page_config(page_title="PrisimAI Chatbot")
st.title("🤖 PrisimAI Chatbot")

# User prompt
user_input = st.text_input("Ask something:", placeholder="e.g. What is AI?")

if user_input:
    with st.spinner("Thinking..."):
        inputs = tokenizer(user_input, return_tensors="pt")
        outputs = model.generate(
            inputs["input_ids"],
            # Pass the attention mask explicitly: with pad_token_id == eos,
            # generate() cannot infer it and emits a warning / may misbehave.
            attention_mask=inputs["attention_mask"],
            # Bound the *completion* length; max_length would count the
            # prompt tokens too, starving long prompts of answer room.
            max_new_tokens=150,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            pad_token_id=tokenizer.eos_token_id,
        )
        # Decode only the newly generated tokens. This avoids echoing the
        # prompt without str.replace(), which would also delete any
        # occurrence of the prompt text inside the model's answer.
        prompt_len = inputs["input_ids"].shape[1]
        response = tokenizer.decode(
            outputs[0][prompt_len:], skip_special_tokens=True
        ).strip()
    st.markdown(f"**Response:** {response}")