# Earlier CLI prototype (no UI), kept for reference; superseded by the Streamlit app below.
# import os
# from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# from langchain_core.messages import HumanMessage, SystemMessage
#
# os.environ["HF_TOKEN"] = os.getenv("Ayush")
# os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("Ayush")
#
# llama_model = HuggingFaceEndpoint(
#     repo_id="meta-llama/Llama-3.2-3B-Instruct",
#     provider="nebius",
#     temperature=0.6,
#     max_new_tokens=70,
#     task="conversational",
# )
# # ChatHuggingFace only needs the wrapped LLM; there is no need to repeat
# # the endpoint arguments here.
# model_d = ChatHuggingFace(llm=llama_model)
#
# message = [
#     SystemMessage(content="Answer like you are a hardcore PC gamer"),
#     HumanMessage(content="Give me the names of the top 10 PC games of all time, with descriptions"),
# ]
# result = model_d.invoke(message)
# print(result.content)
import streamlit as st
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms import HuggingFaceHub
from langchain_core.messages import HumanMessage, SystemMessage
# Setup API key (replace with your key, or use st.secrets as sketched below)
import os
os.environ["HF_TOKEN"] = os.getenv("Ayush")
# LangChain's Hugging Face integrations read HUGGINGFACEHUB_API_TOKEN, not *_API_KEY
os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("Ayush")
# Load model (HuggingFaceHub wraps the hosted inference endpoint for the repo)
llm = HuggingFaceHub(
    repo_id="meta-llama/Llama-3.2-3B-Instruct",
    model_kwargs={"temperature": 0.6, "max_new_tokens": 100},
)
chat_model = ChatHuggingFace(llm=llm)
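# Note: HuggingFaceHub and the community ChatHuggingFace are deprecated in
# recent LangChain releases. A minimal sketch of the current path, assuming
# the langchain-huggingface package is installed (same model, same knobs):
# from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# llm = HuggingFaceEndpoint(
#     repo_id="meta-llama/Llama-3.2-3B-Instruct",
#     temperature=0.6,
#     max_new_tokens=100,
# )
# chat_model = ChatHuggingFace(llm=llm)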
# Streamlit UI
st.title("🧪 Simple LLaMA Chat Test")
question = st.text_input(
    "Ask a gaming-related question:",
    "Give me the names of the top 10 PC games of all time, with descriptions",
)
if st.button("Ask"):
    # Build a two-message chat: a persona-setting system prompt plus the user's question
    messages = [
        SystemMessage(content="Answer like you are a hardcore PC gamer"),
        HumanMessage(content=question),
    ]
    response = chat_model.invoke(messages)
    st.write("### Response:")
    st.write(response.content)
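# To try it locally (assuming streamlit and the langchain packages are
# installed, and the "Ayush" env var holds a valid Hugging Face token):
#   streamlit run app.py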