import streamlit as st
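# Local backend helpers: uploaded_file_to_response answers a prompt over an
# uploaded file, normal_response handles plain text-only chat.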
from backend import uploaded_file_to_response, normal_response
from llama_index.llms.gemini import Gemini
from llama_index.embeddings.gemini import GeminiEmbedding
import google.generativeai as genai
import os
from dotenv import load_dotenv
load_dotenv()
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    st.error("GOOGLE_API_KEY is not set; add it to a .env file or the environment.")
    st.stop()
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
genai.configure(api_key=GOOGLE_API_KEY)
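# Gemini chat and embedding models; they are not referenced directly in this
# file, so they are presumably shared with the backend's LlamaIndex pipeline.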
llm = Gemini(model_name="models/gemini-1.5-flash")
embeddings = GeminiEmbedding(model_name="models/embedding-001")
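# Keep the conversation in session state so it survives Streamlit's
# top-to-bottom rerun on every interaction.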
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
# Custom page CSS (left empty here).
st.markdown("""
""", unsafe_allow_html=True)
st.markdown("<h1>🧠VisionLang</h1>", unsafe_allow_html=True)
st.caption("🚀 Upload files & chat with VisionLang")
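# Input widgets: an optional multimodal upload plus the user's text prompt.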
uploaded_file = st.file_uploader("Upload a File (Image, Document, Code or Video)", type=["png", "jpg", "jpeg", "pdf", "docx", "txt", "py", "js", "java", "cpp", "mp4"], key="file_uploader")
user_input = st.text_input("Type your message here...", key="chat_input", help="Chat with AI", label_visibility="collapsed")
if st.button("Do Magic", key="generate_button", help="Click to get AI response", use_container_width=False):
    if user_input:
        with st.spinner("Processing..."):
            # Use the file-aware handler when a file is attached; otherwise
            # fall back to plain chat, so the model is only called once.
            if uploaded_file:
                response = uploaded_file_to_response(uploaded_file, user_input)
            else:
                response = normal_response(user_input)
        # Newest exchange goes first.
        st.session_state.chat_history.insert(0, (user_input, response))
    else:
        st.warning("Type a message before clicking Do Magic.")
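# Render the conversation, newest exchange first.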
if st.session_state.chat_history:
    chat_container = st.container()
    with chat_container:
        st.markdown("### Chat History")
        for user_msg, ai_response in st.session_state.chat_history:
            st.markdown(f"**You:** {user_msg}", unsafe_allow_html=True)
            st.markdown(f"**VisionLang:** {ai_response}", unsafe_allow_html=True)
            st.markdown("---")