# Lite-gpt.4 / app.py
import streamlit as st
import openai
import torch
from diffusers import StableVideoDiffusionPipeline
import tempfile
import cv2
import numpy as np
import os
from PIL import Image
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
# Streamlit page configuration (must be the first Streamlit command in the script)
st.set_page_config(page_title="LiteGPT - Chat & Video AI", layout="wide")

# Configure the OpenAI API key from the environment instead of hard-coding it in source
openai.api_key = os.getenv("OPENAI_API_KEY")
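# Optional guard (not in the original app): stop early with a clear message if no
# API key is configured, since both the embeddings and the chat model require one.
if not openai.api_key:
    st.error("OPENAI_API_KEY is not set. Please configure it before using the app.")
    st.stop()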
# Load Stable Video Diffusion Model (Optimized for Performance)
@st.cache_resource
def load_video_model():
    model_id = "stabilityai/stable-video-diffusion-img2vid"
    # Half precision on GPU; fall back to float32 on CPU, where fp16 is poorly supported
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableVideoDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
    pipe.to(device)
    return pipe

pipe = load_video_model()
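# Optional sketch (an assumption, not part of the original app): create a tiny
# placeholder knowledge_base.txt on first run so TextLoader below does not fail
# when the file is missing. Replace its contents with your real knowledge base.
if not os.path.exists("knowledge_base.txt"):
    with open("knowledge_base.txt", "w") as f:
        f.write("LiteGPT is a demo app that combines a RAG chatbot with AI video generation.\n")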
# Load RAG Components
@st.cache_resource
def load_rag():
    loader = TextLoader("knowledge_base.txt")  # Ensure you have a knowledge base file
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    texts = text_splitter.split_documents(documents)
    embeddings = OpenAIEmbeddings()
    vectorstore = FAISS.from_documents(texts, embeddings)
    retriever = vectorstore.as_retriever()
    # RetrievalQA expects a LangChain LLM wrapper, not the raw openai.ChatCompletion class
    llm = ChatOpenAI(temperature=0)
    return RetrievalQA.from_chain_type(llm=llm, retriever=retriever)

rag_chain = load_rag()
# Streamlit UI
st.title("💬 LiteGPT - Chat & Video AI")
# Chatbot Function with RAG
# (No caching here: st.cache_resource is meant for shared resources such as models,
# and each prompt should go through the retrieval chain.)
def chat_with_gpt(prompt):
    response = rag_chain.run(prompt)
    return response
# Sidebar - Video Generation
# Stable Video Diffusion is an image-to-video model, so it animates an uploaded
# image rather than generating video directly from a text prompt.
st.sidebar.header("🎥 AI Video Generator")
input_image = st.sidebar.file_uploader("Upload an image to animate", type=["png", "jpg", "jpeg"])
if st.sidebar.button("Generate Video"):
    if input_image is not None:
        with st.spinner("Generating video..."):
            # Resize to the model's expected 1024x576 resolution
            image = Image.open(input_image).convert("RGB").resize((1024, 576))
            # frames[0] is the list of PIL frames for the first (and only) generated video
            video_frames = pipe(image, num_inference_steps=50).frames[0]
            video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
            frames = [np.array(frame) for frame in video_frames]
            height, width, _ = frames[0].shape
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            out = cv2.VideoWriter(video_path, fourcc, 8, (width, height))
            for frame in frames:
                out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
            out.release()
            st.sidebar.video(video_path)
    else:
        st.sidebar.warning("Please upload an image to animate!")
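# Note: diffusers also provides diffusers.utils.export_to_video, which could replace
# the manual cv2.VideoWriter loop above; the OpenCV approach is kept to match the
# original code.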
# Chat Interface
st.subheader("💡 Chat with LiteGPT")
user_input = st.text_input("Type your message:")
if st.button("Send"):
    if user_input:
        response = chat_with_gpt(user_input)
        st.write("🤖 LiteGPT:", response)
    else:
        st.warning("Please enter a message!")
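# Assumed dependencies for this Space (inferred from the imports above, not an official
# requirements list): streamlit, openai, torch, diffusers, opencv-python, numpy, pillow,
# langchain, faiss-cpu, and tiktoken for the OpenAI embeddings.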