"""FastAPI service exposing a multimodal RAG question-answering endpoint.

Retrieves text/table/image context from a local FAISS index and answers
questions with GPT-4, strictly from the retrieved context.
"""

import base64
import json
import os
import uuid

from dotenv import load_dotenv
from fastapi import FastAPI, File, Form, Request, Response, UploadFile
from fastapi.encoders import jsonable_encoder
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.templating import Jinja2Templates
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.schema.messages import HumanMessage, SystemMessage
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableSequence

# Load .env once (the previous revision called this twice).
load_dotenv()

app = FastAPI()
templates = Jinja2Templates(directory="templates")

# Prompt restricts the model to the retrieved context and instructs it to
# decline when unsure.
prompt_template = """ You are an expert in skin cancer, etc. Answer the question based only on the following context, which can include text, images, and tables: {context} Question: {question} Don't answer if you are not sure and decline to answer and say "Sorry, I don't have much information about it." Just return the helpful answer in as much detail as possible. 
Answer: """

# SECURITY FIX: the previous revision hard-coded a live OpenAI API key in
# source. Never commit secrets — read the key from the environment (populated
# by load_dotenv above) and rotate the leaked key immediately.
openai_api_key = os.getenv("OPENAI_API_KEY")

embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
llm = ChatOpenAI(model="gpt-4", openai_api_key=openai_api_key, max_tokens=1024)

# FAISS indexes are pickled on disk, hence allow_dangerous_deserialization.
# Only load indexes you produced yourself — never untrusted files.
db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)

prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)

# `prompt | llm` already yields a RunnableSequence; wrapping it in
# RunnableSequence(...) (as the previous revision did) is redundant.
qa_chain = prompt | llm


@app.get("/", response_class=HTMLResponse)
async def index(request: Request):
    """Serve the chat UI page."""
    return templates.TemplateResponse("index.html", {"request": request})


@app.post("/get_answer")
async def get_answer(question: str = Form(...)):
    """Answer `question` from the FAISS index.

    Builds a context string from the retrieved documents, tagging each piece
    with its modality ([text] / [table] / [image]), then invokes the chain.
    Returns JSON with the answer and the first relevant image (base64), if any.
    """
    relevant_docs = db.similarity_search(question)
    context = ""
    relevant_images = []
    for d in relevant_docs:
        # .get() instead of [] so documents without a 'type' key are skipped
        # rather than raising KeyError.
        doc_type = d.metadata.get('type')
        if doc_type == 'text':
            context += '[text]' + d.metadata['original_content']
        elif doc_type == 'table':
            context += '[table]' + d.metadata['original_content']
        elif doc_type == 'image':
            # For images, page_content holds the retrieval summary text; the
            # original (presumably base64) image travels in metadata.
            context += '[image]' + d.page_content
            relevant_images.append(d.metadata['original_content'])

    result = qa_chain.invoke({"context": context, "question": question})
    # BUG FIX: ChatOpenAI returns an AIMessage, which is not JSON-serializable;
    # returning it directly made JSONResponse raise TypeError. Extract the text.
    answer = result.content if hasattr(result, "content") else str(result)
    return JSONResponse({
        "relevant_images": relevant_images[0] if relevant_images else None,
        "result": answer,
    })