# Inori_Chatbot / main.py
# Uploaded by SaitoHoujou — "Initialize Deployment" (commit 0d061c3, verified)
import os
import io
from uuid import uuid4
from dotenv import load_dotenv
from typing import List, Optional
import requests
from bs4 import BeautifulSoup
import pypdf
import google.generativeai as genai
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
# SETUP
# Load variables from a local .env file so GOOGLE_API_KEY is available
# before the Gemini client is configured below.
load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))  # returns None if unset; API calls would fail later
app = FastAPI(title="Chatbot Backend", description="API for Chatbot Application")
# Open CORS so a separately hosted frontend can call this API.
# NOTE(review): browsers reject allow_origins=["*"] combined with
# allow_credentials=True for credentialed requests — confirm whether
# credentials are actually needed here.
app.add_middleware(
CORSMiddleware,
allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"],
)
# In-memory store: file_id (uuid4 string) -> extracted PDF text.
# Lost on process restart and never evicted while the process lives.
pdf_texts_db = {}
gemini_model = genai.GenerativeModel('gemini-2.5-flash')
# PYDANTIC MODEL
class ChatMessage(BaseModel):
    """One turn of a conversation sent by the frontend."""
    # Sender role; gemini_chat_stream maps 'user' to 'user' and anything else to 'model'.
    role: str
    # Plain-text message body.
    content: str
class ChatRequest(BaseModel):
    """Payload for /api/gemini/chat/stream: full conversation plus sampling temperature."""
    # Whole conversation; the last entry is treated as the new user turn.
    messages: List[ChatMessage]
    # Gemini sampling temperature; defaults to 0 (deterministic output).
    temperature: Optional[float] = 0
class URLRequest(BaseModel):
    """Payload for /api/gemini/process-url: a page to fetch and a question about it."""
    # URL fetched server-side with requests.get (15 s timeout).
    url: str
    # Question answered against the page's extracted text.
    question: str
class PDFQueryRequest(BaseModel):
    """Payload for /api/gemini/query-pdf: a previously uploaded file and a question."""
    # Identifier returned by /api/gemini/upload-pdf (key into pdf_texts_db).
    file_id: str
    # Question answered against the stored PDF text.
    question: str
# API ENDPOINTS
@app.get("/")
def read_root():
    """Root endpoint: static welcome payload, doubles as a liveness check."""
    greeting = {"message": "Welcome to the Chatbot Backend API!"}
    return greeting
@app.post("/api/gemini/chat/stream")
async def gemini_chat_stream(request: ChatRequest):
    """Stream a Gemini chat completion as plain-text chunks.

    All but the last message seed the chat history; the last message is sent
    as the new user turn. The response is a text/plain StreamingResponse.
    """
    if not request.messages:
        # BUG FIX: history[-1] below raised IndexError on an empty list.
        return {"error": "messages must contain at least one entry"}
    # Gemini accepts roles 'user' / 'model'; map any non-user role to 'model'.
    history = [
        {'role': 'user' if msg.role == 'user' else 'model', 'parts': [msg.content]}
        for msg in request.messages
    ]
    chat = gemini_model.start_chat(history=history[:-1])
    user_message = history[-1]['parts'][0]

    async def stream_generator():
        try:
            response = chat.send_message(
                user_message,
                stream=True,
                generation_config=genai.types.GenerationConfig(temperature=request.temperature),
            )
            for chunk in response:
                if chunk.text:
                    yield chunk.text
        except Exception as e:
            # Best-effort: log and end the stream rather than crash mid-response.
            print(e)

    return StreamingResponse(stream_generator(), media_type="text/plain")
@app.post("/api/gemini/process-url")
async def gemini_process_url_stream(request: URLRequest):
    """Fetch a web page, reduce it to plain text, and stream Gemini's answer.

    Returns a text/plain StreamingResponse on success, or a JSON dict with an
    "Error" key when the page cannot be fetched or processed.
    """
    try:
        # NOTE(review): requests.get is blocking inside an async endpoint and
        # will stall the event loop for up to 15 s — consider httpx.AsyncClient.
        response = requests.get(request.url, timeout=15)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')
        # Cap extracted text so the prompt stays within model input limits.
        web_content = soup.get_text(separator='\n', strip=True)[:15000]
        prompt = f"""Use content from website:
---
{web_content}
---
and answer the following question: "{request.question}"
"""
        async def stream_generator():
            try:
                response_stream = gemini_model.generate_content(prompt, stream=True)
                for chunk in response_stream:
                    if chunk.text:
                        yield chunk.text
            except Exception as e:
                # Best-effort: log and end the stream rather than crash mid-response.
                print(e)
        return StreamingResponse(stream_generator(), media_type="text/plain")
    except requests.RequestException as e:
        return {"Error": f"Cannot read the content from: {e}"}
    except Exception as e:
        # BUG FIX: original `return {e}` produced a set containing the exception
        # object, which FastAPI cannot JSON-serialize. Return a dict instead,
        # matching the shape of the RequestException branch above.
        return {"Error": str(e)}
@app.post("/api/gemini/upload-pdf")
async def gemini_upload_pdf(file: UploadFile = File(...)):
    """Extract text from an uploaded PDF, cache it in memory, return its id.

    The returned file_id is later passed to /api/gemini/query-pdf.
    """
    if file.content_type != "application/pdf":
        # BUG FIX: original returned a set literal {"Please use PDF file type!"},
        # which FastAPI cannot JSON-serialize. Use a dict with an "error" key,
        # consistent with gemini_query_pdf's error shape.
        return {"error": "Please use PDF file type!"}
    file_id = str(uuid4())
    pdf_content = await file.read()
    reader = pypdf.PdfReader(io.BytesIO(pdf_content))
    # BUG FIX: extract_text() may return None for pages with no extractable
    # text; coalesce to "" so join() does not raise TypeError.
    text = "".join(page.extract_text() or "" for page in reader.pages)
    pdf_texts_db[file_id] = text
    return {"file_id": file_id, "filename": file.filename}
@app.post("/api/gemini/query-pdf")
async def gemini_query_pdf(request: PDFQueryRequest):
    """Answer a question against a previously uploaded PDF, streaming plain text."""
    document_text = pdf_texts_db.get(request.file_id)
    if not document_text:
        return {"error": "Cannot find file"}

    prompt = f"""Use all the content from file:
---
{document_text}
---
to answer the following question: "{request.question}"
"""

    async def stream_generator():
        try:
            # Iterate the streamed completion directly; forward non-empty chunks.
            for piece in gemini_model.generate_content(prompt, stream=True):
                if piece.text:
                    yield piece.text
        except Exception as err:
            # Best-effort: log and end the stream rather than crash mid-response.
            print(err)

    return StreamingResponse(stream_generator(), media_type="text/plain")