from fastapi import FastAPI, Query
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from transformers import pipeline
import logging

app = FastAPI()

# Set up logging
logging.basicConfig(level=logging.INFO)

# Load the model without cache_dir
pipe_flan = pipeline("text2text-generation", model="ai1-test/finance-chatbot-flan-t5-large")
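# Note: a text2text-generation pipeline returns a list of dicts, each with a
# "generated_text" key; the endpoint below unpacks the first entry.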
@app.get("/infer_t5")
def t5(input: str = Query(..., description="Input text for the chatbot")):
try:
logging.info(f"Received input: {input}") # Log input
output = pipe_flan(input)
generated_text = output[0]["generated_text"]
logging.info(f"Generated text: {generated_text}") # Log generated output
return {"output": generated_text}
except Exception as e:
logging.error(f"Error occurred during inference: {str(e)}") # Log the error
return {"error": f"An error occurred: {str(e)}"}
app.mount("/", StaticFiles(directory="static", html=True), name="static")
@app.get("/")
def index() -> FileResponse:
return FileResponse(path="/app/static/index.html", media_type="text/html") |
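
# Example usage (a sketch; assumes this file is saved as main.py, a static/
# directory with index.html exists, and the server listens on port 7860, the
# default port for Hugging Face Spaces):
#
#   uvicorn main:app --host 0.0.0.0 --port 7860
#
# and then, from a client (hypothetical snippet using the requests library):
#
#   import requests
#   resp = requests.get(
#       "http://localhost:7860/infer_t5",
#       params={"input": "What is compound interest?"},
#   )
#   print(resp.json()["output"])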