|
import os
|
|
|
|
import torch
|
|
import uvicorn
|
|
from fastapi import FastAPI, HTTPException
|
|
from fastapi.responses import JSONResponse
|
|
from pydantic import BaseModel
|
|
from transformers import pipeline
|
|
|
|
|
|
# FastAPI application instance; this metadata is surfaced in the
# auto-generated OpenAPI docs (/docs and /redoc).
app = FastAPI(
    title="Emotions PL API",
    description="API do oznaczaniem tagami emocji go-emotions-polish-gpt2-small-v0.0.1",
    version="1.0.0"
)
|
|
|
|
|
|
# Hugging Face Hub id of the Polish GoEmotions text-classification model.
MODEL_NAME = "nie3e/go-emotions-polish-gpt2-small-v0.0.1"

# Classification pipeline; populated by the startup handler.
# Stays None until the model loads (or when loading failed), which the
# /predict endpoint checks before answering.
generator = None
|
|
|
|
|
|
|
|
class PredictRequest(BaseModel):
    """Request body for the /predict endpoint."""

    # Text to be tagged with emotion labels.
    prompt: str
|
|
|
|
|
|
# NOTE(review): @app.on_event is deprecated in recent FastAPI in favor of
# lifespan handlers; kept here to preserve the existing interface.
@app.on_event("startup")
async def startup_event():
    """Load the emotion-classification pipeline once at application startup.

    Sets the module-level ``generator``. On failure it is left as None so
    that /predict can respond with 503 instead of crashing the app.
    """
    global generator

    # Bug fix: the original code checked CUDA availability but never told
    # the pipeline to use it, so inference always ran on CPU. Pass the
    # device explicitly (0 = first CUDA device, -1 = CPU).
    if torch.cuda.is_available():
        device = 0
        print("device: GPU")
    else:
        device = -1
        print("device: CPU")

    print(f"Ładowanie modelu: {MODEL_NAME}...")
    try:
        generator = pipeline(
            "text-classification",
            model=MODEL_NAME,
            device=device,
            # Bug fix: top_k=None is the documented way to get scores for
            # every label; the original top_k=-1 is not a valid value for
            # this parameter.
            top_k=None,
        )
        print("Model załadowany pomyślnie!")
    except Exception as e:
        # Best-effort startup: log and leave generator unset so the
        # service still comes up and reports 503 on /predict.
        print(f"Błąd ładowania modelu: {e}")
        generator = None
|
|
|
|
|
|
@app.get("/")
async def root():
    """Health-check endpoint confirming the service is up."""
    status_message = {"message": "Polish emotions API is running!"}
    return status_message
|
|
|
|
|
|
@app.post("/predict")
async def predict(request: PredictRequest):
    """Classify emotions in ``request.prompt``.

    Returns the per-label scores for the single input sequence as JSON.

    Raises:
        HTTPException: 503 when the model is not loaded, 500 when
            inference fails.
    """
    # Guard: startup may have failed to load the model.
    # Bug fix: repaired mojibake in the Polish error messages
    # (e.g. "zosta艂" -> "został").
    if generator is None:
        raise HTTPException(
            status_code=503,
            detail="Model nie został załadowany lub wystąpił błąd.",
        )

    try:
        results = generator(request.prompt)

        # The pipeline returns one result per input; we sent exactly one.
        return JSONResponse(
            content=results[0],
            # Explicit charset so Polish label text renders correctly.
            media_type="application/json; charset=utf-8",
        )
    except Exception as e:
        # Chain the original cause for proper tracebacks in logs.
        raise HTTPException(
            status_code=500,
            detail=f"Błąd podczas generowania tekstu: {e}",
        ) from e
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Honour the PORT environment variable (Hugging Face Spaces
    # convention), defaulting to 7860.
    port = int(os.getenv("PORT", 7860))
    uvicorn.run(app, host="0.0.0.0", port=port)
|
|
|