# NOTE: removed non-code artifacts from the Hugging Face Spaces file viewer
# (build-status lines, blob hashes, and the line-number gutter) that had been
# captured along with the source and would not parse as Python.
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.responses import PlainTextResponse
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi import Request
import os
from dotenv import load_dotenv
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from huggingface_hub import login
app = FastAPI()

# Allow cross-origin requests from any origin.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers under the CORS spec for credentialed requests — confirm
# whether credentials are really needed, or pin the allowed origins instead.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the Hugging Face token from the .env file.
load_dotenv()
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    # Fail fast at import time: the model below cannot be downloaded without it.
    raise ValueError("Token do Hugging Face não encontrado. Adicione HF_TOKEN no arquivo .env")

# Authenticate with the Hub (optional if the HF_TOKEN env var is already set).
login(token=hf_token)

model_name = "NullisTerminis/oncology_t5"

# Download and load the tokenizer/model using the token above.
# NOTE(review): this runs at import time — it needs network access and may
# take a while on the first (uncached) start.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, token=hf_token)

# Simple manual smoke test (disabled; kept for reference):
#input_text = input(str("Texto:"))
#inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
#output = model.generate(**inputs, max_length=128)
#decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
#print("Saída gerada pelo modelo:")
#print(decoded_output)
# ROUTES ////////////////////////////////////////////////////////////////////
@app.get("/", response_class=PlainTextResponse)
async def root():
    """Health-check endpoint: reply with a fixed plain-text greeting."""
    return "Link start!"
@app.get("/mensagem", response_class=JSONResponse)
async def mensagem():
    """Return a JSON status payload confirming the API is running."""
    payload = {"mensagem": "API funcionando perfeitamente no Hugging Face!"}
    return payload