# Hugging Face Space: ASL alphabet sign-language classification API.
from fastapi import FastAPI, File, UploadFile
from transformers import AutoImageProcessor, SiglipForImageClassification
from PIL import Image
import torch
import io
# Hugging Face model id: SigLIP-based classifier for ASL alphabet signs.
MODEL_NAME = "prithivMLmods/Alphabet-Sign-Language-Detection"
try:
    # Model download/load happens at import time; first start may be slow
    # (weights are fetched from the Hugging Face Hub).
    print(f"Cargando modelo '{MODEL_NAME}'...")
    processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
    model = SiglipForImageClassification.from_pretrained(MODEL_NAME)
    print(f"Modelo '{MODEL_NAME}' cargado exitosamente")
    model_loaded = True
except Exception as e:
    # Broad catch is deliberate at this top-level boundary: any load failure
    # (network, missing repo, incompatible weights) leaves the API running in
    # a degraded mode (model/processor are None) instead of crashing startup.
    print(f"Error al cargar el modelo: {e}")
    print("Usando modelo de ejemplo. Para uso real, necesitas un modelo específico de ASL.")
    model = None
    processor = None
    model_loaded = False
app = FastAPI(title="API de ASL con modelo de HF")
@app.post("/predict/")
async def translate_sign(file: UploadFile = File(...)):
    """Classify an uploaded image of an ASL alphabet sign.

    Returns a JSON dict with the predicted letter and its confidence as a
    percentage, or an error dict if the model is unavailable or the image
    cannot be processed.
    """
    # Identity check, not truthiness: both globals are set to None when the
    # model failed to load at startup.
    if model is None or processor is None:
        return {
            "error": "Modelo no disponible.",
            "message": "El modelo específico de ASL no pudo cargarse. Verifica que el modelo existe en Hugging Face.",
            "model_attempted": MODEL_NAME
        }
    try:
        image_bytes = await file.read()
        # Normalize to 3-channel RGB: RGBA, grayscale or palette uploads
        # would otherwise fail (or mispredict) inside the image processor.
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
        inputs = processor(images=image, return_tensors="pt")
        # Inference only — no gradients needed.
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # Use the same axis (-1, the class dimension) for softmax and argmax.
        probs = torch.nn.functional.softmax(logits, dim=-1)
        predicted_class_idx = probs.argmax(-1).item()
        confidence = probs[0][predicted_class_idx].item()
        predicted_label = model.config.id2label[predicted_class_idx]
        return {
            "prediction": predicted_label,
            "confidence": round(confidence * 100, 2)
        }
    except Exception as e:
        # Best-effort endpoint: report the failure to the client rather than
        # surfacing a 500 with no context.
        return {"error": f"Error al procesar la imagen: {str(e)}"}
@app.get("/")
def read_root():
    """Health-check root endpoint: confirms the API is up and points callers at /predict/."""
    greeting = "API ok. Usa el endpoint /predict/ para predecir."
    return {"message": greeting}
@app.get("/status/")
def get_status():
    """Report whether the classification model was loaded at startup."""
    if model_loaded:
        status_text = "Modelo cargado correctamente"
    else:
        status_text = "Modelo no disponible"
    return {
        "model_loaded": model_loaded,
        "model_name": MODEL_NAME,
        "message": status_text,
    }