import os

# Route all Hugging Face caches into a writable app-local directory.
# TRANSFORMERS_CACHE / HF_DATASETS_CACHE / HF_METRICS_CACHE are legacy
# variables kept for older library versions; HF_HOME covers recent ones.
os.environ["HF_HOME"] = "./app_model/hf_cache"
os.environ["TRANSFORMERS_CACHE"] = "./app_model/hf_cache"
os.environ["HF_DATASETS_CACHE"] = "./app_model/hf_cache"
os.environ["HF_METRICS_CACHE"] = "./app_model/hf_cache"
# Ensure model directory exists
MODEL_DIR = "./app_model"
os.makedirs(MODEL_DIR, exist_ok=True)
import torch
import torch.nn.functional as F
from huggingface_hub import snapshot_download
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# ------------------------TRAINING/FINETUNING------------------------
# The actual training / fine-tuning code lives in the accompanying notebook; a rough sketch follows below.
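# NOTE: illustrative sketch only; the real training pipeline is in the notebook.
# The base checkpoint, dataset file, and hyperparameters below are assumptions
# made for illustration, not the values used to produce the published model.
def _finetune_sketch():
    from datasets import load_dataset
    from transformers import (
        AutoModelForSequenceClassification,
        AutoTokenizer,
        Trainer,
        TrainingArguments,
    )

    base = "distilbert-base-uncased"  # assumed base checkpoint
    tok = AutoTokenizer.from_pretrained(base)
    mdl = AutoModelForSequenceClassification.from_pretrained(base, num_labels=4)

    # Hypothetical CSV with "text" and "label" columns (labels 0-3, matching label_map below)
    ds = load_dataset("csv", data_files="emails.csv")["train"]
    ds = ds.map(lambda batch: tok(batch["text"], truncation=True, max_length=512), batched=True)

    args = TrainingArguments(
        output_dir="./finetune_out",
        per_device_train_batch_size=16,
        num_train_epochs=3,
        learning_rate=2e-5,
    )
    Trainer(model=mdl, args=args, train_dataset=ds, tokenizer=tok).train()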
# ------------------------INFERENCE------------------------
# Download the fine-tuned checkpoint from the Hub into MODEL_DIR
model_path = snapshot_download(
    repo_id="milanchndr/email-classification-model",
    local_dir=MODEL_DIR,
    local_dir_use_symlinks=False,  # copy real files (flag is deprecated/ignored on recent huggingface_hub)
)
# Load model and tokenizer from the local path
model = AutoModelForSequenceClassification.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model.eval() # Set model to evaluation mode
# Index -> category mapping for the 4-class classifier head
label_map = {0: "Incident", 1: "Request", 2: "Change", 3: "Problem"}
def classify_email(email: str) -> str:
    """Classify an email into a support category using the fine-tuned model.

    Args:
        email (str): The email text to classify.

    Returns:
        str: The predicted category (Incident, Request, Change, or Problem).
    """
    # Tokenize and truncate to the model's 512-token context window
    inputs = tokenizer(
        email, padding=True, truncation=True, max_length=512, return_tensors="pt"
    )
    # Forward pass without gradient tracking, then take the most probable class
    with torch.no_grad():
        outputs = model(**inputs)
    probs = F.softmax(outputs.logits, dim=1)
    pred = torch.argmax(probs, dim=1).item()
    return label_map[pred]
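
# Example usage (the sample email text is illustrative, not taken from the training data)
if __name__ == "__main__":
    sample = "My laptop cannot connect to the office VPN since this morning."
    print(classify_email(sample))  # prints one of: Incident, Request, Change, Problem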