import os

# Set a custom cache directory to avoid permission issues.
# These must be set before transformers is imported so the cache location takes effect.
os.environ["HF_HOME"] = "/tmp/huggingface_cache"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface_cache"

import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Hugging Face token for a private repository (if applicable)
hf_token = os.environ.get("HF_TOKEN", None)

# Repository ID of the fine-tuned classification model
repo_id = "milanchndr/email-classification-model"
# Load the model and tokenizer
try:
    model = AutoModelForSequenceClassification.from_pretrained(
        repo_id,
        cache_dir="/tmp/huggingface_cache",
        token=hf_token,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        repo_id,
        cache_dir="/tmp/huggingface_cache",
        token=hf_token,
    )
    print("Model and tokenizer loaded successfully!")
except Exception as e:
    print(f"Error loading model or tokenizer: {e}")
    raise SystemExit(1)
# Set model to evaluation mode
model.eval()
# Map model output indices to support-ticket categories
label_map = {0: "Incident", 1: "Request", 2: "Change", 3: "Problem"}
def classify_email(email: str) -> str:
    """Classify an email into a support category using a fine-tuned model.

    Args:
        email (str): The email text to classify.

    Returns:
        str: The predicted category (Incident, Request, Change, or Problem).
    """
    inputs = tokenizer(
        email, padding=True, truncation=True, max_length=512, return_tensors="pt"
    )
    with torch.no_grad():
        outputs = model(**inputs)
    probs = F.softmax(outputs.logits, dim=1)
    pred = torch.argmax(probs, dim=1).item()
    return label_map[pred]
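

# Minimal usage sketch: the sample email text below is a made-up, hypothetical
# input used only to demonstrate how classify_email is called; it is not part
# of the model's training data or the repository's own test cases.
if __name__ == "__main__":
    sample_email = (
        "Hi team, the VPN connection keeps dropping every few minutes and I "
        "cannot reach the internal wiki. Please look into this."
    )
    print(f"Predicted category: {classify_email(sample_email)}")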