import os
import sys

import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Use a writable cache directory to avoid permission issues on Spaces
os.environ["HF_HOME"] = "/tmp/huggingface_cache"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface_cache"

# Hugging Face token, needed if the model repository is private
hf_token = os.environ.get("HF_TOKEN", None)
# Define the repository ID
repo_id = "milanchndr/email-classification-model"
# Load the model and tokenizer
try:
    model = AutoModelForSequenceClassification.from_pretrained(
        repo_id,
        cache_dir="/tmp/huggingface_cache",
        token=hf_token,  # `token` replaces the deprecated `use_auth_token`
    )
    tokenizer = AutoTokenizer.from_pretrained(
        repo_id,
        cache_dir="/tmp/huggingface_cache",
        token=hf_token,
    )
    print("Model and tokenizer loaded successfully!")
except Exception as e:
    print(f"Error loading model or tokenizer: {e}")
    sys.exit(1)
# Set model to evaluation mode
model.eval()
label_map = {0: "Incident", 1: "Request", 2: "Change", 3: "Problem"}
def classify_email(email: str) -> str:
    """Classify an email into a support category using the fine-tuned model.

    Args:
        email (str): The email text to classify.

    Returns:
        str: The predicted category (Incident, Request, Change, or Problem).
    """
    inputs = tokenizer(
        email, padding=True, truncation=True, max_length=512, return_tensors="pt"
    )
    with torch.no_grad():
        outputs = model(**inputs)
        # Convert logits to class probabilities and take the highest-scoring class
        probs = F.softmax(outputs.logits, dim=1)
        pred = torch.argmax(probs, dim=1).item()
    return label_map[pred]
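
# Minimal usage sketch (assumption: the file is run directly as a script; the
# sample email below is illustrative, not taken from the model's training data).
if __name__ == "__main__":
    sample_email = (
        "Hi team, the VPN has been down since this morning and I cannot "
        "reach the file server. Please look into it."
    )
    print(f"Predicted category: {classify_email(sample_email)}")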