netandreus committed on
Commit cc02b66 · verified · 1 Parent(s): 29827d7

Upload folder using huggingface_hub

Files changed (1)
  1. handler.py +62 -0
handler.py ADDED
@@ -0,0 +1,62 @@
+ from typing import Dict, List, Any
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch.nn.functional as F
+
+ class EndpointHandler:
+     def __init__(self, path: str = "BAAI/bge-reranker-v2-m3"):
+         # Load tokenizer and model
+         self.tokenizer = AutoTokenizer.from_pretrained(path)
+         self.model = AutoModelForSequenceClassification.from_pretrained(path)
+         self.model.eval()
+
+         # Determine the computation device
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model.to(self.device)
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         """
+         Expected input format:
+         {
+             "query": "Your query here",
+             "texts": ["Document 1", "Document 2", ...],
+             "normalize": true  # Optional; defaults to False
+         }
+         """
+         query = data.get("query")
+         texts = data.get("texts", [])
+         normalize = data.get("normalize", False)
+
+         if not query or not texts:
+             return [{"error": "Both 'query' and 'texts' fields are required."}]
+
+         # Prepare input pairs
+         pairs = [[query, text] for text in texts]
+
+         # Tokenize input pairs
+         inputs = self.tokenizer(
+             pairs,
+             padding=True,
+             truncation=True,
+             return_tensors="pt",
+             max_length=512
+         ).to(self.device)
+
+         with torch.no_grad():
+             # Get model logits
+             outputs = self.model(**inputs)
+             scores = outputs.logits.view(-1)
+
+         # Apply sigmoid normalization if requested
+         if normalize:
+             scores = torch.sigmoid(scores)
+
+         # Prepare the response
+         results = [
+             {"index": idx, "score": score.item()}
+             for idx, score in enumerate(scores)
+         ]
+
+         # Sort results by descending score
+         results.sort(key=lambda x: x["score"], reverse=True)
+         return results
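
The handler can be exercised locally before deploying to an Inference Endpoint. The sketch below is illustrative and not part of the commit: the query and documents are invented, and it assumes handler.py is importable from the working directory and that the BAAI/bge-reranker-v2-m3 weights can be downloaded from the Hub.

# Minimal local smoke test for the handler above (illustrative, not part of the commit).
from handler import EndpointHandler

handler = EndpointHandler()  # defaults to "BAAI/bge-reranker-v2-m3"

# Hypothetical payload following the format documented in __call__
payload = {
    "query": "What is the capital of France?",
    "texts": [
        "Paris is the capital and most populous city of France.",
        "Mount Everest is the highest mountain above sea level.",
    ],
    "normalize": True,  # map raw logits into (0, 1) via sigmoid
}

for item in handler(payload):
    print(item["index"], round(item["score"], 4))

With "normalize" set to true, scores are sigmoid-mapped into (0, 1) and can be compared against a fixed relevance threshold; without it, the raw logits are only meaningful relative to one another within the same request.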