netandreus committed
Commit 4b7f4a7 · verified · 1 Parent(s): b3787bf

Upload folder using huggingface_hub

Files changed (2):
  1. README.md (+16 -4)
  2. handler.py (+7 -7)
README.md CHANGED
@@ -5,7 +5,7 @@ tags:
 - generated_from_trainer
 - transformers
 library_name: sentence-transformers
-pipeline_tag: text-ranking
+pipeline_tag: sentence-similarity
 model-index:
 - name: bge_reranker
   results: []
@@ -13,9 +13,9 @@ inference:
   parameters:
     normalize: True
 widget:
-- input:
-    query: "Hello, world!"
-    documents:
+- inputs:
+    source_sentence: "Hello, world!"
+    sentences:
     - "Hello! How are you?"
     - "Cats and dogs"
     - "The sky is blue"
@@ -76,6 +76,18 @@ curl "https://xxxxxxx.us-east-1.aws.endpoints.huggingface.cloud" \
 
 ```python
 from FlagEmbedding import FlagReranker
+
+class RerankRequest(BaseModel):
+    query: str
+    documents: list[str]
+
+# Prepare array
+arr = []
+for element in request.documents:
+    arr.append([request.query, element])
+print(arr)
+
+# Inference
 reranker = FlagReranker('netandreus/bge-reranker-v2-m3', use_fp16=True)
 scores = reranker.compute_score(arr, normalize=True)
 if not isinstance(scores, list):
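The snippet added to the README is not self-contained as committed: it references `BaseModel` and a `request` object that are never defined. A minimal runnable sketch of the same flow, assuming `pydantic` supplies `BaseModel` and building a hypothetical request from the widget example in the model card:

```python
from FlagEmbedding import FlagReranker
from pydantic import BaseModel  # assumed source of BaseModel


class RerankRequest(BaseModel):
    query: str
    documents: list[str]


# Hypothetical request, mirroring the widget example in the model card
request = RerankRequest(
    query="Hello, world!",
    documents=["Hello! How are you?", "Cats and dogs", "The sky is blue"],
)

# Prepare [query, document] pairs for the reranker
arr = [[request.query, element] for element in request.documents]
print(arr)

# Inference
reranker = FlagReranker('netandreus/bge-reranker-v2-m3', use_fp16=True)
scores = reranker.compute_score(arr, normalize=True)
if not isinstance(scores, list):
    # compute_score returns a single float for one pair; wrap it
    # (assumed continuation of the truncated snippet)
    scores = [scores]
print(scores)
```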
handler.py CHANGED
@@ -19,22 +19,22 @@ class EndpointHandler:
         Expected input format:
         {
             "inputs": {
-                "query": "Your query here",
-                "documents": ["Document 1", "Document 2", ...]
+                "source_sentence": "Your query here",
+                "sentences": ["Document 1", "Document 2", ...]
             },
             "normalize": true  # Optional; defaults to False
         }
         """
         inputs = data.get("inputs", {})
-        query = inputs.get("query")
-        documents = inputs.get("documents", [])
+        source_sentence = inputs.get("source_sentence")
+        sentences = inputs.get("sentences", [])
         normalize = data.get("normalize", False)
 
-        if not query or not documents:
-            return [{"error": "Both 'query' and 'documents' fields are required inside 'inputs'."}]
+        if not source_sentence or not sentences:
+            return [{"error": "Both 'source_sentence' and 'sentences' fields are required inside 'inputs'."}]
 
         # Prepare input pairs
-        pairs = [[query, text] for text in documents]
+        pairs = [[source_sentence, text] for text in sentences]
 
         # Tokenize input pairs
         tokenizer_inputs = self.tokenizer(
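For completeness, a minimal client-side sketch of the request shape the updated handler expects, using the placeholder endpoint URL from the README; the `HF_TOKEN` environment variable and the `requests`-based call are assumptions for illustration, not part of the commit:

```python
import os

import requests

# Placeholder URL from the README; replace with your actual Inference Endpoint
API_URL = "https://xxxxxxx.us-east-1.aws.endpoints.huggingface.cloud"
headers = {
    "Authorization": f"Bearer {os.environ['HF_TOKEN']}",  # hypothetical token env var
    "Content-Type": "application/json",
}

# Payload in the format expected by the updated handler.py
payload = {
    "inputs": {
        "source_sentence": "Hello, world!",
        "sentences": ["Hello! How are you?", "Cats and dogs", "The sky is blue"],
    },
    "normalize": True,
}

response = requests.post(API_URL, headers=headers, json=payload)
print(response.json())  # endpoint response with the scores computed by the handler
```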