sentence-transformers (#1)
- feat: sentence transformers (51a9ea9e49d0fe871a0d12e269575b76e4c92711)
- 1_Pooling/config.json +9 -0
- README.md +7 -1
- config_sentence_transformers.json +7 -0
- modules.json +14 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +55 -0
- vocab.txt +0 -0
    	
1_Pooling/config.json
ADDED

@@ -0,0 +1,9 @@
+{
+    "word_embedding_dimension": 768,
+    "pooling_mode_cls_token": false,
+    "pooling_mode_mean_tokens": true,
+    "pooling_mode_max_tokens": false,
+    "pooling_mode_mean_sqrt_len_tokens": false,
+    "pooling_mode_weightedmean_tokens": false,
+    "pooling_mode_lasttoken": false
+}
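This pooling config selects mean pooling only: the sentence embedding is the average of the 768-dimensional token embeddings over non-padding positions. A minimal sketch of that operation (illustrative, not the library's exact code; `token_embeddings` and `attention_mask` are assumed to come from the transformer module):

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 768); attention_mask: (batch, seq_len), 1 for real tokens
    mask = attention_mask.unsqueeze(-1).to(token_embeddings.dtype)
    summed = (token_embeddings * mask).sum(dim=1)   # sum embeddings of non-padding tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)        # number of real tokens per sentence
    return summed / counts                          # (batch, 768) mean-pooled sentence embedding
```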
    	
README.md
CHANGED

@@ -2670,11 +2670,17 @@ For retrieval applications, you should prepend `search_document` for all your do
 
 ### Sentence Transformers
 ```python
+import torch.nn.functional as F
 from sentence_transformers import SentenceTransformer
 
-
+matryoshka_dim = 512
+
+model = SentenceTransformer(".", trust_remote_code=True)
 sentences = ['search_query: What is TSNE?', 'search_query: Who is Laurens van der Maaten?']
 embeddings = model.encode(sentences)
+embeddings = F.layer_norm(embeddings, normalized_shape=(embeddings.shape[1],))
+embeddings = embeddings[:, :matryoshka_dim]
+embeddings = F.normalize(embeddings, p=2, dim=1)
 print(embeddings)
 ```
 
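A possible follow-up to the updated README snippet (not part of the commit): `model.encode` returns a NumPy array by default, so passing `convert_to_tensor=True` (or calling `torch.from_numpy`) keeps the output a tensor for the `torch.nn.functional` ops, and since the final embeddings are unit-normalized, a plain dot product gives cosine similarity for retrieval. The document text and the local model path `"."` below are illustrative.

```python
import torch.nn.functional as F
from sentence_transformers import SentenceTransformer

matryoshka_dim = 512
model = SentenceTransformer(".", trust_remote_code=True)

queries = ['search_query: What is TSNE?']
documents = ['search_document: t-SNE is a dimensionality reduction technique introduced by Laurens van der Maaten.']

def embed(texts):
    emb = model.encode(texts, convert_to_tensor=True)          # keep a torch.Tensor
    emb = F.layer_norm(emb, normalized_shape=(emb.shape[1],))  # per-embedding layer norm
    emb = emb[:, :matryoshka_dim]                              # Matryoshka truncation
    return F.normalize(emb, p=2, dim=1)                        # unit-normalize

scores = embed(queries) @ embed(documents).T  # cosine similarity, since both sides are unit-norm
print(scores)
```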
    	
config_sentence_transformers.json
ADDED

@@ -0,0 +1,7 @@
+{
+    "__version__": {
+      "sentence_transformers": "2.4.0.dev0",
+      "transformers": "4.37.2",
+      "pytorch": "2.1.0+cu121"
+    }
+}
    	
modules.json
ADDED

@@ -0,0 +1,14 @@
+[
+    {
+      "idx": 0,
+      "name": "0",
+      "path": "",
+      "type": "sentence_transformers.models.Transformer"
+    },
+    {
+      "idx": 1,
+      "name": "1",
+      "path": "1_Pooling",
+      "type": "sentence_transformers.models.Pooling"
+    }
+]
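modules.json is what lets `SentenceTransformer(...)` assemble the pipeline automatically: a Transformer module loaded from the repository root followed by the Pooling module configured in `1_Pooling/`. A rough manual equivalent, sketched under the assumption that the model files sit in the current directory and that the custom model code needs `trust_remote_code`:

```python
from sentence_transformers import SentenceTransformer, models

# Module 0: the transformer itself, loaded from the repo root ("path": "")
word_embedding_model = models.Transformer(
    ".",
    max_seq_length=8192,
    model_args={"trust_remote_code": True},
)
# Module 1: mean pooling, mirroring 1_Pooling/config.json
pooling_model = models.Pooling(
    word_embedding_model.get_word_embedding_dimension(),  # 768
    pooling_mode_mean_tokens=True,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])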
    	
sentence_bert_config.json
ADDED

@@ -0,0 +1,4 @@
+{
+    "max_seq_length": 8192,
+    "do_lower_case": false
+}
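These values are read when the Transformer module is built: inputs are truncated to 8192 tokens and the module itself does not lower-case text (casing is left to the tokenizer). After loading, the limit is also exposed on the model and can be lowered; a small sketch, assuming the files are in the current directory:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer(".", trust_remote_code=True)  # illustrative local path
print(model.max_seq_length)   # 8192, taken from sentence_bert_config.json
model.max_seq_length = 2048   # optionally trade context length for speed and memory
```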
    	
special_tokens_map.json
ADDED

@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
    	
tokenizer.json
ADDED

(The diff for this file is too large to render. See the raw diff.)
    	
tokenizer_config.json
ADDED

@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 8192,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
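The `added_tokens_decoder` section pins the standard BERT special tokens to their usual WordPiece IDs (0, 100-103), and `model_max_length` matches the 8192-token limit above. A quick hedged check with the `transformers` tokenizer, assuming the files have been downloaded locally:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # reads tokenizer_config.json, tokenizer.json, vocab.txt
print(tokenizer.pad_token_id, tokenizer.unk_token_id, tokenizer.cls_token_id,
      tokenizer.sep_token_id, tokenizer.mask_token_id)
# Expected from added_tokens_decoder: 0 100 101 102 103
print(tokenizer.model_max_length)  # 8192
```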
    	
vocab.txt
ADDED

(The diff for this file is too large to render. See the raw diff.)

