Commit e4bb56d
Parent(s): e56d574
Upload replit_lm_tokenizer.py
Files changed: replit_lm_tokenizer.py (+119 -0)
replit_lm_tokenizer.py ADDED
@@ -0,0 +1,119 @@
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Forked from the file src/transformers/models/bert_generation/tokenization_bert_generation.py from the HuggingFace Transformers library.
Permalink: https://github.com/huggingface/transformers/blob/04ab5605fbb4ef207b10bf2772d88c53fc242e83/src/transformers/models/bert_generation/tokenization_bert_generation.py

Tokenizer class for ReplitLM.
The class is modified for compatibility with the custom vocabulary and to achieve the desired encode/decode behavior for the Replit Code V1 3B model.
"""
import os
import sentencepiece as spm
from shutil import copyfile
from transformers import PreTrainedTokenizer
from typing import Any, Dict, List, Optional, Tuple

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}


class ReplitLMTokenizer(PreTrainedTokenizer):
    """
    Construct a ReplitLMTokenizer tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`], which contains most of the main methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        bos_token (`str`, *optional*, defaults to `None`):
            The beginning of sequence token.
        unk_token (`str`, *optional*, defaults to `"<|unk|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<|pad|>"`):
            The token used for padding, for example when batching sequences of different lengths.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other
            things, to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.
            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    prefix_tokens: List[int] = []
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token=None, eos_token='<|endoftext|>', unk_token='<|unk|>', pad_token='<|pad|>', sep_token=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload it in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility: older serialized tokenizers may not carry sp_model_kwargs.
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.id_to_piece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        return self.sp_model.decode(tokens)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            raise ValueError(f'Vocabulary path ({save_directory}) should be a directory')
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Fall back to serializing the in-memory SentencePiece model when no vocab file exists on disk.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
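
For reference, a minimal usage sketch (not part of the commit): it assumes the uploaded module is importable from the working directory, that a SentencePiece vocabulary file is available locally at ./spiece.model (the filename this tokenizer expects), and a transformers version compatible with this class.

# Minimal sketch, assuming replit_lm_tokenizer.py and ./spiece.model are in the working directory.
from replit_lm_tokenizer import ReplitLMTokenizer

tokenizer = ReplitLMTokenizer(vocab_file='./spiece.model')

# Encode source text to token ids, then decode back to a string.
encoded = tokenizer('def add(a, b):\n    return a + b')
ids = encoded['input_ids']
text = tokenizer.decode(ids)

# Optional: subword regularization via sp_model_kwargs (see the class docstring).
sampling_tokenizer = ReplitLMTokenizer(
    vocab_file='./spiece.model',
    sp_model_kwargs={'enable_sampling': True, 'nbest_size': -1, 'alpha': 0.1},
)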