Upload folder using huggingface_hub
- README.md +33 -121
- added_tokens.json +7 -6
- chat_template.jinja +1 -0
- config.json +2 -3
- generation_config.json +1 -1
- model.safetensors +2 -2
- runs/May28_17-48-55_r-davanstrien-jupyterlab-svh3scip-91c05-sx22q/events.out.tfevents.1748447366.r-davanstrien-jupyterlab-svh3scip-91c05-sx22q.1156.0 +3 -0
- special_tokens_map.json +7 -1
- tokenizer.json +15 -11
- tokenizer_config.json +14 -7
- training_args.bin +2 -2
README.md
CHANGED
@@ -1,146 +1,58 @@
 ---
 base_model: HuggingFaceTB/SmolLM2-360M
 library_name: transformers
-model_name: SmolLM2-360M-tldr-sft-2025-
 tags:
 - generated_from_trainer
 - trl
 - sft
-
-datasets:
-- davanstrien/hub-tldr-dataset-summaries-llama
-- davanstrien/hub-tldr-model-summaries-llama
 ---
 
-# Smol-Hub-tldr
-
-</div>
-
-The summaries can be used for:
-- creating useful tl;dr descriptions that can give you a quick sense of what a dataset or model is for
-- as input text for creating embeddings for semantic search. You can see a demo of this in [librarian-bots/huggingface-datasets-semantic-search](https://huggingface.co/spaces/librarian-bots/huggingface-datasets-semantic-search).
-
-The model was trained using supervised fine-tuning (SFT) with [TRL](https://github.com/huggingface/trl).
-
-A meta example of a summary generated for this card:
-
-> This model is a fine-tuned version of SmolLM2-360M for generating concise, one-sentence summaries of model and dataset cards from the Hugging Face Hub.
-
-## Intended Use
-
-The model is designed to generate brief, informative summaries of:
-- Model cards: Focusing on key capabilities and characteristics
-- Dataset cards: Capturing essential dataset characteristics and purposes
-
-## Training Data
-
-The model was trained on:
-- Model card summaries generated by Llama 3.3 70B
-- Dataset card summaries generated by Llama 3.3 70B
-
-Model context length: the model was trained on cards of up to 2048 tokens.
-
-## Usage
-
-Using the chat template at inference time is recommended. Additionally, you should prepend either `<MODEL_CARD>` or `<DATASET_CARD>` to the start of the card you want to summarize. The training data used the body of the model or dataset card (i.e., the part after the YAML), so you will likely get better results by passing only this part of the card.
-
-I have so far found that a low temperature of `0.4` generates better results.
-
-Example:
 
 ```python
-from transformers import AutoModelForCausalLM, AutoTokenizer
-from huggingface_hub import ModelCard
-
-card = ModelCard.load("davanstrien/Smol-Hub-tldr")
-
-# Load tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("davanstrien/Smol-Hub-tldr")
-model = AutoModelForCausalLM.from_pretrained("davanstrien/Smol-Hub-tldr")
-
-# Format input according to the chat template
-messages = [{"role": "user", "content": f"<MODEL_CARD>{card.text}"}]
-# Encode with the chat template
-inputs = tokenizer.apply_chat_template(
-    messages, add_generation_prompt=True, return_tensors="pt"
-)
-
-# Generate with stop tokens
-outputs = model.generate(
-    inputs,
-    max_new_tokens=60,
-    pad_token_id=tokenizer.pad_token_id,
-    eos_token_id=tokenizer.eos_token_id,
-    temperature=0.4,
-    do_sample=True,
-)
-
-# Decode, then pull out the summary between the <CARD_SUMMARY> tags
-response = tokenizer.decode(outputs[0])
-summary = response.split("<CARD_SUMMARY>")[-1].split("</CARD_SUMMARY>")[0]
-print(summary)
->>> "The Smol-Hub-tldr model is a fine-tuned version of SmolLM2-360M designed to generate concise, one-sentence summaries of model and dataset cards from the Hugging Face Hub."
 ```
 
-
-
-```python
-from transformers import pipeline, StoppingCriteria, StoppingCriteriaList
-import torch
-
-class StopOnTokens(StoppingCriteria):
-    def __init__(self, tokenizer, stop_token_ids):
-        self.stop_token_ids = stop_token_ids
-        self.tokenizer = tokenizer
-
-    def __call__(
-        self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
-    ) -> bool:
-        for stop_id in self.stop_token_ids:
-            if input_ids[0][-1] == stop_id:
-                return True
-        return False
-
-# Load the model as a text-generation pipeline
-pipe = pipeline("text-generation", model="davanstrien/Smol-Hub-tldr")
-messages = [{"role": "user", "content": f"<MODEL_CARD>{card.text}"}]
-
-stop_token_ids = [
-    tokenizer.encode("</CARD_SUMMARY>", add_special_tokens=True)[-1],
-    tokenizer.eos_token_id,
-]
-
-# Create stopping criteria
-stopping_criteria = StoppingCriteriaList([StopOnTokens(tokenizer, stop_token_ids)])
-
-# Generate with stopping criteria
-response = pipe(
-    messages,
-    max_new_tokens=50,
-    do_sample=True,
-    temperature=0.7,
-    stopping_criteria=stopping_criteria,
-    return_full_text=False,
-)
-
-# Clean up the response
-summary = response[0]["generated_text"]
-print(summary)
->>> "This model is a fine-tuned version of SmolLM2-360M for generating concise, one-sentence summaries of model and dataset cards from the Hugging Face Hub."
-```
 
-
-
-
-
-
-
 ---
 base_model: HuggingFaceTB/SmolLM2-360M
 library_name: transformers
+model_name: SmolLM2-360M-tldr-sft-2025-05-28_17-48
 tags:
 - generated_from_trainer
 - trl
 - sft
+licence: license
 ---
 
+# Model Card for SmolLM2-360M-tldr-sft-2025-05-28_17-48
 
+This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-360M](https://huggingface.co/HuggingFaceTB/SmolLM2-360M).
+It has been trained using [TRL](https://github.com/huggingface/trl).
 
+## Quick start
 
 ```python
+from transformers import pipeline
 
+question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+generator = pipeline("text-generation", model="davanstrien/SmolLM2-360M-tldr-sft-2025-05-28_17-48", device="cuda")
+output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+print(output["generated_text"])
 ```
 
+## Training procedure
 
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/davanstrien/huggingface/runs/qf0k0s0e)
 
+This model was trained with SFT.
 
+### Framework versions
 
+- TRL: 0.19.0
+- Transformers: 4.52.3
+- Pytorch: 2.7.0
+- Datasets: 3.6.0
+- Tokenizers: 0.21.1
 
+## Citations
 
+Cite TRL as:
+
+```bibtex
+@misc{vonwerra2022trl,
+    title = {{TRL: Transformer Reinforcement Learning}},
+    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+    year = 2020,
+    journal = {GitHub repository},
+    publisher = {GitHub},
+    howpublished = {\url{https://github.com/huggingface/trl}}
+}
+```
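The auto-generated Quick start above replaces the card-specific usage notes this commit removes from the README. A minimal sketch of the summarization workflow against this upload, assuming the `<MODEL_CARD>`/`<DATASET_CARD>` prefix, card-body input, and low-temperature setting documented in the removed text still apply (the card loaded here is just an example):

```python
from huggingface_hub import ModelCard
from transformers import pipeline

repo = "davanstrien/SmolLM2-360M-tldr-sft-2025-05-28_17-48"

# card.text is the body after the YAML header, which is what the model
# was trained on; the base model's card serves as example input here.
card = ModelCard.load("HuggingFaceTB/SmolLM2-360M")

summarizer = pipeline("text-generation", model=repo)
# The repo's chat template wraps this in <CARD>...</CARD> and opens a
# <CARD_SUMMARY> tag for the model to complete.
messages = [{"role": "user", "content": f"<MODEL_CARD>{card.text}"}]
out = summarizer(
    messages,
    max_new_tokens=60,
    do_sample=True,
    temperature=0.4,
    return_full_text=False,
)
print(out[0]["generated_text"])
```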
added_tokens.json
CHANGED
@@ -1,8 +1,9 @@
 {
-  "</CARD>": 
-  "</CARD_SUMMARY>": 
-  "<CARD>": 
-  "<CARD_SUMMARY>": 
-  "<DATASET_CARD>": 
-  "<MODEL_CARD>": 
 }
 {
+  "</CARD>": 49154,
+  "</CARD_SUMMARY>": 49156,
+  "<CARD>": 49153,
+  "<CARD_SUMMARY>": 49155,
+  "<DATASET_CARD>": 49157,
+  "<MODEL_CARD>": 49158,
+  "[PAD]": 49152
 }
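A quick way to confirm a freshly loaded tokenizer picks up this mapping (a sketch using the repo id from this upload):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("davanstrien/SmolLM2-360M-tldr-sft-2025-05-28_17-48")
# [PAD] fills slot 49152; the card markers follow at 49153-49158.
for token in ["[PAD]", "<CARD>", "</CARD>", "<CARD_SUMMARY>",
              "</CARD_SUMMARY>", "<DATASET_CARD>", "<MODEL_CARD>"]:
    print(token, tok.convert_tokens_to_ids(token))
```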
chat_template.jinja
ADDED
@@ -0,0 +1 @@
+{% for message in messages %}{% if message['role'] == 'user' %}<CARD>{{ message['content'] }}</CARD>{% elif message['role'] == 'assistant' %}<CARD_SUMMARY>{{ message['content'] }}</CARD_SUMMARY>{% endif %}{% endfor %}{% if not add_generation_prompt %}<|endoftext|>{% endif %}{% if add_generation_prompt %}<CARD_SUMMARY>{% endif %}
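Rendered through `apply_chat_template`, this template wraps user content in `<CARD>` tags and, when a generation prompt is requested, leaves an open `<CARD_SUMMARY>` tag for the model to complete. An illustration with hypothetical card text:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("davanstrien/SmolLM2-360M-tldr-sft-2025-05-28_17-48")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "<MODEL_CARD>Some card body..."}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# <CARD><MODEL_CARD>Some card body...</CARD><CARD_SUMMARY>
```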
config.json
CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "HuggingFaceTB/SmolLM2-360M",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -26,7 +25,7 @@
   "rope_theta": 100000,
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
   "use_cache": true,
-  "vocab_size": 
 }
 {
   "architectures": [
     "LlamaForCausalLM"
   ],
   "rope_theta": 100000,
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
+  "transformers_version": "4.52.3",
   "use_cache": true,
+  "vocab_size": 49159
 }
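The new `vocab_size` of 49159 is the 49152-token base vocabulary plus the seven entries in added_tokens.json. A consistency check (sketch):

```python
from transformers import AutoConfig, AutoTokenizer

repo = "davanstrien/SmolLM2-360M-tldr-sft-2025-05-28_17-48"
config = AutoConfig.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)
# len(tokenizer) counts the base vocabulary plus all added tokens.
assert config.vocab_size == len(tokenizer) == 49159
```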
generation_config.json
CHANGED
@@ -2,5 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 0,
   "eos_token_id": 0,
-  "transformers_version": "4.
 }
   "_from_model_config": true,
   "bos_token_id": 0,
   "eos_token_id": 0,
+  "transformers_version": "4.52.3"
 }
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size 
 version https://git-lfs.github.com/spec/v1
+oid sha256:a5ad1d90198dfce88e6ee1f90539332d12d087f3a59da4f6abdf436813ff622c
+size 723688352
runs/May28_17-48-55_r-davanstrien-jupyterlab-svh3scip-91c05-sx22q/events.out.tfevents.1748447366.r-davanstrien-jupyterlab-svh3scip-91c05-sx22q.1156.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e473849f0b893c6dad386796492a653251310f5d85bb1a862d98be3b90c16e7
+size 18148
special_tokens_map.json
CHANGED
@@ -57,7 +57,13 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": 
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
     "rstrip": false,
     "single_word": false
   },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
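With a dedicated `[PAD]` token in place, batched inference can pad without reusing `<|endoftext|>`. A small sketch:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("davanstrien/SmolLM2-360M-tldr-sft-2025-05-28_17-48")
print(tok.pad_token, tok.pad_token_id)  # [PAD] 49152
# The shorter sequence in the batch is padded with id 49152.
batch = tok(["short card", "a somewhat longer card body"],
            padding=True, return_tensors="pt")
print(batch["input_ids"])
```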
tokenizer.json
CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 8192,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
   "padding": null,
   "added_tokens": [
     {
@@ -163,7 +158,7 @@
     },
     {
       "id": 49152,
-      "content": "
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -172,7 +167,7 @@
     },
     {
       "id": 49153,
-      "content": "
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -181,7 +176,7 @@
     },
     {
       "id": 49154,
-      "content": "
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -190,7 +185,7 @@
     },
     {
       "id": 49155,
-      "content": "
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -199,7 +194,7 @@
     },
     {
       "id": 49156,
-      "content": "
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
@@ -208,6 +203,15 @@
     },
     {
       "id": 49157,
       "content": "<MODEL_CARD>",
       "single_word": false,
       "lstrip": false,
 {
   "version": "1.0",
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
     },
     {
       "id": 49152,
+      "content": "[PAD]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
     },
     {
       "id": 49153,
+      "content": "<CARD>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
     },
     {
       "id": 49154,
+      "content": "</CARD>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
     },
     {
       "id": 49155,
+      "content": "<CARD_SUMMARY>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
     },
     {
       "id": 49156,
+      "content": "</CARD_SUMMARY>",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
     },
     {
       "id": 49157,
+      "content": "<DATASET_CARD>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 49158,
       "content": "<MODEL_CARD>",
       "single_word": false,
       "lstrip": false,
tokenizer_config.json
CHANGED
@@ -138,7 +138,7 @@
     "special": true
   },
   "49152": {
-    "content": "
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -146,7 +146,7 @@
     "special": true
   },
   "49153": {
-    "content": "
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -154,7 +154,7 @@
     "special": true
   },
   "49154": {
-    "content": "
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -162,7 +162,7 @@
     "special": true
   },
   "49155": {
-    "content": "
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -170,7 +170,7 @@
     "special": true
   },
   "49156": {
-    "content": "
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -178,6 +178,14 @@
     "special": true
   },
   "49157": {
     "content": "<MODEL_CARD>",
     "lstrip": false,
     "normalized": false,
@@ -195,12 +203,11 @@
     "<MODEL_CARD>"
   ],
   "bos_token": "<|endoftext|>",
-  "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}<CARD>{{ message['content'] }}</CARD>{% elif message['role'] == 'assistant' %}<CARD_SUMMARY>{{ message['content'] }}</CARD_SUMMARY>{% endif %}{% endfor %}{% if not add_generation_prompt %}<|endoftext|>{% endif %}{% if add_generation_prompt %}<CARD_SUMMARY>{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
   "extra_special_tokens": {},
   "model_max_length": 8192,
-  "pad_token": "
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>",
   "vocab_size": 49152
     "special": true
   },
   "49152": {
+    "content": "[PAD]",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "special": true
   },
   "49153": {
+    "content": "<CARD>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "special": true
   },
   "49154": {
+    "content": "</CARD>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "special": true
   },
   "49155": {
+    "content": "<CARD_SUMMARY>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "special": true
   },
   "49156": {
+    "content": "</CARD_SUMMARY>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "special": true
   },
   "49157": {
+    "content": "<DATASET_CARD>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false,
+    "special": true
+  },
+  "49158": {
     "content": "<MODEL_CARD>",
     "lstrip": false,
     "normalized": false,
     "<MODEL_CARD>"
   ],
   "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
   "extra_special_tokens": {},
   "model_max_length": 8192,
+  "pad_token": "[PAD]",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>",
   "vocab_size": 49152
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size 
 version https://git-lfs.github.com/spec/v1
+oid sha256:75bc5c4c8f5f8eed5a885964a0e2fea3ca3737607c6f9890a35ef46b516d96d9
+size 6097