Upload folder using huggingface_hub

- .gitattributes +1 -0
- README.md +193 -0
- chat_template.jinja +33 -0
- config.json +105 -0
- generation_config.json +11 -0
- model.safetensors +3 -0
- preprocessor_config.json +27 -0
- special_tokens_map.json +34 -0
- tokenizer.json +3 -0
- tokenizer_config.json +220 -0
- video_preprocessor_config.json +42 -0
.gitattributes CHANGED

```diff
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
```
README.md ADDED

@@ -0,0 +1,193 @@

---
library_name: transformers
pipeline_tag: image-text-to-text
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- THUDM/GLM-4.1V-9B-Thinking
---

This tiny model is for debugging. It is randomly initialized with the config adapted from [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking).

### Example usage:

```python
import torch
from transformers import AutoProcessor, Glm4vForConditionalGeneration

model_id = "tiny-random/glm-4.1v"
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "url": "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png"
            },
            {
                "type": "text",
                "text": "describe this image"
            }
        ],
    }
]
processor = AutoProcessor.from_pretrained(model_id)
model = Glm4vForConditionalGeneration.from_pretrained(
    pretrained_model_name_or_path=model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt"
).to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=16)
# Decode only the newly generated tokens, keeping special tokens visible
output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
print(output_text)
```

### Code to create this repo:

```python
import json

import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoProcessor,
    GenerationConfig,
    Glm4vForConditionalGeneration,
    set_seed,
)

source_model_id = "THUDM/GLM-4.1V-9B-Thinking"
save_folder = "/tmp/tiny-random/glm-4.1v"

processor = AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

# Shrink the source config to tiny dimensions
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
config_json['hidden_size'] = 64
config_json['intermediate_size'] = 128
config_json['num_attention_heads'] = 2
config_json['num_hidden_layers'] = 2
config_json['num_key_value_heads'] = 1
config_json['tie_word_embeddings'] = True
config_json['vision_config']['hidden_size'] = 64
config_json['vision_config']['depth'] = 2
config_json['vision_config']['num_heads'] = 2
config_json['vision_config']['intermediate_size'] = 128
config_json['vision_config']['out_hidden_size'] = 64
config_json['rope_scaling']['mrope_section'] = [2, 2, 4]

with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = Glm4vForConditionalGeneration(config)
torch.set_default_dtype(torch.float32)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
set_seed(42)
model = model.cpu()  # CPU is more stable for random initialization across machines
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.2)
        print(name, p.shape)
model.save_pretrained(save_folder)
print(model)
```

### Printing the model:

```text
Glm4vForConditionalGeneration(
  (model): Glm4vModel(
    (visual): Glm4vVisionModel(
      (embeddings): Glm4vVisionEmbeddings(
        (position_embedding): Embedding(576, 64)
      )
      (patch_embed): Glm4vVisionPatchEmbed(
        (proj): Conv3d(3, 64, kernel_size=(2, 14, 14), stride=(2, 14, 14))
      )
      (rotary_pos_emb): Glm4vVisionRotaryEmbedding()
      (blocks): ModuleList(
        (0-1): 2 x Glm4vVisionBlock(
          (norm1): Glm4vRMSNorm((64,), eps=1e-05)
          (norm2): Glm4vRMSNorm((64,), eps=1e-05)
          (attn): Glm4vVisionAttention(
            (qkv): Linear(in_features=64, out_features=192, bias=False)
            (proj): Linear(in_features=64, out_features=64, bias=False)
          )
          (mlp): Glm4VisionMlp(
            (gate_proj): Linear(in_features=64, out_features=64, bias=False)
            (up_proj): Linear(in_features=64, out_features=64, bias=False)
            (down_proj): Linear(in_features=64, out_features=64, bias=False)
            (act_fn): SiLU()
          )
        )
      )
      (merger): Glm4vVisionPatchMerger(
        (proj): Linear(in_features=64, out_features=64, bias=False)
        (post_projection_norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
        (gate_proj): Linear(in_features=64, out_features=128, bias=False)
        (up_proj): Linear(in_features=64, out_features=128, bias=False)
        (down_proj): Linear(in_features=128, out_features=64, bias=False)
        (act1): GELU(approximate='none')
        (act_fn): SiLU()
      )
      (post_conv_layernorm): Glm4vRMSNorm((64,), eps=1e-05)
      (downsample): Conv2d(64, 64, kernel_size=(2, 2), stride=(2, 2))
      (post_layernorm): Glm4vRMSNorm((64,), eps=1e-05)
    )
    (language_model): Glm4vTextModel(
      (embed_tokens): Embedding(151552, 64, padding_idx=151329)
      (layers): ModuleList(
        (0-1): 2 x Glm4vTextDecoderLayer(
          (self_attn): Glm4vTextAttention(
            (q_proj): Linear(in_features=64, out_features=64, bias=True)
            (k_proj): Linear(in_features=64, out_features=32, bias=True)
            (v_proj): Linear(in_features=64, out_features=32, bias=True)
            (o_proj): Linear(in_features=64, out_features=64, bias=False)
          )
          (mlp): Glm4vTextMLP(
            (gate_up_proj): Linear(in_features=64, out_features=256, bias=False)
            (down_proj): Linear(in_features=128, out_features=64, bias=False)
            (activation_fn): SiLU()
          )
          (input_layernorm): Glm4vRMSNorm((64,), eps=1e-05)
          (post_attention_layernorm): Glm4vRMSNorm((64,), eps=1e-05)
          (post_self_attn_layernorm): Glm4vRMSNorm((64,), eps=1e-05)
          (post_mlp_layernorm): Glm4vRMSNorm((64,), eps=1e-05)
        )
      )
      (norm): Glm4vRMSNorm((64,), eps=1e-05)
      (rotary_emb): Glm4vTextRotaryEmbedding()
    )
  )
  (lm_head): Linear(in_features=64, out_features=151552, bias=False)
)
```
chat_template.jinja ADDED

@@ -0,0 +1,33 @@

```jinja
[gMASK]<sop>
{%- for msg in messages %}
{%- if msg.role == 'system' %}
<|system|>
{{ msg.content }}
{%- elif msg.role == 'user' %}
<|user|>{{ '\n' }}

{%- if msg.content is string %}
{{ msg.content }}
{%- else %}
{%- for item in msg.content %}
{%- if item.type == 'video' or 'video' in item %}
<|begin_of_video|><|video|><|end_of_video|>
{%- elif item.type == 'image' or 'image' in item %}
<|begin_of_image|><|image|><|end_of_image|>
{%- elif item.type == 'text' %}
{{ item.text }}
{%- endif %}
{%- endfor %}
{%- endif %}
{%- elif msg.role == 'assistant' %}
{%- if msg.metadata %}
<|assistant|>{{ msg.metadata }}
{{ msg.content }}
{%- else %}
<|assistant|>
{{ msg.content }}
{%- endif %}
{%- endif %}
{%- endfor %}
{% if add_generation_prompt %}<|assistant|>
{% endif %}
```
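The template wraps user turns in `<|user|>` and replaces image and video content items with `<|begin_of_image|><|image|><|end_of_image|>`-style placeholders. For reference, a minimal sketch of rendering it without tokenization, reusing the repo id and message list from the README above; `tokenize=False` returns the raw prompt string so the markers are visible:

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("tiny-random/glm-4.1v")
messages = [
    {"role": "user", "content": [
        {"type": "image", "url": "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png"},
        {"type": "text", "text": "describe this image"},
    ]}
]
# Render the template to a plain string instead of token ids
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # starts with "[gMASK]<sop>" and ends with the <|assistant|> generation prompt
```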
config.json ADDED

@@ -0,0 +1,105 @@

```json
{
  "architectures": [
    "Glm4vForConditionalGeneration"
  ],
  "attention_bias": true,
  "attention_dropout": 0.0,
  "eos_token_id": [151329, 151336, 151338, 151348],
  "hidden_act": "silu",
  "hidden_size": 64,
  "image_end_token_id": 151340,
  "image_start_token_id": 151339,
  "image_token_id": 151343,
  "initializer_range": 0.02,
  "intermediate_size": 128,
  "max_position_embeddings": 65536,
  "model_type": "glm4v",
  "num_attention_heads": 2,
  "num_hidden_layers": 2,
  "num_key_value_heads": 1,
  "pad_token_id": 151329,
  "partial_rotary_factor": 0.5,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "mrope_section": [2, 2, 4],
    "rope_type": "default",
    "type": "default"
  },
  "rope_theta": 10000.0,
  "text_config": {
    "architectures": [
      "Glm4vForConditionalGeneration"
    ],
    "attention_bias": true,
    "attention_dropout": 0.0,
    "eos_token_id": [151329, 151336, 151338, 151348],
    "hidden_act": "silu",
    "hidden_size": 64,
    "image_token_id": null,
    "initializer_range": 0.02,
    "intermediate_size": 128,
    "max_position_embeddings": 65536,
    "model_type": "glm4v_text",
    "num_attention_heads": 2,
    "num_hidden_layers": 2,
    "num_key_value_heads": 1,
    "pad_token_id": 151329,
    "partial_rotary_factor": 0.5,
    "rms_norm_eps": 1e-05,
    "rope_scaling": {
      "mrope_section": [2, 2, 4],
      "rope_type": "default",
      "type": "default"
    },
    "rope_theta": 10000.0,
    "tie_word_embeddings": true,
    "torch_dtype": "bfloat16",
    "use_cache": true,
    "video_token_id": null,
    "vocab_size": 151552
  },
  "torch_dtype": "bfloat16",
  "transformers_version": "4.54.0.dev0",
  "use_cache": true,
  "video_end_token_id": 151342,
  "video_start_token_id": 151341,
  "video_token_id": 151344,
  "vision_config": {
    "attention_bias": false,
    "attention_dropout": 0.0,
    "depth": 2,
    "hidden_act": "silu",
    "hidden_dropout_prob": 0.0,
    "hidden_size": 64,
    "image_size": 336,
    "in_channels": 3,
    "initializer_range": 0.02,
    "intermediate_size": 128,
    "model_type": "glm4v",
    "num_heads": 2,
    "out_hidden_size": 64,
    "patch_size": 14,
    "rms_norm_eps": 1e-05,
    "spatial_merge_size": 2,
    "temporal_patch_size": 2
  },
  "vocab_size": 151552
}
```
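As a quick sanity check on the tiny geometry above, a sketch using the repo id from the README:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("tiny-random/glm-4.1v")
# Text tower: 2 layers, hidden size 64, 2 query heads sharing 1 KV head
print(config.text_config.num_hidden_layers, config.text_config.hidden_size,
      config.text_config.num_attention_heads, config.text_config.num_key_value_heads)
# Vision tower: depth 2, 14x14 patches merged 2x2 into 64-dim outputs
print(config.vision_config.depth, config.vision_config.patch_size,
      config.vision_config.spatial_merge_size, config.vision_config.out_hidden_size)
```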
generation_config.json ADDED

@@ -0,0 +1,11 @@

```json
{
  "_from_model_config": true,
  "eos_token_id": [151329, 151336, 151338, 151348],
  "pad_token_id": 151329,
  "transformers_version": "4.54.0.dev0"
}
```
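Any of the four ids above ends generation; per the tokenizer files below they correspond to `<|endoftext|>`, `<|user|>`, `<|observation|>`, and `</answer>`. A short sketch to confirm what `generate()` picks up:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("tiny-random/glm-4.1v")
print(gen_config.eos_token_id)  # [151329, 151336, 151338, 151348]
print(gen_config.pad_token_id)  # 151329
```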
model.safetensors ADDED

@@ -0,0 +1,3 @@

```text
version https://git-lfs.github.com/spec/v1
oid sha256:a51b22dc53e1de592646c2a898839abada0c547804444a118dac2508ec322d5c
size 19984480
```
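The entry above is a Git LFS pointer; the ~20 MB weight file itself lives in LFS storage. A sketch for peeking at the tensors without instantiating the model, assuming the `safetensors` package is installed:

```python
from huggingface_hub import hf_hub_download
from safetensors import safe_open

path = hf_hub_download("tiny-random/glm-4.1v", filename="model.safetensors")
with safe_open(path, framework="pt") as f:
    for name in sorted(f.keys())[:5]:  # first few tensors only
        tensor = f.get_tensor(name)
        print(name, tuple(tensor.shape), tensor.dtype)
```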
preprocessor_config.json ADDED

@@ -0,0 +1,27 @@

```json
{
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [0.48145466, 0.4578275, 0.40821073],
  "image_processor_type": "Glm4vImageProcessor",
  "image_std": [0.26862954, 0.26130258, 0.27577711],
  "merge_size": 2,
  "patch_size": 14,
  "processor_class": "Glm4vProcessor",
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "longest_edge": 11760000,
    "shortest_edge": 12544
  },
  "temporal_patch_size": 2
}
```
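A sketch of the image path: images are resized within the pixel budget given by `size`, cut into 14x14 patches, and returned as flattened patches plus a grid descriptor. The blank test image is hypothetical, and the output key names follow the Qwen2-VL-style patch processors this family resembles, so treat them as assumptions:

```python
from PIL import Image
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("tiny-random/glm-4.1v")
image = Image.new("RGB", (448, 448))  # hypothetical blank test image
out = image_processor(images=image, return_tensors="pt")
print({k: tuple(v.shape) for k, v in out.items()})
# Expected keys (assumed): pixel_values, image_grid_thw
```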
special_tokens_map.json ADDED

@@ -0,0 +1,34 @@

```json
{
  "additional_special_tokens": [
    "<|endoftext|>", "[MASK]", "[gMASK]", "[sMASK]", "<sop>", "<eop>",
    "<|system|>", "<|user|>", "<|assistant|>", "<|observation|>",
    "<|begin_of_image|>", "<|end_of_image|>", "<|begin_of_video|>", "<|end_of_video|>",
    "<|image|>", "<|video|>"
  ],
  "eos_token": { "content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false },
  "pad_token": { "content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false }
}
```
tokenizer.json ADDED

@@ -0,0 +1,3 @@

```text
version https://git-lfs.github.com/spec/v1
oid sha256:bcc742fc44db1f0870d7320b495e6240cc40e202565fc96786220fa0d9ddb41c
size 19968183
```
tokenizer_config.json ADDED

@@ -0,0 +1,220 @@

All `added_tokens_decoder` entries share `"lstrip": false, "normalized": false, "rstrip": false, "single_word": false`, so each entry is shown on one line:

```json
{
  "added_tokens_decoder": {
    "151329": { "content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151330": { "content": "[MASK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151331": { "content": "[gMASK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151332": { "content": "[sMASK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151333": { "content": "<sop>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151334": { "content": "<eop>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151335": { "content": "<|system|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151336": { "content": "<|user|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151337": { "content": "<|assistant|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151338": { "content": "<|observation|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151339": { "content": "<|begin_of_image|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151340": { "content": "<|end_of_image|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151341": { "content": "<|begin_of_video|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151342": { "content": "<|end_of_video|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151343": { "content": "<|image|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151344": { "content": "<|video|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "151345": { "content": "<think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
    "151346": { "content": "</think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
    "151347": { "content": "<answer>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
    "151348": { "content": "</answer>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
    "151349": { "content": "<|begin_of_box|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
    "151350": { "content": "<|end_of_box|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
    "151351": { "content": "<|sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false }
  },
  "additional_special_tokens": [
    "<|endoftext|>", "[MASK]", "[gMASK]", "[sMASK]", "<sop>", "<eop>",
    "<|system|>", "<|user|>", "<|assistant|>", "<|observation|>",
    "<|begin_of_image|>", "<|end_of_image|>", "<|begin_of_video|>", "<|end_of_video|>",
    "<|image|>", "<|video|>"
  ],
  "clean_up_tokenization_spaces": false,
  "do_lower_case": false,
  "eos_token": "<|endoftext|>",
  "extra_special_tokens": {},
  "model_input_names": ["input_ids", "attention_mask"],
  "model_max_length": 65536,
  "pad_token": "<|endoftext|>",
  "padding_side": "left",
  "processor_class": "Glm4vProcessor",
  "remove_space": false,
  "tokenizer_class": "PreTrainedTokenizerFast"
}
```
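The `added_tokens_decoder` above pins every control token to a fixed id, and `config.json` references the same ids (`image_token_id` 151343, `video_token_id` 151344). A quick sketch to confirm the mapping:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("tiny-random/glm-4.1v")
for token in ["<|user|>", "<|assistant|>", "<|image|>", "<|video|>"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
# Expected from the map above: 151336, 151337, 151343, 151344
```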
video_preprocessor_config.json ADDED

@@ -0,0 +1,42 @@

```json
{
  "crop_size": null,
  "data_format": "channels_first",
  "default_to_square": true,
  "device": null,
  "do_center_crop": null,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_pad": null,
  "do_rescale": true,
  "do_resize": true,
  "do_sample_frames": true,
  "fps": 2,
  "image_mean": [0.48145466, 0.4578275, 0.40821073],
  "image_std": [0.26862954, 0.26130258, 0.27577711],
  "input_data_format": null,
  "max_image_size": {
    "longest_edge": 47040000
  },
  "merge_size": 2,
  "num_frames": 16,
  "patch_size": 14,
  "processor_class": "Glm4vProcessor",
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "longest_edge": 47040000,
    "shortest_edge": 12544
  },
  "size_divisor": null,
  "temporal_patch_size": 2,
  "video_metadata": null,
  "video_processor_type": "Glm4vVideoProcessor"
}
```
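To close, a sketch of the video path. `AutoVideoProcessor` and the `videos=` call follow the recent transformers video-processor API, and the dummy clip is hypothetical, so treat both as assumptions:

```python
import numpy as np
from transformers import AutoVideoProcessor  # recent transformers API; an assumption here

video_processor = AutoVideoProcessor.from_pretrained("tiny-random/glm-4.1v")
# Hypothetical 8-frame RGB clip; frames are sampled, rescaled, and normalized per the config above
video = np.zeros((8, 224, 224, 3), dtype=np.uint8)
out = video_processor(videos=[video], return_tensors="pt")
print({k: tuple(v.shape) for k, v in out.items()})
```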