Tentative update to use transformers pipeline

#1
Files changed (1)
  1. config.json +26 -20
config.json CHANGED
@@ -1,20 +1,26 @@
-{
-  "_name_or_path": "./models/thai_transformer_rope",
-  "architectures": [
-    "ThaiTransformerModel"
-  ],
-  "block_size": 2048,
-  "confidence_threshold": 0.7,
-  "conv_kernel_size": 7,
-  "dropout": 0.1,
-  "dtype": "float32",
-  "max_position_embeddings": 2048,
-  "max_reasoning_effort": 1.0,
-  "model_type": "thai_transformer",
-  "n_embd": 384,
-  "n_head": 6,
-  "n_layer": 6,
-  "torch_dtype": "float32",
-  "transformers_version": "4.36.0",
-  "vocab_size": 44216
-}
+{
+  "_name_or_path": "./models/thai_transformer_rope",
+  "architectures": [
+    "ThaiTransformerModel"
+  ],
+  "auto_map": {
+    "AutoConfig": "hf_model.ThaiTransformerConfig",
+    "AutoModelForCausalLM": "hf_model.ThaiTransformerModel"
+  },
+  "block_size": 2048,
+  "confidence_threshold": 0.7,
+  "conv_kernel_size": 7,
+  "dropout": 0.1,
+  "dtype": "float32",
+  "max_position_embeddings": 2048,
+  "max_reasoning_effort": 1.0,
+  "model_type": "thai_transformer",
+  "n_embd": 384,
+  "n_head": 6,
+  "n_layer": 6,
+  "num_hidden_layers": 6,
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.0",
+  "vocab_size": 44216
+}
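
With the `auto_map` and `tokenizer_class` entries in place, the model should become loadable through the standard transformers `pipeline` API: passing `trust_remote_code=True` is what lets `auto_map` resolve the custom `hf_model.ThaiTransformerConfig` and `hf_model.ThaiTransformerModel` classes from the repo. A minimal sketch of the intended usage, assuming `hf_model.py` ships alongside `config.json` and using a hypothetical repo id:

```python
# Sketch of loading via the transformers pipeline, enabled by this config change.
# Assumes hf_model.py is present in the repo next to config.json;
# the repo id below is hypothetical.
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="your-user/thai_transformer_rope",  # hypothetical repo id
    trust_remote_code=True,  # required so auto_map can resolve the custom classes
)

print(pipe("สวัสดีครับ", max_new_tokens=32))
```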