lj1995 committed
Commit 8d8416e
1 Parent(s): dabda30

Delete pretrained_models

pretrained_models/chinese-hubert-base/config.json DELETED
@@ -1,72 +0,0 @@
- {
-   "_name_or_path": "/data/docker/liujing04/gpt-vits/chinese-hubert-base",
-   "activation_dropout": 0.1,
-   "apply_spec_augment": true,
-   "architectures": [
-     "HubertModel"
-   ],
-   "attention_dropout": 0.1,
-   "bos_token_id": 1,
-   "classifier_proj_size": 256,
-   "conv_bias": false,
-   "conv_dim": [
-     512,
-     512,
-     512,
-     512,
-     512,
-     512,
-     512
-   ],
-   "conv_kernel": [
-     10,
-     3,
-     3,
-     3,
-     3,
-     2,
-     2
-   ],
-   "conv_stride": [
-     5,
-     2,
-     2,
-     2,
-     2,
-     2,
-     2
-   ],
-   "ctc_loss_reduction": "sum",
-   "ctc_zero_infinity": false,
-   "do_stable_layer_norm": false,
-   "eos_token_id": 2,
-   "feat_extract_activation": "gelu",
-   "feat_extract_norm": "group",
-   "feat_proj_dropout": 0.0,
-   "feat_proj_layer_norm": true,
-   "final_dropout": 0.1,
-   "hidden_act": "gelu",
-   "hidden_dropout": 0.1,
-   "hidden_size": 768,
-   "initializer_range": 0.02,
-   "intermediate_size": 3072,
-   "layer_norm_eps": 1e-05,
-   "layerdrop": 0.1,
-   "mask_feature_length": 10,
-   "mask_feature_min_masks": 0,
-   "mask_feature_prob": 0.0,
-   "mask_time_length": 10,
-   "mask_time_min_masks": 2,
-   "mask_time_prob": 0.05,
-   "model_type": "hubert",
-   "num_attention_heads": 12,
-   "num_conv_pos_embedding_groups": 16,
-   "num_conv_pos_embeddings": 128,
-   "num_feat_extract_layers": 7,
-   "num_hidden_layers": 12,
-   "pad_token_id": 0,
-   "torch_dtype": "float16",
-   "transformers_version": "4.30.2",
-   "use_weighted_layer_sum": false,
-   "vocab_size": 32
- }
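For context, a `config.json` in this format is what the `transformers` library reads when instantiating the model. Below is a minimal sketch (not part of this commit, and assuming `transformers` is installed and the directory were still on disk) of how the deleted HuBERT config would typically be consumed; the local path simply mirrors the directory removed here:

```python
# Illustrative only: the directory below was deleted in this commit.
from transformers import HubertConfig, HubertModel

config = HubertConfig.from_pretrained("pretrained_models/chinese-hubert-base")
model = HubertModel(config)  # randomly initialised; real weights load via from_pretrained
print(config.hidden_size, config.num_hidden_layers)  # 768 12
```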
pretrained_models/chinese-hubert-base/preprocessor_config.json DELETED
@@ -1,9 +0,0 @@
- {
-   "do_normalize": true,
-   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
-   "feature_size": 1,
-   "padding_side": "right",
-   "padding_value": 0,
-   "return_attention_mask": false,
-   "sampling_rate": 16000
- }
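The preprocessor config above maps directly onto `Wav2Vec2FeatureExtractor`'s constructor arguments. A hedged sketch of the equivalent extractor, built from those values rather than loading the now-removed file from disk:

```python
# Sketch only: reconstructs the extractor from the deleted config's values.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=False,
)
wav = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = extractor(wav, sampling_rate=16000, return_tensors="np")
print(inputs.input_values.shape)  # (1, 16000)
```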
pretrained_models/chinese-roberta-wwm-ext-large/config.json DELETED
@@ -1,34 +0,0 @@
- {
-   "_name_or_path": "/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large",
-   "architectures": [
-     "BertForMaskedLM"
-   ],
-   "attention_probs_dropout_prob": 0.1,
-   "bos_token_id": 0,
-   "classifier_dropout": null,
-   "directionality": "bidi",
-   "eos_token_id": 2,
-   "hidden_act": "gelu",
-   "hidden_dropout_prob": 0.1,
-   "hidden_size": 1024,
-   "initializer_range": 0.02,
-   "intermediate_size": 4096,
-   "layer_norm_eps": 1e-12,
-   "max_position_embeddings": 512,
-   "model_type": "bert",
-   "num_attention_heads": 16,
-   "num_hidden_layers": 24,
-   "output_past": true,
-   "pad_token_id": 0,
-   "pooler_fc_size": 768,
-   "pooler_num_attention_heads": 12,
-   "pooler_num_fc_layers": 3,
-   "pooler_size_per_head": 128,
-   "pooler_type": "first_token_transform",
-   "position_embedding_type": "absolute",
-   "torch_dtype": "float16",
-   "transformers_version": "4.30.2",
-   "type_vocab_size": 2,
-   "use_cache": true,
-   "vocab_size": 21128
- }
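Despite the chinese-roberta-wwm-ext-large naming, the `architectures` field above declares `BertForMaskedLM`, so the model loads through the BERT classes. A minimal sketch, again assuming the deleted files were still present locally:

```python
# Illustrative only; the directory was removed in this commit.
from transformers import BertConfig, BertForMaskedLM

config = BertConfig.from_pretrained("pretrained_models/chinese-roberta-wwm-ext-large")
model = BertForMaskedLM(config)
print(config.hidden_size, config.num_hidden_layers, config.vocab_size)  # 1024 24 21128
```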
pretrained_models/chinese-roberta-wwm-ext-large/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff