from dataclasses import dataclass, field
from typing import Optional

from transformers import TrainingArguments


@dataclass
class ModelArgs:
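    """Arguments for the base decoder model and the super tokenizer encoder."""
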
    # * base
    model_name_or_path: str = field(
        default="meta-llama/Llama-2-7b-chat-hf",
        metadata={
            "help": "Path to pretrained model or model identifier from huggingface.co/models"
        },
    )
    super_tokenizer_name_or_path: str = field(
        default="/share/ninglu_shao/code/PluginTransformer/data/outputs/90k_0104+8-longalpaca_0106/super_tokenizer",
        metadata={
            "help": "Path to the pretrained super tokenizer or model identifier from huggingface.co/models"
        },
    )
    # * other
    super_tokenizer_num_hidden_layers: int = field(
        default=8,
        metadata={"help": "Number of hidden layers in the super tokenizer encoder."},
    )
    is_model_frozen: bool = field(
        default=True,
        metadata={"help": "Whether to freeze the decoder model."},
    )
    use_flash_attention_2: bool = field(
        default=True,
        metadata={"help": "Whether to use FlashAttention-2."},
    )
    dtype: str = field(
        default="bf16",
        metadata={"help": "Model weight dtype, e.g. 'bf16'."},
    )
    device_map: Optional[str] = field(
        default=None,
        metadata={"help": "Optional device map for model placement, e.g. 'auto'."},
    )


@dataclass
class DataArgs:
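    """Arguments for dataset selection and the location where datasets are saved."""
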
    # * base
    dataset_list: str = field(
        default="wikipedia",
        metadata={"help": "Comma-separated list of dataset names; split into a list in __post_init__."},
    )
    dataset_save_dir: str = field(
        default="/share/ninglu_shao/data/PluginTransformer",
        metadata={"help": "Directory where processed datasets are saved."},
    )

    def __post_init__(self):
        self.dataset_list = [dataset.strip() for dataset in self.dataset_list.split(",")]


@dataclass
class TrainingArgs(TrainingArguments):
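    """Training arguments extending transformers.TrainingArguments with project defaults."""
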
    # * base
    output_dir: str = field(
        default="outputs/test_4",
        metadata={
            "help": "The output directory where the model predictions and checkpoints will be written."
        },
    )
    overwrite_output_dir: bool = field(
        default=False,
        metadata={"help": "If True, overwrite the content of the output directory."},
    )
    # * basic train parameter
    learning_rate: float = field(
        default=1e-4,
        metadata={"help": "The initial learning rate for the optimizer."},
    )
    warmup_ratio: float = field(
        default=0.1,
        metadata={"help": "The ratio of warmup steps for the optimizer."},
    )
    num_train_epochs: float = field(
        default=1,
        metadata={"help": "Total number of training epochs to perform."},
    )
    per_device_train_batch_size: int = field(
        default=8,
        metadata={"help": "The batch size per GPU/TPU core/CPU for training."},
    )
    # * data parameter
    dataloader_num_workers: int = field(
        default=32,
        metadata={"help": "Number of subprocesses to use for data loading."},
    )
    remove_unused_columns: bool = field(
        default=False,
        metadata={
            "help": "Whether or not to automatically remove the columns unused by the model forward method."
        },
    )
    # * save & log parameter
    save_strategy: str = field(
        default="steps",
        metadata={"help": "The checkpoint save strategy to adopt during training."},
    )
    save_steps: int = field(
        default=500,
        metadata={"help": "Saving frequency, interpreted according to the save strategy."},
    )
    save_total_limit: Optional[int] = field(
        default=None,
        metadata={"help": "Maximum number of checkpoints to keep in output_dir."},
    )
    logging_steps: int = field(
        default=10,
        metadata={"help": "Logging frequency according to logging strategy."},
    )
    # * half precision & ddp parameter & gradient_checkpointing
    fp16: bool = field(
        default=False,
        metadata={
            "help": "Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training."
        },
    )
    bf16: bool = field(
        default=True,
        metadata={
            "help": "Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training."
        },
    )
    # ddp_find_unused_parameters: bool = field(
    #     default=False,
    #     metadata={
    #         "help": "When using distributed training, the value of the flag find_unused_parameters passed to DistributedDataParallel."
    #     },
    # )


@dataclass
class GenerationArgs:
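    """Arguments controlling text generation (sampling, number of sequences, length)."""
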
    do_sample: bool = field(
        default=False,
        metadata={"help": "Whether to sample during decoding (greedy decoding if False)."},
    )
    num_return_sequences: int = field(
        default=1,
        metadata={"help": "Number of sequences to return per input."},
    )
    max_length: int = field(
        default=1024,
        metadata={"help": "Maximum generation length in tokens."},
    )
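

# Minimal usage sketch (illustrative assumption, not part of the training pipeline itself):
# dataclasses in this style are typically parsed with transformers.HfArgumentParser,
# which turns each field into a command-line flag of the same name.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    # Parse command-line arguments into the four dataclasses defined above.
    parser = HfArgumentParser((ModelArgs, DataArgs, TrainingArgs, GenerationArgs))
    model_args, data_args, training_args, generation_args = parser.parse_args_into_dataclasses()

    # Quick sanity check of the parsed configuration.
    print(model_args)
    print(data_args)
    print(generation_args)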