|
from dataclasses import dataclass, field |
|
from typing import Optional |
|
|
|
from transformers import TrainingArguments |
|
|
|
|
|
@dataclass
class ModelArgs:
    """Arguments controlling which decoder model and super-tokenizer encoder to load."""

    model_name_or_path: str = field(
        default="meta-llama/Llama-2-7b-chat-hf",
        metadata={
            "help": "Path to pretrained model or model identifier from huggingface.co/models"
        },
    )
    super_tokenizer_name_or_path: str = field(
        default="/share/ninglu_shao/code/PluginTransformer/data/outputs/90k_0104+8-longalpaca_0106/super_tokenizer",
        # Fixed: the help text was copy-pasted from model_name_or_path and
        # described the wrong field.
        metadata={
            "help": "Path to the pretrained super tokenizer (encoder) or identifier from huggingface.co/models"
        },
    )

    super_tokenizer_num_hidden_layers: int = field(
        default=8,
        metadata={"help": "Encoder model's layer num."},
    )
    is_model_frozen: bool = field(
        default=True,
        metadata={"help": "Freeze or not decoder model."},
    )
    use_flash_attention_2: bool = field(
        default=True,
        metadata={"help": "Use flash attention 2 or not."},
    )
    # Dtype alias used when loading model weights (default "bf16").
    dtype: str = field(
        default="bf16",
        metadata={"help": "Data type to load the model weights with (e.g. 'bf16')."},
    )
    # Accelerate-style device map (e.g. "auto"); None leaves placement to the caller.
    device_map: Optional[str] = field(
        default=None,
        metadata={"help": "Device map used when loading the model (e.g. 'auto')."},
    )
|
|
|
|
|
@dataclass
class DataArgs:
    """Arguments describing which datasets to use and where to cache them."""

    # Comma-separated list of dataset names; normalized to list[str] in __post_init__.
    dataset_list: str = field(
        default="wikipedia",
        metadata={"help": "Path of dataset"},
    )
    dataset_save_dir: str = field(
        default="/share/ninglu_shao/data/PluginTransformer",
        metadata={"help": "The path to save dataset."},
    )

    def __post_init__(self):
        # Split a comma-separated string into a list of trimmed names.
        # Guarded with isinstance so an already-split list (e.g. when
        # re-instantiating from a parsed instance) passes through unchanged.
        if isinstance(self.dataset_list, str):
            self.dataset_list = [
                dataset.strip() for dataset in self.dataset_list.split(",")
            ]
|
|
|
|
|
@dataclass
class TrainingArgs(TrainingArguments):
    """Project defaults layered on top of ``transformers.TrainingArguments``.

    Only overrides defaults / help text; all other behavior comes from the
    parent class.
    """

    output_dir: str = field(
        default="outputs/test_4",
        metadata={
            "help": "The output directory where the model predictions and checkpoints will be written."
        },
    )
    overwrite_output_dir: bool = field(
        default=False,
        metadata={"help": "If True, overwrite the content of the output directory."},
    )

    learning_rate: float = field(
        default=1e-4,
        metadata={"help": "The initial learning rate for optimizer."},
    )
    warmup_ratio: float = field(
        default=0.1,
        metadata={"help": "The ratio of warmup steps for optimizer."},
    )
    num_train_epochs: float = field(
        default=1,
        metadata={"help": "Total number of training epochs to perform."},
    )
    per_device_train_batch_size: int = field(
        default=8,
        metadata={"help": "The batch size per GPU/TPU core/CPU for training."},
    )

    dataloader_num_workers: int = field(
        default=32,
        metadata={"help": "Number of subprocesses to use for data loading."},
    )
    # Must stay False: the collator feeds extra columns the bare forward
    # signature does not declare.
    remove_unused_columns: bool = field(
        default=False,
        metadata={
            "help": "Whether or not to automatically remove the columns unused by the model forward method."
        },
    )

    save_strategy: str = field(
        default="steps",
        metadata={"help": "The checkpoint save strategy to adopt during training."},
    )
    save_steps: int = field(
        default=500,
        metadata={"help": "Saving frequency according to saving strategy"},
    )
    # Fixed annotation: default is None, so the type must be Optional[int]
    # (bare `int` with a None default misleads HfArgumentParser and type checkers).
    save_total_limit: Optional[int] = field(
        default=None,
        metadata={"help": "How many checkpoints to keep in the output_dir."},
    )
    logging_steps: int = field(
        default=10,
        metadata={"help": "Logging frequency according to logging strategy."},
    )

    # bf16 is the project default (see ModelArgs.dtype); fp16 kept off.
    fp16: bool = field(
        default=False,
        metadata={
            "help": "Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training."
        },
    )
    bf16: bool = field(
        default=True,
        metadata={
            "help": "Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training."
        },
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@dataclass
class GenerationArgs:
    """Options forwarded to text generation (sampling, beam count, length cap)."""

    # Greedy decoding unless explicitly enabled.
    do_sample: bool = field(metadata={"help": "Sample when decoding?"}, default=False)
    # Number of candidate sequences returned per prompt.
    num_return_sequences: int = field(metadata={"help": "How many sequences to generate?"}, default=1)
    # Hard cap on generated sequence length (tokens).
    max_length: int = field(metadata={"help": "Maximum length."}, default=1024)