from dataclasses import dataclass, field
from typing import Optional

from transformers import TrainingArguments


@dataclass
class ModelArgs:
    # * base
    model_name_or_path: str = field(
        default="meta-llama/Llama-2-7b-chat-hf",
        metadata={
            "help": "Path to pretrained model or model identifier from huggingface.co/models"
        },
    )
    super_tokenizer_name_or_path: str = field(
        default="/share/ninglu_shao/code/PluginTransformer/data/outputs/90k_0104+8-longalpaca_0106/super_tokenizer",
        metadata={
            "help": "Path to the pretrained super tokenizer or model identifier from huggingface.co/models"
        },
    )
    # * other
    super_tokenizer_num_hidden_layers: int = field(
        default=8,
        metadata={"help": "Number of hidden layers in the super tokenizer (encoder) model."},
    )
    is_model_frozen: bool = field(
        default=True,
        metadata={"help": "Whether to freeze the decoder model."},
    )
    use_flash_attention_2: bool = field(
        default=True,
        metadata={"help": "Whether to use FlashAttention-2."},
    )
    dtype: str = field(
        default="bf16",
        metadata={"help": "Data type to load the model in, e.g. 'bf16'."},
    )
    device_map: Optional[str] = field(
        default=None,
        metadata={"help": "Device map used when loading the model, e.g. 'auto'."},
    )
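
# Illustrative sketch (not part of the original file): one way the training
# code might convert the string `dtype` above into a torch dtype. The helper
# name `resolve_dtype` and the accepted strings are assumptions.
def resolve_dtype(dtype: str):
    import torch

    return {"fp16": torch.float16, "bf16": torch.bfloat16, "fp32": torch.float32}.get(
        dtype, torch.float32
    )
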
@dataclass
class DataArgs:
    # * base
    dataset_list: str = field(
        default="wikipedia",
        metadata={"help": "Comma-separated list of dataset names or paths."},
    )
    dataset_save_dir: str = field(
        default="/share/ninglu_shao/data/PluginTransformer",
        metadata={"help": "The path to save the dataset."},
    )

    def __post_init__(self):
        # Split the comma-separated string into a list of dataset names.
        self.dataset_list = [dataset.strip() for dataset in self.dataset_list.split(",")]
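
# Illustrative note (not part of the original file): with the __post_init__
# above, DataArgs(dataset_list="wikipedia, c4").dataset_list becomes
# ["wikipedia", "c4"].
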
@dataclass
class TrainingArgs(TrainingArguments):
    # * base
    output_dir: str = field(
        default="outputs/test_4",
        metadata={
            "help": "The output directory where the model predictions and checkpoints will be written."
        },
    )
    overwrite_output_dir: bool = field(
        default=False,
        metadata={"help": "If True, overwrite the content of the output directory."},
    )
    # * basic train parameter
    learning_rate: float = field(
        default=1e-4,
        metadata={"help": "The initial learning rate for the optimizer."},
    )
    warmup_ratio: float = field(
        default=0.1,
        metadata={"help": "The ratio of warmup steps for the optimizer."},
    )
    num_train_epochs: float = field(
        default=1,
        metadata={"help": "Total number of training epochs to perform."},
    )
    per_device_train_batch_size: int = field(
        default=8,
        metadata={"help": "The batch size per GPU/TPU core/CPU for training."},
    )
    # * data parameter
    dataloader_num_workers: int = field(
        default=32,
        metadata={"help": "Number of subprocesses to use for data loading."},
    )
    remove_unused_columns: bool = field(
        default=False,
        metadata={
            "help": "Whether or not to automatically remove the columns unused by the model forward method."
        },
    )
    # * save & log parameter
    save_strategy: str = field(
        default="steps",
        metadata={"help": "The checkpoint save strategy to adopt during training."},
    )
    save_steps: int = field(
        default=500,
        metadata={"help": "Saving frequency according to the save strategy."},
    )
    save_total_limit: Optional[int] = field(
        default=None,
        metadata={"help": "How many checkpoints to keep in the output_dir."},
    )
    logging_steps: int = field(
        default=10,
        metadata={"help": "Logging frequency according to the logging strategy."},
    )
    # * half precision & ddp parameter & gradient_checkpointing
    fp16: bool = field(
        default=False,
        metadata={
            "help": "Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training."
        },
    )
    bf16: bool = field(
        default=True,
        metadata={
            "help": "Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training."
        },
    )
    # ddp_find_unused_parameters: bool = field(
    #     default=False,
    #     metadata={
    #         "help": "When using distributed training, the value of the flag find_unused_parameters passed to DistributedDataParallel."
    #     },
    # )
@dataclass
class GenerationArgs:
    do_sample: bool = field(
        default=False,
        metadata={"help": "Whether to sample during decoding."},
    )
    num_return_sequences: int = field(
        default=1,
        metadata={"help": "Number of sequences to return per input."},
    )
    max_length: int = field(
        default=1024,
        metadata={"help": "Maximum generation length."},
    )
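

# Minimal usage sketch (assumption: a training script consumes these
# dataclasses via the standard transformers HfArgumentParser API).
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser((ModelArgs, DataArgs, TrainingArgs, GenerationArgs))
    model_args, data_args, training_args, generation_args = parser.parse_args_into_dataclasses()
    print(model_args.model_name_or_path)
    print(data_args.dataset_list)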