#!/bin/bash
#SBATCH --job-name=tr14-2B7-mup
#SBATCH --partition=production-cluster
#SBATCH --nodes=8
#SBATCH --cpus-per-task=12
#SBATCH --ntasks-per-node=1
#SBATCH --gres=gpu:a100:8
#SBATCH --hint=nomultithread
#SBATCH --time=100:00:00
#SBATCH --output=/fsx/teven/mup/tr14-2B7-%j.out
#SBATCH --exclude=ip-26-0-159-215,ip-26-0-153-238
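
# Usage sketch: the script takes a single positional argument, the learning
# rate, consumed below as $1 in the output paths and MUP_ARGS. Pass it in
# decimal form (see the bc note further down), e.g.:
#   sbatch tr14-2B7-mup-cluster.slurm 0.0002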
echo "START TIME: $(date)" | |
mkdir -p $LOGS_PATH | |

# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/admin/home/teven/miniconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/admin/home/teven/miniconda3/etc/profile.d/conda.sh" ]; then
        . "/admin/home/teven/miniconda3/etc/profile.d/conda.sh"
    else
        export PATH="/admin/home/teven/miniconda3/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<

# Proper env variables
conda activate tvn_dev
export PATH=/usr/local/cuda-11.4/bin:$PATH

export NCCL_PROTO=simple
export PATH=/opt/amazon/efa/bin:$PATH
export FI_EFA_FORK_SAFE=1
export FI_LOG_LEVEL=1
export FI_EFA_USE_DEVICE_RDMA=1 # use for p4dn
#export NCCL_ALGO=ring
#export NCCL_DEBUG=info
#export NCCL_DEBUG_SUBSYS=INIT,ENV,GRAPH,COLL
export PYTHONFAULTHANDLER=1
export CUDA_LAUNCH_BLOCKING=0
export OMPI_MCA_mtl_base_verbose=1
export FI_EFA_ENABLE_SHM_TRANSFER=0
export FI_PROVIDER=efa
export FI_EFA_TX_MIN_CREDITS=64
export NCCL_TREE_THRESHOLD=0
#export TORCH_CPP_LOG_LEVEL=INFO
#export TORCH_DISTRIBUTED_DEBUG=INFO
export NCCL_ASYNC_ERROR_HANDLING=1
#export NCCL_P2P_DISABLE=1
#export NCCL_IBEXT_DISABLE=1
#export NCCL_SOCKET_IFNAME="eth0,en,eth,em,bond"

# testing for potential faulty nodes
srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
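# With --ntasks-per-node=1 this runs once per node and prints each hostname
# alongside torch.cuda.is_available(), so a node with broken GPUs or a bad
# driver shows up in the job log before training starts.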

# so processes know who to talk to
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=12802
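# scontrol show hostnames expands SLURM's compact nodelist (e.g. node[1-8])
# into one hostname per line; head -n 1 makes the first node in the
# allocation the rendezvous host for all workers.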

MEGATRON_DEEPSPEED_REPO=/fsx/teven/Megatron-DeepSpeed
cd $MEGATRON_DEEPSPEED_REPO

TOKENIZER_NAME_OR_PATH=t5-small

variant=main

DATA_PATH=/fsx/data/gpt2tok_c4_text_document

DATA_OUTPUT_PATH=/fsx/mup_exps/checkpoints/tr14-2B7-lr$1-init0.1-inpm10-outm10-atnm10-mup
CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
REPO_PATH=$DATA_OUTPUT_PATH/tr14-2B7-test-lr$1-init0.1-inpm10-outm10-atnm10-mup
TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
LOGS_PATH=$REPO_PATH/logs/$variant
mkdir -p $LOGS_PATH # create the log dir before tee appends to it below

GPUS_PER_NODE=8
NNODES=$SLURM_NNODES

PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=16
GLOBAL_BATCH_SIZE=512
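# Sanity check (arithmetic from the settings above): 8 nodes x 8 GPUs = 64
# GPUs; with TP=2 and PP=1 that is 64 / (2 * 1) = 32 data-parallel replicas,
# so gradient accumulation steps = 512 / (16 * 32) = 1.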

NLAYERS=32
NHIDDEN=2560
NHEADS=32
SEQ_LEN=2048

SAVE_INTERVAL=250

TRAIN_SAMPLES=1_953_125 # 4B tokens
LR_DECAY_SAMPLES=1_953_125 # decay over the same number of samples as training
LR_WARMUP_SAMPLES=183_105 # 375M tokens
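# Token math (samples * SEQ_LEN): 1,953,125 * 2048 = 4.0B training tokens,
# and 183,105 * 2048 ≈ 375M warmup tokens.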

MUP_ARGS=" \
    --lr $1 \
    --min-lr $(bc <<< "scale=3; $1/10") \
    --init-method-std 0.1 \
    --mup \
    --mup-input-mult 10 \
    --mup-output-mult 10 \
    --mup-attn-mult 10 \
    "

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr-decay-style cosine \
    --lr-decay-samples $LR_DECAY_SAMPLES \
    --lr-warmup-samples $LR_WARMUP_SAMPLES \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# exit cleanly before the wall-clock limit so the last checkpoint can be
# saved: 1190 min for a 20h limit, 5990 min for a 100h limit
EXIT_OPTS=" \
    --exit-duration-in-mins 1190 \
    "

GPT_ARGS=" \
    --pp-partition-method 'type:transformer' \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --tokenizer-type PretrainedFromHF \
    --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
    --embed-layernorm \
    --fp16 \
    --seed 42 \
    --position-embedding-type alibi \
    --checkpoint-activations \
    --abort-on-unmet-fused-kernel-constraints \
    --pad-vocab-size-to 51200 \
    $OPTIMIZER_ARGS \
    $EXIT_OPTS \
    "

# TODO: decide on efficient eval-interval + eval-iters
OUTPUT_ARGS=" \
    --log-interval 1 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 1000 \
    --eval-iters 1 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=1

config_json="./ds_config.$SLURM_JOBID.json"

# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
cat <<EOT > $config_json
{
  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
  "train_batch_size": $GLOBAL_BATCH_SIZE,
  "gradient_clipping": 1.0,
  "zero_optimization": {
    "stage": $ZERO_STAGE
  },
  "fp16": {
    "enabled": true,
    "loss_scale": 0,
    "loss_scale_window": 500,
    "hysteresis": 2,
    "min_loss_scale": 1,
    "initial_scale_power": 12
  },
  "steps_per_print": 2000,
  "wall_clock_breakdown": false
}
EOT
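# In the fp16 block above, "loss_scale": 0 selects DeepSpeed's dynamic loss
# scaling: the scale starts at 2^12 = 4096 (initial_scale_power), is raised
# after 500 overflow-free steps (loss_scale_window), lowered on overflow, and
# never drops below min_loss_scale.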

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config ${config_json} \
    --zero-stage ${ZERO_STAGE} \
    --deepspeed-activation-checkpointing \
    "

export LAUNCHER="python -u -m torch.distributed.run \
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NNODES \
    --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    "

export CMD=" \
    $(pwd)/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    $MUP_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --data-path $DATA_PATH \
    --data-impl mmap \
    --distributed-backend nccl \
    $DEEPSPEED_ARGS \
    "

echo $CMD

# do not remove this workaround: without it training hangs and nodes are lost
# (this deliberately overrides the CUDA_LAUNCH_BLOCKING=0 set earlier)
export CUDA_LAUNCH_BLOCKING=1

# hide duplicated errors using this hack - will be properly fixed in pt-1.12
export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json

clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt

echo "END TIME: $(date)"