#!/bin/bash
#SBATCH --job-name=benchmarking
#SBATCH --nodes=1
#SBATCH --qos=normal
# set a 2-hour wall-time limit for the job
#SBATCH --time=02:00:00
# activate the requeue option
#SBATCH --requeue
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=12
#SBATCH --gres=gpu:1
#SBATCH --mem-per-cpu=10G
#SBATCH --partition=production-cluster
#SBATCH -o /fsx/sayak/logs/benchmarking-job/sdxl-fast-%x-%j.out
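# %x expands to the job name and %j to the job ID, so each run writes to its own log file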
set -x -e
source /admin/home/sayak/.bashrc
source /admin/home/sayak/miniconda3/etc/profile.d/conda.sh
conda activate /fsx/sayak
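# activating by prefix path uses the conda environment installed at /fsx/sayak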
echo "START TIME: $(date)"
REPO=/fsx/sayak/sdxl-fast
OUTPUT_DIR=/fsx/sayak/benchmarking
LOG_PATH=$OUTPUT_DIR/main_log.txt
mkdir -p "$OUTPUT_DIR"
touch "$LOG_PATH"
pushd "$REPO"
PROGRAM="run_specific.sh"
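# run_specific.sh is resolved relative to $REPO, which we pushd into above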
# pin the run to the single GPU allocated via --gres=gpu:1 and launch the benchmarking script with bash
export LAUNCHER="CUDA_VISIBLE_DEVICES=0 bash"
export CMD="$LAUNCHER $PROGRAM"
echo "$CMD"
# AWS specific
export NCCL_PROTO=simple
export RDMAV_FORK_SAFE=1
export FI_EFA_FORK_SAFE=1
export FI_EFA_USE_DEVICE_RDMA=1
export FI_PROVIDER=efa
export FI_LOG_LEVEL=1
export NCCL_IB_DISABLE=1
export NCCL_SOCKET_IFNAME=ens
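# The exports above are standard NCCL-over-EFA settings on AWS: FI_PROVIDER=efa selects the EFA libfabric
# provider, the *_FORK_SAFE flags keep it safe in forking processes, NCCL_IB_DISABLE=1 turns off NCCL's
# InfiniBand transport, and NCCL_SOCKET_IFNAME=ens restricts NCCL to the "ens*" network interfaces.
# For this single-node, single-GPU job they are mostly inert but harmless.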
# srun error handling:
# --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks
# --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code
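# note: the `srun ... | tee` pipeline below exits with tee's status, so under `set -e` a failing srun
# will not abort the script unless `set -o pipefail` is also enabled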
SRUN_ARGS=" \
    --wait=60 \
    --kill-on-bad-exit=1 \
    "
clear; srun $SRUN_ARGS --jobid "$SLURM_JOB_ID" bash -c "$CMD" 2>&1 | tee "$LOG_PATH"
echo "END TIME: $(date)"