export MODEL_PATH='google/gemma-2b'
export MASTER_ADDR="localhost"
export MASTER_PORT="1231"
export GLOO_SOCKET_IFNAME="lo"
export NCCL_SOCKET_IFNAME="lo"

#SVFT_PLAIN
export SAVE_PATH='./Gemma_2B_metamath40k_svft_plain'
CUDA_VISIBLE_DEVICES=0 python3 -m torch.distributed.launch --master_addr ${MASTER_ADDR} --master_port ${MASTER_PORT} --nproc_per_node=1 --use_env train_math.py \
    --model_name_or_path $MODEL_PATH \
    --data_path "./data/train/MetaMathQA-40K.json" \
    --data_length 10000000 \
    --bf16 True \
    --output_dir $SAVE_PATH \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 1000 \
    --save_total_limit 2 \
    --learning_rate 5e-2 \
    --weight_decay 0. \
    --warmup_ratio 0.1 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --num_train_epochs 2 \
    --pattern "banded" \
    --off_diag 0 \
    --target_modules q_proj k_proj v_proj up_proj down_proj o_proj gate_proj \
    --adapter_name "svft"

#SVFT_Random_d=16
export SAVE_PATH='./Gemma_2B_metamath40k_svft_16diag_random'
CUDA_VISIBLE_DEVICES=0 python3 -m torch.distributed.launch --master_addr ${MASTER_ADDR} --master_port ${MASTER_PORT} --nproc_per_node=1 --use_env train_math.py \
    --model_name_or_path $MODEL_PATH \
    --data_path "./data/train/MetaMathQA-40K.json" \
    --data_length 10000000 \
    --bf16 True \
    --output_dir $SAVE_PATH \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 1000 \
    --save_total_limit 2 \
    --learning_rate 5e-2 \
    --weight_decay 0. \
    --warmup_ratio 0.1 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --num_train_epochs 2 \
    --pattern "random" \
    --off_diag 16 \
    --target_modules q_proj k_proj v_proj up_proj down_proj o_proj gate_proj \
    --adapter_name "svft"

#SVFT_Banded_d=16
export SAVE_PATH='./Gemma_2B_metamath40k_svft_16diag_banded'
CUDA_VISIBLE_DEVICES=0 python3 -m torch.distributed.launch --master_addr ${MASTER_ADDR} --master_port ${MASTER_PORT} --nproc_per_node=1 --use_env train_math.py \
    --model_name_or_path $MODEL_PATH \
    --data_path "./data/train/MetaMathQA-40K.json" \
    --data_length 10000000 \
    --bf16 True \
    --output_dir $SAVE_PATH \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 1000 \
    --save_total_limit 2 \
    --learning_rate 5e-3 \
    --weight_decay 0. \
    --warmup_ratio 0.1 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --num_train_epochs 2 \
    --pattern "banded" \
    --off_diag 16 \
    --target_modules q_proj k_proj v_proj up_proj down_proj o_proj gate_proj \
    --adapter_name "svft"

#SVFT_TopK_d=16
export SAVE_PATH='./Gemma_2B_metamath40k_svft_16diag_topk'
CUDA_VISIBLE_DEVICES=0 python3 -m torch.distributed.launch --master_addr ${MASTER_ADDR} --master_port ${MASTER_PORT} --nproc_per_node=1 --use_env train_math.py \
    --model_name_or_path $MODEL_PATH \
    --data_path "./data/train/MetaMathQA-40K.json" \
    --data_length 10000000 \
    --bf16 True \
    --output_dir $SAVE_PATH \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 1000 \
    --save_total_limit 2 \
    --learning_rate 5e-3 \
    --weight_decay 0. \
    --warmup_ratio 0.1 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --num_train_epochs 2 \
    --pattern "top_k" \
    --off_diag 16 \
    --target_modules q_proj k_proj v_proj up_proj down_proj o_proj gate_proj \
    --adapter_name "svft"

#EVAL
#python eval_gsm8k.py --model './Gemma_2B_metamath40k_svft_16diag_random' --data_file ../MetaMath/data/test/GSM8K_test.jsonl
#python eval_math.py --model './Gemma_2B_metamath40k_svft_16diag_random' --data_file ../MetaMath/data/test/MATH_test.jsonl
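
#To evaluate every variant trained above rather than just the random one,
#the same two commands can be swept over all four SAVE_PATH checkpoints:
#for CKPT in ./Gemma_2B_metamath40k_svft_plain ./Gemma_2B_metamath40k_svft_16diag_random ./Gemma_2B_metamath40k_svft_16diag_banded ./Gemma_2B_metamath40k_svft_16diag_topk; do
#    python eval_gsm8k.py --model "$CKPT" --data_file ../MetaMath/data/test/GSM8K_test.jsonl
#    python eval_math.py --model "$CKPT" --data_file ../MetaMath/data/test/MATH_test.jsonl
#done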
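
# Optional sanity check (hypothetical helper, not the repo's implementation):
# SVFT keeps each target weight's singular vectors U, V frozen and trains a
# small coefficient matrix M (delta_W = U @ M @ V^T); --pattern / --off_diag
# choose which entries of M are trainable. The sketch below reconstructs
# those masks under stated assumptions -- the "random" budget (off_diag * r
# entries) and the "top_k" scoring rule are guesses for illustration only.
python3 - <<'PY'
import torch

def svft_mask(pattern, r, off_diag, scores=None):
    """Boolean mask of trainable entries in the r x r matrix M.
    off_diag=0 reduces to the diagonal-only SVFT_PLAIN variant."""
    mask = torch.eye(r, dtype=torch.bool)        # diagonal is always trainable
    if off_diag == 0:
        return mask
    if pattern == "banded":
        # entries within off_diag of the main diagonal
        idx = torch.arange(r)
        mask |= (idx[:, None] - idx[None, :]).abs() <= off_diag
    elif pattern == "random":
        # off_diag * r randomly placed entries (budget is an assumption)
        flat = torch.randperm(r * r)[: off_diag * r]
        mask.view(-1)[flat] = True
    elif pattern == "top_k":
        # off_diag * r highest-scoring entries; in practice the scores would
        # be derived from the pretrained weights -- random here, for
        # illustration only
        if scores is None:
            scores = torch.rand(r, r)
        mask.view(-1)[scores.flatten().topk(off_diag * r).indices] = True
    else:
        raise ValueError(f"unknown pattern: {pattern}")
    return mask

for p in ("banded", "random", "top_k"):
    m = svft_mask(p, r=64, off_diag=16)
    print(f"{p:>6s}: {int(m.sum())}/{m.numel()} trainable entries")
PY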