SimCSE-test / run_unsup_GPU.sh
#!/bin/bash
# This script trains unsupervised SimCSE on 1M Norwegian news sentences
# (1998-2019), starting from the NbAiLab/nb-bert-base checkpoint.
# To train with multiple GPU cards, see "run_sup_example.sh"
# for how to use PyTorch's distributed data parallel.
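# Usage example: any extra arguments passed to this script are forwarded to
# train.py via "$@" at the end of the command, so hyperparameters can be
# overridden on the command line without editing the file, e.g.:
#   bash run_unsup_GPU.sh --per_device_train_batch_size 128 --learning_rate 1e-5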
# GCP zone and PyTorch/XLA TPU-related environment variables; these are only
# read by torch_xla/libtpu and are not needed for single-GPU training.
export ZONE=us-central2-b
export XRT_TPU_CONFIG="localservice;0;localhost:51011"
export TPU_NUM_DEVICES=4
export ALLOW_MULTIPLE_LIBTPU_LOAD=1
python3 ../../SimCSE/train.py \
    --model_name_or_path NbAiLab/nb-bert-base \
    --train_file data/nor_news_1998_2019_sentences_1M.txt \
    --output_dir result/unsup-simcse-nb-bert-bert-base-gpu \
    --num_train_epochs 1 \
    --per_device_train_batch_size 64 \
    --learning_rate 3e-5 \
    --max_seq_length 32 \
    --evaluation_strategy steps \
    --metric_for_best_model stsb_spearman \
    --load_best_model_at_end \
    --eval_steps 125 \
    --pooler_type cls \
    --mlp_only_train \
    --overwrite_output_dir \
    --temp 0.05 \
    --do_train \
    --do_eval \
    --fp16 \
    "$@"