PRE_SEQ_LEN=128
LR=1e-2
NUM_GPUS=1

torchrun --standalone --nnodes=1 --nproc-per-node=$NUM_GPUS ptuning/main.py \
    --do_train \
    --train_file train_linghua.json \
    --validation_file dev_linghua.json \
    --preprocessing_num_workers 10 \
    --prompt_column prompt \
    --response_column response \
    --history_column history \
    --overwrite_cache \
    --model_name_or_path chatglm2-6b \
    --output_dir output_lh \
    --overwrite_output_dir \
    --max_source_length 1024 \
    --max_target_length 1024 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 16 \
    --predict_with_generate \
    --max_steps 600 \
    --logging_steps 10 \
    --save_steps 100 \
    --learning_rate $LR \
    --pre_seq_len $PRE_SEQ_LEN

Note that PRE_SEQ_LEN, LR, and NUM_GPUS must be assigned on their own lines (as above) before the torchrun invocation; if they are written inline as one-shot environment assignments in front of torchrun, the shell expands $NUM_GPUS, $LR, and $PRE_SEQ_LEN in the argument list before those assignments take effect, so the flags would receive empty values.
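For reference, the --prompt_column, --response_column, and --history_column flags above imply that train_linghua.json and dev_linghua.json are JSON Lines files in which each line carries a "prompt", a "response", and a "history" field, with "history" holding the earlier turns as [query, reply] pairs. The record below is only an illustrative sketch of that layout (the dialogue content is invented), not an excerpt from the actual dataset:

    {"prompt": "Who are you?", "response": "I am a dialogue assistant fine-tuned with P-Tuning v2.", "history": []}
    {"prompt": "What can you do?", "response": "I can answer follow-up questions using the conversation so far.", "history": [["Who are you?", "I am a dialogue assistant fine-tuned with P-Tuning v2."]]}

Single-turn samples simply use an empty "history" list, as in the first line.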