import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from trl import DPOConfig, DPOTrainer
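# Assumes a recent trl (with DPOConfig), plus peft, bitsandbytes, and flash-attn installed.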


if __name__ == "__main__":
	model_name = "..."  # placeholder: the base checkpoint to fine-tune
	dataset = load_dataset(...)  # placeholder: preference data with "prompt"/"chosen"/"rejected" columns
	
	tokenizer = AutoTokenizer.from_pretrained(model_name)
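	# Many causal LM checkpoints ship without a pad token, so reuse EOS for padding.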
	tokenizer.pad_token = tokenizer.eos_token

	# Quantization settings belong in a BitsAndBytesConfig passed via
	# `quantization_config`; they are not from_pretrained() keyword arguments.
	bnb_config = BitsAndBytesConfig(
		load_in_4bit=True,
		bnb_4bit_quant_type="nf4",
		bnb_4bit_compute_dtype=torch.bfloat16,
	)

	model = AutoModelForCausalLM.from_pretrained(
		model_name,
		low_cpu_mem_usage=True,
		torch_dtype=torch.bfloat16,
		quantization_config=bnb_config,
		attn_implementation="flash_attention_2",
	)
	# Reusing EOS adds no new tokens, so no resize_token_embeddings() call is needed.
	model.config.pad_token_id = tokenizer.pad_token_id
	model.config.use_cache = False  # incompatible with gradient checkpointing

	# No separate reference model is needed when training a PEFT adapter:
	# TRL recovers the reference policy by disabling the adapter, and recent
	# TRL versions reject passing both `ref_model` and `peft_config`.

	# LoRA adapter: rank 64 with alpha 128 (scaling alpha/r = 2) on the
	# attention projections; "o_proj" and the MLP projections are common additions.
	peft_config = LoraConfig(
		lora_alpha=128,
		lora_dropout=0.05,
		r=64,
		bias="none",
		task_type="CAUSAL_LM",
		target_modules=[
			"q_proj",
			"k_proj",
			"v_proj",
		],
	)
	# The adapter is attached inside DPOTrainer via `peft_config`, so the base
	# model is passed to the trainer unwrapped.

	training_args = DPOConfig(
		beta=0.1,  # DPO KL-penalty coefficient (TRL's default)
		max_length=1024,  # token budget for prompt + completion
		max_prompt_length=512,
		num_train_epochs=3,
		learning_rate=5e-07,  # DPO typically uses a far lower LR than SFT
		per_device_train_batch_size=1,
		do_eval=True,
		eval_strategy="steps",  # `evaluation_strategy` on transformers<4.41
		eval_steps=500,  # evaluate on the same cadence as checkpointing
		per_device_eval_batch_size=1,
		adam_epsilon=1e-08,
		lr_scheduler_type="linear",
		warmup_ratio=0.1,
		seed=42,
		logging_steps=100,
		save_steps=500,
		save_strategy="steps",
		output_dir="./output-dir",
		gradient_checkpointing=True,  # recompute activations to save memory
		bf16=True,
		remove_unused_columns=False,  # keep the raw prompt/chosen/rejected columns
	)

	dpo_trainer = DPOTrainer(
		model,
		ref_model=None,  # reference policy = base model with the adapter disabled
		args=training_args,
		train_dataset=dataset["train"],
		eval_dataset=dataset["test"],
		tokenizer=tokenizer,  # newer trl versions use `processing_class=tokenizer`
		peft_config=peft_config,
	)
	dpo_trainer.train()
	dpo_trainer.save_model()
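	# save_model() writes the trained LoRA adapter (not merged weights) to output_dir.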