Training in progress, epoch 1
README.md
CHANGED
```diff
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/yoon307-kaist/medgemma-27b-it-dr4-Project/runs/
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/yoon307-kaist/medgemma-27b-it-dr4-Project/runs/md2qaah5)
 
 
 This model was trained with SFT.
```
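For reference, the usage snippet sitting above this hunk (note the `print(output["generated_text"])` context line in the hunk header) presumably resembles the sketch below. The adapter repo id and prompt are assumptions, not taken from the diff; transformers with peft installed can load a LoRA adapter repo directly in a pipeline.

```python
# Hedged sketch, not the README's exact snippet: run the fine-tuned adapter
# through a text-generation pipeline. Repo id and prompt are illustrative.
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="yoon307-kaist/medgemma-27b-it-dr4",  # hypothetical adapter repo id
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

output = pipe("Grade the diabetic retinopathy findings in this report:", max_new_tokens=128)[0]
print(output["generated_text"])
```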
adapter_config.json
CHANGED
```diff
@@ -28,16 +28,16 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "
+    "out_proj",
     "q_proj",
-    "
-    "o_proj",
+    "k_proj",
     "fc1",
     "fc2",
-    "
-    "
-    "
-    "up_proj"
+    "gate_proj",
+    "v_proj",
+    "o_proj",
+    "up_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "trainable_token_indices": null,
```
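This commit widens the adapter to the full set of attention and MLP projections: `q/k/v/o_proj` and `gate/up/down_proj` on the language side, plus `out_proj`, `fc1`, and `fc2`, which match SigLIP-style vision-tower naming. A minimal peft sketch that would serialize to the `target_modules` list shown above; `r`, `lora_alpha`, and `lora_dropout` are placeholders, since they sit outside the hunk:

```python
from peft import LoraConfig

# Sketch only: rank/alpha/dropout are assumed values, not read from the diff.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    task_type="CAUSAL_LM",       # matches the config above
    target_modules=[
        "out_proj",              # vision-tower attention output projection
        "q_proj", "k_proj",      # attention query/key projections
        "fc1", "fc2",            # vision-tower MLP layers
        "gate_proj", "v_proj",   # gated-MLP and attention value projections
        "o_proj", "up_proj",
        "down_proj",
    ],
)
```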
adapter_model.safetensors
CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1e7416daa15f9e3546c7acc2cc8d97c53a83ba8f13cc7d919d2a14b5098fb268
 size 6127553104
```
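Only the Git LFS pointer lives in the repo; the `oid` is the SHA-256 of the actual ~6.1 GB safetensors payload. A small sketch for checking a downloaded file against it:

```python
import hashlib

def file_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so the 6 GB blob never sits in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Should match the oid in the pointer file above.
print(file_sha256("adapter_model.safetensors"))
```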
runs/Jul20_11-55-00_meedgxh100a/events.out.tfevents.1753026902.meedgxh100a.1324727.0
ADDED
```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45f7558c6db5183cc387904438acb08fb8bc0ae7b66c3ef1af40a4f394dee7c1
+size 9917
```
train_medgemma_ft_copy.py
CHANGED
```diff
@@ -215,9 +215,9 @@ if __name__ == '__main__':
         if train:
             return random.sample(neg, 5*num_sample), random.sample(pos, num_sample)
         else:
-
+            return random.sample(neg, num_sample), pos
             # return random.sample(neg, 15), random.sample(pos, 15)
-            return neg, pos
+            # return neg, pos
     train_dataset = sum(subset(train_dataset,train=True), [])
     val_dataset_raw = sum(subset(val_dataset_raw,train=False), [])
 
```
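The patched `else` branch changes evaluation sampling: training keeps a 5:1 negative-to-positive ratio, while validation now draws `num_sample` negatives and keeps every positive, instead of returning both lists whole. A hedged reconstruction of the surrounding function; only the two `return` statements come from the diff, and the signature and label field are assumptions:

```python
import random

def subset(dataset, train=True, num_sample=100):
    # Assumed split into negative/positive examples; the field name is illustrative.
    neg = [ex for ex in dataset if ex["label"] == 0]
    pos = [ex for ex in dataset if ex["label"] == 1]
    if train:
        # 5:1 negative-to-positive ratio for training (from the diff)
        return random.sample(neg, 5 * num_sample), random.sample(pos, num_sample)
    else:
        # Validation: subsample negatives, keep all positives (from the diff)
        return random.sample(neg, num_sample), pos
```

The `sum(subset(...), [])` calls in the context lines then concatenate the returned `(neg, pos)` tuple into one flat list.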
```diff
@@ -286,7 +286,7 @@ if __name__ == '__main__':
         from peft import PeftModel
         print("🔁 Loading trained PEFT weights...")
         # model = PeftModel.from_pretrained(model, exp_name)
-        model = PeftModel.from_pretrained(model, exp_name+"/checkpoint-
+        model = PeftModel.from_pretrained(model, exp_name+"/checkpoint-368")
         # model = PeftModel.from_pretrained(model, "llava-1.5-7b-hf-dr-all/checkpoint-80")
         phase= "val"
     else:
```
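Because `save_strategy="epoch"` in the next hunk writes numbered `checkpoint-<step>` directories under `exp_name`, the hard-coded `checkpoint-368` has to be updated after every run. An illustrative helper, not part of the script, that resolves the newest checkpoint instead:

```python
import os
import re

def latest_checkpoint(exp_name: str) -> str:
    """Illustrative helper: return the highest-numbered checkpoint-<step> dir."""
    ckpts = [d for d in os.listdir(exp_name) if re.fullmatch(r"checkpoint-\d+", d)]
    if not ckpts:
        raise FileNotFoundError(f"no checkpoints under {exp_name}")
    ckpts.sort(key=lambda d: int(d.rsplit("-", 1)[1]))
    return os.path.join(exp_name, ckpts[-1])

# e.g. model = PeftModel.from_pretrained(model, latest_checkpoint(exp_name))
```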
```diff
@@ -309,7 +309,7 @@ if __name__ == '__main__':
         save_strategy="epoch",   # Save checkpoint every epoch
         eval_strategy="steps",   # Evaluate every `eval_steps`
         eval_steps=10000,        # Number of steps between evaluations
-        learning_rate=
+        learning_rate=5e-4,      # Learning rate based on QLoRA paper
         bf16=True,               # Use bfloat16 precision
         max_grad_norm=0.3,       # Max gradient norm based on QLoRA paper
         warmup_ratio=0.03,       # Warmup ratio based on QLoRA paper
```
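Taken together, the argument block now follows the QLoRA-paper recipe the comments cite (`max_grad_norm=0.3`, `warmup_ratio=0.03`) with the learning rate pinned at `5e-4`. A minimal sketch of the enclosing trl `SFTConfig` (or the equivalent transformers `TrainingArguments`); `output_dir`, batch size, and epoch count are placeholders outside the shown hunk:

```python
from trl import SFTConfig

# Sketch under assumptions: only the arguments marked "from the diff" are shown in the hunk.
training_args = SFTConfig(
    output_dir="medgemma-27b-it-dr4",   # assumed; likely exp_name in the script
    num_train_epochs=3,                 # placeholder
    per_device_train_batch_size=1,      # placeholder
    gradient_accumulation_steps=8,      # placeholder
    save_strategy="epoch",              # save a checkpoint every epoch (from the diff)
    eval_strategy="steps",              # evaluate every `eval_steps` (from the diff)
    eval_steps=10000,                   # from the diff
    learning_rate=5e-4,                 # the value set in this commit
    bf16=True,                          # bfloat16 precision (from the diff)
    max_grad_norm=0.3,                  # QLoRA-paper default (from the diff)
    warmup_ratio=0.03,                  # QLoRA-paper default (from the diff)
)
```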
training_args.bin
CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d6872e82880b94ff7074d3aa909ee437acad559e5263eff6ee7d85664a176db4
 size 5816
```