{
  "best_metric": 4.2374186515808105,
  "best_model_checkpoint": "/admin/home-alexmallen/elk-generalization/sft-models/pythia-1b-sentiment-first-ft/checkpoint-1",
  "epoch": 0.0006129798482874875,
  "eval_steps": 10000000000,
  "global_step": 1,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "eval_val_acc_stderr": 0.007124041923148224,
      "eval_val_accuracy": 0.5301548492257538,
      "eval_val_loss": 4.2374186515808105,
      "eval_val_runtime": 26.8554,
      "eval_val_samples_per_second": 182.757,
      "eval_val_steps_per_second": 45.689,
      "step": 1
    },
    {
      "epoch": 0.0,
      "eval_val_alice_acc_stderr": 0.009082794641258494,
      "eval_val_alice_accuracy": 0.3908523908523909,
      "eval_val_alice_loss": 4.348449230194092,
      "eval_val_alice_runtime": 15.021,
      "eval_val_alice_samples_per_second": 192.131,
      "eval_val_alice_steps_per_second": 48.066,
      "step": 1
    },
    {
      "epoch": 0.0,
      "eval_val_bob_acc_stderr": 0.009912923089384363,
      "eval_val_bob_accuracy": 0.7265084075173096,
      "eval_val_bob_loss": 4.0783257484436035,
      "eval_val_bob_runtime": 10.2741,
      "eval_val_bob_samples_per_second": 196.806,
      "eval_val_bob_steps_per_second": 49.25,
      "step": 1
    },
    {
      "epoch": 0.0,
      "eval_val_bob_gt_acc_stderr": 0.011083605680821933,
      "eval_val_bob_gt_accuracy": 0.5400593471810089,
      "eval_val_bob_gt_loss": 4.27299690246582,
      "eval_val_bob_gt_runtime": 10.2876,
      "eval_val_bob_gt_samples_per_second": 196.546,
      "eval_val_bob_gt_steps_per_second": 49.185,
      "step": 1
    }
  ],
  "logging_steps": 50,
  "max_steps": 122325,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 75,
  "save_steps": 10000000000,
  "total_flos": 28080654336000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
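
This is the trainer_state.json that the Hugging Face Trainer saves alongside a checkpoint: log_history holds one entry per evaluation split (overall, alice, bob, bob_gt) recorded at global step 1, and best_metric/best_model_checkpoint track the best eval loss seen so far. As a minimal sketch (assuming the file has been downloaded locally as trainer_state.json; the filename is an assumption), the per-split accuracies above could be read back like this:

import json

# Assumption: the JSON shown above is saved locally as "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best eval loss: {state['best_metric']:.4f}")
print(f"best checkpoint: {state['best_model_checkpoint']}")

# Each log_history entry holds the metrics for one eval split at a given step;
# keys ending in "_accuracy" are the per-split accuracies logged above.
for entry in state["log_history"]:
    step = entry["step"]
    for key, value in entry.items():
        if key.endswith("_accuracy"):
            print(f"step {step}: {key} = {value:.4f}")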