{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 310,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "learning_rate": 5.806451612903226e-05,
      "loss": 0.486,
      "step": 10
    },
    {
      "epoch": 0.32,
      "learning_rate": 5.612903225806452e-05,
      "loss": 0.346,
      "step": 20
    },
    {
      "epoch": 0.48,
      "learning_rate": 5.4193548387096774e-05,
      "loss": 0.3352,
      "step": 30
    },
    {
      "epoch": 0.65,
      "learning_rate": 5.225806451612903e-05,
      "loss": 0.3119,
      "step": 40
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.0322580645161296e-05,
      "loss": 0.3066,
      "step": 50
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.838709677419355e-05,
      "loss": 0.3096,
      "step": 60
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.3706818864523881,
      "eval_loss": 0.28623858094215393,
      "eval_runtime": 4.0392,
      "eval_samples_per_second": 244.852,
      "eval_steps_per_second": 7.675,
      "step": 62
    },
    {
      "epoch": 1.13,
      "learning_rate": 4.6451612903225805e-05,
      "loss": 0.3,
      "step": 70
    },
    {
      "epoch": 1.29,
      "learning_rate": 4.451612903225807e-05,
      "loss": 0.2951,
      "step": 80
    },
    {
      "epoch": 1.45,
      "learning_rate": 4.2580645161290327e-05,
      "loss": 0.2923,
      "step": 90
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.064516129032258e-05,
      "loss": 0.2892,
      "step": 100
    },
    {
      "epoch": 1.77,
      "learning_rate": 3.870967741935484e-05,
      "loss": 0.2917,
      "step": 110
    },
    {
      "epoch": 1.94,
      "learning_rate": 3.67741935483871e-05,
      "loss": 0.2863,
      "step": 120
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.44221665274296856,
      "eval_loss": 0.28044137358665466,
      "eval_runtime": 4.1947,
      "eval_samples_per_second": 235.772,
      "eval_steps_per_second": 7.39,
      "step": 124
    },
    {
      "epoch": 2.1,
      "learning_rate": 3.483870967741936e-05,
      "loss": 0.2758,
      "step": 130
    },
    {
      "epoch": 2.26,
      "learning_rate": 3.2903225806451614e-05,
      "loss": 0.2688,
      "step": 140
    },
    {
      "epoch": 2.42,
      "learning_rate": 3.096774193548387e-05,
      "loss": 0.2708,
      "step": 150
    },
    {
      "epoch": 2.58,
      "learning_rate": 2.903225806451613e-05,
      "loss": 0.2659,
      "step": 160
    },
    {
      "epoch": 2.74,
      "learning_rate": 2.7096774193548387e-05,
      "loss": 0.2668,
      "step": 170
    },
    {
      "epoch": 2.9,
      "learning_rate": 2.5161290322580648e-05,
      "loss": 0.2618,
      "step": 180
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.49894403379091873,
      "eval_loss": 0.277263343334198,
      "eval_runtime": 8.3935,
      "eval_samples_per_second": 117.829,
      "eval_steps_per_second": 3.693,
      "step": 186
    },
    {
      "epoch": 3.06,
      "learning_rate": 2.3225806451612902e-05,
      "loss": 0.2612,
      "step": 190
    },
    {
      "epoch": 3.23,
      "learning_rate": 2.1290322580645163e-05,
      "loss": 0.2461,
      "step": 200
    },
    {
      "epoch": 3.39,
      "learning_rate": 1.935483870967742e-05,
      "loss": 0.2419,
      "step": 210
    },
    {
      "epoch": 3.55,
      "learning_rate": 1.741935483870968e-05,
      "loss": 0.2504,
      "step": 220
    },
    {
      "epoch": 3.71,
      "learning_rate": 1.5483870967741936e-05,
      "loss": 0.2432,
      "step": 230
    },
    {
      "epoch": 3.87,
      "learning_rate": 1.3548387096774194e-05,
      "loss": 0.2432,
      "step": 240
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.5223157357816883,
      "eval_loss": 0.27642112970352173,
      "eval_runtime": 3.9495,
      "eval_samples_per_second": 250.414,
      "eval_steps_per_second": 7.849,
      "step": 248
    },
    {
      "epoch": 4.03,
      "learning_rate": 1.1612903225806451e-05,
      "loss": 0.2335,
      "step": 250
    },
    {
      "epoch": 4.19,
      "learning_rate": 9.67741935483871e-06,
      "loss": 0.2243,
      "step": 260
    },
    {
      "epoch": 4.35,
      "learning_rate": 7.741935483870968e-06,
      "loss": 0.2245,
      "step": 270
    },
    {
      "epoch": 4.52,
      "learning_rate": 5.8064516129032256e-06,
      "loss": 0.2259,
      "step": 280
    },
    {
      "epoch": 4.68,
      "learning_rate": 3.870967741935484e-06,
      "loss": 0.2251,
      "step": 290
    },
    {
      "epoch": 4.84,
      "learning_rate": 1.935483870967742e-06,
      "loss": 0.2249,
      "step": 300
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.0,
      "loss": 0.2241,
      "step": 310
    },
    {
      "epoch": 5.0,
      "eval_f1": 0.5464019851116626,
      "eval_loss": 0.27576151490211487,
      "eval_runtime": 7.713,
      "eval_samples_per_second": 128.225,
      "eval_steps_per_second": 4.019,
      "step": 310
    },
    {
      "epoch": 5.0,
      "step": 310,
      "total_flos": 1.843925880757248e+16,
      "train_loss": 0.2750998620064028,
      "train_runtime": 948.4301,
      "train_samples_per_second": 41.722,
      "train_steps_per_second": 0.327
    }
  ],
  "logging_steps": 10,
  "max_steps": 310,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 1.843925880757248e+16,
  "trial_name": null,
  "trial_params": null
}