{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 471,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06369426751592357,
      "grad_norm": 21.267967714208993,
      "learning_rate": 1.8750000000000003e-06,
      "loss": 2.447,
      "step": 10
    },
    {
      "epoch": 0.12738853503184713,
      "grad_norm": 4.5780419436683655,
      "learning_rate": 3.958333333333333e-06,
      "loss": 1.8303,
      "step": 20
    },
    {
      "epoch": 0.1910828025477707,
      "grad_norm": 2.6941629656600843,
      "learning_rate": 6.041666666666667e-06,
      "loss": 1.2619,
      "step": 30
    },
    {
      "epoch": 0.25477707006369427,
      "grad_norm": 2.212026239851483,
      "learning_rate": 8.125000000000001e-06,
      "loss": 1.0527,
      "step": 40
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 2.201982410023539,
      "learning_rate": 9.999862102299874e-06,
      "loss": 0.958,
      "step": 50
    },
    {
      "epoch": 0.3821656050955414,
      "grad_norm": 1.784738906519362,
      "learning_rate": 9.983323579940351e-06,
      "loss": 0.917,
      "step": 60
    },
    {
      "epoch": 0.445859872611465,
      "grad_norm": 1.8089946959171528,
      "learning_rate": 9.939310009499348e-06,
      "loss": 0.893,
      "step": 70
    },
    {
      "epoch": 0.5095541401273885,
      "grad_norm": 1.9080843392439555,
      "learning_rate": 9.868064055324204e-06,
      "loss": 0.8733,
      "step": 80
    },
    {
      "epoch": 0.5732484076433121,
      "grad_norm": 1.7471544376369283,
      "learning_rate": 9.76997852474223e-06,
      "loss": 0.8497,
      "step": 90
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 1.7890465333644847,
      "learning_rate": 9.645594202357438e-06,
      "loss": 0.8419,
      "step": 100
    },
    {
      "epoch": 0.7006369426751592,
      "grad_norm": 1.6554683912315524,
      "learning_rate": 9.495596868489588e-06,
      "loss": 0.8287,
      "step": 110
    },
    {
      "epoch": 0.7643312101910829,
      "grad_norm": 1.9772255855653391,
      "learning_rate": 9.320813518194084e-06,
      "loss": 0.8228,
      "step": 120
    },
    {
      "epoch": 0.8280254777070064,
      "grad_norm": 1.6826525630629694,
      "learning_rate": 9.122207801708802e-06,
      "loss": 0.7981,
      "step": 130
    },
    {
      "epoch": 0.89171974522293,
      "grad_norm": 1.6662718650026702,
      "learning_rate": 8.900874711466436e-06,
      "loss": 0.7887,
      "step": 140
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 1.8810043934603964,
      "learning_rate": 8.658034544965003e-06,
      "loss": 0.7796,
      "step": 150
    },
    {
      "epoch": 1.019108280254777,
      "grad_norm": 1.9585298536176439,
      "learning_rate": 8.395026176781627e-06,
      "loss": 0.7628,
      "step": 160
    },
    {
      "epoch": 1.0828025477707006,
      "grad_norm": 1.7354782145978755,
      "learning_rate": 8.113299676823614e-06,
      "loss": 0.7154,
      "step": 170
    },
    {
      "epoch": 1.1464968152866242,
      "grad_norm": 1.7084830286293966,
      "learning_rate": 7.814408315515419e-06,
      "loss": 0.7106,
      "step": 180
    },
    {
      "epoch": 1.2101910828025477,
      "grad_norm": 1.825987376263087,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.7161,
      "step": 190
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 1.9190047988376515,
      "learning_rate": 7.1718081885702905e-06,
      "loss": 0.6945,
      "step": 200
    },
    {
      "epoch": 1.3375796178343948,
      "grad_norm": 2.1028987446528586,
      "learning_rate": 6.831642333423068e-06,
      "loss": 0.6882,
      "step": 210
    },
    {
      "epoch": 1.4012738853503186,
      "grad_norm": 1.8166775369423513,
      "learning_rate": 6.481377904428171e-06,
      "loss": 0.6805,
      "step": 220
    },
    {
      "epoch": 1.4649681528662422,
      "grad_norm": 1.8389280087712272,
      "learning_rate": 6.122946048915991e-06,
      "loss": 0.6799,
      "step": 230
    },
    {
      "epoch": 1.5286624203821657,
      "grad_norm": 1.8583284059589031,
      "learning_rate": 5.75832294449293e-06,
      "loss": 0.6706,
      "step": 240
    },
    {
      "epoch": 1.5923566878980893,
      "grad_norm": 1.9125066189135418,
      "learning_rate": 5.389518903587016e-06,
      "loss": 0.6609,
      "step": 250
    },
    {
      "epoch": 1.6560509554140128,
      "grad_norm": 1.7255138285261142,
      "learning_rate": 5.0185672897946515e-06,
      "loss": 0.663,
      "step": 260
    },
    {
      "epoch": 1.7197452229299364,
      "grad_norm": 1.9014882215546125,
      "learning_rate": 4.647513307137076e-06,
      "loss": 0.6542,
      "step": 270
    },
    {
      "epoch": 1.78343949044586,
      "grad_norm": 1.859385090321573,
      "learning_rate": 4.278402724035868e-06,
      "loss": 0.6518,
      "step": 280
    },
    {
      "epoch": 1.8471337579617835,
      "grad_norm": 1.7047877441447445,
      "learning_rate": 3.913270594176665e-06,
      "loss": 0.6469,
      "step": 290
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 1.8677605053838808,
      "learning_rate": 3.5541300364475067e-06,
      "loss": 0.6451,
      "step": 300
    },
    {
      "epoch": 1.9745222929936306,
      "grad_norm": 1.8699686248923728,
      "learning_rate": 3.202961135812437e-06,
      "loss": 0.641,
      "step": 310
    },
    {
      "epoch": 2.038216560509554,
      "grad_norm": 1.9163560637178272,
      "learning_rate": 2.861700026314308e-06,
      "loss": 0.5846,
      "step": 320
    },
    {
      "epoch": 2.1019108280254777,
      "grad_norm": 1.8210677922305096,
      "learning_rate": 2.5322282163965096e-06,
      "loss": 0.5441,
      "step": 330
    },
    {
      "epoch": 2.1656050955414012,
      "grad_norm": 1.890986095076787,
      "learning_rate": 2.216362215397393e-06,
      "loss": 0.5451,
      "step": 340
    },
    {
      "epoch": 2.229299363057325,
      "grad_norm": 1.8976818157941764,
      "learning_rate": 1.91584351841065e-06,
      "loss": 0.5372,
      "step": 350
    },
    {
      "epoch": 2.2929936305732483,
      "grad_norm": 1.91457529049141,
      "learning_rate": 1.6323290047291196e-06,
      "loss": 0.5315,
      "step": 360
    },
    {
      "epoch": 2.356687898089172,
      "grad_norm": 1.9690598059338476,
      "learning_rate": 1.367381802809185e-06,
      "loss": 0.5271,
      "step": 370
    },
    {
      "epoch": 2.4203821656050954,
      "grad_norm": 1.8472167222098534,
      "learning_rate": 1.1224626721209141e-06,
      "loss": 0.5296,
      "step": 380
    },
    {
      "epoch": 2.484076433121019,
      "grad_norm": 2.058144196428039,
      "learning_rate": 8.989219493991791e-07,
      "loss": 0.5325,
      "step": 390
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 1.8587448364695671,
      "learning_rate": 6.979921036993042e-07,
      "loss": 0.5207,
      "step": 400
    },
    {
      "epoch": 2.611464968152866,
      "grad_norm": 2.0928715703094056,
      "learning_rate": 5.207809413041914e-07,
      "loss": 0.5217,
      "step": 410
    },
    {
      "epoch": 2.6751592356687897,
      "grad_norm": 2.000665019908754,
      "learning_rate": 3.6826549794698074e-07,
      "loss": 0.522,
      "step": 420
    },
    {
      "epoch": 2.738853503184713,
      "grad_norm": 2.023087471979692,
      "learning_rate": 2.4128665202382327e-07,
      "loss": 0.5162,
      "step": 430
    },
    {
      "epoch": 2.802547770700637,
      "grad_norm": 2.0096470315046724,
      "learning_rate": 1.4054448849631087e-07,
      "loss": 0.5117,
      "step": 440
    },
    {
      "epoch": 2.8662420382165603,
      "grad_norm": 1.9073167823590265,
      "learning_rate": 6.659443904419638e-08,
      "loss": 0.5215,
      "step": 450
    },
    {
      "epoch": 2.9299363057324843,
      "grad_norm": 2.0220121289035116,
      "learning_rate": 1.984421974927375e-08,
      "loss": 0.5242,
      "step": 460
    },
    {
      "epoch": 2.9936305732484074,
      "grad_norm": 1.9251801115406384,
      "learning_rate": 5.515831941993455e-10,
      "loss": 0.5166,
      "step": 470
    }
  ],
  "logging_steps": 10,
  "max_steps": 471,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 36272660545536.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}