{
"best_global_step": 2000,
"best_metric": 0.2030767798423767,
"best_model_checkpoint": "./qwen2-7b-math-coder/checkpoint-2000",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 2183,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004580852038479157,
"grad_norm": 3.668745756149292,
"learning_rate": 4.5e-06,
"loss": 2.0337,
"step": 10
},
{
"epoch": 0.009161704076958314,
"grad_norm": 3.277991533279419,
"learning_rate": 9.5e-06,
"loss": 1.7448,
"step": 20
},
{
"epoch": 0.013742556115437472,
"grad_norm": 2.6093215942382812,
"learning_rate": 1.45e-05,
"loss": 1.2869,
"step": 30
},
{
"epoch": 0.01832340815391663,
"grad_norm": 1.7964814901351929,
"learning_rate": 1.9500000000000003e-05,
"loss": 0.9077,
"step": 40
},
{
"epoch": 0.022904260192395786,
"grad_norm": 1.5602203607559204,
"learning_rate": 2.45e-05,
"loss": 0.5228,
"step": 50
},
{
"epoch": 0.027485112230874943,
"grad_norm": 2.1516997814178467,
"learning_rate": 2.95e-05,
"loss": 0.3872,
"step": 60
},
{
"epoch": 0.0320659642693541,
"grad_norm": 1.4016958475112915,
"learning_rate": 3.45e-05,
"loss": 0.3371,
"step": 70
},
{
"epoch": 0.03664681630783326,
"grad_norm": 1.5028544664382935,
"learning_rate": 3.9500000000000005e-05,
"loss": 0.31,
"step": 80
},
{
"epoch": 0.04122766834631241,
"grad_norm": 1.7520164251327515,
"learning_rate": 4.4500000000000004e-05,
"loss": 0.2495,
"step": 90
},
{
"epoch": 0.04580852038479157,
"grad_norm": 0.7592225074768066,
"learning_rate": 4.9500000000000004e-05,
"loss": 0.238,
"step": 100
},
{
"epoch": 0.050389372423270726,
"grad_norm": 1.602748990058899,
"learning_rate": 4.978396543446952e-05,
"loss": 0.2472,
"step": 110
},
{
"epoch": 0.054970224461749886,
"grad_norm": 0.8727825880050659,
"learning_rate": 4.954392702832453e-05,
"loss": 0.2455,
"step": 120
},
{
"epoch": 0.05955107650022904,
"grad_norm": 0.8755759596824646,
"learning_rate": 4.930388862217955e-05,
"loss": 0.2514,
"step": 130
},
{
"epoch": 0.0641319285387082,
"grad_norm": 1.3854660987854004,
"learning_rate": 4.906385021603457e-05,
"loss": 0.2415,
"step": 140
},
{
"epoch": 0.06871278057718735,
"grad_norm": 0.6988345384597778,
"learning_rate": 4.8823811809889585e-05,
"loss": 0.248,
"step": 150
},
{
"epoch": 0.07329363261566652,
"grad_norm": 0.7716445326805115,
"learning_rate": 4.8583773403744604e-05,
"loss": 0.2289,
"step": 160
},
{
"epoch": 0.07787448465414568,
"grad_norm": 0.618770956993103,
"learning_rate": 4.8343734997599615e-05,
"loss": 0.2631,
"step": 170
},
{
"epoch": 0.08245533669262482,
"grad_norm": 0.311239093542099,
"learning_rate": 4.8103696591454634e-05,
"loss": 0.2291,
"step": 180
},
{
"epoch": 0.08703618873110398,
"grad_norm": 0.6665637493133545,
"learning_rate": 4.786365818530965e-05,
"loss": 0.2227,
"step": 190
},
{
"epoch": 0.09161704076958314,
"grad_norm": 0.7171844840049744,
"learning_rate": 4.762361977916467e-05,
"loss": 0.2291,
"step": 200
},
{
"epoch": 0.0961978928080623,
"grad_norm": 0.606952965259552,
"learning_rate": 4.738358137301969e-05,
"loss": 0.2496,
"step": 210
},
{
"epoch": 0.10077874484654145,
"grad_norm": 0.6014417409896851,
"learning_rate": 4.71435429668747e-05,
"loss": 0.217,
"step": 220
},
{
"epoch": 0.10535959688502061,
"grad_norm": 0.5063177943229675,
"learning_rate": 4.690350456072972e-05,
"loss": 0.2155,
"step": 230
},
{
"epoch": 0.10994044892349977,
"grad_norm": 0.4963516294956207,
"learning_rate": 4.666346615458473e-05,
"loss": 0.2067,
"step": 240
},
{
"epoch": 0.11452130096197893,
"grad_norm": 0.6876777410507202,
"learning_rate": 4.642342774843975e-05,
"loss": 0.2368,
"step": 250
},
{
"epoch": 0.11910215300045808,
"grad_norm": 0.33562833070755005,
"learning_rate": 4.618338934229477e-05,
"loss": 0.2215,
"step": 260
},
{
"epoch": 0.12368300503893724,
"grad_norm": 0.45656174421310425,
"learning_rate": 4.5943350936149786e-05,
"loss": 0.2051,
"step": 270
},
{
"epoch": 0.1282638570774164,
"grad_norm": 0.5994214415550232,
"learning_rate": 4.5703312530004805e-05,
"loss": 0.2271,
"step": 280
},
{
"epoch": 0.13284470911589555,
"grad_norm": 0.6960343718528748,
"learning_rate": 4.5463274123859816e-05,
"loss": 0.1914,
"step": 290
},
{
"epoch": 0.1374255611543747,
"grad_norm": 0.5771898031234741,
"learning_rate": 4.5223235717714835e-05,
"loss": 0.2365,
"step": 300
},
{
"epoch": 0.14200641319285387,
"grad_norm": 0.3877688944339752,
"learning_rate": 4.498319731156985e-05,
"loss": 0.2018,
"step": 310
},
{
"epoch": 0.14658726523133303,
"grad_norm": 0.5205092430114746,
"learning_rate": 4.474315890542487e-05,
"loss": 0.2592,
"step": 320
},
{
"epoch": 0.1511681172698122,
"grad_norm": 0.8437159061431885,
"learning_rate": 4.450312049927989e-05,
"loss": 0.2497,
"step": 330
},
{
"epoch": 0.15574896930829135,
"grad_norm": 1.130053997039795,
"learning_rate": 4.426308209313491e-05,
"loss": 0.2258,
"step": 340
},
{
"epoch": 0.1603298213467705,
"grad_norm": 0.4410316050052643,
"learning_rate": 4.402304368698992e-05,
"loss": 0.2134,
"step": 350
},
{
"epoch": 0.16491067338524965,
"grad_norm": 0.7471569776535034,
"learning_rate": 4.378300528084494e-05,
"loss": 0.2121,
"step": 360
},
{
"epoch": 0.1694915254237288,
"grad_norm": 0.6091960072517395,
"learning_rate": 4.354296687469995e-05,
"loss": 0.2367,
"step": 370
},
{
"epoch": 0.17407237746220797,
"grad_norm": 0.37467148900032043,
"learning_rate": 4.330292846855497e-05,
"loss": 0.2127,
"step": 380
},
{
"epoch": 0.17865322950068713,
"grad_norm": 0.4893397390842438,
"learning_rate": 4.306289006240999e-05,
"loss": 0.2018,
"step": 390
},
{
"epoch": 0.1832340815391663,
"grad_norm": 0.41944998502731323,
"learning_rate": 4.2822851656265006e-05,
"loss": 0.2163,
"step": 400
},
{
"epoch": 0.18781493357764545,
"grad_norm": 0.327027291059494,
"learning_rate": 4.2582813250120024e-05,
"loss": 0.2274,
"step": 410
},
{
"epoch": 0.1923957856161246,
"grad_norm": 0.4042939245700836,
"learning_rate": 4.2342774843975036e-05,
"loss": 0.182,
"step": 420
},
{
"epoch": 0.19697663765460377,
"grad_norm": 0.5198964476585388,
"learning_rate": 4.2102736437830054e-05,
"loss": 0.2229,
"step": 430
},
{
"epoch": 0.2015574896930829,
"grad_norm": 0.5307021737098694,
"learning_rate": 4.186269803168507e-05,
"loss": 0.2123,
"step": 440
},
{
"epoch": 0.20613834173156206,
"grad_norm": 0.3677486777305603,
"learning_rate": 4.162265962554009e-05,
"loss": 0.2166,
"step": 450
},
{
"epoch": 0.21071919377004122,
"grad_norm": 0.38226303458213806,
"learning_rate": 4.138262121939511e-05,
"loss": 0.2389,
"step": 460
},
{
"epoch": 0.21530004580852038,
"grad_norm": 0.4093310534954071,
"learning_rate": 4.114258281325012e-05,
"loss": 0.1985,
"step": 470
},
{
"epoch": 0.21988089784699955,
"grad_norm": 0.3003683090209961,
"learning_rate": 4.090254440710514e-05,
"loss": 0.1982,
"step": 480
},
{
"epoch": 0.2244617498854787,
"grad_norm": 0.48493343591690063,
"learning_rate": 4.066250600096015e-05,
"loss": 0.1956,
"step": 490
},
{
"epoch": 0.22904260192395787,
"grad_norm": 0.44902732968330383,
"learning_rate": 4.042246759481517e-05,
"loss": 0.2145,
"step": 500
},
{
"epoch": 0.22904260192395787,
"eval_loss": 0.21088281273841858,
"eval_runtime": 30.1602,
"eval_samples_per_second": 16.081,
"eval_steps_per_second": 8.057,
"step": 500
},
{
"epoch": 0.233623453962437,
"grad_norm": 0.26024484634399414,
"learning_rate": 4.018242918867019e-05,
"loss": 0.2156,
"step": 510
},
{
"epoch": 0.23820430600091616,
"grad_norm": 0.3754175901412964,
"learning_rate": 3.994239078252521e-05,
"loss": 0.2071,
"step": 520
},
{
"epoch": 0.24278515803939532,
"grad_norm": 0.32323503494262695,
"learning_rate": 3.9702352376380225e-05,
"loss": 0.2093,
"step": 530
},
{
"epoch": 0.24736601007787448,
"grad_norm": 0.41066277027130127,
"learning_rate": 3.946231397023524e-05,
"loss": 0.182,
"step": 540
},
{
"epoch": 0.25194686211635364,
"grad_norm": 0.32582810521125793,
"learning_rate": 3.9222275564090255e-05,
"loss": 0.1954,
"step": 550
},
{
"epoch": 0.2565277141548328,
"grad_norm": 0.2638744115829468,
"learning_rate": 3.8982237157945274e-05,
"loss": 0.2034,
"step": 560
},
{
"epoch": 0.26110856619331196,
"grad_norm": 0.3939040005207062,
"learning_rate": 3.874219875180029e-05,
"loss": 0.2009,
"step": 570
},
{
"epoch": 0.2656894182317911,
"grad_norm": 0.3458320200443268,
"learning_rate": 3.850216034565531e-05,
"loss": 0.2195,
"step": 580
},
{
"epoch": 0.2702702702702703,
"grad_norm": 0.5092883706092834,
"learning_rate": 3.826212193951033e-05,
"loss": 0.2103,
"step": 590
},
{
"epoch": 0.2748511223087494,
"grad_norm": 0.3199244439601898,
"learning_rate": 3.802208353336534e-05,
"loss": 0.196,
"step": 600
},
{
"epoch": 0.2794319743472286,
"grad_norm": 0.2731751799583435,
"learning_rate": 3.778204512722036e-05,
"loss": 0.1827,
"step": 610
},
{
"epoch": 0.28401282638570774,
"grad_norm": 0.5365939736366272,
"learning_rate": 3.754200672107537e-05,
"loss": 0.2467,
"step": 620
},
{
"epoch": 0.2885936784241869,
"grad_norm": 0.6997144222259521,
"learning_rate": 3.730196831493039e-05,
"loss": 0.204,
"step": 630
},
{
"epoch": 0.29317453046266606,
"grad_norm": 0.5586032271385193,
"learning_rate": 3.706192990878541e-05,
"loss": 0.206,
"step": 640
},
{
"epoch": 0.2977553825011452,
"grad_norm": 0.4491008222103119,
"learning_rate": 3.6821891502640426e-05,
"loss": 0.2008,
"step": 650
},
{
"epoch": 0.3023362345396244,
"grad_norm": 0.4853453040122986,
"learning_rate": 3.658185309649544e-05,
"loss": 0.2112,
"step": 660
},
{
"epoch": 0.3069170865781035,
"grad_norm": 0.5321414470672607,
"learning_rate": 3.6341814690350456e-05,
"loss": 0.1952,
"step": 670
},
{
"epoch": 0.3114979386165827,
"grad_norm": 0.3484733998775482,
"learning_rate": 3.6101776284205475e-05,
"loss": 0.2126,
"step": 680
},
{
"epoch": 0.31607879065506184,
"grad_norm": 0.5444738268852234,
"learning_rate": 3.586173787806049e-05,
"loss": 0.203,
"step": 690
},
{
"epoch": 0.320659642693541,
"grad_norm": 0.4600459933280945,
"learning_rate": 3.562169947191551e-05,
"loss": 0.1978,
"step": 700
},
{
"epoch": 0.32524049473202016,
"grad_norm": 0.32603543996810913,
"learning_rate": 3.538166106577053e-05,
"loss": 0.1853,
"step": 710
},
{
"epoch": 0.3298213467704993,
"grad_norm": 0.23920612037181854,
"learning_rate": 3.514162265962554e-05,
"loss": 0.1951,
"step": 720
},
{
"epoch": 0.3344021988089785,
"grad_norm": 0.39811599254608154,
"learning_rate": 3.490158425348056e-05,
"loss": 0.2005,
"step": 730
},
{
"epoch": 0.3389830508474576,
"grad_norm": 0.3018939793109894,
"learning_rate": 3.466154584733557e-05,
"loss": 0.1566,
"step": 740
},
{
"epoch": 0.3435639028859368,
"grad_norm": 0.47183758020401,
"learning_rate": 3.442150744119059e-05,
"loss": 0.208,
"step": 750
},
{
"epoch": 0.34814475492441593,
"grad_norm": 0.3024599254131317,
"learning_rate": 3.418146903504561e-05,
"loss": 0.1986,
"step": 760
},
{
"epoch": 0.3527256069628951,
"grad_norm": 0.6011927723884583,
"learning_rate": 3.394143062890063e-05,
"loss": 0.2189,
"step": 770
},
{
"epoch": 0.35730645900137425,
"grad_norm": 0.3051789700984955,
"learning_rate": 3.370139222275564e-05,
"loss": 0.2245,
"step": 780
},
{
"epoch": 0.3618873110398534,
"grad_norm": 0.46620339155197144,
"learning_rate": 3.346135381661066e-05,
"loss": 0.222,
"step": 790
},
{
"epoch": 0.3664681630783326,
"grad_norm": 0.44738519191741943,
"learning_rate": 3.3221315410465676e-05,
"loss": 0.192,
"step": 800
},
{
"epoch": 0.3710490151168117,
"grad_norm": 0.47053661942481995,
"learning_rate": 3.2981277004320694e-05,
"loss": 0.1994,
"step": 810
},
{
"epoch": 0.3756298671552909,
"grad_norm": 0.37152695655822754,
"learning_rate": 3.274123859817571e-05,
"loss": 0.2318,
"step": 820
},
{
"epoch": 0.38021071919377003,
"grad_norm": 0.3711836338043213,
"learning_rate": 3.250120019203073e-05,
"loss": 0.209,
"step": 830
},
{
"epoch": 0.3847915712322492,
"grad_norm": 0.6674831509590149,
"learning_rate": 3.226116178588574e-05,
"loss": 0.2195,
"step": 840
},
{
"epoch": 0.38937242327072835,
"grad_norm": 0.9787792563438416,
"learning_rate": 3.202112337974076e-05,
"loss": 0.1758,
"step": 850
},
{
"epoch": 0.39395327530920754,
"grad_norm": 0.34313926100730896,
"learning_rate": 3.178108497359578e-05,
"loss": 0.2291,
"step": 860
},
{
"epoch": 0.3985341273476867,
"grad_norm": 0.273093044757843,
"learning_rate": 3.154104656745079e-05,
"loss": 0.2128,
"step": 870
},
{
"epoch": 0.4031149793861658,
"grad_norm": 0.39359042048454285,
"learning_rate": 3.130100816130581e-05,
"loss": 0.1886,
"step": 880
},
{
"epoch": 0.407695831424645,
"grad_norm": 0.28753426671028137,
"learning_rate": 3.106096975516083e-05,
"loss": 0.1942,
"step": 890
},
{
"epoch": 0.4122766834631241,
"grad_norm": 0.5091426372528076,
"learning_rate": 3.082093134901584e-05,
"loss": 0.2132,
"step": 900
},
{
"epoch": 0.4168575355016033,
"grad_norm": 0.5309694409370422,
"learning_rate": 3.058089294287086e-05,
"loss": 0.215,
"step": 910
},
{
"epoch": 0.42143838754008245,
"grad_norm": 0.2943398952484131,
"learning_rate": 3.0340854536725876e-05,
"loss": 0.2151,
"step": 920
},
{
"epoch": 0.42601923957856164,
"grad_norm": 0.29028087854385376,
"learning_rate": 3.0100816130580895e-05,
"loss": 0.1953,
"step": 930
},
{
"epoch": 0.43060009161704077,
"grad_norm": 0.2482720911502838,
"learning_rate": 2.9860777724435913e-05,
"loss": 0.2022,
"step": 940
},
{
"epoch": 0.4351809436555199,
"grad_norm": 0.23267552256584167,
"learning_rate": 2.962073931829093e-05,
"loss": 0.2134,
"step": 950
},
{
"epoch": 0.4397617956939991,
"grad_norm": 0.2027808278799057,
"learning_rate": 2.9380700912145943e-05,
"loss": 0.2015,
"step": 960
},
{
"epoch": 0.4443426477324782,
"grad_norm": 0.3373633027076721,
"learning_rate": 2.914066250600096e-05,
"loss": 0.1959,
"step": 970
},
{
"epoch": 0.4489234997709574,
"grad_norm": 0.3878941237926483,
"learning_rate": 2.8900624099855977e-05,
"loss": 0.1879,
"step": 980
},
{
"epoch": 0.45350435180943655,
"grad_norm": 0.2899917960166931,
"learning_rate": 2.8660585693710995e-05,
"loss": 0.2115,
"step": 990
},
{
"epoch": 0.45808520384791573,
"grad_norm": 0.2504674196243286,
"learning_rate": 2.8420547287566014e-05,
"loss": 0.219,
"step": 1000
},
{
"epoch": 0.45808520384791573,
"eval_loss": 0.20872141420841217,
"eval_runtime": 30.4982,
"eval_samples_per_second": 15.903,
"eval_steps_per_second": 7.968,
"step": 1000
},
{
"epoch": 0.46266605588639487,
"grad_norm": 0.3412993550300598,
"learning_rate": 2.818050888142103e-05,
"loss": 0.1925,
"step": 1010
},
{
"epoch": 0.467246907924874,
"grad_norm": 0.2508837878704071,
"learning_rate": 2.7940470475276047e-05,
"loss": 0.2208,
"step": 1020
},
{
"epoch": 0.4718277599633532,
"grad_norm": 0.4609987139701843,
"learning_rate": 2.770043206913106e-05,
"loss": 0.1932,
"step": 1030
},
{
"epoch": 0.4764086120018323,
"grad_norm": 0.21953082084655762,
"learning_rate": 2.7460393662986077e-05,
"loss": 0.2213,
"step": 1040
},
{
"epoch": 0.4809894640403115,
"grad_norm": 0.3218333125114441,
"learning_rate": 2.7220355256841096e-05,
"loss": 0.2062,
"step": 1050
},
{
"epoch": 0.48557031607879064,
"grad_norm": 0.4241851270198822,
"learning_rate": 2.6980316850696114e-05,
"loss": 0.2001,
"step": 1060
},
{
"epoch": 0.49015116811726983,
"grad_norm": 0.3520572781562805,
"learning_rate": 2.6740278444551133e-05,
"loss": 0.1886,
"step": 1070
},
{
"epoch": 0.49473202015574896,
"grad_norm": 0.573712170124054,
"learning_rate": 2.6500240038406148e-05,
"loss": 0.1942,
"step": 1080
},
{
"epoch": 0.49931287219422815,
"grad_norm": 0.18866749107837677,
"learning_rate": 2.626020163226116e-05,
"loss": 0.2078,
"step": 1090
},
{
"epoch": 0.5038937242327073,
"grad_norm": 0.3237588107585907,
"learning_rate": 2.6020163226116178e-05,
"loss": 0.2003,
"step": 1100
},
{
"epoch": 0.5084745762711864,
"grad_norm": 0.49110954999923706,
"learning_rate": 2.5780124819971196e-05,
"loss": 0.2009,
"step": 1110
},
{
"epoch": 0.5130554283096656,
"grad_norm": 0.22500014305114746,
"learning_rate": 2.5540086413826215e-05,
"loss": 0.2043,
"step": 1120
},
{
"epoch": 0.5176362803481448,
"grad_norm": 0.35644659399986267,
"learning_rate": 2.5300048007681233e-05,
"loss": 0.1928,
"step": 1130
},
{
"epoch": 0.5222171323866239,
"grad_norm": 0.22695258259773254,
"learning_rate": 2.5060009601536248e-05,
"loss": 0.1993,
"step": 1140
},
{
"epoch": 0.5267979844251031,
"grad_norm": 0.3560752868652344,
"learning_rate": 2.4819971195391263e-05,
"loss": 0.2123,
"step": 1150
},
{
"epoch": 0.5313788364635822,
"grad_norm": 0.5743483304977417,
"learning_rate": 2.457993278924628e-05,
"loss": 0.2096,
"step": 1160
},
{
"epoch": 0.5359596885020614,
"grad_norm": 0.6463412046432495,
"learning_rate": 2.4339894383101297e-05,
"loss": 0.2109,
"step": 1170
},
{
"epoch": 0.5405405405405406,
"grad_norm": 0.40462425351142883,
"learning_rate": 2.4099855976956315e-05,
"loss": 0.1983,
"step": 1180
},
{
"epoch": 0.5451213925790197,
"grad_norm": 0.43881702423095703,
"learning_rate": 2.385981757081133e-05,
"loss": 0.2367,
"step": 1190
},
{
"epoch": 0.5497022446174988,
"grad_norm": 0.5048204064369202,
"learning_rate": 2.361977916466635e-05,
"loss": 0.1943,
"step": 1200
},
{
"epoch": 0.554283096655978,
"grad_norm": 1.1803090572357178,
"learning_rate": 2.3379740758521364e-05,
"loss": 0.215,
"step": 1210
},
{
"epoch": 0.5588639486944572,
"grad_norm": 0.31324031949043274,
"learning_rate": 2.313970235237638e-05,
"loss": 0.1895,
"step": 1220
},
{
"epoch": 0.5634448007329363,
"grad_norm": 0.4800913631916046,
"learning_rate": 2.2899663946231397e-05,
"loss": 0.1957,
"step": 1230
},
{
"epoch": 0.5680256527714155,
"grad_norm": 0.5718823075294495,
"learning_rate": 2.2659625540086416e-05,
"loss": 0.2093,
"step": 1240
},
{
"epoch": 0.5726065048098946,
"grad_norm": 0.30681464076042175,
"learning_rate": 2.241958713394143e-05,
"loss": 0.2077,
"step": 1250
},
{
"epoch": 0.5771873568483737,
"grad_norm": 0.49799302220344543,
"learning_rate": 2.217954872779645e-05,
"loss": 0.2158,
"step": 1260
},
{
"epoch": 0.581768208886853,
"grad_norm": 0.21711622178554535,
"learning_rate": 2.1939510321651464e-05,
"loss": 0.2057,
"step": 1270
},
{
"epoch": 0.5863490609253321,
"grad_norm": 0.31030192971229553,
"learning_rate": 2.169947191550648e-05,
"loss": 0.2032,
"step": 1280
},
{
"epoch": 0.5909299129638113,
"grad_norm": 0.7181716561317444,
"learning_rate": 2.1459433509361498e-05,
"loss": 0.2101,
"step": 1290
},
{
"epoch": 0.5955107650022904,
"grad_norm": 0.251812219619751,
"learning_rate": 2.1219395103216516e-05,
"loss": 0.2245,
"step": 1300
},
{
"epoch": 0.6000916170407696,
"grad_norm": 0.293948769569397,
"learning_rate": 2.097935669707153e-05,
"loss": 0.1992,
"step": 1310
},
{
"epoch": 0.6046724690792488,
"grad_norm": 0.28254538774490356,
"learning_rate": 2.073931829092655e-05,
"loss": 0.1858,
"step": 1320
},
{
"epoch": 0.6092533211177279,
"grad_norm": 0.4375278651714325,
"learning_rate": 2.0499279884781568e-05,
"loss": 0.2239,
"step": 1330
},
{
"epoch": 0.613834173156207,
"grad_norm": 0.38942787051200867,
"learning_rate": 2.0259241478636583e-05,
"loss": 0.1999,
"step": 1340
},
{
"epoch": 0.6184150251946862,
"grad_norm": 0.2908436954021454,
"learning_rate": 2.0019203072491598e-05,
"loss": 0.1757,
"step": 1350
},
{
"epoch": 0.6229958772331654,
"grad_norm": 0.3524945080280304,
"learning_rate": 1.9779164666346617e-05,
"loss": 0.1893,
"step": 1360
},
{
"epoch": 0.6275767292716445,
"grad_norm": 0.2588827610015869,
"learning_rate": 1.9539126260201635e-05,
"loss": 0.1961,
"step": 1370
},
{
"epoch": 0.6321575813101237,
"grad_norm": 0.4896959662437439,
"learning_rate": 1.929908785405665e-05,
"loss": 0.2097,
"step": 1380
},
{
"epoch": 0.6367384333486028,
"grad_norm": 0.7204357385635376,
"learning_rate": 1.905904944791167e-05,
"loss": 0.2129,
"step": 1390
},
{
"epoch": 0.641319285387082,
"grad_norm": 0.6241590976715088,
"learning_rate": 1.8819011041766684e-05,
"loss": 0.2015,
"step": 1400
},
{
"epoch": 0.6459001374255612,
"grad_norm": 0.267800509929657,
"learning_rate": 1.85789726356217e-05,
"loss": 0.2002,
"step": 1410
},
{
"epoch": 0.6504809894640403,
"grad_norm": 0.4345090985298157,
"learning_rate": 1.8338934229476717e-05,
"loss": 0.2089,
"step": 1420
},
{
"epoch": 0.6550618415025194,
"grad_norm": 0.32288020849227905,
"learning_rate": 1.8098895823331736e-05,
"loss": 0.1892,
"step": 1430
},
{
"epoch": 0.6596426935409986,
"grad_norm": 0.6691403388977051,
"learning_rate": 1.785885741718675e-05,
"loss": 0.2104,
"step": 1440
},
{
"epoch": 0.6642235455794778,
"grad_norm": 0.46801456809043884,
"learning_rate": 1.761881901104177e-05,
"loss": 0.212,
"step": 1450
},
{
"epoch": 0.668804397617957,
"grad_norm": 0.930931031703949,
"learning_rate": 1.7378780604896784e-05,
"loss": 0.2013,
"step": 1460
},
{
"epoch": 0.6733852496564361,
"grad_norm": 0.5171828866004944,
"learning_rate": 1.71387421987518e-05,
"loss": 0.1992,
"step": 1470
},
{
"epoch": 0.6779661016949152,
"grad_norm": 0.2657724618911743,
"learning_rate": 1.6898703792606818e-05,
"loss": 0.2084,
"step": 1480
},
{
"epoch": 0.6825469537333944,
"grad_norm": 0.43800339102745056,
"learning_rate": 1.6658665386461836e-05,
"loss": 0.2024,
"step": 1490
},
{
"epoch": 0.6871278057718736,
"grad_norm": 0.35703760385513306,
"learning_rate": 1.641862698031685e-05,
"loss": 0.187,
"step": 1500
},
{
"epoch": 0.6871278057718736,
"eval_loss": 0.20446836948394775,
"eval_runtime": 30.5178,
"eval_samples_per_second": 15.892,
"eval_steps_per_second": 7.963,
"step": 1500
},
{
"epoch": 0.6917086578103527,
"grad_norm": 0.45478180050849915,
"learning_rate": 1.617858857417187e-05,
"loss": 0.2277,
"step": 1510
},
{
"epoch": 0.6962895098488319,
"grad_norm": 0.44794324040412903,
"learning_rate": 1.5938550168026885e-05,
"loss": 0.2169,
"step": 1520
},
{
"epoch": 0.700870361887311,
"grad_norm": 0.39482489228248596,
"learning_rate": 1.56985117618819e-05,
"loss": 0.1952,
"step": 1530
},
{
"epoch": 0.7054512139257902,
"grad_norm": 0.48929938673973083,
"learning_rate": 1.5458473355736918e-05,
"loss": 0.2114,
"step": 1540
},
{
"epoch": 0.7100320659642694,
"grad_norm": 0.35949134826660156,
"learning_rate": 1.5218434949591937e-05,
"loss": 0.2038,
"step": 1550
},
{
"epoch": 0.7146129180027485,
"grad_norm": 0.34692731499671936,
"learning_rate": 1.4978396543446952e-05,
"loss": 0.2074,
"step": 1560
},
{
"epoch": 0.7191937700412276,
"grad_norm": 0.2859888970851898,
"learning_rate": 1.4738358137301968e-05,
"loss": 0.184,
"step": 1570
},
{
"epoch": 0.7237746220797068,
"grad_norm": 0.24184338748455048,
"learning_rate": 1.4498319731156987e-05,
"loss": 0.2076,
"step": 1580
},
{
"epoch": 0.728355474118186,
"grad_norm": 0.29981064796447754,
"learning_rate": 1.4258281325012002e-05,
"loss": 0.2057,
"step": 1590
},
{
"epoch": 0.7329363261566652,
"grad_norm": 0.3872995376586914,
"learning_rate": 1.4018242918867019e-05,
"loss": 0.1896,
"step": 1600
},
{
"epoch": 0.7375171781951443,
"grad_norm": 0.35126006603240967,
"learning_rate": 1.3778204512722037e-05,
"loss": 0.2043,
"step": 1610
},
{
"epoch": 0.7420980302336234,
"grad_norm": 0.43742287158966064,
"learning_rate": 1.3538166106577052e-05,
"loss": 0.2179,
"step": 1620
},
{
"epoch": 0.7466788822721027,
"grad_norm": 0.416395366191864,
"learning_rate": 1.329812770043207e-05,
"loss": 0.1966,
"step": 1630
},
{
"epoch": 0.7512597343105818,
"grad_norm": 0.4944773316383362,
"learning_rate": 1.3058089294287087e-05,
"loss": 0.2045,
"step": 1640
},
{
"epoch": 0.7558405863490609,
"grad_norm": 0.7220922708511353,
"learning_rate": 1.2818050888142102e-05,
"loss": 0.1979,
"step": 1650
},
{
"epoch": 0.7604214383875401,
"grad_norm": 0.32805320620536804,
"learning_rate": 1.257801248199712e-05,
"loss": 0.1795,
"step": 1660
},
{
"epoch": 0.7650022904260192,
"grad_norm": 0.24584101140499115,
"learning_rate": 1.2337974075852136e-05,
"loss": 0.1847,
"step": 1670
},
{
"epoch": 0.7695831424644984,
"grad_norm": 0.34454578161239624,
"learning_rate": 1.2097935669707154e-05,
"loss": 0.1826,
"step": 1680
},
{
"epoch": 0.7741639945029776,
"grad_norm": 0.33527764678001404,
"learning_rate": 1.1857897263562171e-05,
"loss": 0.2289,
"step": 1690
},
{
"epoch": 0.7787448465414567,
"grad_norm": 0.5374043583869934,
"learning_rate": 1.1617858857417186e-05,
"loss": 0.2076,
"step": 1700
},
{
"epoch": 0.7833256985799358,
"grad_norm": 0.30848637223243713,
"learning_rate": 1.1377820451272205e-05,
"loss": 0.2074,
"step": 1710
},
{
"epoch": 0.7879065506184151,
"grad_norm": 0.31407004594802856,
"learning_rate": 1.1137782045127221e-05,
"loss": 0.2089,
"step": 1720
},
{
"epoch": 0.7924874026568942,
"grad_norm": 0.30398833751678467,
"learning_rate": 1.0897743638982236e-05,
"loss": 0.2211,
"step": 1730
},
{
"epoch": 0.7970682546953733,
"grad_norm": 0.33623895049095154,
"learning_rate": 1.0657705232837255e-05,
"loss": 0.1959,
"step": 1740
},
{
"epoch": 0.8016491067338525,
"grad_norm": 0.29691770672798157,
"learning_rate": 1.0417666826692272e-05,
"loss": 0.2146,
"step": 1750
},
{
"epoch": 0.8062299587723316,
"grad_norm": 0.3234691321849823,
"learning_rate": 1.0177628420547288e-05,
"loss": 0.2009,
"step": 1760
},
{
"epoch": 0.8108108108108109,
"grad_norm": 0.3283917009830475,
"learning_rate": 9.937590014402305e-06,
"loss": 0.194,
"step": 1770
},
{
"epoch": 0.81539166284929,
"grad_norm": 0.3270140588283539,
"learning_rate": 9.697551608257322e-06,
"loss": 0.2035,
"step": 1780
},
{
"epoch": 0.8199725148877691,
"grad_norm": 0.42055293917655945,
"learning_rate": 9.457513202112338e-06,
"loss": 0.1784,
"step": 1790
},
{
"epoch": 0.8245533669262483,
"grad_norm": 0.40599197149276733,
"learning_rate": 9.217474795967355e-06,
"loss": 0.2256,
"step": 1800
},
{
"epoch": 0.8291342189647274,
"grad_norm": 0.3134915828704834,
"learning_rate": 8.977436389822372e-06,
"loss": 0.2128,
"step": 1810
},
{
"epoch": 0.8337150710032066,
"grad_norm": 0.2934601604938507,
"learning_rate": 8.737397983677389e-06,
"loss": 0.2083,
"step": 1820
},
{
"epoch": 0.8382959230416858,
"grad_norm": 0.3657926321029663,
"learning_rate": 8.497359577532405e-06,
"loss": 0.2143,
"step": 1830
},
{
"epoch": 0.8428767750801649,
"grad_norm": 0.34556886553764343,
"learning_rate": 8.257321171387422e-06,
"loss": 0.2155,
"step": 1840
},
{
"epoch": 0.847457627118644,
"grad_norm": 0.6355950832366943,
"learning_rate": 8.017282765242439e-06,
"loss": 0.211,
"step": 1850
},
{
"epoch": 0.8520384791571233,
"grad_norm": 0.2931668758392334,
"learning_rate": 7.777244359097456e-06,
"loss": 0.2027,
"step": 1860
},
{
"epoch": 0.8566193311956024,
"grad_norm": 0.4482835531234741,
"learning_rate": 7.5372059529524725e-06,
"loss": 0.2023,
"step": 1870
},
{
"epoch": 0.8612001832340815,
"grad_norm": 0.366603285074234,
"learning_rate": 7.29716754680749e-06,
"loss": 0.2023,
"step": 1880
},
{
"epoch": 0.8657810352725607,
"grad_norm": 0.3448999226093292,
"learning_rate": 7.057129140662507e-06,
"loss": 0.1831,
"step": 1890
},
{
"epoch": 0.8703618873110398,
"grad_norm": 0.5412635803222656,
"learning_rate": 6.817090734517523e-06,
"loss": 0.1722,
"step": 1900
},
{
"epoch": 0.874942739349519,
"grad_norm": 0.26920849084854126,
"learning_rate": 6.57705232837254e-06,
"loss": 0.1878,
"step": 1910
},
{
"epoch": 0.8795235913879982,
"grad_norm": 0.26313847303390503,
"learning_rate": 6.337013922227557e-06,
"loss": 0.213,
"step": 1920
},
{
"epoch": 0.8841044434264773,
"grad_norm": 0.4767347276210785,
"learning_rate": 6.096975516082574e-06,
"loss": 0.1922,
"step": 1930
},
{
"epoch": 0.8886852954649564,
"grad_norm": 0.28474193811416626,
"learning_rate": 5.8569371099375905e-06,
"loss": 0.2121,
"step": 1940
},
{
"epoch": 0.8932661475034357,
"grad_norm": 0.4611247777938843,
"learning_rate": 5.616898703792607e-06,
"loss": 0.201,
"step": 1950
},
{
"epoch": 0.8978469995419148,
"grad_norm": 0.38272824883461,
"learning_rate": 5.376860297647624e-06,
"loss": 0.1789,
"step": 1960
},
{
"epoch": 0.902427851580394,
"grad_norm": 0.32007524371147156,
"learning_rate": 5.136821891502641e-06,
"loss": 0.1638,
"step": 1970
},
{
"epoch": 0.9070087036188731,
"grad_norm": 0.24914689362049103,
"learning_rate": 4.8967834853576575e-06,
"loss": 0.1852,
"step": 1980
},
{
"epoch": 0.9115895556573522,
"grad_norm": 0.28896644711494446,
"learning_rate": 4.656745079212674e-06,
"loss": 0.1999,
"step": 1990
},
{
"epoch": 0.9161704076958315,
"grad_norm": 0.24370577931404114,
"learning_rate": 4.416706673067691e-06,
"loss": 0.2219,
"step": 2000
},
{
"epoch": 0.9161704076958315,
"eval_loss": 0.2030767798423767,
"eval_runtime": 30.3507,
"eval_samples_per_second": 15.98,
"eval_steps_per_second": 8.006,
"step": 2000
},
{
"epoch": 0.9207512597343106,
"grad_norm": 0.2931880056858063,
"learning_rate": 4.176668266922708e-06,
"loss": 0.1897,
"step": 2010
},
{
"epoch": 0.9253321117727897,
"grad_norm": 0.3072756826877594,
"learning_rate": 3.9366298607777245e-06,
"loss": 0.228,
"step": 2020
},
{
"epoch": 0.9299129638112689,
"grad_norm": 0.23250657320022583,
"learning_rate": 3.6965914546327413e-06,
"loss": 0.2486,
"step": 2030
},
{
"epoch": 0.934493815849748,
"grad_norm": 0.23434172570705414,
"learning_rate": 3.456553048487758e-06,
"loss": 0.17,
"step": 2040
},
{
"epoch": 0.9390746678882272,
"grad_norm": 0.3257196545600891,
"learning_rate": 3.216514642342775e-06,
"loss": 0.1975,
"step": 2050
},
{
"epoch": 0.9436555199267064,
"grad_norm": 0.27868446707725525,
"learning_rate": 2.976476236197792e-06,
"loss": 0.2306,
"step": 2060
},
{
"epoch": 0.9482363719651855,
"grad_norm": 0.24388882517814636,
"learning_rate": 2.7364378300528087e-06,
"loss": 0.1957,
"step": 2070
},
{
"epoch": 0.9528172240036646,
"grad_norm": 0.382403165102005,
"learning_rate": 2.4963994239078254e-06,
"loss": 0.2142,
"step": 2080
},
{
"epoch": 0.9573980760421439,
"grad_norm": 0.236972838640213,
"learning_rate": 2.256361017762842e-06,
"loss": 0.2143,
"step": 2090
},
{
"epoch": 0.961978928080623,
"grad_norm": 0.42855915427207947,
"learning_rate": 2.016322611617859e-06,
"loss": 0.2206,
"step": 2100
},
{
"epoch": 0.9665597801191022,
"grad_norm": 0.37074267864227295,
"learning_rate": 1.7762842054728759e-06,
"loss": 0.197,
"step": 2110
},
{
"epoch": 0.9711406321575813,
"grad_norm": 0.3856116235256195,
"learning_rate": 1.5362457993278924e-06,
"loss": 0.2155,
"step": 2120
},
{
"epoch": 0.9757214841960604,
"grad_norm": 0.24265874922275543,
"learning_rate": 1.2962073931829094e-06,
"loss": 0.2078,
"step": 2130
},
{
"epoch": 0.9803023362345397,
"grad_norm": 0.4275590479373932,
"learning_rate": 1.056168987037926e-06,
"loss": 0.2118,
"step": 2140
},
{
"epoch": 0.9848831882730188,
"grad_norm": 0.25369495153427124,
"learning_rate": 8.16130580892943e-07,
"loss": 0.1737,
"step": 2150
},
{
"epoch": 0.9894640403114979,
"grad_norm": 0.27173060178756714,
"learning_rate": 5.760921747479597e-07,
"loss": 0.2097,
"step": 2160
},
{
"epoch": 0.9940448923499771,
"grad_norm": 0.3790707290172577,
"learning_rate": 3.360537686029765e-07,
"loss": 0.2016,
"step": 2170
},
{
"epoch": 0.9986257443884563,
"grad_norm": 0.33324310183525085,
"learning_rate": 9.601536245799327e-08,
"loss": 0.1995,
"step": 2180
}
],
"logging_steps": 10,
"max_steps": 2183,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.535324676161536e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}