{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4814814814814814,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.805763495862484,
      "epoch": 0.07407407407407407,
      "grad_norm": 0.6806204319000244,
      "learning_rate": 8.032786885245902e-05,
      "loss": 1.5484,
      "mean_token_accuracy": 0.6665007689595223,
      "num_tokens": 408149.0,
      "step": 50
    },
    {
      "entropy": 0.41144788280129435,
      "epoch": 0.14814814814814814,
      "grad_norm": 0.38455039262771606,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.3321,
      "mean_token_accuracy": 0.9129975068569184,
      "num_tokens": 816230.0,
      "step": 100
    },
    {
      "entropy": 0.16323913749307395,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.29704713821411133,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.1447,
      "mean_token_accuracy": 0.9618216013908386,
      "num_tokens": 1224471.0,
      "step": 150
    },
    {
      "entropy": 0.1175146003998816,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.35487300157546997,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.1071,
      "mean_token_accuracy": 0.9733556269109249,
      "num_tokens": 1632497.0,
      "step": 200
    },
    {
      "entropy": 0.1009879010822624,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.17419321835041046,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.0925,
      "mean_token_accuracy": 0.9769376286864281,
      "num_tokens": 2041392.0,
      "step": 250
    },
    {
      "entropy": 0.09154447751119733,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.20543242990970612,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.0836,
      "mean_token_accuracy": 0.9787026332318782,
      "num_tokens": 2450311.0,
      "step": 300
    },
    {
      "entropy": 0.08632300381548702,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.17172595858573914,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.0812,
      "mean_token_accuracy": 0.9789653661847114,
      "num_tokens": 2858744.0,
      "step": 350
    },
    {
      "entropy": 0.08412999271415174,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.1447569578886032,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.0805,
      "mean_token_accuracy": 0.9786932443082332,
      "num_tokens": 3265542.0,
      "step": 400
    },
    {
      "entropy": 0.08065679710358381,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.19630704820156097,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.0773,
      "mean_token_accuracy": 0.9797722736001014,
      "num_tokens": 3674162.0,
      "step": 450
    },
    {
      "entropy": 0.07874332463368773,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.08524929732084274,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.0762,
      "mean_token_accuracy": 0.9801681047677994,
      "num_tokens": 4082734.0,
      "step": 500
    },
    {
      "entropy": 0.07778633100911975,
      "epoch": 0.8148148148148148,
      "grad_norm": 0.10848797112703323,
      "learning_rate": 8.552459335135381e-05,
      "loss": 0.0753,
      "mean_token_accuracy": 0.9801766823232174,
      "num_tokens": 4491115.0,
      "step": 550
    },
    {
      "entropy": 0.07791180345229805,
      "epoch": 0.8888888888888888,
      "grad_norm": 0.12547598779201508,
      "learning_rate": 8.259993435156559e-05,
      "loss": 0.0752,
      "mean_token_accuracy": 0.9802092918753624,
      "num_tokens": 4899794.0,
      "step": 600
    },
    {
      "entropy": 0.07790746555663645,
      "epoch": 0.9629629629629629,
      "grad_norm": 0.0992884486913681,
      "learning_rate": 7.946685410208296e-05,
      "loss": 0.0759,
      "mean_token_accuracy": 0.979848040342331,
      "num_tokens": 5307342.0,
      "step": 650
    },
    {
      "entropy": 0.07524958597496152,
      "epoch": 1.037037037037037,
      "grad_norm": 0.086652472615242,
      "learning_rate": 7.614538333345735e-05,
      "loss": 0.0731,
      "mean_token_accuracy": 0.9808213406801224,
      "num_tokens": 5716156.0,
      "step": 700
    },
    {
      "entropy": 0.07525249728001654,
      "epoch": 1.1111111111111112,
      "grad_norm": 0.13188883662223816,
      "learning_rate": 7.265675721386285e-05,
      "loss": 0.0728,
      "mean_token_accuracy": 0.9810096868872642,
      "num_tokens": 6123905.0,
      "step": 750
    },
    {
      "entropy": 0.07601501471363008,
      "epoch": 1.1851851851851851,
      "grad_norm": 0.0819055363535881,
      "learning_rate": 6.902327958623736e-05,
      "loss": 0.0736,
      "mean_token_accuracy": 0.9805573572218418,
      "num_tokens": 6532143.0,
      "step": 800
    },
    {
      "entropy": 0.07429057988338172,
      "epoch": 1.2592592592592593,
      "grad_norm": 0.09344803541898727,
      "learning_rate": 6.526818037306228e-05,
      "loss": 0.0727,
      "mean_token_accuracy": 0.9813062380254268,
      "num_tokens": 6940424.0,
      "step": 850
    },
    {
      "entropy": 0.07440330957062542,
      "epoch": 1.3333333333333333,
      "grad_norm": 0.11321987956762314,
      "learning_rate": 6.14154670604355e-05,
      "loss": 0.0725,
      "mean_token_accuracy": 0.9811275874078274,
      "num_tokens": 7348719.0,
      "step": 900
    },
    {
      "entropy": 0.07393304943107068,
      "epoch": 1.4074074074074074,
      "grad_norm": 0.087185338139534,
      "learning_rate": 5.7489771210944564e-05,
      "loss": 0.0726,
      "mean_token_accuracy": 0.9807305666804313,
      "num_tokens": 7756710.0,
      "step": 950
    },
    {
      "entropy": 0.07433672657236456,
      "epoch": 1.4814814814814814,
      "grad_norm": 0.10077723860740662,
      "learning_rate": 5.351619098663021e-05,
      "loss": 0.0726,
      "mean_token_accuracy": 0.9807634821534157,
      "num_tokens": 8165027.0,
      "step": 1000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.4107496965918106e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}