{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7407407407407407,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.805763495862484,
      "epoch": 0.07407407407407407,
      "grad_norm": 0.6806204319000244,
      "learning_rate": 8.032786885245902e-05,
      "loss": 1.5484,
      "mean_token_accuracy": 0.6665007689595223,
      "num_tokens": 408149.0,
      "step": 50
    },
    {
      "entropy": 0.41144788280129435,
      "epoch": 0.14814814814814814,
      "grad_norm": 0.38455039262771606,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.3321,
      "mean_token_accuracy": 0.9129975068569184,
      "num_tokens": 816230.0,
      "step": 100
    },
    {
      "entropy": 0.16323913749307395,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.29704713821411133,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.1447,
      "mean_token_accuracy": 0.9618216013908386,
      "num_tokens": 1224471.0,
      "step": 150
    },
    {
      "entropy": 0.1175146003998816,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.35487300157546997,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.1071,
      "mean_token_accuracy": 0.9733556269109249,
      "num_tokens": 1632497.0,
      "step": 200
    },
    {
      "entropy": 0.1009879010822624,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.17419321835041046,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.0925,
      "mean_token_accuracy": 0.9769376286864281,
      "num_tokens": 2041392.0,
      "step": 250
    },
    {
      "entropy": 0.09154447751119733,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.20543242990970612,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.0836,
      "mean_token_accuracy": 0.9787026332318782,
      "num_tokens": 2450311.0,
      "step": 300
    },
    {
      "entropy": 0.08632300381548702,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.17172595858573914,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.0812,
      "mean_token_accuracy": 0.9789653661847114,
      "num_tokens": 2858744.0,
      "step": 350
    },
    {
      "entropy": 0.08412999271415174,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.1447569578886032,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.0805,
      "mean_token_accuracy": 0.9786932443082332,
      "num_tokens": 3265542.0,
      "step": 400
    },
    {
      "entropy": 0.08065679710358381,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.19630704820156097,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.0773,
      "mean_token_accuracy": 0.9797722736001014,
      "num_tokens": 3674162.0,
      "step": 450
    },
    {
      "entropy": 0.07874332463368773,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.08524929732084274,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.0762,
      "mean_token_accuracy": 0.9801681047677994,
      "num_tokens": 4082734.0,
      "step": 500
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7054669570308915e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}