ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.28421467542648315,
"min": 0.28421467542648315,
"max": 1.4405357837677002,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 8503.703125,
"min": 8503.703125,
"max": 43700.09375,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499903.0,
"min": 29952.0,
"max": 1499903.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499903.0,
"min": 29952.0,
"max": 1499903.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6422605514526367,
"min": -0.12505429983139038,
"max": 0.6437637209892273,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 178.54843139648438,
"min": -30.013032913208008,
"max": 180.89759826660156,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00918685831129551,
"min": -0.009508224204182625,
"max": 0.5712061524391174,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.5539464950561523,
"min": -2.6147615909576416,
"max": 135.3758544921875,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0669284677683858,
"min": 0.06261540081544727,
"max": 0.07318900337309796,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9369985487574013,
"min": 0.5105135901503255,
"max": 1.0654427599057432,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015309359984322717,
"min": 2.626431715347892e-05,
"max": 0.017680035694779474,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21433103978051804,
"min": 0.00036770044014870486,
"max": 0.24752049972691262,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00015146664236827855,
"min": 0.00015146664236827855,
"max": 0.00029838354339596195,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0021205329931558996,
"min": 0.0020886848037717336,
"max": 0.003801056332981266,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.15048886428571429,
"min": 0.15048886428571429,
"max": 0.19946118095238097,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.1068441,
"min": 1.3962282666666668,
"max": 2.667018733333334,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 0.005053837542142857,
"min": 0.005053837542142857,
"max": 0.009946171977142856,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.07075372559,
"min": 0.06962320384,
"max": 0.12671517146,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008841591887176037,
"min": 0.008407366462051868,
"max": 0.5084035992622375,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12378229200839996,
"min": 0.11770313233137131,
"max": 3.5588250160217285,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 291.1188118811881,
"min": 289.969696969697,
"max": 999.0,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29403.0,
"min": 15984.0,
"max": 32467.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6098455259262925,
"min": -1.0000000521540642,
"max": 1.6696141258333668,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 162.59439811855555,
"min": -31.998801663517952,
"max": 167.99539843201637,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6098455259262925,
"min": -1.0000000521540642,
"max": 1.6696141258333668,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 162.59439811855555,
"min": -31.998801663517952,
"max": 167.99539843201637,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.026676239869972265,
"min": 0.026482453851400866,
"max": 10.544406680390239,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.694300226867199,
"min": 2.6217629312886856,
"max": 168.71050688624382,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675262501",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675267025"
},
"total": 4523.411260270001,
"count": 1,
"self": 0.6396289430012985,
"children": {
"run_training.setup": {
"total": 0.18999339700008022,
"count": 1,
"self": 0.18999339700008022
},
"TrainerController.start_learning": {
"total": 4522.5816379299995,
"count": 1,
"self": 3.299644830998659,
"children": {
"TrainerController._reset_env": {
"total": 6.748880285999803,
"count": 1,
"self": 6.748880285999803
},
"TrainerController.advance": {
"total": 4512.358936176001,
"count": 95942,
"self": 3.0160458228401694,
"children": {
"env_step": {
"total": 2856.0333407072317,
"count": 95942,
"self": 2654.137594056332,
"children": {
"SubprocessEnvManager._take_step": {
"total": 199.7660629909958,
"count": 95942,
"self": 9.029875897853572,
"children": {
"TorchPolicy.evaluate": {
"total": 190.73618709314223,
"count": 94165,
"self": 42.13363956603712,
"children": {
"TorchPolicy.sample_actions": {
"total": 148.60254752710512,
"count": 94165,
"self": 148.60254752710512
}
}
}
}
},
"workers": {
"total": 2.129683659903776,
"count": 95942,
"self": 0.0,
"children": {
"worker_root": {
"total": 4513.928799117089,
"count": 95942,
"is_parallel": true,
"self": 2090.086618842244,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003678712999999334,
"count": 1,
"is_parallel": true,
"self": 0.001608643000054144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00207006999994519,
"count": 8,
"is_parallel": true,
"self": 0.00207006999994519
}
}
},
"UnityEnvironment.step": {
"total": 0.08292716899995867,
"count": 1,
"is_parallel": true,
"self": 0.0006813109998802247,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006417800000235729,
"count": 1,
"is_parallel": true,
"self": 0.0006417800000235729
},
"communicator.exchange": {
"total": 0.07926404499994533,
"count": 1,
"is_parallel": true,
"self": 0.07926404499994533
},
"steps_from_proto": {
"total": 0.0023400330001095426,
"count": 1,
"is_parallel": true,
"self": 0.0005988769999021315,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017411560002074111,
"count": 8,
"is_parallel": true,
"self": 0.0017411560002074111
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2423.8421802748444,
"count": 95941,
"is_parallel": true,
"self": 59.7987793307002,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 39.34728622500961,
"count": 95941,
"is_parallel": true,
"self": 39.34728622500961
},
"communicator.exchange": {
"total": 2113.0560614410897,
"count": 95941,
"is_parallel": true,
"self": 2113.0560614410897
},
"steps_from_proto": {
"total": 211.6400532780449,
"count": 95941,
"is_parallel": true,
"self": 54.18102753587914,
"children": {
"_process_rank_one_or_two_observation": {
"total": 157.45902574216575,
"count": 767528,
"is_parallel": true,
"self": 157.45902574216575
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1653.3095496459293,
"count": 95942,
"self": 6.141486998865503,
"children": {
"process_trajectory": {
"total": 294.5213748260728,
"count": 95942,
"self": 294.0877338470734,
"children": {
"RLTrainer._checkpoint": {
"total": 0.43364097899939225,
"count": 3,
"self": 0.43364097899939225
}
}
},
"_update_policy": {
"total": 1352.646687820991,
"count": 680,
"self": 363.6795565189875,
"children": {
"TorchPPOOptimizer.update": {
"total": 988.9671313020035,
"count": 34427,
"self": 988.9671313020035
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.97000008483883e-06,
"count": 1,
"self": 1.97000008483883e-06
},
"TrainerController._save_models": {
"total": 0.17417466699953366,
"count": 1,
"self": 0.005935031999797502,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16823963499973615,
"count": 1,
"self": 0.16823963499973615
}
}
}
}
}
}
}
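
The tree above is the timer/gauge dump that mlagents-learn writes into the run's results directory: "gauges" holds per-summary-period statistics, while the nested "total"/"count"/"self"/"children" blocks account for wall-clock seconds per code path. A minimal sketch of how it can be inspected, assuming the file is saved locally as run_logs/timers.json (the path and the walk helper are illustrative, not ML-Agents API):

```python
import json

# Load the timer dump (path is an assumption; point it at this run's file).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

def walk(node, name="root", depth=0, parent_total=None):
    """Print each timer block's wall time, call count, and share of its parent."""
    total = node.get("total", 0.0)
    share = f" ({100 * total / parent_total:.1f}% of parent)" if parent_total else ""
    print(f"{'  ' * depth}{name}: {total:.1f}s / {node.get('count', 0)} call(s){share}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, total or None)

walk(timers)

# Gauges hold per-summary statistics; e.g. the final mean episodic reward
# and its observed range over the 50 logged summaries in this run.
reward = timers["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
print(f"mean reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f}, n={reward['count']})")
```

One caveat when reading the shares: blocks flagged "is_parallel" (the worker subtree) run concurrently with the main thread, so their totals overlap the parent's wall time rather than partitioning it. In this run, worker_root accounts for ~4514 s of the ~4523 s total even though env_step on the main thread shows only ~2856 s.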