Commit Name: 0ac2b33e8c63304a50db7d2b484368299706b58b
File Name: slm_lab/agent/net/mlp.py
Class Name: MLPNet
Method Name: training_step

Before Change


        if self.clip_grad:
            logger.debug(f"Clipping gradient: {self.clip_grad_val}")
            torch.nn.utils.clip_grad_norm_(self.parameters(), self.clip_grad_val)
        if global_net is None:
            self.optim.step()
        else:  # distributed training with global net
            net_util.push_global_grad(self, global_net)
            self.optim.step()
            net_util.pull_global_param(self, global_net)
        self.store_grad_norms()
        if net_util.to_assert_trained():
            model = getattr(self, "model", None) or getattr(self, "model_body")
            assert_trained(model, loss)
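
The else branch is Hogwild-style synchronization with a shared global network: the worker pushes its gradients onto the global net, steps the optimizer, and pulls the updated weights back down. Below is a minimal sketch of what the two net_util helpers plausibly do, assuming the global net's parameters live in shared memory and the worker's optimizer is constructed over them, so that optim.step() applies the pushed gradients to the shared weights; these bodies are assumptions, not SLM-Lab's verbatim code.

import torch.nn as nn

def push_global_grad(local_net: nn.Module, global_net: nn.Module) -> None:
    # Point each shared global parameter's .grad at the worker's gradient,
    # so an optimizer stepping over the global params consumes it
    for local_p, global_p in zip(local_net.parameters(), global_net.parameters()):
        global_p.grad = local_p.grad

def pull_global_param(local_net: nn.Module, global_net: nn.Module) -> None:
    # Sync the worker back up with the freshly stepped global weights
    local_net.load_state_dict(global_net.state_dict())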

After Change


        Takes a single training step: one forward and one backward pass.
        For most RL usage, we have custom, often complicated, loss functions; compute the loss value, put it in a PyTorch tensor, and pass it in as loss.
        """
        if hasattr(self, "model_tails") and x is not None:
            raise ValueError("Loss computation from x,y not supported for multitails")
        self.lr_scheduler.step(epoch=lr_t)
        self.train()
        self.optim.zero_grad()
        if loss is None:
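
A self-contained sketch of the calling pattern the docstring describes, with a plain nn.Linear standing in for MLPNet, a hand-rolled zero_grad/backward/step sequence in place of training_step's body, and random stand-in data; this illustrates the custom-loss path, not SLM-Lab's actual code.

import torch
import torch.nn as nn

net = nn.Linear(4, 2)
optim = torch.optim.Adam(net.parameters(), lr=1e-3)

states = torch.randn(8, 4)           # hypothetical batch of states
actions = torch.randint(0, 2, (8,))  # hypothetical sampled actions
advantages = torch.randn(8)          # hypothetical advantage estimates

# Build the custom RL loss as a scalar torch tensor...
logits = net(states)
log_probs = torch.distributions.Categorical(logits=logits).log_prob(actions)
loss = -(log_probs * advantages).mean()

# ...then run the same backward/step sequence as the method above
optim.zero_grad()
loss.backward()
optim.step()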
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 6

Instances


Project Name: kengz/SLM-Lab
Commit Name: 0ac2b33e8c63304a50db7d2b484368299706b58b
Time: 2018-11-14
Author: kengzwl@gmail.com
File Name: slm_lab/agent/net/mlp.py
Class Name: MLPNet
Method Name: training_step


Project Name: kengz/SLM-Lab
Commit Name: 0ac2b33e8c63304a50db7d2b484368299706b58b
Time: 2018-11-14
Author: kengzwl@gmail.com
File Name: slm_lab/agent/net/recurrent.py
Class Name: RecurrentNet
Method Name: training_step


Project Name: reinforceio/tensorforce
Commit Name: ee950b503eeed5aca3747a4bcf2a40f624b743a0
Time: 2019-01-21
Author: alexkuhnle@t-online.de
File Name: tensorforce/core/optimizers/multi_step.py
Class Name: MultiStep
Method Name: tf_step