0ac2b33e8c63304a50db7d2b484368299706b58b,slm_lab/agent/net/recurrent.py,RecurrentNet,training_step,#RecurrentNet#Any#Any#Any#Any#Any#,164
Before Change
assert_trained = net_util.gen_assert_trained(self.rnn_model)
loss.backward(retain_graph=retain_graph)
if self.clip_grad:
    logger.debug(f"Clipping gradient: {self.clip_grad_val}")
    torch.nn.utils.clip_grad_norm_(self.parameters(), self.clip_grad_val)
if global_net is None:
    self.optim.step()
else:  # distributed training with global net
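For context, the pre-change branch above clips gradients and then either steps the local optimizer or defers to a shared global net. Below is a minimal self-contained sketch of that flow, assuming a plain nn.Module with an external optimizer; the `global_net`/`global_optim` push-and-sync mechanics follow the common Hogwild-style pattern and are an assumption, not the repo's exact code.

import torch.nn as nn

def training_step(net, optim, loss, clip_grad_val=None,
                  global_net=None, global_optim=None, retain_graph=False):
    optim.zero_grad()
    loss.backward(retain_graph=retain_graph)
    if clip_grad_val is not None:
        # clip the joint norm of all parameter gradients in place
        nn.utils.clip_grad_norm_(net.parameters(), clip_grad_val)
    if global_net is None:
        optim.step()  # ordinary local update
    else:
        # Hogwild-style assumption: push local gradients onto the shared
        # global net, step its optimizer, then sync weights back locally
        for lp, gp in zip(net.parameters(), global_net.parameters()):
            gp.grad = lp.grad
        global_optim.step()
        net.load_state_dict(global_net.state_dict())
    return loss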
After Change
def training_step(self, x=None, y=None, loss=None, retain_graph=False, lr_t=None):
    """Takes a single training step: one forward and one backwards pass"""
    if hasattr(self, "model_tails") and x is not None:
        raise ValueError("Loss computation from x,y not supported for multitails")
    self.lr_scheduler.step(epoch=lr_t)
    self.train()
    self.optim.zero_grad()
    if loss is None:
In pattern: SUPERPATTERN
Frequency: 3
Non-data size: 6
Instances
Project Name: kengz/SLM-Lab
Commit Name: 0ac2b33e8c63304a50db7d2b484368299706b58b
Time: 2018-11-14
Author: kengzwl@gmail.com
File Name: slm_lab/agent/net/recurrent.py
Class Name: RecurrentNet
Method Name: training_step
Project Name: automl/SMAC3
Commit Name: 8dcd55aab0ed3559b015a97790b1ff71812c1905
Time: 2017-03-21
Author: joshua.marben@neptun.uni-freiburg.de
File Name: smac/smbo/smbo.py
Class Name: SMBO
Method Name: run
Project Name: kengz/SLM-Lab
Commit Name: 0ac2b33e8c63304a50db7d2b484368299706b58b
Time: 2018-11-14
Author: kengzwl@gmail.com
File Name: slm_lab/agent/net/mlp.py
Class Name: MLPNet
Method Name: training_step