learning_rate = self.optimizer.param_groups[0].get("lr", -1.)
if torch.is_tensor(learning_rate):
learning_rate = learning_rate[0]
return learning_rate
def cuda(self, devices=None):
Train on the GPU.
After Change
if there's just one.
learning_rate = [param_group.get("lr", -1.)
for param_group in self.optimizer.param_groups]
learning_rate = [_learning_rate[0] if thu.is_tensor(_learning_rate) else _learning_rate
for _learning_rate in learning_rate]
return pyu.from_iterable(learning_rate)
def cuda(self, devices=None):
Train on the GPU.