84e32315af7b4a479450a1e65b9e277226d75576,fairseq/optim/fp16_optimizer.py,_FP16OptimizerMixin,step,#_FP16OptimizerMixin#Any#,183
Before Change
    # copy FP32 params back into FP16 model
    if self.has_flat_params:
        offset = 0
        for p in self.fp16_params:
            if not p.requires_grad:
                continue
            numel = p.data.numel()
            p.data.copy_(self.fp32_params.data[offset:offset+numel].view_as(p.data))
            offset += numel
    else:
        for p, p32 in zip(self.fp16_params, self.fp32_params):
            if not p.requires_grad:
                continue
            p.data.copy_(p32.data)
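Illustrative sketch of the has_flat_params branch above (the standalone names fp16_params and fp32_flat are assumptions for this example, not fairseq internals): a single flat FP32 master buffer is sliced back into the individual FP16 parameter tensors by offset.
import torch

# Two FP16 model parameters and one flat FP32 master copy of both.
fp16_params = [torch.zeros(3, 2, dtype=torch.half), torch.zeros(5, dtype=torch.half)]
fp32_flat = torch.cat([p.detach().float().view(-1) for p in fp16_params])

# After the FP32 optimizer has updated fp32_flat, copy each slice back into
# the matching FP16 tensor, mirroring the offset loop in the snippet above.
offset = 0
for p in fp16_params:
    numel = p.data.numel()
    p.data.copy_(fp32_flat[offset:offset + numel].view_as(p.data))
    offset += numel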
After Change
    self._sync_fp16_grads_to_fp32()
    self.fp32_optimizer.step(closure)
    if self.scaler is not None:
        self.scaler.update()
    self._sync_fp32_params_to_fp16()

def zero_grad(self):
    """Clears the gradients of all optimized parameters."""
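A minimal self-contained sketch of the refactored step() flow (the class ToyFP16Step, its helper names, and the plain .update() scaler hook are illustrative assumptions, not fairseq's API): sync FP16 grads into the FP32 master copies, step the wrapped FP32 optimizer, let the dynamic loss scaler adjust its scale, then copy the updated FP32 params back into the FP16 model.
class ToyFP16Step:
    def __init__(self, fp16_params, fp32_params, fp32_optimizer, scaler=None):
        self.fp16_params = fp16_params        # model parameters, torch.half
        self.fp32_params = fp32_params        # FP32 master copies, same shapes
        self.fp32_optimizer = fp32_optimizer  # e.g. torch.optim.SGD(fp32_params, lr=0.1)
        self.scaler = scaler                  # any object exposing .update(), or None

    def _sync_fp16_grads_to_fp32(self):
        # Real mixed-precision code would also divide by the current loss scale here.
        for p, p32 in zip(self.fp16_params, self.fp32_params):
            if p.grad is not None:
                p32.grad = p.grad.detach().float()

    def _sync_fp32_params_to_fp16(self):
        for p, p32 in zip(self.fp16_params, self.fp32_params):
            p.data.copy_(p32.data)

    def step(self, closure=None):
        self._sync_fp16_grads_to_fp32()
        self.fp32_optimizer.step(closure)
        if self.scaler is not None:
            self.scaler.update()  # adjust the dynamic loss scale after the step
        self._sync_fp32_params_to_fp16()
To use such a wrapper, each FP16 parameter would be paired with a detached .float() master copy, and the inner optimizer would be constructed over those master copies rather than the FP16 parameters themselves.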
In pattern: SUPERPATTERN
Frequency: 3
Non-data size: 16
Instances
Project Name: pytorch/fairseq
Commit Name: 84e32315af7b4a479450a1e65b9e277226d75576
Time: 2020-07-23
Author: andersonic@fb.com
File Name: fairseq/optim/fp16_optimizer.py
Class Name: _FP16OptimizerMixin
Method Name: step
Project Name: pytorch/fairseq
Commit Name: 03a57decde62c76783ef7e2288bd61bc87f6e266
Time: 2018-12-24
Author: myleott@fb.com
File Name: fairseq/optim/fp16_optimizer.py
Class Name: FP16Optimizer
Method Name: step