06c9cefed73cfd43f9453c616e1b9d3ef63f58cf,fairseq/models/speech_to_text/s2t_transformer.py,S2TTransformerEncoder,forward,#S2TTransformerEncoder#Any#Any#,297
Before Change
        self.layer_norm = None

    def forward(self, src_tokens, src_lengths):
        x, input_lengths = self.subsample(src_tokens, src_lengths)
        x = self.embed_scale * x

        encoder_padding_mask = lengths_to_padding_mask(input_lengths)
        positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
        x += positions
        x = self.dropout_module(x)

        for layer in self.transformer_layers:
            x = layer(x, encoder_padding_mask)

        if self.layer_norm is not None:
            x = self.layer_norm(x)

        return {
            "encoder_out": [x],  # T x B x C
            "encoder_padding_mask": [encoder_padding_mask]
            if encoder_padding_mask.any()
            else [],  # B x T
            "encoder_embedding": [],  # B x T x C
            "encoder_states": [],  # List[T x B x C]
            "src_tokens": [],
            "src_lengths": [],
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        new_encoder_out = (
            [] if len(encoder_out["encoder_out"]) == 0
After Change
    def forward(self, src_tokens, src_lengths):
        if self.num_updates < self.encoder_freezing_updates:
            with torch.no_grad():
                x = self._forward(src_tokens, src_lengths)
        else:
            x = self._forward(src_tokens, src_lengths)
        return x

    def reorder_encoder_out(self, encoder_out, new_order):
        new_encoder_out = (
            [] if len(encoder_out["encoder_out"]) == 0
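
The change factors the original forward body into _forward and wraps the call in torch.no_grad() while the current update count is below encoder_freezing_updates, so the speech encoder receives no gradients during the first phase of training. Below is a minimal, self-contained sketch of that pattern, assuming the module's update count is advanced externally through a set_num_updates hook as fairseq models do; FreezableEncoder, proj, and dim are illustrative names not taken from the record, while encoder_freezing_updates, num_updates, and _forward come from the diff above.

    import torch
    import torch.nn as nn

    class FreezableEncoder(nn.Module):  # hypothetical stand-in for S2TTransformerEncoder
        def __init__(self, dim=16, encoder_freezing_updates=1000):
            super().__init__()
            self.proj = nn.Linear(dim, dim)
            self.encoder_freezing_updates = encoder_freezing_updates
            self.num_updates = 0  # advanced externally by the training loop

        def set_num_updates(self, num_updates):
            # mirrors the update-count hook that fairseq models expose
            self.num_updates = num_updates

        def _forward(self, x):
            # placeholder for the real encoder computation (subsample, positions, layers)
            return self.proj(x)

        def forward(self, x):
            # run without gradient tracking until the freezing threshold is reached
            if self.num_updates < self.encoder_freezing_updates:
                with torch.no_grad():
                    return self._forward(x)
            return self._forward(x)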
In pattern: SUPERPATTERN
Frequency: 3
Non-data size: 5
Instances
Project Name: pytorch/fairseq
Commit Name: 06c9cefed73cfd43f9453c616e1b9d3ef63f58cf
Time: 2021-03-25
Author: changhan@fb.com
File Name: fairseq/models/speech_to_text/s2t_transformer.py
Class Name: S2TTransformerEncoder
Method Name: forward
Project Name: elbayadm/attn2d
Commit Name: 6e4b7e22eeb79f7e1c39d862f10ec3e61e51c979
Time: 2017-11-08
Author: myleott@fb.com
File Name: fairseq/models/fconv.py
Class Name: GradMultiply
Method Name: forward
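
Building on the sketch above, an illustrative driver loop (not fairseq's actual trainer) advances the update counter each step; the small linear head stands in for the decoder and keeps the loss differentiable even while the encoder output is produced under no_grad, so only the encoder stays frozen.

    model = FreezableEncoder(dim=16, encoder_freezing_updates=2)
    head = nn.Linear(16, 1)  # stand-in for the decoder
    opt = torch.optim.SGD(list(model.parameters()) + list(head.parameters()), lr=0.1)

    for step in range(4):
        model.set_num_updates(step)       # the trainer advances the counter each update
        enc = model(torch.randn(8, 16))   # no_grad for steps 0-1, trainable afterwards
        loss = head(enc).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        frozen = model.proj.weight.grad is None
        print(step, "encoder frozen" if frozen else "encoder training")
        opt.step()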