# Run the decoder over the gold target shifted right by one step
# (teacher forcing), conditioned on the encoder context.
# NOTE(review): assumes `context` and `decStates` come from a prior
# encoder pass over `batch.src` — confirm against the enclosing method.
decOut, decStates, attn = self.model.decoder(
    batch.tgt[:-1], batch.src, context, decStates)
# Sanity check that decoder output aligns step-for-step with the gold
# tokens it is scored against (target shifted left by one).
aeq(decOut.size(), batch.tgt[1:].data.size())
for dec, tgt in zip(decOut, batch.tgt[1:].data):
    # Log prob of each word.
    out = self.model.generator.forward(dec)
# --- After change ---
# (2) if a target is specified, compute the "goldScore"
# (i.e. log likelihood) of the target under the model
goldScores = torch.FloatTensor(batch.batch_size).fill_(0)
# Teacher-forced decoder pass over the gold target (shifted right by one).
# NOTE(review): this variant feeds `src` rather than `batch.src` —
# presumably a locally pre-processed source tensor; verify in the caller.
decOut, decStates, attn = self.model.decoder(
    batch.tgt[:-1], src, context, decStates)
# print(decOut.size(), batch.tgt[1:].data.size())