for i in range(dim):
for j in range(max_len):
k = float(j) / (10000.0 ** (2.0*i / float(dim)))
pe[j, 0, i] = math.cos(k) if i % 2 == 1 else math.sin(k)
return pe
def load_pretrained_vectors(self, emb_file):
if emb_file is not None:
# --- After Change (vectorized rewrite of the positional-encoding loop above) ---
div_term = 1 / torch.pow(10000, torch.arange(0, dim * 2, 2) / dim)
pe = pe * div_term.expand_as(pe)
pe[:, 0::2] = torch.sin(pe[:, 0::2])
pe[:, 1::2] = torch.cos(pe[:, 1::2])
return pe.unsqueeze(1)
def load_pretrained_vectors(self, emb_file):
if emb_file is not None: