if max_phrase_len > min(len(x) for x in inputs):
idx = torch.tensor(self.vocab_map[self.pad], requires_grad=False, device=device)
pad_vector = self.embedding(idx)
input_tensor = []
for phrase in inputs:
# build a list of the vectors we want for this sentence / phrase
# After Change  (NOTE(review): extraction artifact — this marker separates two versions of the padding logic; reconcile before use)
indices.append(PAD_ID)
indices = torch.tensor(indices, requires_grad=False, device=device)
input_vectors = self.embedding(indices)
for unknown in unknowns:
input_vectors[unknown, :] = self.unk
# we will now have an N x emb_size tensor