for sen in sentences:
    # Tokenize, drop empty strings, and truncate to at most text_size tokens.
    tokens = [el for el in sen.split() if el]
    if len(tokens) > self.opt["text_size"]:
        tokens = tokens[:self.opt["text_size"]]
    # Embed one sentence at a time.
    embeddings = self.fasttext_model.infer(" ".join(tokens))
    # Left-pad shorter sentences with zero vectors up to text_size.
    if len(tokens) < self.opt["text_size"]:
        pads = [np.zeros(self.opt["embedding_size"])
                for _ in range(self.opt["text_size"] - len(tokens))]
        embeddings = pads + embeddings
After Change
pad = np.zeros(self.opt["embedding_size"])
# Truncate every sentence to text_size tokens and embed the whole batch in a single call.
embeddings_batch = self.fasttext_model(
    [" ".join(sen.split()[:self.opt["text_size"]]) for sen in sentences])
# Left-pad each sentence's embeddings with zero vectors up to text_size.
embeddings_batch = [[pad] * (self.opt["text_size"] - len(tokens)) + tokens
                    for tokens in embeddings_batch]
embeddings_batch = np.asarray(embeddings_batch)
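For reference, a minimal self-contained sketch of the batched truncate-and-pad logic after the change. The dummy_embedder, the literal text_size/embedding_size values, and the sample sentences are illustrative assumptions standing in for the FastText model and the real options, not part of the original code:

import numpy as np

# Stand-in for the FastText model: returns one random vector per token of each text.
def dummy_embedder(texts, embedding_size=4):
    return [[np.random.rand(embedding_size) for _ in text.split()] for text in texts]

text_size = 5        # maximum number of tokens kept per sentence
embedding_size = 4   # dimensionality of each token embedding
sentences = ["a short sentence",
             "one more example sentence here with extra tokens"]

pad = np.zeros(embedding_size)
# Truncate each sentence to text_size tokens, then embed the whole batch at once.
embeddings_batch = dummy_embedder(
    [" ".join(sen.split()[:text_size]) for sen in sentences], embedding_size)
# Left-pad shorter sentences with zero vectors so every row has text_size embeddings.
embeddings_batch = [[pad] * (text_size - len(tokens)) + tokens
                    for tokens in embeddings_batch]
embeddings_batch = np.asarray(embeddings_batch)
print(embeddings_batch.shape)  # (2, 5, 4)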