# Cap the maximum target sequence length for generation
args.max_target_positions = 1024
use_cuda = torch.cuda.is_available() and not args.cpu
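# Load only the split we will generate from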
dataset = data_loaders.load_dataset(args, [args.gen_subset], False)
# Load ensemble
print("| loading model(s) from {}".format(args.path))
models, _ = utils.load_ensemble_for_inference(args.path.split(","), dataset.src_dict, dataset.dst_dict)
print("| Dictionary: {} types".format(len(dataset.src_dict)))
print("| {} {} {} examples".format(args.data, args.gen_subset, len(dataset.splits[args.gen_subset])))
# Optimize ensemble for generation and set the source and destination dicts on each model (required by the scorer)
for model in models:
    model.make_generation_fast_()
    model.src_dict = dataset.src_dict
    model.dst_dict = dataset.dst_dict
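# Build a batched iterator over the evaluation split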
itr = dataset.eval_dataloader(
    args.gen_subset,