Before Change
args.max_target_positions = 1024
use_cuda = torch.cuda.is_available() and not args.cpu
dataset = data_loaders.load_dataset(args, [args.gen_subset], False)
# Load ensemble
print("| loading model(s) from {}".format(args.path))
models, _ = utils.load_ensemble_for_inference(args.path.split(","), dataset.src_dict, dataset.dst_dict)
print("| Dictionary: {} types".format(len(dataset.src_dict)))
print("| {} {} {} examples".format(args.data, args.gen_subset, len(dataset.splits[args.gen_subset])))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
    model.make_generation_fast_()
    model.src_dict = dataset.src_dict
    model.dst_dict = dataset.dst_dict
itr = dataset.eval_dataloader(
    args.gen_subset,
    max_sentences=args.max_sentences or 4,
    max_positions=args.max_target_positions or 1024,
    descending=True,
)
itr = data_utils.ShardedIterator(itr, args.num_shards, args.shard_id)
gen_timer = StopwatchMeter()
scorer = SequenceScorer(models)
if use_cuda:
    scorer.cuda()  # likely continuation (assumption): move the scorer to GPU
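For orientation, a minimal sketch of how this pre-refactor setup is typically consumed. The `score_batched_itr` call and the `hypo` result fields are assumptions modeled on fairseq's `SequenceScorer` of this era, not part of the change itself:

# Sketch (assumption): walk the sharded batches and print a per-sentence score.
for sample_id, src_tokens, target_tokens, hypos in scorer.score_batched_itr(
    itr, cuda=use_cuda, timer=gen_timer
):
    for hypo in hypos:
        print("| {} avg. log-prob {:.4f}".format(sample_id, hypo["score"]))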
After Change
The dataset-centric setup above gives way to a task-based API: the task now owns the dictionaries and dataset splits, so the source and destination dicts no longer have to be attached to each model by hand.
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
print("| {} {} {} examples".format(args.data, args.gen_subset, len(task.dataset(args.gen_subset))))
# Load ensemble
print("| loading model(s) from {}".format(args.path))
models, _ = utils.load_ensemble_for_inference(args.path.split(","), task)
# Optimize ensemble for generation (the task now provides the dicts, so none are set on the model)
for model in models:
    model.make_generation_fast_()
itr = data.EpochBatchIterator(
    dataset=task.dataset(args.gen_subset),
    max_sentences=args.max_sentences or 4,
    max_positions=model.max_positions(),
    num_shards=args.num_shards,
    shard_id=args.shard_id,  # assumed continuation: shard_id pairs with num_shards, as in ShardedIterator above
)
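To round out the picture, a hedged sketch of the scorer setup that would follow under the task API; `next_epoch_itr` and `task.target_dictionary` are assumptions drawn from fairseq's task-era interfaces, not part of the snippet above:

# Sketch (assumption): the task, not the model, now supplies the target dictionary.
itr = itr.next_epoch_itr(shuffle=False)  # plain iterator over one epoch of batches
gen_timer = StopwatchMeter()
scorer = SequenceScorer(models, task.target_dictionary)
if use_cuda:
    scorer.cuda()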