ff68a9ef501e7286501dba1719024dfaaab4b473,eval_lm.py,,main,#Any#,18

Before Change


        args.max_target_positions = 1024

    use_cuda = torch.cuda.is_available() and not args.cpu
    dataset = data_loaders.load_dataset(args, [args.gen_subset], False)

    # Load ensemble
    print("| loading model(s) from {}".format(args.path))
    models, _ = utils.load_ensemble_for_inference(args.path.split(","), dataset.src_dict, dataset.dst_dict)

    print("| Dictionary: {} types".format(len(dataset.src_dict)))
    print("| {} {} {} examples".format(args.data, args.gen_subset, len(dataset.splits[args.gen_subset])))

    # Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
    for model in models:
        model.make_generation_fast_()
        model.src_dict = dataset.src_dict
        model.dst_dict = dataset.dst_dict

    itr = dataset.eval_dataloader(
        args.gen_subset,
        max_sentences=args.max_sentences or 4,
        max_positions=args.max_target_positions or 1024,
        descending=True,
    )
    itr = data_utils.ShardedIterator(itr, args.num_shards, args.shard_id)

    gen_timer = StopwatchMeter()
    scorer = SequenceScorer(models)
    if use_cuda:

After Change


    use_cuda = torch.cuda.is_available() and not args.cpu

    # Load dataset splits
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)
    print("| {} {} {} examples".format(args.data, args.gen_subset, len(task.dataset(args.gen_subset))))

    # Load ensemble
    print("| loading model(s) from {}".format(args.path))
    models, _ = utils.load_ensemble_for_inference(args.path.split(","), task)

    # Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
    for model in models:
        model.make_generation_fast_()

    itr = data.EpochBatchIterator(
        dataset=task.dataset(args.gen_subset),
        max_sentences=args.max_sentences or 4,
        max_positions=model.max_positions(),
        num_shards=args.num_shards,
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 7

Instances


Project Name: pytorch/fairseq
Commit Name: ff68a9ef501e7286501dba1719024dfaaab4b473
Time: 2018-06-15
Author: myleott@fb.com
File Name: eval_lm.py
Class Name:
Method Name: main


Project Name: nilmtk/nilmtk
Commit Name: b523b464d8cafe29e352981c1c6df941f205592a
Time: 2014-07-09
Author: jack-list@xlk.org.uk
File Name: nilmtk/metrics.py
Class Name:
Method Name: mean_normalized_error_power