0be455f86d595c12333541c09f2c5861dd76c2d4,eval_utils.py,,eval_split,#Any#Any#Any#Any#,97

Before Change


                        entry = {"image_id": data["infos"][k]["id"], "caption": sent}
                        n_predictions.append(entry)
            // case 2 sample_max =0 temperature xx
            elif sample_n_method == "sample":
                tmp_eval_kwargs.update({"sample_max": 0, "beam_size": 1}) // randomness from sample
                with torch.no_grad():
                    _seq, _sampleLogprobs = model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode="sample")
                _sents = utils.decode_sequence(loader.get_vocab(), _seq)
                for k, sent in enumerate(_sents):
                    entry = {"image_id": data["infos"][k // sample_n]["id"], "caption": sent}
                    n_predictions.append(entry)
            // case 3 gumbel max
            elif sample_n_method == "gumbel":
                tmp_eval_kwargs.update({"sample_max": 2, "beam_size": 1}) // randomness from sample
                with torch.no_grad():
                    _seq, _sampleLogprobs = model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode="sample")

After Change


                        entry = {"image_id": data["infos"][k]["id"], "caption": sent}
                        n_predictions.append(entry)
            // case 2 sample_max =0 temperature xx / gumbel / topk sampling
            elif sample_n_method == "sample" or \
                 sample_n_method == "gumbel" or \
                 sample_n_method.startswith("top"):
                if sample_n_method == "sample":
                    tmp_sample_max = 0
                elif sample_n_method == "gumbel":
                    tmp_sample_max = 2
                elif sample_n_method.startswith("top"):
                    tmp_sample_max = -int(sample_n_method[3:])
                tmp_eval_kwargs.update({"sample_max": tmp_sample_max, "beam_size": 1}) // randomness from sample
                with torch.no_grad():
                    _seq, _sampleLogprobs = model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode="sample")
                _sents = utils.decode_sequence(loader.get_vocab(), _seq)
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 10

Instances


Project Name: ruotianluo/ImageCaptioning.pytorch
Commit Name: 0be455f86d595c12333541c09f2c5861dd76c2d4
Time: 2019-04-27
Author: rluo@ttic.edu
File Name: eval_utils.py
Class Name:
Method Name: eval_split


Project Name: deepmipt/DeepPavlov
Commit Name: d3270eb0aea3c1c4b264ed1494412a7fe5174f2d
Time: 2018-10-19
Author: yoptar@gmail.com
File Name: deeppavlov/skills/default_skill/default_skill.py
Class Name: DefaultStatelessSkill
Method Name: __call__