Commit: 70a188776f7470c838dd22b1636462b75573a734
File: src/gluonnlp/models/bert.py
Class: BertModel
Method: __init__

Before Change


        self.weight_initializer = weight_initializer
        self.bias_initializer = bias_initializer
        self.layer_norm_eps = layer_norm_eps
        with self.name_scope():
            # Construct BertTransformer
            self.encoder = BertTransformer(
                units=units,
                hidden_size=hidden_size,
                num_layers=num_layers,
                num_heads=num_heads,
                attention_dropout_prob=attention_dropout_prob,
                hidden_dropout_prob=hidden_dropout_prob,
                output_attention=False,
                output_all_encodings=False,
                activation=activation,
                layer_norm_eps=layer_norm_eps,
                weight_initializer=weight_initializer,
                bias_initializer=bias_initializer,
                dtype=dtype,
                prefix="enc_",
            )
            self.encoder.hybridize()
            # Construct word embedding
            self.word_embed = nn.Embedding(input_dim=vocab_size,
                                           output_dim=units,
                                           weight_initializer=embed_initializer,
                                           dtype=dtype,
                                           prefix="word_embed_")
            self.embed_layer_norm = nn.LayerNorm(epsilon=self.layer_norm_eps,
                                                 prefix="embed_ln_")
            self.embed_dropout = nn.Dropout(hidden_dropout_prob)
            # Construct token type embedding
            self.token_type_embed = nn.Embedding(input_dim=num_token_types,
                                                 output_dim=units,
                                                 weight_initializer=weight_initializer,
                                                 prefix="token_type_embed_")
            self.token_pos_embed = PositionalEmbedding(units=units,
                                                       max_length=max_length,
                                                       dtype=self._dtype,
                                                       method=pos_embed_type,
                                                       prefix="token_pos_embed_")
            if self.use_pooler:
                # Construct pooler
                self.pooler = nn.Dense(units=units,
                                       in_units=units,
                                       flatten=False,
                                       activation="tanh",
                                       weight_initializer=weight_initializer,
                                       bias_initializer=bias_initializer,
                                       prefix="pooler_")

    def hybrid_forward(self, F, inputs, token_types, valid_length):
        # pylint: disable=arguments-differ
        """Generate the representation given the inputs."""
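
For context, a minimal usage sketch of the pre-change API (not part of the commit; the BERT-base style configuration values are illustrative, and the forward-call shapes follow the hybrid_forward signature above):

    import mxnet as mx

    # Hypothetical configuration; remaining constructor arguments keep their defaults.
    model = BertModel(vocab_size=30522, units=768, hidden_size=3072,
                      num_layers=12, num_heads=12)
    model.initialize(ctx=mx.cpu())
    model.hybridize()

    inputs = mx.nd.ones((2, 16))           # (batch_size, seq_length) token ids
    token_types = mx.nd.zeros((2, 16))     # segment ids, one of num_token_types
    valid_length = mx.nd.array([16, 10])   # unpadded length of each sequence
    outputs = model(inputs, token_types, valid_length)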

After Change


                 hidden_dropout_prob=0.,
                 attention_dropout_prob=0.,
                 num_token_types=2,
                 pos_embed_type="learned",
                 activation="gelu",
                 layer_norm_eps=1E-12,
                 embed_initializer=TruncNorm(stdev=0.02),
                 weight_initializer=TruncNorm(stdev=0.02),
                 bias_initializer="zeros",
                 dtype="float32",
                 use_pooler=True):
        super().__init__()
        self._dtype = dtype
        self.use_pooler = use_pooler
        self.pos_embed_type = pos_embed_type
        self.num_token_types = num_token_types
        self.vocab_size = vocab_size
        self.units = units
        self.max_length = max_length
        self.activation = activation
        self.embed_initializer = embed_initializer
        self.weight_initializer = weight_initializer
        self.bias_initializer = bias_initializer
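
The post-change signature inlines TruncNorm(stdev=0.02), the truncated-normal initialization used by the original BERT, as the default embedding and weight initializer, and the bare super().__init__() suggests the prefix/name_scope plumbing from the old version is dropped. A sketch of overriding these new keyword defaults (the import path and stdev value are assumptions, not from the commit):

    from gluonnlp.initializer import TruncNorm  # assumed import path

    # Hypothetical override of the new defaults; other hyperparameters
    # keep the values shown in the signature above.
    model = BertModel(vocab_size=30522,
                      embed_initializer=TruncNorm(stdev=0.01),
                      weight_initializer=TruncNorm(stdev=0.01),
                      bias_initializer="zeros",
                      dtype="float32",
                      use_pooler=False)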
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 6

Instances


Project Name: dmlc/gluon-nlp
Commit Name: 70a188776f7470c838dd22b1636462b75573a734
Time: 2020-07-16
Author: lausen@amazon.com
File Name: src/gluonnlp/models/bert.py
Class Name: BertModel
Method Name: __init__


Project Name: dmlc/gluon-nlp
Commit Name: 70a188776f7470c838dd22b1636462b75573a734
Time: 2020-07-16
Author: lausen@amazon.com
File Name: src/gluonnlp/models/albert.py
Class Name: AlbertForPretrain
Method Name: __init__


Project Name: dmlc/gluon-nlp
Commit Name: 70a188776f7470c838dd22b1636462b75573a734
Time: 2020-07-16
Author: lausen@amazon.com
File Name: src/gluonnlp/models/bert.py
Class Name: BertForPretrain
Method Name: __init__