9d56361641a64ff73ac630812ecd4964eedbc7aa,gat/graph_attention_layer.py,GraphAttention,call,#GraphAttention#Any#,72

Before Change


            # TODO: change the for loop to a loop over tf.unstack(combinations)
            combination_slices = tf.unstack(K.reshape(combinations, (B, -1, 2 * self.F_)))
            output_features = []
            for slice in combination_slices:
                dense = Dense(1)(slice)  # N x 1 (basically "a(Wh_i, Wh_j)" in the paper)
                # TODO: masking
                e_i = K.reshape(dense, (1, -1))  # 1 x N (e_i in the paper)
                softmax = K.squeeze(K.softmax(e_i))  # N (alpha_i in the paper)
                softmax_broadcast = K.transpose(K.reshape(K.tile(softmax, [self.F_]), [self.F_, -1]))
                node_features = K.sum(softmax_broadcast * linear_transf, axis=0)
                if self.use_bias:
                    output = K.bias_add(node_features, self.bias)
                if self.heads_combination == "concat" and self.activation is not None:
                    node_features = self.activation(node_features)
                output_features.append(node_features)

            output_features = K.stack(output_features)
            outputs.append(output_features)

        if self.heads_combination == "concat":
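
For context, each iteration of the loop above computes single-node attention: unnormalised scores over all N nodes, a softmax, and a weighted sum of the linearly transformed features. A minimal NumPy sketch of that per-node computation (hypothetical shapes and variable names, for illustration only; not part of the commit):

    # Minimal NumPy sketch of what one loop iteration computes (illustration only).
    import numpy as np

    N, F_out = 5, 8                         # hypothetical node count and output feature size
    Wh = np.random.rand(N, F_out)           # stands in for "linear_transf" (W applied to all nodes)
    a = np.random.rand(2 * F_out, 1)        # stands in for the single-layer attention network "a"

    i = 0                                   # the node handled by this iteration
    pairs = np.concatenate(                 # [Wh_i || Wh_j] for every j        -> N x 2F"
        [np.repeat(Wh[i:i + 1], N, axis=0), Wh], axis=1)
    e_i = pairs @ a                         # unnormalised scores e_ij          -> N x 1
    alpha = np.exp(e_i) / np.exp(e_i).sum() # softmax over j (alpha_i)          -> N x 1
    h_i = (alpha * Wh).sum(axis=0)          # new features for node i           -> F"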

After Change


        X = inputs[0]  # input graph (B x F)
        G = inputs[1]  # full graph (N x F) (this is necessary in code, but not in theory. Check section 2.2 of the paper)
        B = K.shape(X)[0]  # Get batch size at runtime
        N = K.shape(G)[0]  # Get number of nodes in the graph at runtime

        outputs = []  # Will store the outputs of each attention head (B x F" or B x KF")
        for head in range(self.attention_heads):
            kernel = self.kernels[head]  # W in the paper (F x F")
            attention_kernel = self.attention_kernels[head]  # Attention network a in paper (2*F" x 1)

            # Compute inputs to attention network
            linear_transf_X = K.dot(X, kernel)  # B x F"
            linear_transf_G = K.dot(G, kernel)  # N x F"

            # Repeat feature vectors of input: [[1], [2]] becomes [[1], [1], [2], [2]]
            repeated = K.reshape(K.tile(linear_transf_X, [1, N]), (-1, self.F_))  # B*N x F"
            # Tile feature vectors of full graph: [[1], [2]] becomes [[1], [2], [1], [2]]
            tiled = K.tile(linear_transf_G, [B, 1])  # B*N x F"
            # Build combinations
            combinations = K.concatenate([repeated, tiled])  # B*N x 2F"
            combination_slices = K.reshape(combinations, (B, -1, 2 * self.F_))  # B x N x 2F"

            # Attention head
            dense = K.dot(combination_slices, attention_kernel)  # B x N x 1 (a(Wh_i, Wh_j) in the paper)
            dense = K.squeeze(dense, -1)  # B x N
            dense = K.softmax(dense)  # B x N

            # TODO: masking with Vaswani method (add -inf to masked coefficients)
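
To make the vectorised pairing above concrete, here is a small standalone NumPy sketch (hypothetical shapes and names, not part of the commit) of the repeat/tile/concatenate trick: it builds every (i, j) feature pair in a single tensor so all attention scores come from one matrix product instead of the per-node loop of the old version.

    # Standalone NumPy sketch of the repeat/tile pairing (illustration only).
    import numpy as np

    B, N, F_out = 2, 3, 4                            # hypothetical batch, graph and feature sizes
    Wh_X = np.arange(B * F_out).reshape(B, F_out)    # stands in for linear_transf_X  (B x F")
    Wh_G = np.arange(N * F_out).reshape(N, F_out)    # stands in for linear_transf_G  (N x F")

    repeated = np.repeat(Wh_X, N, axis=0)            # each input row repeated N times    (B*N x F")
    tiled = np.tile(Wh_G, (B, 1))                    # whole graph stacked B times        (B*N x F")
    combinations = np.concatenate([repeated, tiled], axis=1)    # B*N x 2F"
    slices = combinations.reshape(B, N, 2 * F_out)               # B x N x 2F"

    a = np.random.rand(2 * F_out, 1)                 # stands in for the attention kernel
    scores = (slices @ a).squeeze(-1)                # B x N, matches "dense" above
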
In pattern: SUPERPATTERN

Frequency: 4

Non-data size: 7

Instances


Project Name: danielegrattarola/keras-gat
Commit Name: 9d56361641a64ff73ac630812ecd4964eedbc7aa
Time: 2017-11-09
Author: daniele.grattarola@gmail.com
File Name: gat/graph_attention_layer.py
Class Name: GraphAttention
Method Name: call


Project Name: tensorly/tensorly
Commit Name: 3dba9054b3c7bf4b9edabb430eb457a07e05b6ca
Time: 2018-07-05
Author: jean.kossaifi@gmail.com
File Name: tensorly/mps_tensor.py
Class Name:
Method Name: mps_to_tensor


Project Name: NifTK/NiftyNet
Commit Name: 6f98c641dc85bd665732e34acf61bc3e27cd9893
Time: 2017-05-25
Author: wenqi.li@ucl.ac.uk
File Name: nn/histogram_standardisation.py
Class Name:
Method Name: create_mapping_perc


Project Name: markovmodel/PyEMMA
Commit Name: e1adf9416201a9debe4036f1f1ceb83caf6f962a
Time: 2018-02-04
Author: fabian.paul@mpikg.mpg.de
File Name: pyemma/coordinates/tests/test_vamp.py
Class Name: TestVAMPSelfConsitency
Method Name: test