for slice in combination_slices:
    dense = Dense(1)(slice)  # N x 1 (basically "a(Wh_i, Wh_j)" in the paper)
    # TODO: masking
    e_i = K.reshape(dense, (1, -1))  # 1 x N (e_i in the paper)
    softmax = K.squeeze(K.softmax(e_i), axis=0)  # N (alpha_i in the paper)
    softmax_broadcast = K.transpose(K.reshape(K.tile(softmax, [self.F_]), [self.F_, -1]))
    node_features = K.sum(softmax_broadcast * linear_transf, axis=0)
    if self.use_bias:
        output = K.bias_add(node_features, self.bias)
    if self.heads_combination == "concat" and self.activation is not None:
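For reference, the tile/reshape/transpose pattern above only broadcasts the N attention coefficients alpha_i across the F' feature columns before the weighted sum. A minimal NumPy sketch of the same pattern, with hypothetical sizes and independent of the layer code:

import numpy as np

N, F_ = 3, 2                                    # hypothetical: 3 neighbours, F' = 2
alpha = np.array([0.2, 0.3, 0.5])               # attention coefficients for one node (sum to 1)
Wh = np.arange(6, dtype=float).reshape(N, F_)   # transformed neighbour features, N x F'

# Same shape gymnastics as K.transpose(K.reshape(K.tile(softmax, [F_]), [F_, -1]))
alpha_broadcast = np.tile(alpha, F_).reshape(F_, N).T  # N x F', row i is alpha_i repeated F' times

node_features = (alpha_broadcast * Wh).sum(axis=0)     # weighted sum over neighbours, shape (F',)
assert np.allclose(node_features, alpha @ Wh)          # equivalent to a plain matrix-vector product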
After Change
outputs = []  # Will store the outputs of each attention head (B x F' or B x KF')
for head in range(self.attention_heads):
    kernel = self.kernels[head]  # W in the paper (F x F')
    attention_kernel = self.attention_kernels[head]  # Attention network a in paper (2*F' x 1)
    # Compute inputs to attention network
    linear_transf_X = K.dot(X, kernel)  # B x F'
    linear_transf_G = K.dot(G, kernel)  # N x F'
    # Repeat feature vectors of input: [[1], [2]] becomes [[1], [1], [2], [2]]
    repeated = K.reshape(K.tile(linear_transf_X, [1, N]), (-1, self.F_))  # B*N x F'
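The tile-then-reshape step repeats each of the B transformed input rows N times, presumably so that each input row can be paired with every graph node's features when the attention inputs a(Wh_i, Wh_j) are assembled. A small NumPy sketch of just that repetition, with hypothetical sizes and not part of the layer:

import numpy as np

B, N, F_ = 2, 2, 1                          # hypothetical sizes matching the comment above
linear_transf_X = np.array([[1.0], [2.0]])  # B x F'

# Same pattern as K.reshape(K.tile(linear_transf_X, [1, N]), (-1, F_))
repeated = np.tile(linear_transf_X, (1, N)).reshape(-1, F_)
print(repeated)  # [[1.] [1.] [2.] [2.]] -> each row repeated N times, shape (B*N, F')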