887f94bd4f292778622304ba627727a6dacad43f,deepchem/models/tensorgraph/layers.py,IterRefLSTMEmbedding,create_tensor,#IterRefLSTMEmbedding#Any#Any#,2896

Before Change



    // Support set lstm
    support_lstm = LSTMStep(n_feat, 2 * n_feat)
    self.q_init = model_ops.zeros([self.n_support, n_feat])
    self.support_states_init = support_lstm.get_initial_states(
        [self.n_support, n_feat])

    // Test lstm
    test_lstm = LSTMStep(n_feat, 2 * n_feat)
    self.p_init = model_ops.zeros([self.n_test, n_feat])
    self.test_states_init = test_lstm.get_initial_states([self.n_test, n_feat])

    self.trainable_weights = []

    // self.build()
    inputs = self._get_input_tensors(in_layers)
    if len(inputs) != 2:
      raise ValueError(
          "IterRefLSTMEmbedding layer must have exactly two parents")
    x, xp = inputs

    // Get initializations
    p = self.p_init
    q = self.q_init
    // Rename support
    z = xp
    states = self.support_states_init
    x_states = self.test_states_init

    for d in range(self.max_depth):
      // Process support xp using attention
      e = _cosine_dist(z + q, xp)
      a = tf.nn.softmax(e)
      // Get linear combination of support set
      r = model_ops.dot(a, xp)

      // Process test x using attention
      x_e = _cosine_dist(x + p, z)
      x_a = tf.nn.softmax(x_e)
      s = model_ops.dot(x_a, z)

      // Generate new support attention states
      qr = model_ops.concatenate([q, r], axis=1)
      q, states = support_lstm(qr, *states)

      // Generate new test attention states
      ps = model_ops.concatenate([p, s], axis=1)
      p, x_states = test_lstm(ps, *x_states)

      // Redefine
      z = r

    if set_tensors:
      self.xp = x + p
      self.xpq = xp + q
      self.out_tensor = self.xp

After Change


    self.test_states_init = test_lstm.get_initial_states([self.n_test, n_feat])
    return (support_lstm, test_lstm)

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """Execute this layer on input tensors.

    Parameters
    ----------
    in_layers: list
      List of two tensors (X, Xp). X should be of shape (n_test, n_feat) and
      Xp should be of shape (n_support, n_feat) where n_test is the size of
      the test set, n_support that of the support set, and n_feat is the number
      of per-atom features.

    Returns
    -------
    list
      Returns two tensors of same shape as input. Namely the output shape will
      be [(n_test, n_feat), (n_support, n_feat)]
    """
    if tfe.in_eager_mode():
      # In eager mode, create the LSTM sub-layers once and cache them on the
      # instance so their weights persist across repeated calls.
      if not self._built:
        self._support_lstm, self._test_lstm = self._create_variables()
      support_lstm = self._support_lstm
      test_lstm = self._test_lstm
    else:
      # In graph mode, variables are (re)created each time the graph is built.
      support_lstm, test_lstm = self._create_variables()
      self.trainable_weights = []

    # self.build()
    inputs = self._get_input_tensors(in_layers)
    if len(inputs) != 2:
      raise ValueError(
          "IterRefLSTMEmbedding layer must have exactly two parents")
    x, xp = inputs

    # Get initializations
    p = self.p_init
    q = self.q_init
    # Rename support
    z = xp
    states = self.support_states_init
    x_states = self.test_states_init

    for d in range(self.max_depth):
      # Process support xp using attention
      e = _cosine_dist(z + q, xp)
      a = tf.nn.softmax(e)
      # Get linear combination of support set
      r = model_ops.dot(a, xp)

      # Process test x using attention
      x_e = _cosine_dist(x + p, z)
      x_a = tf.nn.softmax(x_e)
      s = model_ops.dot(x_a, z)

      # Generate new support attention states
      qr = model_ops.concatenate([q, r], axis=1)
      q, states = support_lstm(qr, *states)

      # Generate new test attention states
      ps = model_ops.concatenate([p, s], axis=1)
      p, x_states = test_lstm(ps, *x_states)

      # Redefine
      z = r

    if set_tensors:
      self.xp = x + p
      self.xpq = xp + q
      self.out_tensor = self.xp
    if tfe.in_eager_mode() and not self._built:
      # Expose the trainable variables so eager-mode optimizers can find them.
      self.variables = self._support_lstm.variables + self._test_lstm.variables + [
          self.q_init, self.p_init
      ]
      self._built = True

    return [x + p, xp + q]

  def none_tensors(self):
    p_init, q_init = self.p_init, self.q_init,
Italian Trulli
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 25

Instances


Project Name: deepchem/deepchem
Commit Name: 887f94bd4f292778622304ba627727a6dacad43f
Time: 2018-03-25
Author: peastman@stanford.edu
File Name: deepchem/models/tensorgraph/layers.py
Class Name: IterRefLSTMEmbedding
Method Name: create_tensor


Project Name: deepchem/deepchem
Commit Name: 887f94bd4f292778622304ba627727a6dacad43f
Time: 2018-03-25
Author: peastman@stanford.edu
File Name: deepchem/models/tensorgraph/layers.py
Class Name: GraphConv
Method Name: create_tensor


Project Name: deepchem/deepchem
Commit Name: 887f94bd4f292778622304ba627727a6dacad43f
Time: 2018-03-25
Author: peastman@stanford.edu
File Name: deepchem/models/tensorgraph/layers.py
Class Name: IterRefLSTMEmbedding
Method Name: create_tensor


Project Name: deepchem/deepchem
Commit Name: 887f94bd4f292778622304ba627727a6dacad43f
Time: 2018-03-25
Author: peastman@stanford.edu
File Name: deepchem/models/tensorgraph/layers.py
Class Name: AttnLSTMEmbedding
Method Name: create_tensor