af1a77ca7fbc2bdabec7e859b7046ea3a28b6081,dnc/dnc.py,DNC,forward,#DNC#Any#Any#Any#,201

Before Change


    outputs = None
    chxs = []
    if self.debug:
      viz = [mem_hidden["memory"][0]] if self.share_memory else [mem_hidden[0]["memory"][0]]

    read_vectors = [last_read] * max_length
    # outs = [input[:, x, :] for x in range(max_length)]
    outs = [T.cat([input[:, x, :], last_read], 1) for x in range(max_length)]

    for layer in range(self.num_layers):
      # this layer's hidden states
      chx = controller_hidden[layer]

      m = mem_hidden if self.share_memory else mem_hidden[layer]
      # pass through controller
      outs, _, (chx, m) = self._layer_forward(
          outs,
          layer,
          (chx, m)
      )

      # debug memory
      if self.debug:
        viz.append(m["memory"][0])

      # store the memory back (per layer or shared)
      if self.share_memory:
        mem_hidden = m
      else:
        mem_hidden[layer] = m
      chxs.append(chx)

      if layer == self.num_layers - 1:
        # final outputs
        outputs = T.stack(outs, 1)
      else:
        # the controller output + read vectors go into next layer
        outs = [T.cat([o, r], 1) for o, r in zip(outs, read_vectors)]
        # outs = [o for o in outs]

    if self.debug:
      viz = T.cat(viz, 0).transpose(0, 1)

    controller_hidden = chxs

    if not self.batch_first:
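
For reference, the Before Change debug path collects raw memory matrices and joins them with `T.cat(...).transpose(0, 1)`. A minimal sketch of that shape arithmetic, assuming illustrative dimensions (two matrices of shape `(nr_cells, cell_size) = (4, 6)`; these numbers are not from the source):

    import torch as T

    # memory matrices collected during the forward pass, each (nr_cells, cell_size)
    viz = [T.rand(4, 6), T.rand(4, 6)]

    # cat along dim 0 stacks them row-wise: (8, 6)
    # transpose(0, 1) flips to (cell_size, total_cells): (6, 8)
    viz = T.cat(viz, 0).transpose(0, 1)
    print(viz.shape)  # torch.Size([6, 8])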

After Change


    outputs = None
    chxs = []
    if self.debug:
      viz = []

    read_vectors = [last_read] * max_length
    # outs = [input[:, x, :] for x in range(max_length)]
    outs = [T.cat([input[:, x, :], last_read], 1) for x in range(max_length)]

    for layer in range(self.num_layers):
      # this layer's hidden states
      chx = controller_hidden[layer]

      m = mem_hidden if self.share_memory else mem_hidden[layer]
      # pass through controller
      if self.debug:
        outs, _, mem_debug, (chx, m) = self._layer_forward(outs, layer, (chx, m))
      else:
        outs, _, (chx, m) = self._layer_forward(outs, layer, (chx, m))

      # debug memory
      if self.debug:
        viz.append(mem_debug)

      # store the memory back (per layer or shared)
      if self.share_memory:
        mem_hidden = m
      else:
        mem_hidden[layer] = m
      chxs.append(chx)

      if layer == self.num_layers - 1:
        # final outputs
        outputs = T.stack(outs, 1)
      else:
        # the controller output + read vectors go into next layer
        outs = [T.cat([o, r], 1) for o, r in zip(outs, read_vectors)]
        # outs = [o for o in outs]

    if self.debug:
      viz = np.array(viz)
      s = list(viz.shape)
      viz = viz.reshape(s[0]*s[1], s[2]*s[3])

    controller_hidden = chxs

    if not self.batch_first:
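
The After Change path instead stacks one `mem_debug` array per layer and flattens the result into a 2-D matrix. A minimal numpy sketch of that reshape, with assumed illustrative shapes (two layers, each `mem_debug` of shape `(5, 4, 8)`; the real dimensions depend on the memory configuration):

    import numpy as np

    # one debug array per layer
    viz = [np.random.rand(5, 4, 8) for _ in range(2)]

    viz = np.array(viz)                          # (2, 5, 4, 8)
    s = list(viz.shape)
    viz = viz.reshape(s[0] * s[1], s[2] * s[3])  # (10, 32)
    print(viz.shape)
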
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 7

Instances


Project Name: ixaxaar/pytorch-dnc
Commit Name: af1a77ca7fbc2bdabec7e859b7046ea3a28b6081
Time: 2017-11-01
Author: root@ixaxaar.in
File Name: dnc/dnc.py
Class Name: DNC
Method Name: forward


Project Name: interactiveaudiolab/nussl
Commit Name: fa6f47e7aee228226421c52e61cce4e1ab4cc099
Time: 2020-03-15
Author: prem@u.northwestern.edu
File Name: tests/ml/test_loss.py
Class Name:
Method Name: test_permutation_invariant_loss_tf


Project Name: ixaxaar/pytorch-dnc
Commit Name: 4dde9c49f8cf467b7c11675005cdcd2be1b07e1a
Time: 2017-11-01
Author: root@ixaxaar.in
File Name: dnc/dnc.py
Class Name: DNC
Method Name: forward