# Accumulate the negative log-probability of each sampled discrete value under the proposal.
for b in range(batch_size):
    value = samples[b].value[0]
    min = samples[b].distribution.prior_min
    l -= log_weights[b, int(value) - min]  # Should we average this over dimensions? See http://pytorch.org/docs/nn.html#torch.nn.KLDivLoss
return l
class ProposalNormal(nn.Module):
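The lookup log_weights[b, int(value) - min] above is a per-sample negative log-likelihood of the chosen index. A minimal sketch with hypothetical toy tensors (assuming a newer PyTorch API than the Variable-based code above) showing how torch.nn.functional.nll_loss and its reduction argument relate to the "should we average" question in the comment:

import torch
import torch.nn.functional as F

# Hypothetical toy batch: 2 proposals over 4 discrete values (indices already offset by prior_min).
log_weights = torch.log(torch.tensor([[0.1, 0.2, 0.3, 0.4],
                                      [0.25, 0.25, 0.25, 0.25]]))
indices = torch.tensor([2, 0])

# Loop form, as in the snippet above.
l = 0
for b in range(2):
    l -= log_weights[b, indices[b]]

# Equivalent built-in: reduction='sum' matches the accumulation above,
# while reduction='mean' would average over the batch instead.
assert torch.allclose(l, F.nll_loss(log_weights, indices, reduction='sum'))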
After Change
log_weights = torch.log(proposal_output + util.epsilon)
l = 0
# Accumulate the loss from the one-hot encoded values: the elementwise product with
# log_weights selects the log-probability of the sampled index for each batch entry.
for b in range(batch_size):
    value = Variable(samples[b].value, requires_grad=False)  # value is one-hot
    l -= torch.sum(log_weights[b] * value)  # Should we average this over dimensions? See http://pytorch.org/docs/nn.html#torch.nn.KLDivLoss
return l
class ProposalNormal(nn.Module):
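As a quick sanity check with hypothetical toy tensors (not taken from the module above), the one-hot dot product in the revised loop selects the same entries as the earlier index lookup, so the two versions accumulate the same loss:

import torch

# Hypothetical toy batch: 2 one-hot encoded samples over 4 discrete values.
log_weights = torch.log(torch.tensor([[0.1, 0.2, 0.3, 0.4],
                                      [0.25, 0.25, 0.25, 0.25]]))
indices = torch.tensor([2, 0])
one_hot = torch.eye(4)[indices]

# Index lookup (before) and one-hot sum (after) give identical results.
l_indexed = -sum(log_weights[b, indices[b]] for b in range(2))
l_one_hot = -sum(torch.sum(log_weights[b] * one_hot[b]) for b in range(2))
assert torch.allclose(l_indexed, l_one_hot)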