    # gradient. Use the actual data point, v0.
    g_weights_amp -= torch.einsum("i,j->ij", (h0_amp_batch[row_count], v0)) / batch_size
    g_vb_amp -= v0 / batch_size
    g_hb_amp -= h0_amp_batch[row_count] / batch_size
else:
    # Compute the rotated gradients.
After Change
    # If there are no non-trivial unitaries for the data point v0,
    # calculate the positive phase of regular (i.e. non-complex RBM)
    # gradient. Use the actual data point, v0.
    prob_amp = F.sigmoid(F.linear(v0, self.rbm_amp.weights, self.rbm_amp.hidden_bias))
    g_weights_amp -= torch.einsum("i,j->ij", (prob_amp, v0)) / batch_size
    g_vb_amp -= v0 / batch_size
    g_hb_amp -= prob_amp / batch_size
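
The substance of the change is that the positive (data-dependent) phase of the amplitude-RBM gradient is now built from the hidden-unit activation probabilities p(h = 1 | v0) = sigmoid(W v0 + c) rather than from the sampled hidden state h0_amp_batch[row_count]. The sketch below reproduces just that computation with standalone tensors; the parameter names, shapes, and the example data point are illustrative assumptions, not the surrounding class's actual attributes.

# Minimal, self-contained sketch of the positive-phase computation above.
# All names and shapes here (weights, hidden_bias, num_*) are assumptions
# chosen for illustration.
import torch
import torch.nn.functional as F

num_visible, num_hidden, batch_size = 4, 3, 1

weights = torch.randn(num_hidden, num_visible)   # W: hidden x visible
hidden_bias = torch.zeros(num_hidden)            # c
v0 = torch.tensor([1.0, 0.0, 1.0, 1.0])          # a single data point

# Hidden-unit activation probabilities p(h = 1 | v0) = sigmoid(W v0 + c).
prob_amp = torch.sigmoid(F.linear(v0, weights, hidden_bias))

# Positive-phase (data-dependent) gradient contributions, using the same
# negated, batch-averaged sign convention as the code above.
g_weights_amp = -torch.einsum("i,j->ij", prob_amp, v0) / batch_size
g_vb_amp = -v0 / batch_size
g_hb_amp = -prob_amp / batch_size

print(g_weights_amp.shape)  # torch.Size([3, 4])

Using the exact conditional probabilities instead of a binary hidden sample for the data term is standard practice in RBM training and typically reduces the variance of the positive-phase gradient estimate.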