e = np.random.randn(*dims)
for _ in range(self.max_iter):
    d = self.finite_diff * self._normalize(d)
    e = self.finite_diff * self._normalize(e)
    preds_new = self.classifier.predict(np.stack((val + d, val + e)))
    # Compute KL divergence between logits
    from scipy.stats import entropy
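As an aside, scipy.stats.entropy returns the KL divergence when called with a second distribution, which is what both versions rely on. A minimal standalone check, with made-up probability vectors:

import numpy as np
from scipy.stats import entropy

p = np.array([0.7, 0.2, 0.1])  # e.g. prediction on the clean input
q = np.array([0.5, 0.3, 0.2])  # e.g. prediction on the perturbed input
kl = entropy(p, q)             # KL(p || q) = sum(p * log(p / q))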
After Change
for _ in range(self.max_iter):
    d = self._normalize(d)
    preds_new = self.classifier.predict((val + d)[None, ...], logits=False)
    from scipy.stats import entropy
    # KL divergence between the prediction on the clean input and on the perturbed input
    kl_div1 = entropy(preds[ind], preds_new[0])
    # TODO remove for loop (a batched alternative is sketched below)
    # Finite-difference estimate of the gradient of the KL divergence
    # w.r.t. d, one coordinate at a time
    d_new = np.zeros_like(d)
    for i in range(d.size):
        d.flat[i] += self.finite_diff
        preds_new = self.classifier.predict((val + d)[None, ...], logits=False)
        kl_div2 = entropy(preds[ind], preds_new[0])
        d_new.flat[i] = (kl_div2 - kl_div1) / self.finite_diff
        d.flat[i] -= self.finite_diff
    d = d_new

# Apply perturbation and clip
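One way to address the TODO above is to batch the finite-difference probes into a single predict call instead of one call per coordinate. A sketch under the same names as above, assuming self.classifier.predict accepts a batch (as the original code already does); note the probe tensor grows with the square of d.size, so this suits small input dimensions:

# Batched finite differences: probe every coordinate in one predict call
deltas = self.finite_diff * np.eye(d.size).reshape((d.size,) + d.shape)
preds_batch = self.classifier.predict(val + d + deltas, logits=False)
kl_divs = np.array([entropy(preds[ind], p) for p in preds_batch])
d = (kl_divs - kl_div1).reshape(d.shape) / self.finite_diff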