    :return: `None`
    """
    # Check if train and output_ph are available
    if self._train is None or self._output_ph is None:
        raise ValueError("Need the training objective and the output placeholder to train the model.")

    num_batch = int(np.ceil(len(inputs) / batch_size))
    ind = np.arange(len(inputs))
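    # For example, with 1000 training samples and the default batch_size=128,
    # num_batch = ceil(1000 / 128) = 8: the first seven batches hold 128 samples each
    # and the final one holds the remaining 104, which is why the last iteration
    # below slices `ind` to the end rather than by a fixed batch width.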
    # Start training
    for _ in range(nb_epochs):
        # Shuffle the examples
        random.shuffle(ind)

        # Train for one epoch
        for m in range(num_batch):
            if m < num_batch - 1:
                i_batch = inputs[ind[m * batch_size:(m + 1) * batch_size]]
                o_batch = outputs[ind[m * batch_size:(m + 1) * batch_size]]
            else:
                i_batch = inputs[ind[m * batch_size:]]
                o_batch = outputs[ind[m * batch_size:]]

            # Run train step
            if self._learning is None:
                self._sess.run(self._train, feed_dict={self._input_ph: i_batch, self._output_ph: o_batch})
            else:
                self._sess.run(self._train, feed_dict={self._input_ph: i_batch, self._output_ph: o_batch,
                                                       self._learning: True})
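For context, the session-based loop above can be exercised end to end with a self-contained TensorFlow 1.x-style sketch. The placeholder graph, train op, and session below are illustrative stand-ins for `self._input_ph`, `self._output_ph`, `self._train`, and `self._sess`; none of these definitions come from the classifier itself.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Toy graph standing in for the classifier's placeholders and training objective (assumed)
input_ph = tf.placeholder(tf.float32, shape=[None, 20])
output_ph = tf.placeholder(tf.float32, shape=[None, 10])
logits = tf.layers.dense(input_ph, 10)
loss = tf.losses.softmax_cross_entropy(onehot_labels=output_ph, logits=logits)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Dummy data, then the same shuffle/mini-batch schedule used by fit()
x = np.random.rand(256, 20).astype(np.float32)
y = np.eye(10)[np.random.randint(0, 10, 256)].astype(np.float32)
batch_size, nb_epochs = 32, 2
num_batch = int(np.ceil(len(x) / batch_size))
ind = np.arange(len(x))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(nb_epochs):
        np.random.shuffle(ind)
        for m in range(num_batch):
            batch = ind[m * batch_size:(m + 1) * batch_size]
            sess.run(train_op, feed_dict={input_ph: x[batch], output_ph: y[batch]})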
def class_gradient(self, inputs, logits=False):
    Compute per-class derivatives w.r.t. `inputs`.
After Change
return preds
def fit(self, inputs, outputs, batch_size=128, nb_epochs=10):
    """
    Fit the classifier on the training set `(inputs, outputs)`.

    :param inputs: Training data.
    :type inputs: `np.ndarray`
    :param outputs: Labels.
    :type outputs: `np.ndarray`
    :param batch_size: Size of batches.
    :type batch_size: `int`
    :param nb_epochs: Number of epochs to use for training.
    :type nb_epochs: `int`
    :return: `None`
    """
    # Set train phase
    self._model.train(True)

    # Apply defences
    inputs, outputs = self._apply_defences_fit(inputs, outputs)

    num_batch = int(np.ceil(len(inputs) / batch_size))
    ind = np.arange(len(inputs))
    # Start training
    for _ in range(nb_epochs):
        # Shuffle the examples
        random.shuffle(ind)

        # Train for one epoch
        for m in range(num_batch):
            if m < num_batch - 1:
                i_batch = inputs[ind[m * batch_size:(m + 1) * batch_size]]
                o_batch = outputs[ind[m * batch_size:(m + 1) * batch_size]]
            else:
                i_batch = inputs[ind[m * batch_size:]]
                o_batch = outputs[ind[m * batch_size:]]
            # Zero the parameter gradients (PyTorch accumulates gradients across backward() calls)
            self._optimizer.zero_grad()

            # Actual training
            m_batch = self._model(i_batch)
            loss = self._loss(m_batch, o_batch)
            loss.backward()
            self._optimizer.step()
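The same schedule can be reproduced as a free-standing PyTorch loop. The model, loss, and optimizer below are small stand-ins for `self._model`, `self._loss`, and `self._optimizer`, chosen only to make the sketch runnable; they are not the objects wrapped by this classifier.

import numpy as np
import torch

model = torch.nn.Linear(20, 10)                           # stand-in model (assumed)
loss_fn = torch.nn.CrossEntropyLoss()                     # stand-in loss (assumed)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # stand-in optimizer (assumed)

x = torch.randn(256, 20)                                  # dummy inputs
y = torch.randint(0, 10, (256,))                          # dummy class labels
batch_size, nb_epochs = 32, 2
num_batch = int(np.ceil(len(x) / batch_size))
ind = np.arange(len(x))

for _ in range(nb_epochs):
    np.random.shuffle(ind)
    for m in range(num_batch):
        batch = torch.from_numpy(ind[m * batch_size:(m + 1) * batch_size])
        optimizer.zero_grad()                             # clear gradients accumulated by backward()
        loss = loss_fn(model(x[batch]), y[batch])         # forward pass and loss
        loss.backward()                                   # backpropagate
        optimizer.step()                                  # parameter update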
def class_gradient(self, inputs, logits=False):