# Find optimal model hyperparameters
# (gp_model, likelihood, mll, train_x12, and train_y12 come from the enclosing test setup)
gp_model.train()
likelihood.train()
optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
optimizer.n_iter = 0  # ad-hoc iteration counter attached to the optimizer
for _ in range(50):
    optimizer.zero_grad()
    output = gp_model(train_x12)
    # Sum the batch of marginal log likelihoods into a single scalar loss
    loss = -mll(output, train_y12).sum()
    loss.backward()
    optimizer.n_iter += 1
    optimizer.step()

# Every model parameter should have received a gradient
for param in gp_model.parameters():
    self.assertTrue(param.grad is not None)
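
The loop above references names defined elsewhere in the test. A minimal sketch of that surrounding setup, assuming the standard GPyTorch pieces; the tensor shapes and toy data here are illustrative assumptions, not taken from the original test:

import math

import torch
from torch import optim

import gpytorch
from gpytorch.likelihoods import GaussianLikelihood

# Two independent regression tasks stacked into a batch of size 2
train_x12 = torch.linspace(0, 1, 11).unsqueeze(0).repeat(2, 1).unsqueeze(-1)  # (2, 11, 1)
train_y12 = torch.cat([
    torch.sin(train_x12[0].squeeze(-1) * (2 * math.pi)).unsqueeze(0),
    torch.cos(train_x12[1].squeeze(-1) * (2 * math.pi)).unsqueeze(0),
], dim=0)  # (2, 11)

# `mll` ties the exact marginal log likelihood to the model and its likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)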
After Change
def test_train_on_batch_test_on_batch(self):
    # We're manually going to set the hyperparameters to something they shouldn't be
    likelihood = GaussianLikelihood(
        log_noise_prior=gpytorch.priors.NormalPrior(loc=torch.zeros(2), scale=torch.ones(2), log_transform=True),
        batch_size=2,
    )
    gp_model = ExactGPModel(train_x12, train_y12, likelihood, batch_size=2)
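
After training, the test would put both modules in eval mode and draw batched posterior predictions. A minimal sketch, assuming standard GPyTorch eval usage; test_x12 is a hypothetical batch of test inputs, not a name from the original test:

gp_model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    # Hypothetical test inputs with the same batch layout as train_x12
    test_x12 = torch.linspace(0, 1, 51).unsqueeze(0).repeat(2, 1).unsqueeze(-1)
    posterior = likelihood(gp_model(test_x12))
    mean = posterior.mean                      # (2, 51): one row per batch element
    lower, upper = posterior.confidence_region()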