# Step 4. Compute your loss function. (Again, Torch wants the target
# word wrapped in a variable)
loss = loss_function(log_probs, autograd.Variable(
    torch.LongTensor([word_to_ix[target]])))

# Step 5. Do the backward pass and update the gradient
loss.backward()
After Change
# Step 4. Compute your loss function. (Again, Torch wants the target
# word wrapped in a tensor)
loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))

# Step 5. Do the backward pass and update the gradient
loss.backward()
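For context, this change reflects the PyTorch 0.4 API, where autograd.Variable was merged into torch.Tensor, so a target index can be built directly with torch.tensor and still participate in autograd. Below is a minimal, self-contained sketch of the updated step; the toy vocabulary, the logits tensor, and the NLLLoss criterion are stand-ins for the tutorial's surrounding model and data, not part of the original diff.

import torch
import torch.nn as nn

# Toy stand-ins for the tutorial's vocabulary and model output.
word_to_ix = {"hello": 0, "world": 1}
target = "world"
logits = torch.randn(1, len(word_to_ix), requires_grad=True)
log_probs = torch.log_softmax(logits, dim=1)

# Since PyTorch 0.4, plain tensors carry autograd history, so the
# target no longer needs an autograd.Variable wrapper.
loss_function = nn.NLLLoss()
loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
loss.backward()
print(logits.grad)  # gradients reach the leaf tensor exactly as before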