y = torch.randn((2, 2))
z = x + y  # These are Tensor types, and backprop would not be possible
var_x = autograd.Variable(x, requires_grad=True)
var_y = autograd.Variable(y, requires_grad=True)
# var_z contains enough information to compute gradients, as we saw above
var_z = var_x + var_y
print(var_z.grad_fn)
var_z_data = var_z.data  # Get the wrapped Tensor object out of var_z...
# Re-wrap the tensor in a new variable
new_var_z = autograd.Variable(var_z_data)
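# A quick check (a sketch added here, not part of the original snippet):
# re-wrapping only copied the data, so the new Variable has no grad_fn and
# gradients can no longer flow back to var_x and var_y.
print(new_var_z.grad_fn)  # expected: None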
After Change
# ``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``
# flag in-place. The input flag defaults to ``True`` if not given.
x = x.requires_grad_()
y = y.requires_grad_()
# z contains enough information to compute gradients, as we saw above
z = x + y
print(z.grad_fn)
# If any input to an operation has ``requires_grad=True``, so will the output
print(z.requires_grad)
# Now z has the computation history that relates itself to x and y
# Can we just take its values, and **detach** it from its history?
new_z = z.detach()
# ... does new_z have information to backprop to x and y?
# NO!
print(new_z.grad_fn)
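# A minimal sketch (assuming x, y, z, and new_z are as defined above) of the
# practical consequence: backward() on a scalar derived from z populates x.grad,
# while new_z is detached from the graph. `s` is a hypothetical name introduced
# only for this sketch.
s = z.sum()
s.backward()
print(x.grad)               # a 2x2 tensor of ones -- gradients reached x through z
print(new_z.requires_grad)  # False: detach() cut new_z off from the history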