import torch
import torch.autograd as autograd

x = torch.randn((2, 2))
y = torch.randn((2, 2))
z = x + y  # These are Tensor types, and backprop would not be possible
var_x = autograd.Variable(x, requires_grad=True)
var_y = autograd.Variable(y, requires_grad=True)
# var_z contains enough information to compute gradients, as we saw above
var_z = var_x + var_y
print(var_z.grad_fn)
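
# (Added illustration, not in the original snippet: because var_x and var_y
# were created with requires_grad=True, backprop through var_z fills in
# their .grad fields.)
var_z.sum().backward()
print(var_x.grad)  # 2x2 tensor of ones: d(sum(var_x + var_y)) / d var_x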
var_z_data = var_z.data  # Get the wrapped Tensor object out of var_z...
# Re-wrap the tensor in a new variable
new_var_z = autograd.Variable(var_z_data)
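
# (Added illustration, not in the original snippet: re-wrapping .data starts a
# fresh history, so new_var_z cannot backprop to var_x or var_y.)
print(new_var_z.grad_fn)  # None, unlike var_z.grad_fn above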
After Change
# You can also stop autograd from tracking history on Tensors
# with requires_grad=True by wrapping the code block in
# ``with torch.no_grad():``
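# (Assumed setup, not shown in this excerpt: the first two prints below only
# show True if x was created with requires_grad=True, e.g. as below.)
x = torch.randn(2, 2, requires_grad=True)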
print(x.requires_grad)
print((x ** 2).requires_grad)
with torch.no_grad():
    print((x ** 2).requires_grad)  # False: no history is tracked inside the block
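
# (Added note, not part of the original diff: .detach() is the Tensor-era
# counterpart of the old .data re-wrapping shown above; it returns a tensor
# with the same values but detached from the computation history.)
detached_x = x.detach()
print(detached_x.requires_grad)  # False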