print(x.data)
# You can also do all the same operations you did with tensors with Variables.
y = autograd.Variable(torch.Tensor([4., 5., 6]), requires_grad=True)
z = x + y
print(z.data)
After Change
# ``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``
# flag in-place. The input flag defaults to ``True`` if not given.
x = x.requires_grad_()
y = y.requires_grad_()
# z contains enough information to compute gradients, as we saw above
z = x + y
print(z.grad_fn)
# If any input to an operation has ``requires_grad=True``, so will the output
print(z.requires_grad)
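# Since z tracks how it was computed, we can backprop through it. Assuming x
# and y are floating-point leaf Tensors as above, the gradient of ``z.sum()``
# with respect to each input is a Tensor of ones.
s = z.sum()
s.backward()
print(x.grad)  # a Tensor of ones, same shape as x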
# Now z has the computation history that relates itself to x and y
# Can we just take its values, and **detach** it from its history?
new_z = z.detach()
# ... does new_z have information to backprop to x and y?
# NO!
print(new_z.grad_fn)
# And how could it? ``z.detach()`` returns a tensor that shares the same storage
# as ``z``, but with the computation history forgotten. It doesn't know anything
# about how it was computed.
# In essence, we have broken the Tensor away from its past history
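# One way to see the shared storage: ``data_ptr()`` reports the address of the
# first element, and it is the same for both Tensors.
print(new_z.data_ptr() == z.data_ptr())  # True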
###############################################################
# You can also stop autograd from tracking history on Tensors
# with ``requires_grad=True`` by wrapping the code block in
# ``with torch.no_grad():``
print(x.requires_grad)
print((x ** 2).requires_grad)
with torch.no_grad():
    print((x ** 2).requires_grad)
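# Once we leave the ``no_grad`` block, autograd tracks operations on x again
# (x itself still has requires_grad=True).
print((x ** 2).requires_grad)  # True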