import torch
import torch.nn as nn
import torch.autograd as autograd
from torch.autograd import Variable

v_in = Variable(torch.Tensor([0.1, 0.1]).view(2, 1), requires_grad=True)

def forward(v_in):
    f1 = lambda x: x * 2
    f2 = nn.Linear(1, 1)
    grad_out = Variable(torch.ones(2, 1))
    # First-order gradient of f2(f1(v_in)) w.r.t. v_in; create_graph=True
    # builds a graph for the gradient itself so it can be differentiated again.
    gradient = autograd.grad(outputs=f2(f1(v_in)), inputs=v_in,
                             grad_outputs=grad_out,
                             create_graph=True, retain_graph=True,
                             only_inputs=True)[0]
    out = gradient.sum()
    return out

# Returns False: f2(f1(x)) is affine in x, so its gradient is the constant
# 2 * f2.weight, which does not depend on v_in at all.
autograd.gradcheck(forward, [v_in])

forward(v_in).backward()
v_in.grad  # None: `out` has no graph dependence on v_in, so no gradient flows back.
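
A minimal sketch of the same double-backward pattern with a nonlinear function, assuming the goal is a gradient that actually depends on the input (so the second derivative is nonzero, gradcheck passes, and v_in.grad is populated). The function name forward_nonlinear is illustrative, and the input is double precision, which gradcheck expects for its finite-difference comparison:

import torch
import torch.autograd as autograd

v_in = torch.tensor([[0.1], [0.1]], dtype=torch.double, requires_grad=True)

def forward_nonlinear(x):
    y = (x ** 3).sum()                    # dy/dx = 3*x**2, depends on x
    grad, = autograd.grad(y, x, create_graph=True)
    return grad.sum()                     # second derivative w.r.t. x_i: 6*x_i

print(autograd.gradcheck(forward_nonlinear, (v_in,)))  # True
forward_nonlinear(v_in).backward()
print(v_in.grad)  # tensor([[0.6000], [0.6000]], dtype=torch.float64)

Because the first-order gradient 3*x**2 is itself a function of x, the create_graph=True graph now reaches back to v_in, so backward() through the returned sum has something to propagate.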