Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
class Net(torch.nn.Module):
    """Minimal demo module: one 1->1 linear layer with a built-in
    Adadelta optimizer and MSE loss.

    The point of the demo is `backward`, which zeroes the weight's
    gradient in place before stepping, so only the bias is updated.
    """

    def __init__(self):
        super().__init__()
        self.l1 = torch.nn.Linear(1, 1)
        # Optimizer is built after l1 so self.parameters() includes it.
        self.optimizer = torch.optim.Adadelta(self.parameters())
        self.loss_function = torch.nn.MSELoss()

    def forward(self):
        """Run the layer on the fixed input [1.0]; returns a (1,) tensor."""
        return self.l1(torch.tensor([1], dtype=torch.float))

    def backward(self, loss):
        """Backprop `loss`, zero the weight grad, then take one step.

        Because the weight gradient is cleared, Adadelta's update for
        the weight is exactly zero — only the bias moves.
        """
        self.optimizer.zero_grad()
        loss.backward()
        if self.l1.weight.grad is not None:
            # (*) changing the grad to 0: weight is (1, 1), so grad[0]
            # overwrites its entire gradient with zeros.
            self.l1.weight.grad[0] = torch.tensor([0], dtype=torch.float)
        self.optimizer.step()
Advertisement
Add Comment
Please sign in to add a comment.
Advertisement