import torch
from torch.autograd import Variable


def _assert_no_grad(variable):
    # Targets should never require gradients
    assert not variable.requires_grad


# Assumed wrapper: the paste shows only forward(); an old-style (pre-0.4)
# autograd.Function matches the self.save_for_backward call below.
class BinaryCrossEntropy(torch.autograd.Function):
    def forward(self, predicted, target):
        """
        Computes binary cross entropy between targets and predictions.
        """
        # No gradient over target
        _assert_no_grad(target)
        # Clamp predictions away from 0 and 1 so log() stays finite
        p = predicted.clamp(0.01, 0.99)
        t = target.float()
        # Cross entropy is the negated log-likelihood:
        # -(t * log(p) + (1 - t) * log(1 - p))
        h1 = p.log() * t
        h2 = (1 - t) * ((1 - p).log())
        ce = -torch.add(h1, h2)
        # Average over classes, then over the batch
        ce_out = torch.mean(ce, 1)
        ce_out = torch.mean(ce_out, 0)
        # Save for the backward step
        self.save_for_backward(ce_out)
        return ce_out
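
# --- A minimal sketch of the setup the training loop below assumes ---
# model, criterion, optimizer, and train_loader are not defined in the
# paste; the names, sizes, and data here are illustrative assumptions only.
# Note: the Function above would also need a backward() for loss.backward()
# to work; that part is not shown in the original paste.
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader

# Hypothetical data: 1000 samples, 20 features, 5 independent binary labels
X = torch.rand(1000, 20)
Y = (torch.rand(1000, 5) > 0.5).float()
train_loader = DataLoader(TensorDataset(X, Y), batch_size=32, shuffle=True)

# A small model whose sigmoid output matches the loss's (0, 1) expectations
model = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 5), nn.Sigmoid())
criterion = BinaryCrossEntropy()
optimizer = optim.SGD(model.parameters(), lr=0.1)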
# Training loop: 50 epochs over the training set
for epoch in range(50):
    print('Epoch {}'.format(epoch))
    if epoch > 0:
        # Loss carried over from the last batch of the previous epoch
        print('Loss ->', loss)
    for batch_idx, (x_batch, y_batch) in enumerate(train_loader):
        # Wrap tensors in Variables (pre-0.4 PyTorch API)
        x_in, target = Variable(x_batch), Variable(y_batch)
        # Forward pass
        predicted = model(x_in)
        # Compute the loss
        loss = criterion(predicted, target)
        # Zero gradients, perform a backward pass, and update the weights
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
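
# Note: in PyTorch 0.4+ the Variable wrapper is unnecessary, and the same
# objective is available built in as nn.BCELoss, e.g.:
#   loss = nn.BCELoss()(predicted.clamp(0.01, 0.99), target.float())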