import torch
from torch.autograd import Variable


def _assert_no_grad(variable):
    # Loss targets should never require gradients
    assert not variable.requires_grad


# `forward` below is presumably a method of a custom loss class that the
# paste does not show (it calls self.save_for_backward, i.e. the old-style
# torch.autograd.Function API).
def forward(self, predicted, target):
    """
    Computes cross entropy between targets and predictions.
    """
    # No gradient over target
    _assert_no_grad(target)

    # Clamp predictions away from 0 and 1 so log() stays finite
    p = predicted.clamp(0.01, 0.99)
    t = target.float()

    # Compute binary cross entropy:
    #   ce = -(t * log(p) + (1 - t) * log(1 - p))
    # (the original paste omitted the leading minus sign and the return)
    h1 = p.log() * t
    h2 = (1 - t) * ((1 - p).log())
    ce = -torch.add(h1, h2)
    ce_out = torch.mean(ce, 1)      # average over the class dimension
    ce_out = torch.mean(ce_out, 0)  # average over the batch

    # Save for backward step
    self.save_for_backward(ce_out)
    return ce_out

# Training loop (`model`, `criterion`, `optimizer` and `train_loader` are
# assumed to be defined elsewhere; the paste does not include them).
for epoch in range(50):

    print('Epoch {}'.format(epoch))
    if epoch > 0:
        print('Loss ->', loss)

    for batch_idx, (x_batch, y_batch) in enumerate(train_loader):
        # Wrap the batch tensors in Variables (pre-0.4 PyTorch API)
        x_in, target = Variable(x_batch), Variable(y_batch)

        # Forward pass through the model
        predicted = model(x_in)

        # Compute the loss
        loss = criterion(predicted, target)

        # Zero gradients, perform a backward pass, and update the weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
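
The objects the loop relies on (model, criterion, optimizer, train_loader) never appear in the paste. A minimal, hypothetical setup that would let the loop run could look like the sketch below; every name, size, and the nn.BCELoss stand-in for the custom loss above are assumptions, not the original author's code.

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

# Hypothetical toy data: 640 samples, 20 features, 5 binary labels each
x = torch.rand(640, 20)
y = (torch.rand(640, 5) > 0.5).float()
train_loader = DataLoader(TensorDataset(x, y), batch_size=64, shuffle=True)

# Hypothetical model whose outputs lie in (0, 1), matching the clamp above
model = nn.Sequential(nn.Linear(20, 5), nn.Sigmoid())

# nn.BCELoss is used here only as a stand-in for the custom loss above
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)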