nolog1n

Untitled

May 15th, 2019
import numpy as np
import torch
from tqdm import tqdm_notebook


def train_epoch(net, optimizer, criterion, dataloader, pad_index):
    print_count = 10
    # Guard against short dataloaders so the modulo check below never divides by zero.
    print_every = max(1, len(dataloader) // print_count)
    running_loss = 0.0
    batches_ran = 0
    for iteration, x in enumerate(tqdm_notebook(dataloader, total=len(dataloader))):
        batches_ran += 1

        optimizer.zero_grad()
        # Pick a random cut point in each sequence of the batch.
        splits = np.random.randint(0, x.shape[1] - 1, size=x.shape[0])
        x = x.cuda()
        labels = torch.empty(x.shape[0], dtype=torch.long).cuda()
        for i, spl in enumerate(splits):
            # The token at the cut point becomes the target; everything from the
            # cut point onward is overwritten with the padding index, so the
            # network must predict the hidden token from the remaining prefix.
            idx = x[i, spl].item()
            x[i, spl:] = pad_index
            labels[i] = idx

        output, h = net(x)
        loss = criterion(output, labels)
        loss.backward()

        running_loss += loss.item()

        optimizer.step()

        if iteration % print_every == print_every - 1:
            print("iteration {} loss {}".format(iteration, running_loss / batches_ran))
            running_loss = 0.0
            batches_ran = 0
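
A minimal usage sketch for train_epoch follows. The model, dataset, and hyperparameters below are hypothetical placeholders, not part of the original paste; it assumes a CUDA device and a dataloader that yields padded batches of integer token ids with shape (batch, seq_len).

# Hypothetical setup; NextTokenNet and all sizes below are illustrative only.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

vocab_size = 1000
pad_index = 0

class NextTokenNet(nn.Module):
    def __init__(self, vocab_size, pad_index, emb_dim=64, hidden_dim=128):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb_dim, padding_idx=pad_index)
        self.rnn = nn.LSTM(emb_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, vocab_size)

    def forward(self, x):
        out, h = self.rnn(self.emb(x))
        # Score the vocabulary from the final time step and also return the
        # hidden state, matching the "output, h = net(x)" call in train_epoch.
        return self.fc(out[:, -1, :]), h

# Toy data: 320 random sequences of length 20 (ids 1..vocab_size-1, 0 = pad).
data = torch.randint(1, vocab_size, (320, 20))
dataloader = DataLoader(data, batch_size=32)

net = NextTokenNet(vocab_size, pad_index).cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()
train_epoch(net, optimizer, criterion, dataloader, pad_index)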