Advertisement
Not a member of Pastebin yet?
Sign Up — it unlocks many cool features!
def train_epoch(model, optimizer, train_loader, criterion, device=None):
    """Run one full optimization pass over ``train_loader``.

    Parameters
    ----------
    model : torch.nn.Module
        Model to train; switched to train mode here.
    optimizer : torch.optim.Optimizer
        Optimizer whose parameters belong to ``model``.
    train_loader : iterable of (X, y)
        Batches of inputs and targets.
    criterion : callable
        Loss function ``criterion(outputs, targets)`` returning a scalar tensor.
    device : optional
        Target device for the batches. Defaults to the module-level
        ``device`` global (the original, implicit behavior).

    Returns
    -------
    None
    """
    if device is None:
        # Backward-compatible fallback to the module-level global.
        device = globals()['device']
    model.train()
    for X, y in train_loader:
        out = model(X.to(device))
        loss = criterion(out, y.to(device))
        # Zero stale gradients before accumulating fresh ones.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return
def evaluate_loss(loader, model, criterion=None, device=None):
    """Return the mean per-batch loss of ``model`` over ``loader``.

    Runs under ``torch.no_grad()`` with the model in eval mode.

    Parameters
    ----------
    loader : iterable of (X, y)
        Batches of inputs and targets.
    model : torch.nn.Module
        Model to evaluate.
    criterion : callable, optional
        Loss function. Defaults to the module-level ``criterion`` global
        (the original, implicit behavior).
    device : optional
        Target device for the batches. Defaults to the module-level
        ``device`` global.

    Returns
    -------
    float
        Average of the per-batch losses; 0.0 for an empty loader.
    """
    if criterion is None:
        criterion = globals()['criterion']
    if device is None:
        device = globals()['device']
    model.eval()
    with torch.no_grad():
        total = 0.0
        n_batches = 0
        for X, y in loader:
            out = model(X.to(device))
            total += criterion(out, y.to(device)).item()
            n_batches += 1
    # BUG FIX: the original divided by (k + 1), i.e. one more than the
    # number of batches seen, systematically under-reporting the mean
    # loss by a factor of k/(k+1). Divide by the true batch count;
    # guard the empty-loader case instead of raising ZeroDivisionError.
    return total / max(n_batches, 1)
def train(model, opt, train_loader, test_loader, criterion, n_epochs, writer=False, verbose=True, save=False,
          scheduler=False):
    """Training driver: run ``n_epochs`` epochs of optimization.

    After each epoch, evaluates the loss on both loaders and, depending on
    the flags: logs to a TensorBoard ``writer``, checkpoints the model
    weights to ``experiment_path + 'model'`` (overwritten each epoch),
    prints a progress line, and steps the LR ``scheduler``.

    Parameters mirror the helper functions; ``writer``/``save``/``scheduler``
    default to ``False`` meaning "disabled".
    """
    for epoch_idx in range(n_epochs):
        train_epoch(model, opt, train_loader, criterion)
        # Evaluate train first, then validation (same order as before).
        tr_loss = evaluate_loss(train_loader, model)
        va_loss = evaluate_loss(test_loader, model)
        if writer:
            writer.add_scalars('loss', {'validation': va_loss, 'train': tr_loss}, global_step=epoch_idx)
        if save:
            # NOTE(review): relies on a module-level `experiment_path` global.
            torch.save(model.state_dict(), experiment_path + 'model')
        if verbose:
            print(('Epoch [%d/%d], Loss (train/test): %.6f/%.6f,')
                  % (epoch_idx + 1, n_epochs, tr_loss, va_loss))
        if scheduler:
            scheduler.step()
    return
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement