Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def train(net, X, Y, criterion, batch_size=2, n_epoch=5):
    """Run a simple minibatch SGD-with-momentum training loop.

    For each of `n_epoch` passes over the data, iterates minibatches from
    `get_batches((X, Y), batch_size)`, does a forward/backward pass through
    `net` with `criterion`, and updates parameters via `sgd_momentum`.
    After every epoch the running loss curve is redrawn with matplotlib.

    NOTE(review): relies on module-level globals `get_batches`,
    `sgd_momentum`, `optimizer_config`, `optimizer_state`, `display`
    (IPython.display) and `plt` (matplotlib.pyplot) — confirm they are
    defined/imported in the enclosing notebook or module.

    Parameters
    ----------
    net : object
        Model exposing `zeroGradParameters`, `forward`, `backward`,
        `getParameters`, `getGradParameters` (torch-legacy-style API).
    X, Y : array-like
        Training inputs and targets, batched by `get_batches`.
    criterion : object
        Loss exposing `forward(predictions, y)` and
        `backward(predictions, y)`.
    batch_size : int, optional
        Minibatch size (default 2).
    n_epoch : int, optional
        Number of full passes over the data (default 5).

    Returns
    -------
    list
        Per-batch loss values, in the order they were computed.
        (Added return — previously the history was computed but discarded;
        callers that ignored the return value are unaffected.)
    """
    loss_history = []
    for i in range(n_epoch):
        for x_batch, y_batch in get_batches((X, Y), batch_size):
            # Gradients accumulate in this API, so clear them each batch.
            net.zeroGradParameters()

            # Forward
            predictions = net.forward(x_batch)
            loss = criterion.forward(predictions, y_batch)

            # Backward: dL/dpredictions, then backprop through the net.
            dp = criterion.backward(predictions, y_batch)
            net.backward(x_batch, dp)

            # Update weights in place using the shared optimizer state.
            sgd_momentum(net.getParameters(),
                         net.getGradParameters(),
                         optimizer_config,
                         optimizer_state)

            loss_history.append(loss)

        # Visualize once per epoch. NOTE(review): the pasted source lost
        # its indentation — the plotting may originally have run per batch;
        # per-epoch is assumed here as the conventional (and cheaper) choice.
        display.clear_output(wait=True)
        plt.figure(figsize=(8, 6))
        plt.title("Training loss")
        plt.xlabel("#iteration")
        plt.ylabel("loss")
        plt.plot(loss_history, 'b')
        plt.show()
        print('Current loss: %f' % loss)

    return loss_history
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement