from keras.callbacks import Callback
import keras.backend as K
import numpy as np
import matplotlib.pyplot as plt


class LRFinder(Callback):
    """Learning-rate range test: sweep the learning rate from min_lr to
    max_lr over one epoch and record the loss observed at each batch."""

    def __init__(self, min_lr, max_lr):
        super().__init__()
        self.min_lr = min_lr
        self.max_lr = max_lr

    def on_train_begin(self, logs=None):
        # One learning rate per batch, spaced evenly on a log scale.
        # self.params['samples'] and ['batch_size'] come from the classic
        # Keras fit() API.
        n_iterations = self.params['samples'] // self.params['batch_size']
        self.learning_rates = np.geomspace(self.min_lr, self.max_lr,
                                           num=n_iterations + 1)
        self.losses = []
        self.iteration = 0
        self.best_loss = None
        # Start the sweep at the smallest learning rate.
        K.set_value(self.model.optimizer.lr, self.learning_rates[0])

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        loss = logs.get('loss')
        # Track the lowest loss seen so far.
        if self.best_loss is None or loss < self.best_loss:
            self.best_loss = loss
        self.losses.append(loss)
        self.iteration += 1
        # Stop criterion: abort once the loss diverges to 10x the best loss.
        if loss > self.best_loss * 10:
            self.model.stop_training = True
            return
        # Schedule the learning rate for the next batch (if any remain).
        if self.iteration < len(self.learning_rates):
            K.set_value(self.model.optimizer.lr, self.learning_rates[self.iteration])

    def on_train_end(self, logs=None):
        # Plot loss against learning rate on a log-scaled x-axis.
        plt.figure(figsize=(12, 6))
        plt.plot(self.learning_rates[:len(self.losses)], self.losses)
        plt.xlabel("Learning Rate")
        plt.ylabel("Loss")
        plt.xscale('log')
        plt.show()
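
A minimal usage sketch, assuming the classic Keras Sequential/fit API. The toy data, model architecture, and the min_lr/max_lr bounds below are illustrative placeholders, not part of the original paste.

from keras.models import Sequential
from keras.layers import Dense
import numpy as np

# Toy data and model purely for illustration.
X_train = np.random.rand(1024, 20)
y_train = np.random.randint(0, 2, size=(1024, 1))

model = Sequential([
    Dense(64, activation='relu', input_shape=(20,)),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy')

# Sweep learning rates over a single epoch; the loss-vs-lr plot is shown
# automatically when training ends (or when the stop criterion triggers).
lr_finder = LRFinder(min_lr=1e-6, max_lr=1e0)
model.fit(X_train, y_train, batch_size=32, epochs=1, callbacks=[lr_finder])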