Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Gradient-descent training loop for a binary classifier.
# Expects `tf` (tensorflow), `model`, `df` (features), and
# `classification` (0/1 labels) to be defined earlier in the file.

# Basic hyperparameters.
learning_rate = 0.001
n_epochs = 2000

# Loss and accuracy metric.
# BinaryAccuracy thresholds the model's sigmoid outputs at 0.5 before
# comparing to the labels; plain Accuracy would test exact equality of
# continuous probabilities against 0/1 labels and report ~0.
bce_loss = tf.keras.losses.BinaryCrossentropy()
accuracy = tf.keras.metrics.BinaryAccuracy()

# Optimizer.
optimizer = tf.optimizers.Adam(learning_rate=learning_rate)

# Save training progress in lists.
loss_history = []
accuracy_history = []

for epoch in range(n_epochs):
    with tf.GradientTape() as tape:
        # Keras losses take (y_true, y_pred): labels first, predictions
        # second — binary cross-entropy is asymmetric, so order matters.
        current_loss = bce_loss(classification, model(df))

    # Update weights based on the gradient of the loss function.
    gradients = tape.gradient(current_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    # Save in history vectors.
    current_loss = current_loss.numpy()
    loss_history.append(current_loss)

    # Metrics also take (y_true, y_pred). Reset each epoch so the score
    # reflects the current weights, not a running average over training.
    accuracy.reset_state()
    accuracy.update_state(classification, model(df))
    current_accuracy = accuracy.result().numpy()
    accuracy_history.append(current_accuracy)

    # Print loss and accuracy scores every 100 epochs.
    if (epoch + 1) % 100 == 0:
        print(str(epoch + 1) + '.\tTrain Loss: ' + str(current_loss)
              + ',\tAccuracy: ' + str(current_accuracy))

print('\nTraining complete.')
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement