import tensorflow as tf
from pathlib import Path

import coco_data_generator  # project-local data pipeline; referenced but not included in this paste
# showPredictions is a project-local visualization helper, also not included in this paste.


class HeatmapModel:  # the class statement is missing from the paste; the name "HeatmapModel" is assumed
    def __init__(self, input_shape) -> None:
        self.input_shape = input_shape
        # Functional Keras model: image in, heatmap(s) out.
        input_tensor = tf.keras.Input(input_shape)
        # heatmap_head is defined elsewhere in the original source; see the
        # hypothetical sketch after the class for the contract it must satisfy.
        output = self.heatmap_head(input_tensor)
        self.model = tf.keras.Model(input_tensor, output)
    def custom_loss(self, y_actual, y_pred):
        # Pixel-wise binary cross-entropy, summed over the whole heatmap.
        # delta keeps log() away from log(0) when a prediction saturates.
        delta = 1e-8
        err = -tf.reduce_sum(
            y_actual * tf.math.log(y_pred + delta)
            + (1 - y_actual) * tf.math.log(1 - y_pred + delta)
        )
        return err
    def getTotalLoss(self, prediction, annotation):
        # The model output is iterable (e.g. one heatmap per stage), so the
        # loss is accumulated over every predicted heatmap.
        total_loss = 0
        for pred in prediction:
            total_loss += self.custom_loss(annotation, pred)
        return total_loss
    def getValidationLoss(
        self, valDataGenerator: coco_data_generator.DataGenerator, batchSize
    ) -> float:
        # Run one full pass over the validation set and return the mean loss.
        is_epoch_complete = False
        total_loss = 0
        ctr = 0
        while not is_epoch_complete:
            (
                imageBatch,
                annotationBatch,
                is_epoch_complete,
            ) = valDataGenerator.getBatch(batchSize)
            imageBatch = self.normalizeImage(imageBatch)
            # training=False runs the model in inference mode.
            validationHeat = self.model(imageBatch, training=False)
            loss = self.getTotalLoss(validationHeat, annotationBatch)
            total_loss += loss
            if ctr % 200 == 0:
                showPredictions(
                    imageBatch,
                    annotationBatch,
                    validationHeat[0].numpy(),
                    Path("validation_images"),
                    127.5 / 255.0,
                    1.0,
                    "Validation",
                )
            ctr += 1
        return float(total_loss) / ctr
    def normalizeImage(self, image_batch):
        # Map 8-bit pixel values to roughly [-0.5, 0.5].
        return (image_batch - 127.5) / 255.0
    def train(
        self,
        train_data_generator: coco_data_generator.DataGenerator,
        validation_generator: coco_data_generator.DataGenerator,
    ):
        epochs = 1000
        learning_rate = 0.01
        batch_size = 1
        optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
        summary_writer = tf.summary.create_file_writer(logdir="./log")
        # Resume from the newest checkpoint if one exists.
        checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=self.model)
        manager = tf.train.CheckpointManager(
            checkpoint, directory="checkpoint/model", max_to_keep=5
        )
        checkpoint.restore(manager.latest_checkpoint)
        # Restoring also restores the optimizer state, so force the
        # learning rate back to the configured value.
        optimizer.lr.assign(learning_rate)
        N = 1  # training step at which debug images are dumped each epoch
        for epoch in range(epochs):
            is_epoch_complete = False
            ctr = 0
            print("Start new epoch")
            while not is_epoch_complete:
                (
                    image_batch,
                    annotation_batch,
                    is_epoch_complete,
                ) = train_data_generator.getBatch(batch_size)
                image_batch = self.normalizeImage(image_batch)
                with tf.GradientTape() as tape:
                    # training=True enables training-mode layer behavior.
                    heat_pred = self.model(image_batch, training=True)
                    loss = self.getTotalLoss(heat_pred, annotation_batch)
                print("training loss: {}".format(loss))
                grads = tape.gradient(loss, self.model.trainable_weights)
                ctr += 1
                optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
                # Note: this runs a full validation pass after every training
                # step, which dominates the runtime at batch_size 1.
                validation_loss = self.getValidationLoss(validation_generator, 1)
                print("Validation loss: {}".format(validation_loss))
                if ctr == N:
                    if tf.is_tensor(image_batch):
                        image_batch = image_batch.numpy()
                    showPredictions(
                        image_batch,
                        annotation_batch,
                        heat_pred[0].numpy(),
                        Path("debug_images"),
                        127.5 / 255.0,
                        1.0,
                        "Training",
                    )
                manager.save()
                if ctr % 100 == 0:
                    print("ctr: {}".format(ctr))
                    with summary_writer.as_default():
                        tf.summary.scalar(
                            "training_loss",
                            loss,
                            step=epoch,
                        )
                        tf.summary.scalar(
                            "validation_loss", validation_loss, step=epoch
                        )
            # End of epoch: persist raw weights and release backend memory.
            self.model.save_weights("weights/weights")
            tf.keras.backend.clear_session()
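
# ---------------------------------------------------------------------------
# The paste references self.heatmap_head but does not include it. The sketch
# below is NOT the author's implementation; it is a minimal, hypothetical
# stand-in showing the contract the rest of the code relies on: the head must
# map the input image to heatmaps in (0, 1) (note the sigmoid, which
# custom_loss assumes), and getTotalLoss iterates over the output, so
# returning a list of per-stage heatmaps fits.
# ---------------------------------------------------------------------------
def _example_heatmap_head(input_tensor, num_keypoints=17):
    # num_keypoints=17 matches COCO's keypoint count; purely an assumption here.
    x = tf.keras.layers.Conv2D(64, 3, padding="same", activation="relu")(input_tensor)
    x = tf.keras.layers.Conv2D(64, 3, padding="same", activation="relu")(x)
    heatmap = tf.keras.layers.Conv2D(
        num_keypoints, 1, padding="same", activation="sigmoid"
    )(x)
    # Return a list so getTotalLoss can sum the loss over every "stage".
    return [heatmap]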
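
# ---------------------------------------------------------------------------
# Quick sanity check (not in the original paste): custom_loss is a summed
# binary cross-entropy, so it should agree with Keras' binary_crossentropy
# up to the epsilon handling and the sum-vs-mean reduction.
# ---------------------------------------------------------------------------
def _check_custom_loss():
    y_true = tf.constant([1.0, 0.0, 1.0, 0.0])
    y_pred = tf.constant([0.9, 0.2, 0.7, 0.1])
    # custom_loss never touches self, so it can be exercised directly.
    ours = HeatmapModel.custom_loss(None, y_true, y_pred)
    # Keras returns the mean over the last axis; scale it back to a sum.
    mean_bce = tf.keras.losses.binary_crossentropy(y_true, y_pred)
    reference = mean_bce * tf.cast(tf.size(y_true), tf.float32)
    tf.debugging.assert_near(ours, reference, atol=1e-5)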
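
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not in the original paste). It assumes an input
# size and that coco_data_generator.DataGenerator instances can be built for
# the train and validation splits; the `...` placeholders stand in for the
# real constructor arguments, which this paste does not show.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    trainer = HeatmapModel(input_shape=(256, 256, 3))  # input size is an assumption
    train_gen = coco_data_generator.DataGenerator(...)  # fill in real arguments
    val_gen = coco_data_generator.DataGenerator(...)    # fill in real arguments
    trainer.train(train_gen, val_gen)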