Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tensorflow as tf
- import numpy as np
- import matplotlib.pyplot as plt
- from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
- from tensorflow.keras import Model
class myCallback(tf.keras.callbacks.Callback):
    """Early-stopping callback: halt training once 'accuracy' exceeds 0.90.

    Attach via ``model.fit(..., callbacks=[myCallback()])``; the check runs
    at the end of every epoch against the metrics Keras passes in ``logs``.
    """

    def on_epoch_end(self, epoch, logs=None):
        # Fixes two defects in the original:
        #   * `logs={}` was a mutable default argument;
        #   * `logs.get('accuracy') > 0.90` raises TypeError when the
        #     'accuracy' key is missing (get() returns None).
        accuracy = (logs or {}).get('accuracy')
        if accuracy is not None and accuracy > 0.90:
            print("\n90% accuracy reached and stopping training for now")
            self.model.stop_training = True
# Readable names for the five classes; list index == integer label value.
class_names = [
    'negative',
    'benign_calcification',
    'benign_mass',
    'malignant_calcification',
    'malignant_mass',
]

# TFRecord shards: 0-2 train, 3 validation, 4 test.
train_path_files = [
    'training10_%d/training10_%d.tfrecords' % (i, i) for i in range(3)
]
val_path_file = ['training10_3/training10_3.tfrecords']
test_path_file = ['training10_4/training10_4.tfrecords']
# One raw (still-serialized) TFRecord stream per split.
extracted_train_data = tf.data.TFRecordDataset(train_path_files)
extracted_val_data = tf.data.TFRecordDataset(val_path_file)
extracted_test_data = tf.data.TFRecordDataset(test_path_file)

# Per-example schema: two scalar int64 labels plus the raw image bytes.
feature_description = {
    name: tf.io.FixedLenFeature([], dtype, default_value=default)
    for name, dtype, default in (
        ('label', tf.int64, 0),
        ('label_normal', tf.int64, 0),
        ('image', tf.string, ''),
    )
}
def decode(serialized_example):
    """Parse one serialized example into an (image, label) pair.

    The image comes back as float32 with shape (-1, 299, 299, 1) — a
    leading singleton batch axis for the single decoded example — and
    the label is the raw int64 'label' feature.
    """
    parsed = tf.io.parse_single_example(serialized_example, feature_description)
    label = parsed['label']
    # Raw bytes -> uint8 pixel buffer -> 299x299 grayscale float32 tensor.
    pixels = tf.io.decode_raw(parsed['image'], tf.uint8)
    image = tf.cast(tf.reshape(pixels, [-1, 299, 299, 1]), tf.float32)
    return image, label
def _parse_function(example_proto):
    """Deserialize one example into its raw feature dict (no image decoding)."""
    parsed_features = tf.io.parse_single_example(example_proto, feature_description)
    return parsed_features
# 44707 images total.
# Decode each split into (image, label) pairs.
parsed_training_data, parsed_val_data, parsed_testing_data = (
    ds.map(decode)
    for ds in (extracted_train_data, extracted_val_data, extracted_test_data)
)

# NOTE(review): explicit batching was tried and disabled; each decoded
# element already carries a leading batch axis of 1, so fit/evaluate
# consume the datasets sample-by-sample.
#batch_size = 32
#parsed_training_data = parsed_training_data.batch(batch_size).repeat()
#parsed_val_data = parsed_val_data.batch(batch_size).repeat()
#parsed_testing_data = parsed_testing_data.batch(batch_size).repeat()

callback = myCallback()
# Small CNN: a single conv/pool stage feeding a dense softmax head
# over the 5 classes.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu',
                                 input_shape=(299, 299, 1)))
model.add(tf.keras.layers.MaxPool2D(2, 2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(5, activation='softmax'))

# Labels are plain integers, hence the sparse cross-entropy loss.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# verbose=2 prints one summary line per epoch instead of a progress bar.
history = model.fit(
    parsed_training_data,
    epochs=5,
    steps_per_epoch=10,
    shuffle=True,  # per Keras docs, ignored for tf.data inputs; kept for parity
    validation_data=parsed_val_data,
    validation_steps=2,
    verbose=2,
    callbacks=[callback],
)
print('\nhistory dict:', history.history)

print('\n# Evaluate on test data')
results = model.evaluate(parsed_testing_data, steps=1)
print('test loss, test acc:', results)
# Show five test samples, each labeled with its true class and the five
# predicted class scores.
for img, lbl in parsed_testing_data.take(5):
    scores = model.predict(img.numpy())
    img = tf.reshape(img, [299, 299])
    plt.imshow(img.numpy(), cmap=plt.cm.binary)
    plt.xlabel('True Value: %s,\n Predicted Values [%0.2f, %0.2f, %0.2f, %0.2f, %0.2f]'
               % ((class_names[lbl.numpy()],) + tuple(scores[0, :5])))
    plt.show()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement