import tensorflow as tf
import os
import cv2
import imghdr
import numpy as np
from matplotlib import pyplot as plt
from tensorflow import keras
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from keras.metrics import Precision, Recall, BinaryAccuracy
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# GPU memory control: stop TensorFlow from grabbing all GPU memory up front
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
# Remove bad images that don't fit the model (manual work is still needed to remove wrongly labelled images)
data_dir = "Data"
image_exts = ['jpeg', 'jpg', 'bmp', 'png']
for image_class in os.listdir(data_dir):
    for image in os.listdir(os.path.join(data_dir, image_class)):
        image_path = os.path.join(data_dir, image_class, image)
        try:
            img = cv2.imread(image_path)
            tip = imghdr.what(image_path)
            if tip not in image_exts:
                print('Image not in ext list {}'.format(image_path))
                os.remove(image_path)
        except Exception as e:
            print('Issue with image {}'.format(image_path))
            # os.remove(image_path)
data = tf.keras.utils.image_dataset_from_directory(data_dir)
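# Note: image_dataset_from_directory infers labels from the sub-folder names and, by default,
# returns shuffled batches of 32 images resized to 256x256 RGB.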
data_iterator = data.as_numpy_iterator()
# Get another batch from the iterator
batch = data_iterator.next()
# Images are represented as numpy arrays
# Class 0 = Happy
# Class 1 = Sad
fig, ax = plt.subplots(ncols=4, figsize=(20, 20))
for idx, img in enumerate(batch[0][:4]):
    ax[idx].imshow(img.astype(int))  # cast to int so imshow treats the 0-255 values correctly
    ax[idx].title.set_text(batch[1][idx])
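plt.show()  # Needed when running as a plain script; a notebook would render the figure inline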
# Preprocessing data
# Scale pixel values to the 0-1 range
data = data.map(lambda x, y: (x / 255, y))
# Quick check that the scaled values stay within [0, 1]
print(data.as_numpy_iterator().next()[0].max())
# Split data into training/validation/test sets
train_size = int(len(data) * .7)
val_size = int(len(data) * .2)
test_size = int(len(data) * .1) + 1
# ^ Tweaking needed so the sizes match the size of the dataset
train = data.take(train_size)
val = data.skip(train_size).take(val_size)
test = data.skip(train_size + val_size).take(test_size)
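# Optional sanity check: the three splits should cover all batches in the dataset
print(len(train), len(val), len(test), len(data))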
# Deep Learning Model
model = Sequential()
# 3 colour channels: image_dataset_from_directory loads RGB images by default
model.add(Conv2D(64, (3, 3), 1, activation="relu", input_shape=(256, 256, 3)))
model.add(MaxPooling2D())
model.add(Conv2D(128, (3, 3), 1, activation="relu"))
model.add(MaxPooling2D())
model.add(Conv2D(64, (3, 3), 1, activation="relu"))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.compile('adam', loss=tf.losses.BinaryCrossentropy(), metrics=["accuracy"])
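# Optional: print the architecture and parameter counts for inspection
model.summary()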
# Train
logdir = "logs"
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
hist = model.fit(train, epochs=20, validation_data=val, callbacks=[tensorboard_callback])
# Plot performance - graphs over the training history
# loss/val_loss
fig = plt.figure()
plt.plot(hist.history['loss'], color='teal', label='loss')
plt.plot(hist.history['val_loss'], color='orange', label='val_loss')
fig.suptitle('Loss', fontsize=20)
plt.legend(loc='upper left')
plt.show()
# accuracy/val_accuracy
fig = plt.figure()
plt.plot(hist.history['accuracy'], color='teal', label='accuracy')
plt.plot(hist.history['val_accuracy'], color='orange', label='val_accuracy')
fig.suptitle('Accuracy', fontsize=20)
plt.legend(loc='upper left')
plt.show()
# Evaluate performance
pre = Precision()
re = Recall()
acc = BinaryAccuracy()
for batch in test.as_numpy_iterator():
    x, y = batch
    yhat = model.predict(x)
    pre.update_state(y, yhat)
    re.update_state(y, yhat)
    acc.update_state(y, yhat)
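# Report the metrics accumulated over the whole test set
print(f'Precision: {pre.result().numpy()}, Recall: {re.result().numpy()}, Accuracy: {acc.result().numpy()}')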
# Save the model
model.save(os.path.join('/home/victor/PycharmProjects/tensorflow/Image Classification/models', 'convolution_model.h5'))
# Test
new_model = load_model(os.path.join('models', 'convolution_model.h5'))
img = cv2.imread('/home/victor/PycharmProjects/tensorflow/Image Classification/Test Data/Test_Good.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB to match the training data
plt.imshow(img)
plt.show()
resize = tf.image.resize(img, (256, 256))
plt.imshow(resize.numpy().astype(int))
plt.show()
# np.expand_dims adds a batch dimension so a single image can be fed to the model
yhat = new_model.predict(np.expand_dims(resize / 255, 0))
if yhat > 0.5:
    print('Predicted class is good')
else:
    print('Predicted class is bad')