import tensorflow as tf
import os
import cv2
import imghdr
import numpy as np
from matplotlib import pyplot as plt
from tensorflow import keras
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from keras.metrics import Precision, Recall, BinaryAccuracy

# Restrict TensorFlow to the GPU with index 1
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Enable memory growth so TensorFlow does not reserve all GPU memory up front
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

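# Optional check (not part of the original paste): confirm which GPUs are visible
# after setting CUDA_VISIBLE_DEVICES and enabling memory growth.
print('Visible GPUs:', tf.config.list_physical_devices('GPU'))
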
# Remove bad images that don't fit the model
# (manual work is still needed to remove wrongly classified images)
data_dir = "Data"

image_exts = ['jpeg', 'jpg', 'bmp', 'png']

for image_class in os.listdir(data_dir):
    for image in os.listdir(os.path.join(data_dir, image_class)):
        image_path = os.path.join(data_dir, image_class, image)
        try:
            img = cv2.imread(image_path)
            tip = imghdr.what(image_path)
            if tip not in image_exts:
                print('Image not in ext list {}'.format(image_path))
                os.remove(image_path)
        except Exception as e:
            print('Issue with image {}'.format(image_path))
            # os.remove(image_path)

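# Optional sanity check (not in the original script): count how many images remain
# in each class folder after the cleanup pass, assuming the Data/<class>/ layout above.
for image_class in os.listdir(data_dir):
    count = len(os.listdir(os.path.join(data_dir, image_class)))
    print('{}: {} images'.format(image_class, count))
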
data = tf.keras.utils.image_dataset_from_directory('Data')

data_iterator = data.as_numpy_iterator()
# Get another batch from the iterator
batch = data_iterator.next()
# Images are represented as numpy arrays

# Class 0 = Happy
# Class 1 = Sad

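# Optional check (not in the original script): image_dataset_from_directory assigns
# labels alphabetically by folder name, so this should confirm the 0/1 mapping
# assumed in the comments above.
print(data.class_names)
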
# Show the first four images of the batch with their integer labels
fig, ax = plt.subplots(ncols=4, figsize=(20, 20))
for idx, img in enumerate(batch[0][:4]):
    # Cast to int so matplotlib renders the raw 0-255 pixel values correctly
    ax[idx].imshow(img.astype(int))
    ax[idx].title.set_text(batch[1][idx])
plt.show()

# Preprocessing data

# Scale pixel values from 0-255 down to 0-1
data = data.map(lambda x, y: (x / 255, y))
print(data.as_numpy_iterator().next()[0].max())  # should now be at most 1.0

# Split data into training/validation/test sets

train_size = int(len(data) * .7)
val_size = int(len(data) * .2)
test_size = int(len(data) * .1) + 1
# ^ Tweak needed to match the size of the dataset

train = data.take(train_size)
val = data.skip(train_size).take(val_size)
test = data.skip(train_size + val_size).take(test_size)

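# Optional sanity check (not in the original script): the three split sizes should
# cover every batch in the dataset, otherwise some data is silently dropped and the
# sizes above need tweaking.
print('batches:', len(data), 'train:', train_size, 'val:', val_size, 'test:', test_size)
assert train_size + val_size + test_size >= len(data)
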
# Deep learning model

model = Sequential()
# image_dataset_from_directory loads RGB images, so the input has 3 channels, not 1
model.add(Conv2D(64, (3, 3), 1, activation="relu", input_shape=(256, 256, 3)))
model.add(MaxPooling2D())

model.add(Conv2D(128, (3, 3), 1, activation="relu"))
model.add(MaxPooling2D())

model.add(Conv2D(64, (3, 3), 1, activation="relu"))
model.add(MaxPooling2D())

model.add(Flatten())

model.add(Dense(256, activation="relu"))
model.add(Dense(1, activation="sigmoid"))  # single sigmoid output for binary classification

model.compile('adam', loss=tf.losses.BinaryCrossentropy(), metrics=["accuracy"])
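
# Optional (not in the original paste): print the layer-by-layer architecture and
# parameter counts before training.
model.summary()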

# Train
logdir = "logs"
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
hist = model.fit(train, epochs=20, validation_data=val, callbacks=[tensorboard_callback])
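
# The training curves can also be inspected live with TensorBoard, e.g. by running
# `tensorboard --logdir logs` in a separate terminal.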

# Plot performance over the training history

# loss/val_loss
fig = plt.figure()
plt.plot(hist.history['loss'], color='teal', label='loss')
plt.plot(hist.history['val_loss'], color='orange', label='val_loss')
fig.suptitle('Loss', fontsize=20)
plt.legend(loc='upper left')
plt.show()

# accuracy/val_accuracy
fig = plt.figure()
plt.plot(hist.history['accuracy'], color='teal', label='accuracy')
plt.plot(hist.history['val_accuracy'], color='orange', label='val_accuracy')
fig.suptitle('Accuracy', fontsize=20)
plt.legend(loc='upper left')
plt.show()

# Evaluate Performance

pre = Precision()
re = Recall()
acc = BinaryAccuracy()

for batch in test.as_numpy_iterator():
    x, y = batch
    yhat = model.predict(x)
    pre.update_state(y, yhat)
    re.update_state(y, yhat)
    acc.update_state(y, yhat)
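
# Not in the original paste: the metric objects accumulate state but are never
# printed, so report the final values here.
print('Precision:', pre.result().numpy())
print('Recall:', re.result().numpy())
print('Binary accuracy:', acc.result().numpy())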

# Save the model
model.save(os.path.join('/home/victor/PycharmProjects/tensorflow/Image Classification/models', 'convolution_model.h5'))

# Test
new_model = load_model(os.path.join('models', 'convolution_model.h5'))

img = cv2.imread('/home/victor/PycharmProjects/tensorflow/Image Classification/Test Data/Test_Good.jpg')
# OpenCV loads images as BGR; convert to RGB so the image matches the training data
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.show()

# tf.image.resize expects only the spatial size (height, width)
resize = tf.image.resize(img, (256, 256))
plt.imshow(resize.numpy().astype(int))
plt.show()

# np.expand_dims adds a batch dimension so a single image can be passed to predict
yhat = model.predict(np.expand_dims(resize / 255, 0))

if yhat > 0.5:
    print('Predicted class is good')
else:
    print('Predicted class is bad')
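
# Optional round-trip check (not in the original paste): the reloaded model should
# give the same prediction as the in-memory model it was saved from.
yhat_loaded = new_model.predict(np.expand_dims(resize / 255, 0))
print('Reloaded model prediction:', yhat_loaded)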