Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Imports, deduplicated from the original paste (numpy, tensorflow and
# matplotlib were each imported three or four times) and grouped
# stdlib / third-party.
import glob
import math
import time
from datetime import timedelta

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.metrics import confusion_matrix

from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint

# NOTE(review): the original was notebook content and contained IPython-only
# lines that are syntax errors in a plain .py file:
#   %matplotlib inline
#   !pip install scipy
# Install scipy from a shell (`pip install scipy`) if it is missing.
try:
    from scipy import misc  # imported for parity with the original; unused below
except ImportError:
    misc = None

import imageio  # imported in the original; unused below
# ---- Load training images ------------------------------------------------
# 720 grayscale BMPs, each center-cropped/padded to 250x250.
training_size = 720
train_images = np.empty(shape=(training_size, 250, 250, 1))
import glob

# Build the crop/pad op once and run everything through a single Session.
# The original created a brand-new tf.Session() for every image, which leaks
# sessions and is extremely slow.
_img_in = tf.placeholder(tf.uint8, shape=(None, None, 1))
_img_cropped = tf.image.resize_image_with_crop_or_pad(_img_in, 250, 250)

i = 0
with tf.Session() as _sess:
    for filename in glob.glob('C:/Mah/Database/HKPU/HKPU60CLS/Train/*.bmp'):
        image = np.array(Image.open(filename))      # (H, W) grayscale array
        image1 = image[:, :, np.newaxis]            # add channel axis -> (H, W, 1)
        train_images[i] = _sess.run(_img_cropped, feed_dict={_img_in: image1})
        i += 1

# Drop the channel axis for the label/shape bookkeeping below: (N, 250, 250).
X_train = train_images[:, :, :, 0]
# ---- Training labels -----------------------------------------------------
# 60 classes x 12 training images per class, in class order. Replaces the
# original 720-element hand-written literal with the equivalent generated list.
a = [cls for cls in range(60) for _ in range(12)]

# One-hot encode: row k is the unit vector for class a[k] (float64, classes
# 0..59 in sorted order) — identical output to the original
# sklearn OneHotEncoder(sparse=False).fit_transform(...) call, without
# pulling in sklearn for such a trivial operation.
train_labels = np.eye(60)[a]
print(train_labels)

# Recover the integer class labels from the one-hot rows.
y_train = np.argmax(train_labels, axis=1)
print(X_train.shape)
print(y_train.shape)

# ---- Load test images ----------------------------------------------------
# 480 grayscale BMPs, each center-cropped/padded to 250x250.
test_size = 480
img_size = 384 * 284  # raw image pixel count from the original; unused below
test_images = np.empty(shape=(test_size, 250, 250, 1))
import glob

# As for the training set: one crop/pad op and one Session, instead of the
# original's new tf.Session() per image (leak + very slow).
_timg_in = tf.placeholder(tf.uint8, shape=(None, None, 1))
_timg_cropped = tf.image.resize_image_with_crop_or_pad(_timg_in, 250, 250)

i = 0
with tf.Session() as _sess:
    for filename in glob.glob('C:/Mah/Database/HKPU/HKPU60CLS/Test/*.bmp'):
        image = np.array(Image.open(filename))      # (H, W) grayscale array
        image1 = image[:, :, np.newaxis]            # -> (H, W, 1)
        test_images[i] = _sess.run(_timg_cropped, feed_dict={_timg_in: image1})
        i += 1

# (N, 250, 250) — channel axis dropped, matching X_train.
X_test = test_images[:, :, :, 0]
# ---- Test labels ---------------------------------------------------------
# 60 classes x 8 test images per class, in class order. Replaces the
# original 480-element hand-written literal.
c = [cls for cls in range(60) for _ in range(8)]

# One-hot encode (float64, classes 0..59) — same output as the original
# sklearn OneHotEncoder(sparse=False).fit_transform(...) call.
test_labels = np.eye(60)[c]
print(test_labels)

# Integer class labels recovered from the one-hot rows.
y_test = np.argmax(test_labels, axis=1)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)

# ---- Reshape to NHWC, scale pixels to [0, 1], one-hot the labels ---------
img_rows, img_cols = X_train.shape[1], X_train.shape[2]
input_shape = (img_rows, img_cols, 1)

# Restore the single channel axis expected by Conv2D.
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)

# 0-255 pixel values -> float32 in [0, 1].
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.

# One-hot the integer labels for categorical cross-entropy.
n_classes = len(set(y_train))
y_train = np_utils.to_categorical(y_train, n_classes)
y_test = np_utils.to_categorical(y_test, n_classes)
# ---- CNN: three conv/pool stages followed by a dense head ----------------
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu',
                 input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
# Softmax over the n_classes output classes.
model.add(Dense(n_classes, activation='softmax'))

# The original passed loss as a one-element list; the plain string is the
# conventional (and equivalent) form for a single-output model.
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()
# ---- Train with early stopping, then evaluate on the held-out set --------
# Stop when validation accuracy has not improved for 5 epochs.
callbacks = [EarlyStopping(monitor='val_acc', patience=5)]
batch_size = 50
n_epochs = 1000  # upper bound; early stopping ends training sooner

model.fit(
    X_train, y_train,
    batch_size=batch_size,
    epochs=n_epochs,
    verbose=1,
    validation_split=0.2,  # last 20% of the training data held out per epoch
    callbacks=callbacks,
)

# loss and accuracy on the test set
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement