Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# ---------------------------------------------------------------------------
# Dataset preparation: resize every source image to a fixed size, convert it
# to single-channel grayscale, and save it into a second folder.
# ---------------------------------------------------------------------------

t_start = time.time()  # wall-clock reference for the timing prints below

# input image dimensions (passed to PIL's resize as (width, height))
img_rows, img_cols = 187, 100

path1 = '/home/fatmasaid/new_flicker'               # folder of raw images
path2 = '/home/fatmasaid/new_flicker_data_resized'  # folder for resized output

listing = os.listdir(path1)
# Sort numerically by the digits embedded in each file name.
# NOTE: the original `int(filter(str.isdigit, f))` is Python-2-only; on
# Python 3 `filter` returns an iterator, so the digits must be joined first.
listing.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))

num_samples = len(listing)
print(num_samples)

for file in listing:
    im = Image.open(os.path.join(path1, file))
    img = im.resize((img_rows, img_cols))
    gray = img.convert('L')  # 'L' = 8-bit single-channel grayscale
    gray.save(os.path.join(path2, file), "JPEG")
# ---------------------------------------------------------------------------
# Load the resized grayscale images into one flattened float matrix, build
# the binary labels (low likes = 0, high likes = 1), and shuffle them.
# ---------------------------------------------------------------------------

# Number of images in the "low likes" class; everything after this index is
# labelled 1.  NOTE(review): hard-coded split — confirm it matches the data.
NUM_LOW_LIKE_SAMPLES = 64878

imlist = os.listdir(path2)
# Sort numerically by the digits in the file name (Python-3-safe version of
# the Python-2-only `int(filter(str.isdigit, f))` idiom).
imlist.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))

# Use path2 consistently; the original re-opened the images through a
# hard-coded *relative* folder name, which breaks when the script is run
# from any other working directory.
im1 = array(Image.open(os.path.join(path2, imlist[0])))  # one image for sizing
m, n = im1.shape[0:2]   # height / width of a resized image
imnbr = len(imlist)     # total number of images

# One flattened image per row, stored as 32-bit floats.
immatrix = array([array(Image.open(os.path.join(path2, im2))).flatten()
                  for im2 in imlist], 'f')

# Two-class labels: first NUM_LOW_LIKE_SAMPLES images -> 0, the rest -> 1.
label = np.ones((num_samples,), dtype=int)
label[0:NUM_LOW_LIKE_SAMPLES] = 0  # images with low no. of likes
label[NUM_LOW_LIKE_SAMPLES:] = 1   # images with high no. of likes

# Shuffle images and labels together so the train/test split is unbiased.
data, Label = shuffle(immatrix, label, random_state=2)
train_data = [data, Label]
print(train_data[0].shape)  # the training data matrix
print(train_data[1].shape)  # the matching labels
# ---------------------------------------------------------------------------
# Training hyper-parameters, train/test split and input normalisation.
# ---------------------------------------------------------------------------

batch_size = 16   # images per gradient update
nb_classes = 2    # binary classification: low vs high likes
nb_epoch = 150    # full passes over the training set

(X, Y) = (train_data[0], train_data[1])

# STEP 1: split X and Y into training and testing sets (80/20, fixed seed
# so the split is reproducible across runs).
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2,
                                                    random_state=4)

# Reshape the flat rows back into single-channel images.
# NOTE(review): (samples, 1, rows, cols) is the Theano / channels-first
# ordering matching the model's input_shape=(1, img_rows, img_cols) below —
# confirm the Keras backend is configured for channels-first.
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# Normalisation: scale pixel values from [0, 255] into [0, 1].
X_train /= 255
X_test /= 255

# Report how long preparing and importing the dataset took.
t_generateArray = time.time()
print('Generating Array Time:{}'.format(t_generateArray - t_start))
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# Convert integer class vectors to one-hot matrices for the softmax output.
Y_train = np_utils.to_categorical(Y_train, nb_classes)
Y_test = np_utils.to_categorical(Y_test, nb_classes)
# ---------------------------------------------------------------------------
# Step 1: network structure — a VGG-style stack of 3x3 convolutions.
# Five groups of [ZeroPadding2D + Convolution2D(relu)] each followed by a
# 2x2 max-pool, then two dropout-regularised dense layers and a softmax.
# ---------------------------------------------------------------------------
model = Sequential()

# (number of 3x3 conv layers, filters per layer) for each pooling group
_conv_blocks = [(2, 64), (2, 128), (4, 256), (4, 512), (4, 512)]

_first_layer = True
for _n_layers, _n_filters in _conv_blocks:
    for _ in range(_n_layers):
        if _first_layer:
            # Only the very first layer declares the input shape
            # (channels-first: one grayscale plane of img_rows x img_cols).
            model.add(ZeroPadding2D((1, 1),
                                    input_shape=(1, img_rows, img_cols)))
            _first_layer = False
        else:
            model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(_n_filters, 3, 3, activation='relu',
                                init='glorot_uniform'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

# Classifier head: two fully connected layers with 50% dropout each.
model.add(Flatten())
model.add(Dense(1024, activation='relu', init='glorot_uniform'))
model.add(Dropout(0.5))
model.add(Dense(1024, activation='relu', init='glorot_uniform'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))  # one neuron per class
# ---------------------------------------------------------------------------
# Step 2: learning target — categorical cross-entropy optimised with SGD —
# then training with checkpointing and a final held-out evaluation.
# ---------------------------------------------------------------------------

# Optionally resume from previously saved weights.
#model.load_weights("Train_Test_weights.h5")

# Small learning rate with Nesterov momentum and mild decay.
sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
# adagrad = Adagrad(lr=0.01, epsilon=1e-08)
model.compile(loss='categorical_crossentropy', optimizer=sgd,
              metrics=['accuracy'])

# Keep only the weights of the epoch with the best validation accuracy.
checkpointer = ModelCheckpoint(filepath='new_flicker_Train_Test_weights.h5',
                               monitor='val_acc', verbose=1,
                               save_best_only=True, mode='max')

# Train the model, validating on the held-out split after every epoch.
print('Training Start....')
hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                 verbose=1, validation_data=(X_test, Y_test),
                 callbacks=[checkpointer])
#model.save_weights('Train_Test_weights.h5')

# Evaluate on the test split: score = [loss, accuracy].
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])

# Spot-check a few predictions against the true one-hot labels.
print(model.predict_classes(X_test[2:10]))
print(Y_test[2:10])
print('drawing the training process...')


def _plot_curves(fig_idx, train_series, valid_series, train_label,
                 valid_label, y_label, out_file):
    # One figure: training curve in blue, validation curve in red,
    # saved to disk rather than shown interactively.
    plt.figure(fig_idx)
    plt.plot(train_series, 'b', label=train_label)
    plt.plot(valid_series, 'r', label=valid_label)
    plt.legend(loc='best')
    plt.xlabel('epoch')
    plt.ylabel(y_label)
    plt.grid(True)
    #plt.show()
    plt.savefig(out_file, dpi=150)


# Loss per epoch for the training and validation sets.
TrainERR = hist.history['loss']
ValidERR = hist.history['val_loss']
_plot_curves(0, TrainERR, ValidERR, 'TrainERR', 'ValidERR', 'loss',
             'new_flicker_Train_Test_Loss&Epoch.png')

# Accuracy per epoch for the training and validation sets.
Trainer = hist.history['acc']
Valider = hist.history['val_acc']
_plot_curves(1, Trainer, Valider, 'Trainer', 'Valider', 'Accuracy',
             'new_flicker_Train_Test_Accuracy&Epoch.png')

# Total wall-clock time for the whole run, plus the dataset-prep portion.
t_end = time.time()
print('Time:{}'.format(t_end - t_start))
print('Generating Array Time:{}'.format(t_generateArray - t_start))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement