Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #from __future__ import print_function
- import numpy as np
- from keras.callbacks import EarlyStopping
- from keras.datasets import cifar10
- from keras.models import Sequential, model_from_json, Model
- from keras.layers.core import Dense, Dropout, Flatten
- from keras.layers.convolutional import Conv2D
- from keras.optimizers import Adam
- from keras.layers.pooling import MaxPooling2D
- from keras.utils import to_categorical
# Fetch CIFAR-10, scale pixels into [0, 1] and one-hot encode the labels.
(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
Y_train = to_categorical(Y_train)
Y_test = to_categorical(Y_test)
# Shuffle the label columns with one random permutation, applied
# identically to the train and test one-hot matrices.
order = np.random.permutation(10)
Y_train = Y_train[:, order]
Y_test = Y_test[:, order]
# Load the network architecture from its JSON description.
with open("CNN_cifar10.json", "r") as json_file:
    loaded_model_json = json_file.read()
# Rebuild the model object from the JSON architecture.
loaded_model = model_from_json(loaded_model_json)
# Load the previously saved weights into the model.
loaded_model.load_weights("CNN_cifar10.h5")
# Freeze all pretrained layers so transfer training leaves them untouched.
for layer in loaded_model.layers:
    layer.trainable = False
# Keep only the convolutional bottom of the network by cutting the graph
# at the fourth-from-last layer's output.
# FIX: the original had `model_bottom = Model` severed from its argument
# list (assigning the class itself) and used the deprecated
# `input=`/`output=` keywords; the functional API takes `inputs`/`outputs`.
model_bottom = Model(inputs=loaded_model.input,
                     outputs=loaded_model.layers[-4].output)
# Select the samples whose (permuted) one-hot label falls in columns 0..4:
# the row sum over those columns is exactly 1 for the wanted classes.
ind_train = np.in1d(np.sum(Y_train[:, 0:5], axis=1), 1)
ind_test = np.in1d(np.sum(Y_test[:, 0:5], axis=1), 1)
X04_train = X_train[ind_train, :, :, :]
X04_test = X_test[ind_test, :, :, :]
y04_train = Y_train[ind_train, 0:5]
y04_test = Y_test[ind_test, 0:5]
# Stack a fresh 5-way classifier head on top of the frozen pretrained bottom.
model04 = Sequential()
model04.add(model_bottom)
model04.add(Dense(1024, activation='relu'))
model04.add(Dropout(0.5))
model04.add(Dense(5, activation='softmax'))
model04.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=0.0001, decay=1e-6),
                metrics=['accuracy'])
# Train until validation loss stops improving (early stopping).
model04.fit(X04_train, y04_train, batch_size=128, shuffle=True,
            epochs=250, validation_split=0.1,
            callbacks=[EarlyStopping(min_delta=0.001, patience=3)])
scores = model04.evaluate(X04_test, y04_test, verbose=0)
# (Message is Russian for "Accuracy on the test data".)
print("Точность работы на тестовых данных: %.2f%%" % (scores[1]*100))
# Select the samples whose (permuted) one-hot label falls in columns 5..9:
# the row sum over those columns is exactly 1 for the wanted classes.
ind_train = np.in1d(np.sum(Y_train[:, 5:10], axis=1), 1)
ind_test = np.in1d(np.sum(Y_test[:, 5:10], axis=1), 1)
X59_train = X_train[ind_train, :, :, :]
X59_test = X_test[ind_test, :, :, :]
y59_train = Y_train[ind_train, 5:10]
y59_test = Y_test[ind_test, 5:10]
# Stack a fresh 5-way classifier head on top of the frozen pretrained bottom.
model59 = Sequential()
model59.add(model_bottom)
model59.add(Dense(1024, activation='relu'))
model59.add(Dropout(0.5))
model59.add(Dense(5, activation='softmax'))
model59.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=0.0001, decay=1e-6),
                metrics=['accuracy'])
# Train until validation loss stops improving (early stopping).
model59.fit(X59_train, y59_train, batch_size=128, shuffle=True,
            epochs=250, validation_split=0.1,
            callbacks=[EarlyStopping(min_delta=0.001, patience=3)])
scores = model59.evaluate(X59_test, y59_test, verbose=0)
# (Message is Russian for "Accuracy on the test data".)
print("Точность работы на тестовых данных: %.2f%%" % (scores[1]*100))
# Construct a fused 10-class network from the two 5-class specialists:
# same frozen bottom, one shared hidden layer, one 10-way output layer.
modelF = Sequential()
modelF.add(model_bottom)
modelF.add(Dense(1024, activation="relu"))
modelF.add(Dense(10, activation="softmax"))
# Sum the weights of the shared hidden layer
# (is there a better way to implement this in Keras?).
weights = []
weights.append(model59.layers[-3].get_weights()[0] +
               model04.layers[-3].get_weights()[0])  # sum kernel weights
weights.append(model59.layers[-3].get_weights()[1] +
               model04.layers[-3].get_weights()[1])  # sum biases
modelF.layers[-2].set_weights(weights)
# Concatenate the weights of the output layer: output columns 0-4 come
# from model04, columns 5-9 from model59.
weights = []
weights.append(np.hstack((model04.layers[-1].get_weights()[0],
                          model59.layers[-1].get_weights()[0])))  # kernels
weights.append(np.hstack((model04.layers[-1].get_weights()[1],
                          model59.layers[-1].get_weights()[1])))  # biases
modelF.layers[-1].set_weights(weights)
modelF.compile(loss='categorical_crossentropy',
               optimizer=Adam(lr=0.0001, decay=1e-6),
               metrics=['accuracy'])
# Test the performance of the fused network on the full 10-class test set.
score = modelF.evaluate(X_test, Y_test, verbose=1)
print('Test accuracy for the fused network:', score[1])
Add Comment
Please, Sign In to add comment