Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
# Fine-tune a VGG16 network on a binary image-classification task:
# an ImageNet-pretrained convolutional base plus a small fully-connected
# classifier whose weights were trained separately on bottleneck features.
from keras import applications, Input, Model
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense

# NOTE(review): unused — VGG16(weights='imagenet') downloads its own weights.
# Kept only so existing importers of this module-level name keep working.
weights_path = '../keras/examples/vgg16_weights.h5'
# Weights of the separately pre-trained fully-connected classifier.
top_model_weights_path = 'fc_model.h5'

# Dimensions our images are resized to.
img_width, img_height = 150, 150

train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16

# Convolutional base: VGG16 pre-trained on ImageNet, without its classifier.
# Use the named dimensions instead of repeating the 150x150 literals.
input_tensor = Input(shape=(img_height, img_width, 3))
base_model = VGG16(weights='imagenet', include_top=False,
                   input_tensor=input_tensor)

# Small fully-connected classifier stacked on top of the conv base.
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
# Fine-tuning from randomly initialized classifier weights would wreck the
# pre-trained conv features, so load the previously trained classifier first.
top_model.load_weights('bottleneck_fc_model.h5')

# BUG FIX: the functional-API keywords are `inputs`/`outputs` — the old
# `input`/`output` spellings were deprecated and then removed in Keras 2.x.
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))

# Freeze everything up to (but not including) the last conv block, so only
# block5 and the classifier are fine-tuned.
# BUG FIX: the original froze model.layers[:25], but this model has only about
# 20 layers (19 from VGG16-without-top plus the Sequential top counted as one),
# so [:25] froze the ENTIRE network and nothing was trained. VGG16's block5
# starts at layer index 15 of the base model.
for layer in base_model.layers[:15]:
    layer.trainable = False

# Compile with SGD/momentum at a very low learning rate so fine-tuning makes
# only small, incremental updates to the pre-trained weights.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])

# Data augmentation for training; validation images are only rescaled.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

# Fine-tune; use the named sample/epoch constants instead of repeating the
# literals 2000, 50 and 800 (they must stay in sync with the data on disk).
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    verbose=2)
model.save_weights('transfer_learning.h5')
Advertisement
Add Comment
Please sign in to add a comment
Advertisement