Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def single_layer_model(input_shape):
    """Build and compile a minimal convolutional classifier.

    Architecture: a single Conv2D layer (note: no activation is given, so
    the convolution is linear), flattened into a vector and fed to a
    10-way softmax output.

    Args:
        input_shape: shape of one input image, e.g. (H, W, 3) or (3, H, W)
            depending on the backend's image data format.

    Returns:
        A compiled Keras Sequential model using categorical cross-entropy
        loss, the Adam optimizer, and accuracy as the reported metric.
    """
    net = Sequential()
    # One spatial convolution over the image: 32 filters of size 3x3.
    net.add(Conv2D(32, (3, 3), input_shape=input_shape))
    # Collapse the feature maps to a flat vector (batch size unaffected).
    net.add(Flatten())
    # 10-class softmax output layer.
    net.add(Dense(10, activation='softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    return net
def evaluate():
    """Train the single-layer model on the exp1 image directories.

    Streams training and validation images from disk with pixel values
    rescaled to [0, 1], fits for 50 epochs on 500 samples per split, and
    logs per-epoch metrics to 'exp7.log'. Overall and fit-only wall time
    are printed via the Timer context manager.
    """
    with Timer("Summary time: "):
        img_width, img_height = 50, 50
        train_data_dir = 'nn_labs/exp1/train'
        validation_data_dir = 'nn_labs/exp1/validation'
        epochs = 50
        batch_size = 32
        # nb_train_samples = 200
        nb_train_samples = 500
        nb_validation_samples = 500

        # The backend configuration decides whether the channel axis
        # comes before or after the spatial dimensions.
        if K.image_data_format() == 'channels_first':
            input_shape = (3, img_width, img_height)
        else:
            input_shape = (img_width, img_height, 3)

        # Identical preprocessing for both splits: rescale pixels to
        # [0, 1]. No other augmentation is applied.
        train_datagen = ImageDataGenerator(rescale=1. / 255)
        test_datagen = ImageDataGenerator(rescale=1. / 255)

        train_generator = train_datagen.flow_from_directory(
            train_data_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size)
        validation_generator = test_datagen.flow_from_directory(
            validation_data_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size)

        model = single_layer_model(input_shape)

        with Timer("Fit time: "):
            # Per-epoch metrics are appended to this CSV log file.
            csv_logger = CSVLogger('exp7.log')
            model.fit_generator(
                train_generator,
                steps_per_epoch=nb_train_samples // batch_size,
                epochs=epochs,
                validation_data=validation_generator,
                validation_steps=nb_validation_samples // batch_size,
                callbacks=[csv_logger])
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement