Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# --- Model definition --------------------------------------------------
# Single-channel (grayscale) 256x256 input image.
channels = 1
scan_input = Input(shape=(256, 256, channels))

# Convolutional backbone: four stages, each a pair of same-padded 3x3
# ReLU convolutions followed by 2x2 max-pooling and dropout.  The last
# stage uses heavier dropout before the dense classifier.
x = scan_input
for n_filters, drop_rate in [(32, 0.2), (64, 0.2), (128, 0.2), (128, 0.5)]:
    x = layers.Conv2D(n_filters, (3, 3), padding='same', activation='relu')(x)
    x = layers.Conv2D(n_filters, (3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Dropout(drop_rate)(x)

# Shared dense representation feeding both output heads.
x = layers.Flatten()(x)
x = layers.Dense(512, activation='relu')(x)

# Two prediction heads branch out from the shared features:
#   'atomic'  - single sigmoid unit (binary prediction)
#   'quality' - 3-way softmax (categorical prediction)
atomic_prediction = layers.Dense(1, activation='sigmoid', name='atomic')(x)
quality_prediction = layers.Dense(3, activation='softmax', name='quality')(x)

# Complete multi-output model.
model = models.Model(scan_input, [atomic_prediction, quality_prediction])
model.summary()
batch_size = 32

# Augmentation + per-sample normalisation for the training stream.
# NOTE: choose either scale or samplewise center/normalization, but not
# both, and make sure the data isn't normalized earlier when using scaling.
train_datagen = image.ImageDataGenerator(
    samplewise_center=True,
    samplewise_std_normalization=True,
    # scale=1/features_std_mean,
    # rotation_range=180,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='nearest')

# Validation/test stream gets only the per-sample normalisation —
# no augmentation.
test_datagen = image.ImageDataGenerator(
    samplewise_center=True,
    samplewise_std_normalization=True)

# Fit any dataset-level statistics the generator needs on the training set.
train_datagen.fit(X_train)
def generate_data_generator(generator, X, A, Q, batch_size=64, seed=7):
    """Wrap an augmenting generator so it yields multi-output targets.

    Parameters
    ----------
    generator : object with a Keras-style ``.flow(X, y=..., ...)`` method
        (e.g. an ``ImageDataGenerator``).
    X : array of input images.
    A : 1-D array of binary 'atomic' labels, shape ``(n,)``.
    Q : 2-D array of one-hot 'quality' labels, shape ``(n, k)``.
    batch_size, seed : forwarded to ``generator.flow`` so paired streams
        built with the same seed stay in sync.

    Yields
    ------
    ``(Xi, {'atomic': Ai, 'quality': Qi})`` batches matching a model whose
    output layers are named 'atomic' and 'quality'.
    """
    # Stack both label sets into a single array so one flow() call keeps
    # image/label pairing consistent under shuffling and augmentation.
    y = np.append(A[:, np.newaxis], Q, axis=1)
    genX = generator.flow(X, y=y, batch_size=batch_size, seed=seed)
    while True:
        # Use the builtin next() rather than the legacy .next() method:
        # .next() is a Python-2-era API that plain Python-3 iterators do
        # not provide, while next() works on both.
        Xi, yi = next(genX)
        Ai = yi[:, 0]   # first column: binary 'atomic' label
        Qi = yi[:, 1:]  # remaining columns: one-hot 'quality' label
        yield Xi, {'atomic': Ai, 'quality': Qi}
# Build the three data streams.  The validation and test streams reuse the
# non-augmenting generator; all share the same batch size and seed.
train_gen = generate_data_generator(train_datagen, X_train, A_train, Q_train,
                                    batch_size=batch_size, seed=7)
val_gen = generate_data_generator(test_datagen, X_val, A_val, Q_val,
                                  batch_size=batch_size, seed=7)
test_gen = generate_data_generator(test_datagen, X_test, A_test, Q_test,
                                   batch_size=batch_size, seed=7)
# --- Training setup ----------------------------------------------------
callbacks_list = [
    # Drop the learning rate 10x when val_loss plateaus for 10 epochs.
    callbacks.ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.1,
        patience=10,
    ),
    # Keep only the best-so-far weights (by validation loss) on disk.
    callbacks.ModelCheckpoint(
        filepath='cnn6_model.h5',
        monitor='val_loss',
        save_best_only=True,
    )
]

model.compile(
    # 'learning_rate' replaces the deprecated 'lr' alias (removed from
    # Keras optimizers in recent TF releases).
    optimizer=optimizers.Adam(learning_rate=1e-4),
    loss={'atomic': 'binary_crossentropy',
          'quality': 'categorical_crossentropy'},
    metrics={'atomic': 'binary_accuracy',
             'quality': 'categorical_accuracy'},
    # Weight the binary 'atomic' head more heavily than 'quality'.
    loss_weights={'atomic': 1., 'quality': 0.6})

# Model.fit accepts generators directly since TF 2.1; fit_generator is
# deprecated and removed in TF >= 2.6.
history = model.fit(
    train_gen,
    steps_per_epoch=len(X_train) // batch_size,
    epochs=100,
    validation_data=val_gen,
    validation_steps=len(X_val) // batch_size,
    callbacks=callbacks_list)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement