Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Binary image classifier: six conv/max-pool stages that shrink the large
# 1500x1500 RGB input down before a dense head with a single sigmoid unit
# (probability of the positive class, paired with binary_crossentropy).
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu',
                  input_shape=(1500, 1500, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(256, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(256, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    # Collapse the final feature maps into a vector for the dense head.
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])
# Normalize pixel values from [0, 255] into [0, 1] for every pipeline.
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)

# The three directory flows share identical settings, so define them once.
_flow_kwargs = dict(
    target_size=(1500, 1500),  # resize every image to the network's input size
    batch_size=2,
    class_mode='binary',       # binary labels to match binary_crossentropy
)

# Training images come from train_dir; validation and test reuse the
# (identically configured) test-side generator on their own directories.
train_generator = train_datagen.flow_from_directory(train_dir, **_flow_kwargs)
validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        **_flow_kwargs)
test_generator = test_datagen.flow_from_directory(test_dir, **_flow_kwargs)
- probabilities = model.predict_generator(test_generator,5)
# Example output — the predicted probabilities for the 10 test images:
# array([[2.6628117e-28],
#        [6.6314442e-06],
#        [3.2372427e-20],
#        [7.8302348e-04],
#        [1.0000000e+00],
#        [1.0000000e+00],
#        [1.0000000e+00],
#        [8.4590050e-14],
#        [9.9938679e-01],
#        [3.6370282e-25]], dtype=float32)
Add Comment
Please, Sign In to add comment