Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
from keras import models
from keras import layers
from keras.layers import LeakyReLU

# Model: two conv blocks (32 filters, 3x3, LeakyReLU(0.1), 2x2 max-pool)
# followed by a 64-unit ReLU dense layer and a 3-class softmax head.
# NOTE(review): img_rows / img_cols must be defined before this point —
# they are not visible in this snippet.
network = models.Sequential()
# activation=None because LeakyReLU is added as its own layer below;
# use_bias=False on the first conv only.
network.add(layers.Conv2D(32, (3, 3), activation=None,
                          input_shape=(img_rows, img_cols, 1),
                          padding='valid', use_bias=False))
network.add(LeakyReLU(alpha=0.1))
# MaxPooling2D strides default to pool_size, i.e. (2, 2) here.
network.add(layers.MaxPooling2D(pool_size=(2, 2)))
network.add(layers.Conv2D(32, (3, 3), activation=None,
                          padding='valid', use_bias=True))
network.add(LeakyReLU(alpha=0.1))
network.add(layers.MaxPooling2D(pool_size=(2, 2)))
network.add(layers.Flatten())
network.add(layers.Dense(64, activation='relu'))
network.add(layers.Dense(3, activation='softmax'))
network.compile(optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
# Training input: LMDB source, batches of 84, active only in the TRAIN phase.
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include { phase: TRAIN }
  data_param {
    source: "input/lmdb/train_lmdb"
    batch_size: 84
    backend: LMDB
  }
}
# Validation input: same layout, read from valid_lmdb during the TEST phase.
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include { phase: TEST }
  data_param {
    source: "input/lmdb/valid_lmdb"
    batch_size: 84
    backend: LMDB
  }
}
# conv1: 32 3x3 filters, stride 1, no padding — mirrors the Keras
# Conv2D(32, (3,3), padding='valid', use_bias=False) above.
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 32
    pad: 0
    kernel_size: 3
    stride: 1
    # The Keras twin uses use_bias=False; Caffe defaults bias_term to true,
    # so it must be disabled explicitly to match.
    bias_term: false
  }
}
# lr1: leaky ReLU, slope 0.1 — matches LeakyReLU(alpha=0.1).
layer {
  name: "lr1"
  type: "ReLU"
  bottom: "conv1"
  top: "lr1"
  relu_param {
    negative_slope: 0.1
  }
}
# mp1: 2x2 max pooling. Stride fixed from 1 to 2: Keras
# MaxPooling2D(pool_size=(2,2)) defaults strides to the pool size, so the
# original stride 1 produced overlapping windows and a larger output map
# than the Keras model.
layer {
  name: "mp1"
  type: "Pooling"
  bottom: "lr1"
  top: "mp1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
# conv2: 32 3x3 filters, stride 1, no padding. Bias stays enabled here
# (Caffe default) — the Keras twin uses use_bias=True for this layer.
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "mp1"
  top: "conv2"
  convolution_param {
    num_output: 32
    pad: 0
    kernel_size: 3
    stride: 1
  }
}
# lr2: leaky ReLU, slope 0.1 — matches LeakyReLU(alpha=0.1).
layer {
  name: "lr2"
  type: "ReLU"
  bottom: "conv2"
  top: "lr2"
  relu_param {
    negative_slope: 0.1
  }
}
# mp2: 2x2 max pooling. Stride fixed from 1 to 2 to match Keras
# MaxPooling2D(pool_size=(2,2)), whose strides default to the pool size;
# stride 1 would overlap windows and change the flattened feature count
# feeding fc1.
layer {
  name: "mp2"
  type: "Pooling"
  bottom: "lr2"
  top: "mp2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
    pad: 0
  }
}
# fl1: collapse the pooled feature maps to a vector for the dense layers.
layer {
  name: "fl1"
  type: "Flatten"
  bottom: "mp2"
  top: "fl1"
}
# fc1: 64-unit fully connected layer (Keras Dense(64)).
# Gaussian weight init, std 0.001; the xavier alternative is kept
# commented out for experimentation.
# NOTE(review): bias_filler value 1 is unusual (Keras defaults biases
# to zero) — confirm this constant-1 bias init is intentional.
layer {
  name: "fc1"
  type: "InnerProduct"
  bottom: "fl1"
  top: "fc1"
  inner_product_param {
    num_output: 64
    weight_filler {
      type: "gaussian"
      #type: "xavier"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
# relu1: standard ReLU after fc1 (Keras uses activation='relu' here).
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "fc1"
  top: "relu1"
}
# fc2: 3-way classifier head (Keras Dense(3)); same filler scheme as fc1.
layer {
  name: "fc2"
  type: "InnerProduct"
  bottom: "relu1"
  top: "fc2"
  inner_product_param {
    num_output: 3
    weight_filler {
      type: "gaussian"
      #type: "xavier"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
# softmax: class probabilities over the three outputs.
layer {
  name: "softmax"
  type: "Softmax"
  bottom: "fc2"
  top: "softmax"
}
# accuracy: reported in the TEST phase only. Reading the softmax top is
# equivalent to reading fc2 for accuracy, since softmax preserves argmax.
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "softmax"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
# loss: fused SoftmaxWithLoss on the raw fc2 logits, replacing the
# original separate Softmax + MultinomialLogisticLoss pair — Caffe's
# documentation recommends the fused layer for a numerically stabler
# gradient. The standalone softmax layer still feeds the accuracy layer.
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc2"
  bottom: "label"
  top: "loss"
}
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement