Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Build a convolutional neural network
def conv_net(x, n_classes, dropout, reuse, is_training):
    """Build a CNN classifier graph for 28x28 single-channel images (MNIST).

    Args:
        x: Input tensor of flattened images, reshaped internally to
            [batch, 28, 28, 1] — assumes 784 features per example.
        n_classes: Number of output classes (size of the final dense layer).
        dropout: Dropout rate (fraction of units dropped) applied after each
            fully connected layer while training.
        reuse: Passed to ``tf.variable_scope`` so train/test graphs can
            share the same 'ConvNet' variables.
        is_training: When True, dropout is active and raw logits are
            returned (the softmax-cross-entropy loss applies softmax
            itself); when False, softmax is applied for prediction.

    Returns:
        A [batch, n_classes] tensor: logits in training mode, class
        probabilities otherwise.
    """
    # Define a scope for reusing the variables
    with tf.variable_scope('ConvNet', reuse=reuse):
        # MNIST data input is a 1-D vector of 784 features (28*28 pixels).
        # Reshape to match picture format [Height x Width x Channel];
        # the tensor becomes 4-D: [Batch Size, Height, Width, Channel].
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

        # Convolution layer with 64 filters and a kernel size of 5
        x = tf.layers.conv2d(x, 64, 5, activation=tf.nn.relu)
        # Max pooling (down-sampling) with strides of 2 and kernel size of 2
        x = tf.layers.max_pooling2d(x, 2, 2)

        # Convolution layer with 256 filters and a kernel size of 3
        # NOTE(review): the original comment said kernel size 5, but the
        # code uses 3 — comment corrected to match the code.
        x = tf.layers.conv2d(x, 256, 3, activation=tf.nn.relu)
        # Convolution layer with 512 filters and a kernel size of 3
        x = tf.layers.conv2d(x, 512, 3, activation=tf.nn.relu)
        # Max pooling (down-sampling) with strides of 2 and kernel size of 2
        x = tf.layers.max_pooling2d(x, 2, 2)

        # Flatten the data to a 1-D vector for the fully connected layers
        x = tf.contrib.layers.flatten(x)

        # Fully connected layer (in contrib folder for now)
        x = tf.layers.dense(x, 2048)
        # Apply dropout (if is_training is False, dropout is not applied)
        x = tf.layers.dropout(x, rate=dropout, training=is_training)
        # Fully connected layer (in contrib folder for now)
        x = tf.layers.dense(x, 1024)
        # Apply dropout (if is_training is False, dropout is not applied)
        x = tf.layers.dropout(x, rate=dropout, training=is_training)

        # Output layer: per-class logits
        out = tf.layers.dense(x, n_classes)
        # Because 'softmax_cross_entropy_with_logits' loss already applies
        # softmax, we only apply softmax to the testing network.
        out = tf.nn.softmax(out) if not is_training else out

    return out
Add Comment
Please, Sign In to add comment