Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tensorflow as tf
- import numpy as np
- from functools import partial
def get_kaminet(inputs, training, n_outputs, reuse):
    """Build the "kaminet" CNN and return its output logits.

    Architecture: conv/pool/LRN feature extractor followed by a
    dropout-regularized fully-connected head. All hidden conv and dense
    layers share ReLU activation, L1 weight regularization, and He
    (variance-scaling) initialization; the final logits layer is linear
    and unregularized.

    Args:
        inputs: 4-D image batch (NHWC). Assumes roughly 200x200 spatial
            input, judging by the "100x100" / "25x25" pooling comments —
            TODO confirm against the caller.
        training: bool tensor/flag; enables the two dropout layers.
        n_outputs: number of output logits.
        reuse: passed to tf.variable_scope for weight sharing.

    Returns:
        The pre-softmax logits tensor of shape (batch, n_outputs).
    """
    with tf.variable_scope("kaminet", reuse=reuse):
        l1_scale = 0.001
        he_init = tf.contrib.layers.variance_scaling_initializer()

        # Shared layer factories: ReLU + L1 regularization + He init.
        conv_layer = partial(
            tf.layers.conv2d,
            activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l1_regularizer(l1_scale),
            kernel_initializer=he_init,
        )
        dense_layer = partial(
            tf.layers.dense,
            activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l1_regularizer(l1_scale),
            kernel_initializer=he_init,
        )
        # Both local-response-norm layers use identical hyperparameters.
        lrn = partial(
            tf.nn.local_response_normalization,
            depth_radius=2, bias=1, alpha=0.00002, beta=0.75,
        )

        net = conv_layer(inputs, filters=16, kernel_size=4, strides=(1, 1),
                         padding="SAME", name="conv1")
        net = tf.layers.max_pooling2d(net, pool_size=(3, 3), strides=(2, 2),
                                      padding="SAME", name="max_pool2")  # 100x100
        net = lrn(net, name="norm3")
        net = conv_layer(net, filters=32, kernel_size=1, strides=(1, 1),
                         padding="SAME", name="conv4")
        net = conv_layer(net, filters=32, kernel_size=3, strides=(2, 2),
                         padding="SAME", name="conv5")
        net = lrn(net, name="norm6")
        net = tf.layers.max_pooling2d(net, pool_size=(3, 3), strides=(2, 2),
                                      padding="SAME", name="max_pool7")  # 25x25
        net = conv_layer(net, filters=64, kernel_size=1, strides=(1, 1),
                         padding="SAME", name="conv8")
        # NOTE(review): filters=54 breaks the 16/32/64/128 doubling pattern —
        # possibly a typo for 64; kept as-is to preserve behavior.
        net = conv_layer(net, filters=54, kernel_size=3, strides=(2, 2),
                         padding="SAME", name="conv9")
        net = conv_layer(net, filters=128, kernel_size=3, strides=(2, 2),
                         padding="SAME", name="conv10")
        net = tf.layers.average_pooling2d(net, pool_size=(3, 3), strides=(1, 1),
                                          padding="VALID", name="mean_pool11")

        # Flatten the pooled feature map for the fully-connected head.
        s = net.shape
        net = tf.reshape(net, shape=(-1, s[1] * s[2] * s[3]),
                         name="mean_pool11_flat")
        net = tf.layers.dropout(net, rate=0.4, training=training,
                                name="mean_pool11_flat_drop")
        net = dense_layer(net, 1000, name="dense12")
        net = tf.layers.dropout(net, rate=0.4, training=training,
                                name="dense12_drop")
        net = dense_layer(net, 200, name="dense13")
        net = dense_layer(net, 50, name="dense14")
        # Final layer: linear logits (no activation, no regularizer).
        return tf.layers.dense(net, n_outputs, name="logits")
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement