Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- import tensorflow as tf
def conv2d(x, output_filters, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope="conv2d"):
    """2-D convolution with bias, SAME padding.

    Args:
        x: input tensor; the kernel's input-channel count is taken from
           x's last static dimension (assumes NHWC layout — TODO confirm).
        output_filters: number of output channels.
        kh, kw: kernel height and width.
        sh, sw: stride height and width.
        stddev: stddev of the truncated-normal weight initializer.
        scope: variable scope name for the 'W' and 'b' variables.

    Returns:
        The convolved tensor with bias added (same shape as the conv output).
    """
    with tf.compat.v1.variable_scope(scope):
        input_channels = x.get_shape().as_list()[-1]
        W = tf.compat.v1.get_variable(
            'W', [kh, kw, input_channels, output_filters],
            initializer=tf.compat.v1.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(x, W, strides=[1, sh, sw, 1], padding='SAME')
        biases = tf.compat.v1.get_variable(
            'b', [output_filters],
            initializer=tf.compat.v1.constant_initializer(0.0))
        # tf.nn.bias_add preserves the shape of `conv`, so the original
        # tf.reshape(..., Wconv.get_shape()) wrapper was a no-op; dropped.
        return tf.nn.bias_add(conv, biases)
def dropout(x, rate):
    """Apply dropout to `x`, zeroing each element with probability `rate`."""
    return tf.nn.dropout(x, rate=rate)
def fc(x, output_size, stddev=0.02, scope='fc'):
    """Fully-connected (dense) layer: x @ W + b.

    Args:
        x: 2-D input tensor of shape (batch, features); the weight matrix
           is sized from x's second static dimension.
        output_size: number of output units.
        stddev: stddev of the normal weight initializer.
        scope: variable scope name for the 'W' and 'b' variables.

    Returns:
        Tensor of shape (batch, output_size).
    """
    with tf.compat.v1.variable_scope(scope):
        in_size = x.get_shape().as_list()[1]
        # Use the tf.compat.v1 namespace throughout, matching conv2d in
        # this file (the original mixed tf.* and tf.compat.v1.* initializers).
        W = tf.compat.v1.get_variable(
            'W', [in_size, output_size], tf.float32,
            tf.compat.v1.random_normal_initializer(stddev=stddev))
        b = tf.compat.v1.get_variable(
            'b', [output_size],
            initializer=tf.compat.v1.constant_initializer(0.0))
        return tf.matmul(x, W) + b
def flatten(x):
    """Collapse all non-batch dimensions of a 4-D tensor into one.

    Returns a tensor of shape (batch, h * w * c). The batch dimension is
    reshaped with -1 so inputs whose static batch size is unknown (None)
    still flatten correctly; the original used shape[0], which fails when
    the batch dimension is dynamic.
    """
    shape = x.get_shape().as_list()
    return tf.reshape(x, [-1, shape[1] * shape[2] * shape[3]])
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement