Advertisement
Not a member of Pastebin yet?
Sign Up —
it unlocks many cool features!
import tensorflow as tf
import numpy as np

# When True, myNet concatenates only the first channel of the second
# encoder's output (via tf.slice) with the first encoder's output;
# when False, the two full feature tensors are concatenated.
USING_SLICE = True
def leaky_relu(x, alpha=0.2):
    """Leaky ReLU activation: x for positive inputs, alpha * x otherwise."""
    scaled = alpha * x
    return tf.maximum(scaled, x)
def conv2d_leaky(input, kernel_shape, bias_shape, strides=1, relu=True, padding='SAME'):
    """2-D convolution with bias add, optionally followed by leaky ReLU.

    Variables are created in the caller's current variable scope, so each
    call must be wrapped in its own tf.variable_scope.

    Args:
        input: 4-D input tensor; conv2d strides are built NHWC-style, so the
            layout is presumably [batch, height, width, channels] — confirm.
        kernel_shape: [filter_h, filter_w, in_channels, out_channels].
        bias_shape: [out_channels].
        strides: spatial stride applied to both height and width.
        relu: when True, apply leaky_relu to the result; when False, only
            warn and return the linear output.
        padding: conv2d padding mode, 'SAME' or 'VALID'.

    Returns:
        The convolution output tensor, activated unless relu is False.
    """
    weights = tf.get_variable("weights", kernel_shape,
                              initializer=tf.contrib.layers.xavier_initializer())
    biases = tf.get_variable("biases", bias_shape,
                             initializer=tf.truncated_normal_initializer())
    output = tf.nn.conv2d(input, weights, strides=[1, strides, strides, 1],
                          padding=padding)
    output = tf.nn.bias_add(output, biases)
    # Idiomatic truthiness test instead of `relu == False`.
    if not relu:
        print('WARNING: reLU disabled')
    else:
        output = leaky_relu(output)
    return output
def decoder(volume):
    """Stack of six stride-1 3x3 conv layers shrinking channels to 8.

    Layers conv1..conv6 output 128, 128, 96, 64, 32 and 8 channels; every
    layer except the last applies the leaky ReLU activation.
    """
    out_channels = [128, 128, 96, 64, 32, 8]
    net = volume
    for idx, out_ch in enumerate(out_channels, start=1):
        in_ch = net.get_shape().as_list()[3]
        with tf.variable_scope("conv%d" % idx):
            # Final layer (conv6) is linear: relu flag False only when last.
            net = conv2d_leaky(net, [3, 3, in_ch, out_ch], [out_ch], 1,
                               idx < len(out_channels))
    return net
def encoder(input_batch):
    """Twelve 3x3 conv layers with leaky ReLU, downsampling five times.

    Channel widths grow 16 -> 192 in pairs; the first layer of each pair
    uses stride 2 (spatial downsampling), the second stride 1. Expects a
    3-channel input batch.
    """
    out_channels = [16, 16, 32, 32, 64, 64, 96, 96, 128, 128, 192, 192]
    net = input_batch
    in_ch = 3
    for idx, out_ch in enumerate(out_channels, start=1):
        # Odd-numbered layers downsample (stride 2), even-numbered refine.
        stride = 2 if idx % 2 == 1 else 1
        with tf.variable_scope("conv%d" % idx):
            net = conv2d_leaky(net, [3, 3, in_ch, out_ch], [out_ch], stride, True)
        in_ch = out_ch
    return net
def myNet(image, sliceOp=True):
    """Run two parallel encoders on the same image and decode the fusion.

    Args:
        image: input image tensor fed to both encoders.
        sliceOp: when True, only the first channel of the second encoder's
            features is concatenated; when False, the full tensor is used.

    Returns:
        The decoder's output tensor.
    """
    with tf.variable_scope("encoder-1"):
        features1 = encoder(image)
    with tf.variable_scope("encoder-2"):
        features2 = encoder(image)
    if sliceOp:
        first_channel = tf.slice(features2, [0, 0, 0, 0], [-1, -1, -1, 1])
        features = tf.concat([features1, first_channel], -1)
    else:
        features = tf.concat([features1, features2], -1)
    return decoder(features)
def run(name, height, width):
    """Build myNet for a fixed input size and save its (untrained) weights.

    Args:
        name: checkpoint base name; the graph is saved to "output/<name>".
        height: input placeholder height in pixels.
        width: input placeholder width in pixels.
    """
    with tf.Graph().as_default():
        image = tf.placeholder("float", [1, height, width, 3], name="input")
        output = myNet(image, USING_SLICE)
        with tf.variable_scope("result"):
            # Doubles the network output inside a "result" scope; preserved
            # as-is from the original graph definition.
            output = output + output
        init = tf.group(tf.global_variables_initializer(),
                        tf.local_variables_initializer())
        saver = tf.train.Saver()
        with tf.Session() as sess:
            # TODO: load pre-trained weights here; currently the checkpoint
            # contains only the random initializer values.
            sess.run(init)
            saver.save(sess, "output/" + name)


if __name__ == "__main__":
    # Guard the entry point so importing this module no longer builds and
    # saves a graph as an import-time side effect.
    run('myNet', 576, 960)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement