Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tensorflow as tf
class ConvLayer:
    """1-D convolution followed by leaky-ReLU activation and batch norm.

    TF1-style layer object: the variables are created in __init__ and the
    graph ops are emitted by exec().
    """

    def __init__(self, layernum, filtersize=(1, 1, 1), stride=1):
        """Create the layer's variables.

        layernum   -- integer used to build unique variable/op names
        filtersize -- (width, in_channels, out_channels) of the conv filter
        stride     -- convolution stride used by exec()
        """
        self.layernum = layernum
        with tf.name_scope('convLayer{}'.format(layernum)):
            # BUG FIX: the name kwarg was previously passed to
            # tf.random_normal instead of tf.Variable, so the variable
            # itself was left unnamed in the graph.
            self.W = tf.Variable(
                tf.random_normal(filtersize, mean=0, stddev=0.2,
                                 dtype=tf.float32),
                name='Weight{}'.format(layernum))
            # One bias per output channel.
            self.b = tf.Variable(tf.zeros(filtersize[-1]),
                                 name='Bias{}'.format(layernum))
        self.stride = stride

    def exec(self, inputlayer):
        """Apply conv1d -> leaky ReLU -> batch normalization to inputlayer."""
        output = tf.nn.conv1d(inputlayer, self.W, stride=self.stride,
                              padding="SAME",
                              name="conv{}".format(self.layernum)) + self.b
        output = tf.nn.leaky_relu(output)
        output = tf.layers.batch_normalization(output)
        return output
def phase_shift(I, r, layerlen, filters, layernum):
    """Sub-pixel (phase-shift) upsampling for 1-D feature maps.

    Rearranges a (batch, layerlen, filters) tensor into one of shape
    (batch, layerlen * r, filters // r), trading channel depth for length.

    I        -- input tensor to rearrange
    r        -- upsampling factor
    layerlen -- current length dimension of I
    filters  -- current channel count of I (must be divisible by r)
    layernum -- integer used to build a unique op name
    """
    # Channels remaining after the shuffle (was a C-style (int)(...) cast).
    ofilters = filters // r
    X = tf.reshape(I, [-1, layerlen, r, ofilters], name="PS{}".format(layernum))
    X = tf.split(X, layerlen, 1)
    # NOTE(review): squeeze() with no axis also drops a batch dimension of
    # size 1 -- confirm callers never feed batch size 1.
    X = tf.squeeze(tf.concat(X, 2))
    return X
class downsize_layer:
    """Length-reducing layer: a ConvLayer whose stride r does the shrinking."""

    def __init__(self, r, layernum, filtersize=[1, 1, 1]):
        """Build the underlying strided convolution.

        r          -- downsampling factor (used as the conv stride)
        layernum   -- integer forwarded to ConvLayer for naming
        filtersize -- conv filter shape forwarded to ConvLayer
        """
        # All of the work is delegated to ConvLayer.
        self.layer = ConvLayer(layernum, filtersize=filtersize, stride=r)

    def exec(self, inputlayer):
        """Run the strided convolution over inputlayer and return its output."""
        conv = self.layer
        return conv.exec(inputlayer)
class upsize_layer:
    """Length-increasing layer: conv, then phase_shift, then activation + BN."""

    def __init__(self, prev_len, prev_filters, r, layernum):
        """Build the pre-shuffle convolution.

        prev_len     -- length dimension of the incoming tensor
        prev_filters -- channel count of the incoming tensor
        r            -- upsampling factor passed to phase_shift
        layernum     -- integer used for variable/op naming
        """
        # Width-3 convolution that halves the channel count before the
        # phase-shift shuffle (C-style (int)(...) casts replaced with //).
        self.layer = ConvLayer(layernum,
                               filtersize=[3, prev_filters, prev_filters // 2],
                               stride=1)
        self.r = r
        self.prev_len = prev_len
        self.prev_filters = prev_filters
        self.layernum = layernum

    def exec(self, inputlayer):
        """Apply conv -> phase_shift -> leaky ReLU -> batch norm."""
        X = phase_shift(self.layer.exec(inputlayer), self.r, self.prev_len,
                        self.prev_filters // 2, self.layernum)
        X = tf.nn.leaky_relu(X)
        X = tf.layers.batch_normalization(X)
        return X
class FCLayer:
    """Fully-connected layer that flattens a (batch, len, filters) input.

    acttype selects the activation: 1 -> leaky ReLU, 2 -> sigmoid.
    """

    def __init__(self, inputlen, inputfilters, outputlen, acttype):
        """inputlen * inputfilters is the flattened input width; outputlen
        is the layer's output size."""
        # Flattened input width, stored so exec() does not have to recover
        # it from a dynamic tf.shape() op at graph-run time.
        self.inputdim = inputlen * inputfilters
        self.W = tf.get_variable(
            name='FCWeight', shape=[self.inputdim, outputlen],
            initializer=tf.contrib.layers.xavier_initializer())
        self.b = tf.Variable(tf.zeros([outputlen]), name='FCBias')
        self.acttype = acttype

    def exec(self, inputlayer):
        """Flatten inputlayer, apply the affine map and the activation."""
        # Removed a leftover debug print(tf.shape(inputlayer)) -- it only
        # printed the Tensor object, never an actual shape.  The reshape now
        # uses the statically known width instead of tf.shape(self.W)[0].
        logits = (tf.matmul(tf.reshape(inputlayer, [-1, self.inputdim]),
                            self.W) + self.b)
        if self.acttype == 1:
            return tf.nn.leaky_relu(logits)
        if self.acttype == 2:
            return tf.sigmoid(logits)
        # Previously fell through and silently returned None.
        raise ValueError("unsupported acttype: {}".format(self.acttype))
class lFCLayer:
    """Fully-connected layer for an already-flat (batch, inputlen) input.

    acttype selects the activation: 1 -> ReLU, 2 -> sigmoid.
    """

    def __init__(self, layernum, inputlen, outputlen, acttype):
        """layernum namespaces the variables; inputlen/outputlen set the
        weight matrix shape."""
        self.W = tf.get_variable(
            name='Weight{}'.format(layernum), shape=[inputlen, outputlen],
            initializer=tf.contrib.layers.xavier_initializer())
        self.b = tf.Variable(tf.zeros([outputlen]),
                             name='Bias{}'.format(layernum))
        self.acttype = acttype

    def exec(self, inputlayer):
        """Apply the affine map and the selected activation."""
        # Shared affine term (was duplicated across both branches).
        logits = tf.matmul(inputlayer, self.W) + self.b
        if self.acttype == 1:
            # NOTE: plain ReLU here vs leaky ReLU in FCLayer -- kept as-is.
            return tf.nn.relu(logits)
        if self.acttype == 2:
            return tf.sigmoid(logits)
        # Previously fell through and silently returned None.
        raise ValueError("unsupported acttype: {}".format(self.acttype))
Add Comment
Please, Sign In to add comment