import tensorflow as tf


class ConvLayer:
    def __init__(self, layernum, filtersize=[1, 1, 1], stride=1):
        self.layernum = layernum
        with tf.name_scope('convLayer{}'.format(layernum)):
            # Filter weights, shape [filter_width, in_channels, out_channels].
            self.W = tf.Variable(tf.random_normal(filtersize, mean=0, stddev=0.2, dtype=tf.float32),
                                 name='Weight{}'.format(layernum))
            self.b = tf.Variable(tf.zeros(filtersize[-1]), name='Bias{}'.format(layernum))
        self.stride = stride

    def exec(self, inputlayer):
        output = tf.nn.conv1d(inputlayer, self.W, stride=self.stride, padding="SAME",
                              name="conv{}".format(self.layernum)) + self.b
        output = tf.nn.leaky_relu(output)
        output = tf.layers.batch_normalization(output)
        # output = tf.layers.dropout(output, training=True)
        return output


def phase_shift(I, r, layerlen, filters, layernum):
    # 1-D sub-pixel shuffle: fold r groups of channels into the length axis,
    # so [batch, layerlen, filters] -> [batch, layerlen * r, filters // r].
    ofilters = filters // r
    X = tf.reshape(I, [-1, layerlen, r, ofilters], name="PS{}".format(layernum))
    X = tf.split(X, layerlen, 1)
    X = tf.squeeze(tf.concat(X, 2), axis=1)
    return X


class downsize_layer:
    def __init__(self, r, layernum, filtersize=[1, 1, 1]):
        # A strided convolution that shrinks the sequence length by a factor of r.
        self.layer = ConvLayer(layernum, filtersize=filtersize, stride=r)

    def exec(self, inputlayer):
        return self.layer.exec(inputlayer)


class upsize_layer:
    def __init__(self, prev_len, prev_filters, r, layernum):
        # The convolution halves the channel count; phase_shift then trades
        # channels for an r-fold increase in sequence length.
        self.layer = ConvLayer(layernum, filtersize=[3, prev_filters, prev_filters // 2], stride=1)
        self.r = r
        self.prev_len = prev_len
        self.prev_filters = prev_filters
        self.layernum = layernum

    def exec(self, inputlayer):
        X = phase_shift(self.layer.exec(inputlayer), self.r, self.prev_len,
                        self.prev_filters // 2, self.layernum)
        X = tf.nn.leaky_relu(X)
        X = tf.layers.batch_normalization(X)
        return X


class FCLayer:
    def __init__(self, inputlen, inputfilters, outputlen, acttype):
        # Fully connected layer applied to a flattened [inputlen, inputfilters] feature map.
        self.insize = inputlen * inputfilters
        self.W = tf.get_variable(name='FCWeight', shape=[self.insize, outputlen],
                                 initializer=tf.contrib.layers.xavier_initializer())
        self.b = tf.Variable(tf.zeros([outputlen]), name='FCBias')
        self.acttype = acttype

    def exec(self, inputlayer):
        # print(tf.shape(inputlayer))  # debug: prints the symbolic shape tensor in graph mode
        flat = tf.reshape(inputlayer, [-1, self.insize])
        if self.acttype == 1:
            return tf.nn.leaky_relu(tf.matmul(flat, self.W) + self.b)
        if self.acttype == 2:
            return tf.sigmoid(tf.matmul(flat, self.W) + self.b)


class lFCLayer:
    def __init__(self, layernum, inputlen, outputlen, acttype):
        # Fully connected layer for inputs that are already flat ([batch, inputlen]).
        self.W = tf.get_variable(name='Weight{}'.format(layernum), shape=[inputlen, outputlen],
                                 initializer=tf.contrib.layers.xavier_initializer())
        self.b = tf.Variable(tf.zeros([outputlen]), name='Bias{}'.format(layernum))
        self.acttype = acttype

    def exec(self, inputlayer):
        if self.acttype == 1:
            return tf.nn.relu(tf.matmul(inputlayer, self.W) + self.b)
        if self.acttype == 2:
            return tf.sigmoid(tf.matmul(inputlayer, self.W) + self.b)
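

# Minimal usage sketch: wires one downsize_layer and one upsize_layer together.
# Assumes TensorFlow 1.x (tf.placeholder, tf.Session) and the classes above;
# the shapes and layer sizes below are illustrative assumptions, not values
# taken from the original code.
if __name__ == '__main__':
    import numpy as np

    # Input: a batch of 1-D signals, [batch, length, channels].
    x = tf.placeholder(tf.float32, [None, 64, 4])

    # Halve the length (64 -> 32) while going from 4 to 8 channels.
    down = downsize_layer(r=2, layernum=1, filtersize=[3, 4, 8])
    # Double the length back (32 -> 64); the conv halves 8 -> 4 channels and
    # phase_shift folds those into the length axis, leaving 2 output channels.
    up = upsize_layer(prev_len=32, prev_filters=8, r=2, layernum=2)

    h = down.exec(x)   # [batch, 32, 8]
    y = up.exec(h)     # [batch, 64, 2]

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(y, feed_dict={x: np.zeros((2, 64, 4), np.float32)})
        print(out.shape)  # (2, 64, 2)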