input_data = tf.placeholder(tf.float32, shape=[1,n_features], name="train_input")
pw = tf.placeholder(tf.float32, shape=[n_features,num_hidden], name='weights')
pbh_ = tf.placeholder(tf.float32, shape=[num_hidden],name='hidden_bias')
pbv_ = tf.placeholder(tf.float32, shape=[n_features],name='visible_bias')

Traceback (most recent call last):
  File "rbm_1.py", line 245, in <module>
    tr_err = train_network()
  File "rbm_1.py", line 197, in train_network
    n_w = sess.run([g['w_upd8']], feed_dict={pw: o_w, pbh_: o_hb, pbv_: o_vb})
  File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 895, in run
    run_metadata_ptr)
  File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1124, in _run
    feed_dict_tensor, options, run_metadata)
  File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1321, in _do_run
    options, run_metadata)
  File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1340, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'train_input' with dtype float and shape [1,50000]
     [[Node: train_input = Placeholder[dtype=DT_FLOAT, shape=[1,50000], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]

Caused by op u'train_input', defined at:
  File "rbm_1.py", line 244, in <module>
    g = struct_network()
  File "rbm_1.py", line 100, in struct_network
    input_data = tf.placeholder(tf.float32, shape=[1,n_features], name="train_input")
  File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/array_ops.py", line 1548, in placeholder
    return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)
  File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 2094, in _placeholder
    name=name)
  File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
    op_def=op_def)
  File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2630, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "/home/wolborg/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1204, in __init__
    self._traceback = self._graph._extract_stack()  # pylint: disable=protected-access

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'train_input' with dtype float and shape [1,50000]
     [[Node: train_input = Placeholder[dtype=DT_FLOAT, shape=[1,50000], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
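
The error above is the generic TensorFlow 1.x behaviour: any fetched op that depends on a placeholder raises InvalidArgumentError unless that placeholder appears in feed_dict. A minimal, self-contained sketch of the same failure mode (the names x and doubled are illustrative only, not from rbm_1.py):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[1, 4], name="x")   # analogous to 'train_input'
doubled = x * 2.0                                         # this op depends on the placeholder

with tf.Session() as sess:
    # sess.run(doubled)  # would raise InvalidArgumentError: placeholder 'x' was not fed
    out = sess.run(doubled, feed_dict={x: np.ones((1, 4), np.float32)})
    print(out)
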
import tensorflow as tf
import numpy as np

# for taking MFCC and label input
import rnn_input_data
import sound_constants

# for displaying elapsed time
import calendar as cal
import time
import sys
import os

# Training Parameters
num_input = 198            # total number of MFCC examples (training + testing)
n_features = 50000         # length of one flattened MFCC feature vector
training_data_size = 150   # number of files used for training; the rest are used for testing
testing_data_size = num_input - training_data_size

# Network Parameters
learning_rate = 0.0001     # for a large training set this can be raised to 0.001
num_hidden = 300           # number of hidden units
num_classes = 28           # alphabet classes (a-z) plus extra symbols (', ' ')
epoch = 5                  # number of training epochs
batch_size = 1             # examples per batch
gibbs_sampling_steps = 1

# shutting down debug logs
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

####################################################################################
mfcc_coeffs, _ = rnn_input_data.mfcc_and_text_encoding()

class DataGenerator:
    def __init__(self, data_size):
        self.ptr = 0
        self.epochs = 0
        self.data_size = data_size

    def next_batch(self):
        if self.ptr > self.data_size:
            self.epochs += 1
            self.ptr = 0

        self.ptr += batch_size

        return mfcc_coeffs[self.ptr - batch_size : self.ptr]


def sample_hidden_from_visible(visible):
    # hidden-unit activation probabilities given a visible vector
    hprobs = tf.nn.sigmoid(tf.add(tf.matmul(visible, w), bh_))
    #hstates = tf.nn.relu(tf.sign(hprobs - hrand))
    return hprobs


def sample_visible_from_hidden(hidden, n_features):
    # visible units sampled around the reconstruction activation
    visible_activation = tf.add(tf.matmul(hidden, tf.transpose(w)), bv_)
    vprobs = tf.truncated_normal((1, n_features), mean=visible_activation, stddev=0.1)
    return vprobs


def gibbs_sampling_step(visible, n_features):
    # one Gibbs step: visible -> hidden -> visible -> hidden
    hprobs = sample_hidden_from_visible(visible)
    vprobs = sample_visible_from_hidden(hprobs, n_features)
    hprobs1 = sample_hidden_from_visible(vprobs)

    return hprobs, vprobs, hprobs1

w = tf.Variable(tf.truncated_normal(shape=[n_features,num_hidden], stddev=0.1), name='weights')
bh_ = tf.Variable(tf.constant(0.1, shape=[num_hidden]),name='hidden_bias')
bv_ = tf.Variable(tf.constant(0.1, shape=[n_features]),name='visible_bias')

pw = tf.placeholder(tf.float32, shape=[n_features,num_hidden], name='weights')
pbh_ = tf.placeholder(tf.float32, shape=[num_hidden],name='hidden_bias')
pbv_ = tf.placeholder(tf.float32, shape=[n_features],name='visible_bias')


#hrand = np.random.rand(num_hidden,num_hidden)
#vrand = np.random.rand(n_features, num_hidden)
def struct_network():

    input_data = tf.placeholder(tf.float32, shape=[1,n_features], name="train_input")

    encode = sample_hidden_from_visible(input_data)
    reconstruction = sample_visible_from_hidden(encode, n_features)

    hprob0, vprob, hprob1 = gibbs_sampling_step(input_data, n_features)
    # positive-phase statistics of contrastive divergence
    positive = tf.matmul(tf.transpose(input_data), hprob0)

    nn_input = vprob

    for step in range(gibbs_sampling_steps - 1):
        hprob, vprob, hprob1 = gibbs_sampling_step(nn_input, n_features)
        nn_input = vprob

    # negative-phase statistics after the Gibbs chain
    negative = tf.matmul(tf.transpose(vprob), hprob1)

    w_upd8 = w + (learning_rate * (positive - negative))
    bh_upd8 = bh_ + learning_rate * tf.reduce_mean(hprob0 - hprob1, 0)
    bv_upd8 = bv_ + learning_rate * tf.reduce_mean(n_features - vprob, 0)

    # reconstruction error on the current input
    h_rand = tf.nn.sigmoid(tf.add(tf.matmul(input_data,w), bh_))
    v_rand = tf.nn.sigmoid(tf.add(tf.matmul(h_rand, tf.transpose(w)),bv_))
    err = input_data - v_rand

    err_sum = tf.reduce_mean(err)

    # returning components as dictionary elements
    return {'input_data': input_data,
            'reconstruction_error': err_sum,
            'w_upd8': w_upd8,
            'bh_upd8': bh_upd8,
            'bv_upd8': bv_upd8
            }


def train_network():

    with tf.Session() as sess:

        train_instance = DataGenerator(training_data_size)

        sess.run(tf.global_variables_initializer())
        step, error = 0, 0
        tr_err = []
        current_epoch = 0

        while current_epoch < epoch:
            start_time = cal.timegm(time.gmtime())
            step += 1
            trb = train_instance.next_batch()

            n_w = np.zeros([n_features, num_hidden], np.float32)
            n_vb = np.zeros([n_features], np.float32)
            n_hb = np.zeros([num_hidden], np.float32)
            o_w = np.zeros([n_features, num_hidden], np.float32)
            o_vb = np.zeros([n_features], np.float32)
            o_hb = np.zeros([num_hidden], np.float32)

            print(trb[0])

            n_w = sess.run([g['w_upd8']], feed_dict={pw: o_w, pbh_: o_hb, pbv_: o_vb})
            n_hb = sess.run([g['bh_upd8']], feed_dict={pw: o_w, pbh_: o_hb, pbv_: o_vb})
            n_vb = sess.run([g['bv_upd8']], feed_dict={pw: o_w, pbh_: o_hb, pbv_: o_vb})

            feed = {g['input_data']: np.transpose(trb[0])}
            error_ = sess.run([g['reconstruction_error']], feed_dict=feed)[0]
            error = error - error_

            o_w = n_w
            o_vb = n_vb
            o_hb = n_hb

            if train_instance.epochs > current_epoch:
                current_epoch += 1
                tr_err.append(error / step)
                step, error = 0, 0

        return tr_err

g = struct_network()
tr_err = train_network()
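
A possible adjustment for the sess.run calls inside train_network (a sketch only, assuming the intent is to train on one MFCC row per step): the fetched update ops are built from input_data, w, bh_ and bv_, not from pw/pbh_/pbv_, so the placeholder that actually has to be fed is g['input_data'], reshaped to the declared [1, n_features].

            # Sketch of the inner-loop run calls; trb, g, sess and n_features are the
            # names defined above. The reshape/cast is an assumption about the layout
            # of one MFCC batch row so that it matches the placeholder shape [1, n_features].
            batch = np.reshape(trb[0], (1, n_features)).astype(np.float32)
            feed = {g['input_data']: batch}

            n_w, n_hb, n_vb, error_ = sess.run(
                [g['w_upd8'], g['bh_upd8'], g['bv_upd8'], g['reconstruction_error']],
                feed_dict=feed)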