Advertisement
Guest User

Untitled

a guest
Oct 23rd, 2016
117
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 4.21 KB | None | 0 0
  1.     def length(sequence): ##Zero padding to fit the max lenght... Question whether that is a good idea.
  2.         used = tf.sign(tf.reduce_max(tf.abs(sequence), reduction_indices=2))
  3.         length = tf.reduce_sum(used, reduction_indices=1)
  4.         length = tf.cast(length, tf.int32)
  5.         return length
  6.    
  7.     def cost(output, target):
  8.         # Compute cross entropy for each frame.
  9.         print output
  10.         cross_entropy = target * tf.log(output)
  11.         print "Hello world"
  12.         cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)
  13.         mask = tf.sign(tf.reduce_max(tf.abs(target), reduction_indices=2))
  14.         cross_entropy *= mask
  15.         # Average over actual sequence lengths.
  16.         cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)
  17.         cross_entropy /= tf.reduce_sum(mask, reduction_indices=1)
  18.         return tf.reduce_mean(cross_entropy)
  19.    
  20.     def last_relevant(output):
  21.         max_length = int(output.get_shape()[1])
  22.         relevant = tf.reduce_sum(tf.mul(output, tf.expand_dims(tf.one_hot(length(output), max_length), -1)), 1)
  23.         return relevant
  24.    
# --- Data loading and train/test split -----------------------------------
# dnn_train / dnn_test are directory paths defined earlier in the file
# (not visible here); listdir/isfile/join come from os / os.path imports.

# Full paths and bare filenames of every regular file in each directory.
files_train_path = [dnn_train+f for f in listdir(dnn_train) if isfile(join(dnn_train, f))]
files_test_path = [dnn_test+f for f in listdir(dnn_test) if isfile(join(dnn_test, f))]

files_train_name = [f for f in listdir(dnn_train) if isfile(join(dnn_train, f))]
files_test_name = [f for f in listdir(dnn_test) if isfile(join(dnn_test, f))]

os.chdir(dnn_train)

# Project helpers defined elsewhere -- presumably they decode the sound
# files into per-example feature arrays plus targets; verify signatures.
train_name,train_data = generate_list_of_names_data(files_train_path)
train_data, train_names, train_output_data, train_class_output = load_sound_files(files_train_path,train_name,train_data)

max_length = 0 ## Used for variable sequence input

# Longest example in the corpus; used later as the padded time dimension.
for element in train_data:
    if element.size > max_length:
        max_length = element.size

# Python 2 integer division: first half trains, second half tests.
# NOTE(review): this split ignores files_test_path entirely -- the
# "test" data is carved out of the training directory; confirm intended.
NUM_EXAMPLES = len(train_data)/2

test_data = train_data[NUM_EXAMPLES:]
test_output = train_output_data[NUM_EXAMPLES:]

train_data = train_data[:NUM_EXAMPLES]
train_output = train_output_data[:NUM_EXAMPLES]
print("--- %s seconds ---" % (time.time() - start_time))
  50.    
#----------------------------------------------------------------------#
#----------------------------Main--------------------------------------#
### Tensorflow neural network setup
# Legacy TF <= 1.x graph-mode API (placeholders, reduction_indices,
# tf.nn.rnn_cell, tf.mul).

batch_size = None          # None = variable batch dimension in the graph
sequence_length_max = max_length
input_dimension=1          # one scalar feature per timestep

# Inputs: zero-padded sequences and 14-way one-hot class targets.
data = tf.placeholder(tf.float32,[batch_size,sequence_length_max,input_dimension])
target = tf.placeholder(tf.float32,[None,14])

num_hidden = 24 ## Hidden layer
cell = tf.nn.rnn_cell.LSTMCell(num_hidden,state_is_tuple=True)  ## Long short term memory

# sequence_length=length(data) makes the RNN stop at each example's real
# length instead of processing the zero padding.
output, state = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32,sequence_length = length(data))  ## Creates the Rnn skeleton

last = last_relevant(output)#tf.gather(val, int(val.get_shape()[0]) - 1) ## Appedning as last

# Softmax classification layer on top of the last relevant RNN output.
weight = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))

prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)

# NOTE(review): cost() reduces over axis 2, but target here is rank-2
# (batch, 14) and `output` is the full per-frame RNN output -- this
# looks inconsistent with the per-class `prediction`; was
# cost(prediction, target) (with a rank-2-aware cost) intended?
cross_entropy = cost(output,target)# How far am I from correct value?

optimizer = tf.train.AdamOptimizer() ## TensorflowOptimizer
minimize = optimizer.minimize(cross_entropy)

# Fraction of examples whose argmax class disagrees with the target.
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
  81.    
## Training ##

# TF <= 1.x style: explicit variable init op and session.
init_op = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init_op)

# Mini-batch size for feeding; the graph's batch dimension is None so
# any size is accepted.
batch_size = 1000
# NOTE(review): if the training set has fewer than 1000 examples this
# is 0 and the inner loop never runs -- no training happens; confirm
# the dataset size. Any remainder examples are also silently dropped.
no_of_batches = int(len(train_data)/batch_size)
epoch = 5000
for i in range(epoch):
    ptr = 0
    for j in range(no_of_batches):
        # Next contiguous mini-batch (data is never reshuffled between
        # epochs).
        inp, out = train_data[ptr:ptr+batch_size], train_output[ptr:ptr+batch_size]
        ptr+=batch_size
        sess.run(minimize,{data: inp, target: out})
    print "Epoch - ",str(i)
# Single evaluation on the held-out half after all epochs.
# NOTE(review): feeding Python lists of variable-size arrays assumes
# they were zero-padded to max_length upstream -- not visible here.
incorrect = sess.run(error,{data: test_data, target: test_output})
print('Epoch {:2d} error {:3.1f}%'.format(i + 1, 100 * incorrect))
sess.close()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement