Advertisement
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- LSTMCell = tf.contrib.rnn.BasicLSTMCell(HIDDEN_SIZE, state_is_tuple=True)
- LSTMCell = tf.contrib.rnn.DropoutWrapper(LSTMCell, output_keep_prob=0.5, seed=42)
- MultiRNNCell = tf.contrib.rnn.MultiRNNCell([LSTMCell]* NUM_LAYERS, state_is_tuple=True)
- state = get_initialstate_variables(MultiRNNCell)
- rnn_outputs,finalstate = tf.nn.dynamic_rnn(MultiRNNCell, x, dtype=tf.float32,initial_state=state)#, sequence_length=seqlen, getSequenceLength(sequence))
- rnn_outputs = tf.reshape(rnn_outputs, [-1, HIDDEN_SIZE])
- logits = tf.add(tf.matmul(rnn_outputs, weights['output']),bias['output'])
- v = [i+(TRUNCATED_BACKPROPAGATION_FEATUREFRAME- NUM_FRAMELABELS_TOPREDICT) for i in range(NUM_FRAMELABELS_TOPREDICT)]
- index = tf.transpose([(tf.range(0,BATCH_SIZE)*TRUNCATED_BACKPROPAGATION_FEATUREFRAME) + i for i in v])
- index = tf.reshape(index, [-1])
- logits = tf.gather(logits, index)
- logits = tf.reshape(logits, [BATCH_SIZE, -1])#, FEATURE_PER_FRAME])
- return logits
- x_split = tf.split(features, TRUNCATED_BACKPROPAGATION_FEATUREFRAME,1)
- y_split = tf.split( labels, NUM_FRAMELABELS_TOPREDICT, 1)
- weights = {
- 'output': tf.get_variable("wo", initializer=tf.random_normal([LSTM_SIZE*2, FEATURE_PER_FRAME], seed=seed, stddev=1/np.sqrt(FEATURE_PER_FRAME * 1.0)))
- }
- biases = {
- 'output': tf.get_variable("bo", initializer=tf.random_normal([FEATURE_PER_FRAME], seed=seed, stddev=1/np.sqrt(FEATURE_PER_FRAME * 1.0)))
- }
- lstm = tf.contrib.rnn.BasicLSTMCell(LSTM_SIZE, state_is_tuple=True)
- cost = tf.constant(0.0);
- output_layer_final=[]
- initial_state = state = (tf.zeros([BATCH_SIZE, lstm.state_size[0]]), tf.zeros([BATCH_SIZE, lstm.state_size[1]]))
- with tf.variable_scope("myrnn") as scope:
- for i in range(TRUNCATED_BACKPROPAGATION_FEATUREFRAME):
- if i > 0:
- scope.reuse_variables()
- output, state = lstm(x_split[i], state)
- if i >= (TRUNCATED_BACKPROPAGATION_FEATUREFRAME - NUM_FRAMELABELS_TOPREDICT):
- print i
- output_layer = tf.add(tf.matmul(tf.concat([state[0], state[1]], 1), weights['output']), biases['output'])
- c = tf.reduce_mean(tf.square(output_layer - y_split[i-(TRUNCATED_BACKPROPAGATION_FEATUREFRAME - NUM_FRAMELABELS_TOPREDICT)]))
- if i == (TRUNCATED_BACKPROPAGATION_FEATUREFRAME- NUM_FRAMELABELS_TOPREDICT):
- output_layer_final = output_layer
- cost = c
- elif i== (TRUNCATED_BACKPROPAGATION_FEATUREFRAME-1):
- output_layer_final = tf.concat([output_layer_final, output_layer], 1, name="output")
- cost = tf.add(cost, c)
- else:
- output_layer_final = tf.concat([output_layer_final, output_layer], 1)
- cost = tf.add(cost,c)
- cost = tf.divide(cost, tf.constant(NUM_FRAMELABELS_TOPREDICT, dtype=tf.float32))
- logits = output_layer_final
- return cost, logits
Advertisement
Add Comment
Please sign in to add a comment.
Advertisement