Not a member of Pastebin yet? Sign up — it unlocks many cool features!
# Build a single-layer LSTM sequence classifier graph (TensorFlow 1.x API).
# Processes one variable-length sequence of 3-feature time steps per run
# and emits per-step class logits. `weights`, `biases`, and `learning_rate`
# are defined elsewhere in the file.

LSTM_cells = 200  # number of hidden units in the LSTM cell
num_classes = 2

lstm_cell = rnn.BasicLSTMCell(LSTM_cells, forget_bias=1.0)
state_in = lstm_cell.zero_state(1, tf.float32)  # batch size fixed at 1

# Placeholders: X holds the flattened (time, 3) input sequence,
# Y holds the one-hot labels per time step.
X = tf.placeholder(tf.float32, [None, 3])
Y = tf.placeholder(tf.float32, [None, num_classes])

# BUG FIX: the original rebound X to the reshaped tensor, which made the
# [None, 3] placeholder unreachable for feed_dict at session time. Bind the
# reshaped (batch=1, time, features) view to a separate name instead.
X_seq = tf.reshape(X, [1, -1, 3])

rnnex_t, rnn_state = tf.nn.dynamic_rnn(
    inputs=X_seq, cell=lstm_cell, dtype=tf.float32, initial_state=state_in)
# Flatten the (1, time, LSTM_cells) outputs to (time, LSTM_cells) so a single
# shared projection can be applied to every time step.
rnnex = tf.reshape(rnnex_t, [-1, LSTM_cells])

# Linear projection to per-step class scores.
out = tf.add(tf.matmul(rnnex, weights['out']), biases['out'])
logits = tf.reshape(out, [-1, num_classes])
prediction = tf.nn.softmax(logits)

# Define loss and optimizer.
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
Add Comment
Please sign in to add a comment.