Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
# Build the classifier graph: forward pass, softmax cross-entropy loss,
# SGD training op, and a per-example argmax prediction op.
# NOTE(review): model, X, Y, w_h, w_o and the train/test arrays are defined
# elsewhere in the file — presumably TF placeholders/variables; confirm.
py_x = model(X, w_h, w_o)

# FIX: softmax_cross_entropy_with_logits requires keyword arguments
# (logits=..., labels=...) in TF >= 1.5; the old positional call raises.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
predict_op = tf.argmax(py_x, 1)  # predicted class index per example

# Use a context manager so the session is always released, even on error.
with tf.Session() as sess:
    # FIX: initialize_all_variables() has been deprecated since TF 0.12.
    sess.run(tf.global_variables_initializer())

    for epoch in range(700):
        # One example per step (batch size 1).
        # FIX: the end range must reach len(X_train) + 1, otherwise the
        # final training example is never fed (last slice stopped one short).
        for start, end in zip(range(0, len(X_train), 1),
                              range(1, len(X_train) + 1, 1)):
            sess.run(train_op,
                     feed_dict={X: X_train[start:end],
                                Y: y_train[start:end]})

        # Report test accuracy every 100 epochs.
        if epoch % 100 == 0:
            # NOTE(review): the test inputs are named x_test (lowercase)
            # while training uses X_train — verify against the defining code.
            accuracy = np.mean(
                np.argmax(y_test, axis=1) ==
                sess.run(predict_op, feed_dict={X: x_test, Y: y_test}))
            print(epoch, accuracy)
Advertisement
Add Comment
Please sign in to add a comment
Advertisement