from __future__ import print_function

import time

import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope as vs

tf.reset_default_graph()
# Input properties
xSize = 13  # xData[0].shape[1]
ySize = 18  # xData[0].shape[0]

# Network parameters
n_input = xSize * ySize
n_hidden_1 = 600
n_hidden_2 = 500
learning_rate = 0.1  # base rate for the adaptive learning-rate schedule (see helper below)
power_t = 0.25       # decay exponent for that schedule
epoch = 100000
dropout = 1.0        # keep probability; no dropout layer appears in the graph as pasted
batch_size = 100
bias_start = 0.0
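# The variable_summaries() calls below assume the helper from the TensorFlow
# TensorBoard tutorial; it is not defined anywhere in the paste, so this is a
# minimal sketch of the expected definition using the TF 0.x summary API.
def variable_summaries(var, name):
    """Attach mean/stddev/min/max/histogram summaries to a tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)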
def reg_perceptron(t, weights, biases):
    """Two-hidden-layer regression perceptron returning a flat prediction vector."""
    t = tf.nn.relu(tf.add(tf.matmul(t, weights['h1']), biases['b1']), name="layer_1")
    t = tf.nn.sigmoid(tf.add(tf.matmul(t, weights['h2']), biases['b2']), name="layer_2")
    t = tf.add(tf.matmul(t, weights['hOut'], name="LOut_MatMul"), biases['bOut'], name="LOut_Add")
    # Name the output "Y_Pred" so it does not collide with the "Y_GroundTruth" placeholder below.
    return tf.reshape(t, [-1], name="Y_Pred")
def sum_of_squares(predictions, targets):
    """Root-mean-square error, with both tensors rescaled by const200 (defined below)."""
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())  # shape sanity check
    predictions = tf.to_float(predictions) * const200
    targets = tf.to_float(targets) * const200
    losses = tf.square(tf.sub(predictions, targets))
    RMSE = tf.sqrt(tf.reduce_mean(losses))
    return RMSE
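# Worked example of the rescaling (the factor comes from const200 below; why
# it is 200 is an assumption about the data preprocessing): a prediction of
# 0.50 against a target of 0.45 gives (200*0.50 - 200*0.45)^2 = 100 squared
# error, i.e. an RMSE contribution of 10 on the rescaled target range.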
# Tensor placeholders and variables
_x = tf.placeholder(tf.float32, [None, n_input], name="X_Input")
_y = tf.placeholder(tf.float32, [None], name="Y_GroundTruth")
_adaptive_learning_rate = tf.placeholder(tf.float32, shape=[], name="adaptive_learning_rate")

_epoch_count = tf.Variable(0, dtype=tf.float32, name="epoch_count")
_cost = tf.Variable(1, dtype=tf.float32, name="cost")
variable_summaries(_cost, "cost")

# test_x/test_y are the held-out evaluation arrays, assumed loaded earlier in the script.
_error = tf.Variable(tf.zeros([len(test_y)]), dtype=tf.float32, name="error")
variable_summaries(_error, "error")

# Constant used by sum_of_squares() to rescale normalized outputs.
const200 = tf.constant(200.0, dtype=tf.float32)
# Network weights and biases
# Why Xavier initialization is a good choice: https://www.quora.com/What-is-an-intuitive-explanation-of-the-Xavier-Initialization-for-Deep-Neural-Networks
# Paper: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
rg_weights = {
    'h1': vs.get_variable("weights0", [n_input, n_hidden_1], initializer=tf.contrib.layers.xavier_initializer()),
    'h2': vs.get_variable("weights1", [n_hidden_1, n_hidden_2], initializer=tf.contrib.layers.xavier_initializer()),
    'hOut': vs.get_variable("weightsOut", [n_hidden_2, 1], initializer=tf.contrib.layers.xavier_initializer())
}
for key, value in rg_weights.items():
    variable_summaries(value, 'weights/' + key)

rg_biases = {
    'b1': vs.get_variable("bias0", [n_hidden_1], initializer=init_ops.constant_initializer(bias_start)),
    'b2': vs.get_variable("bias1", [n_hidden_2], initializer=init_ops.constant_initializer(bias_start)),
    'bOut': vs.get_variable("biasOut", [1], initializer=init_ops.constant_initializer(bias_start))
}
for key, value in rg_biases.items():
    variable_summaries(value, 'biases/' + key)
# Network layer definitions
pred = reg_perceptron(_x, rg_weights, rg_biases)
print(pred)

# Definition of cost function
cost = sum_of_squares(pred, _y)

# Create optimizer; the learning rate is fed per step through the placeholder
optimizer = tf.train.AdagradOptimizer(learning_rate=_adaptive_learning_rate).minimize(cost)
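# `learning_rate` and `power_t` above suggest a scikit-learn-style inverse
# scaling schedule for the fed placeholder; the paste never shows how
# _adaptive_learning_rate is actually computed, so this helper is an assumption.
def adaptive_learning_rate_for(epoch_number):
    # lr_t = learning_rate / t**power_t, with t starting at 1
    return learning_rate / ((epoch_number + 1) ** power_t)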
# Create summary for TensorBoard
merged_summary = tf.merge_all_summaries()

timestamp = int(time.time())
print("Starting session, TS =", timestamp)
# `angle` and `file_count` identify the current dataset and are assumed set earlier.
train_writer = tf.train.SummaryWriter('/data/tensorboard/_' + angle + str(file_count) + '_' + str(timestamp) + '_Leslie' + '/', graph=tf.get_default_graph())
# Session operations
init_op = tf.initialize_all_variables()
inc_epoch_op = _epoch_count.assign_add(1.0)  # increase epoch counter by 1
saver = tf.train.Saver()

with tf.Session() as sess:
    # Restore a previously trained model; do not run init_op after restoring,
    # or the restored weights would be overwritten with fresh initial values.
    #saver.restore(sess, model_save_path)
    saver.restore(sess, "/data/tensorboard/models/Y6_1476978999")

    # Evaluate the restored model on the held-out set. Use a fresh name for
    # the fetched value so the `cost` tensor is not shadowed.
    feed_dict = {_x: test_x, _y: test_y}
    pred_y, cost_value = sess.run([pred, cost], feed_dict=feed_dict)
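    # The paste wires up the optimizer, epoch counter, summaries, and saver but
    # stops before the training loop. Below is a minimal sketch of how those
    # pieces could fit together: train_x/train_y are hypothetical training
    # arrays assumed loaded alongside test_x/test_y, and full-batch updates are
    # used even though batch_size = 100 hints at mini-batching originally.
    for step in range(epoch):
        lr = adaptive_learning_rate_for(step)
        _, summary, train_cost = sess.run(
            [optimizer, merged_summary, cost],
            feed_dict={_x: train_x, _y: train_y, _adaptive_learning_rate: lr})
        sess.run(inc_epoch_op)
        if step % 1000 == 0:
            train_writer.add_summary(summary, step)
            saver.save(sess, "/data/tensorboard/models/Y6_" + str(timestamp))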