Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
# Hyper-parameters.
v_dimen = 300       # input feature dimensionality
n_samples = 10000   # total number of synthetic samples
batch_size = 32     # mini-batch size consumed per training step

# Synthetic data, built as TF1 graph tensors (re-sampled on each enqueue run).
X = tf.random_normal([n_samples, v_dimen], mean=0, stddev=1)
Y = tf.random_normal([n_samples, 1], mean=0, stddev=1)

# BUG FIX: the original used two independent FIFO queues (q_in for X, q_out
# for Y). Their queue-runner threads are scheduled independently, so a
# dequeued X batch was NOT guaranteed to line up with its matching Y rows.
# A single queue that enqueues (x, y) row pairs together keeps inputs and
# targets aligned. enqueue_many/dequeue_many also makes batch_size effective:
# the original dequeue() returned the whole enqueued tensor and ignored it.
q = tf.FIFOQueue(
    capacity=5 * batch_size,                 # buffer ~5 mini-batches of rows
    dtypes=[tf.float32, tf.float32],
    shapes=[[v_dimen], [1]],                 # per-row shapes for X and Y
)
enqueue_op = q.enqueue_many([X, Y])          # enqueue paired rows
number_of_threads = 1
qr = tf.train.QueueRunner(q, [enqueue_op] * number_of_threads)
tf.train.add_queue_runner(qr)
# One aligned mini-batch per sess.run: X_batch is (batch_size, v_dimen),
# Y_batch is (batch_size, 1).
X_batch, Y_batch = q.dequeue_many(batch_size)

# Simple linear model. The original called an undefined f(X_batch); its own
# inline comment said "like tf.matmul(X_batch, W)", so use exactly that.
W = tf.Variable(
    tf.random.truncated_normal((v_dimen, 1), mean=0.0, stddev=0.001)
)
predicted_Y = tf.matmul(X_batch, W)
loss = tf.nn.l2_loss(Y_batch - predicted_Y)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(
    loss, var_list=[W]
)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # try/finally guarantees the queue-runner threads are stopped and joined
    # even if a training step raises (e.g. OutOfRangeError).
    try:
        for _ in range(1000):
            sess.run([optimizer])            # each run consumes one mini-batch
    finally:
        coord.request_stop()
        coord.join(threads)
Add Comment
Please sign in to add a comment