# Convolutional autoencoder for MNIST in TensorFlow 1.x.
# The imports, hyperparameters, and helper functions below were not part of
# the original paste; they are assumed so the snippet runs end to end.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('/tmp/data/')

# Assumed hyperparameters (not in the original paste).
num_input = 784          # 28 * 28 pixels per MNIST image
batch_size = 256         # matches the batch dimension in the error below
learning_rate = 0.001
num_steps = 30000
display_steps = 1000

# Assumed helpers (not in the original paste).
def conv2d(x, w, b, strides=1):
    x = tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(x, b))

def max_pool2d(x, k=2):
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')

def deconv2d(x, w, b, output_shape, strides=2):
    x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape,
                               strides=[1, strides, strides, 1], padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(x, b))

def conv_encoder(x, weights, biases):
    # 784-vector -> 28x28x1 image, then two conv+pool stages:
    # 28x28x32 -> 14x14x32 -> 14x14x64 -> 7x7x64.
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    conv1 = conv2d(x, weights['ew1'], biases['eb1'])
    pool1 = max_pool2d(conv1)
    conv2 = conv2d(pool1, weights['ew2'], biases['eb2'])
    pool2 = max_pool2d(conv2)
    return pool2

def conv_decoder(x, weights, biases):
    # Two stride-2 transposed convolutions upsample 7x7x64 -> 14x14x32 -> 28x28x16,
    # then a stride-1 transposed convolution maps down to one output channel.
    deconv1 = deconv2d(x, weights['dw1'], biases['db1'], [batch_size, 14, 14, 32], strides=2)
    deconv2 = deconv2d(deconv1, weights['dw2'], biases['db2'], [batch_size, 28, 28, 16], strides=2)
    output = deconv2d(deconv2, weights['out'], biases['out'], [batch_size, 28, 28, 1], strides=1)
    return output

# Encoder filters: [height, width, in_channels, out_channels].
ew_conv = {
    'ew1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    'ew2': tf.Variable(tf.random_normal([5, 5, 32, 64]))
}

# Decoder filters for conv2d_transpose: [height, width, out_channels, in_channels].
dw_conv = {
    'dw1': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    'dw2': tf.Variable(tf.random_normal([5, 5, 16, 32])),
    'out': tf.Variable(tf.random_normal([5, 5, 1, 16]))
}

eb_conv = {
    'eb1': tf.Variable(tf.random_normal([32])),
    'eb2': tf.Variable(tf.random_normal([64]))
}

db_conv = {
    'db1': tf.Variable(tf.random_normal([32])),
    'db2': tf.Variable(tf.random_normal([16])),
    'out': tf.Variable(tf.random_normal([1]))
}

x = tf.placeholder(tf.float32, shape=[None, num_input])
encode_op = conv_encoder(x, ew_conv, eb_conv)
decode_op = conv_decoder(encode_op, dw_conv, db_conv)

y_pred = decode_op   # shape [batch_size, 28, 28, 1]
y_true = x           # shape [None, 784] -- note: not the same shape as y_pred

loss = tf.reduce_mean(tf.pow(y_pred - y_true, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)

    for step in range(1, num_steps + 1):
        batch_xs, _ = mnist.train.next_batch(batch_size)  # labels unused by the autoencoder
        _, l = sess.run([optimizer, loss], feed_dict={x: batch_xs})

        if step % display_steps == 0 or step == 1:
            print('Step %i: Minibatch Loss: %f' % (step, l))
Running this crashes during training with:

OOM when allocating tensor of shape [256,28,28,784] and type float
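The error shape is a strong hint that the problem is the loss, not the model: y_pred has shape [batch_size, 28, 28, 1] while y_true = x has shape [None, 784]. Because the batch dimension is unknown when the graph is built, TensorFlow accepts the subtraction and broadcasts the operands out to [256, 28, 28, 784] (exactly the shape in the error) instead of flagging a mismatch. A minimal fix, as a sketch assuming the graph above is otherwise unchanged, is to reshape the target to match the decoder output:

# Give y_true the same shape as y_pred so the elementwise loss
# no longer broadcasts to a [256, 28, 28, 784] tensor.
y_true = tf.reshape(x, [-1, 28, 28, 1])
loss = tf.reduce_mean(tf.pow(y_pred - y_true, 2))

Flattening the prediction instead, with tf.reshape(decode_op, [-1, num_input]), and comparing it against x directly would work just as well.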