Guest User

Untitled

a guest
Apr 26th, 2018
100
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 4.53 KB | None | 0 0
  1. Traceback (most recent call last):
  2. File "D:/Development_Avector/PycharmProjects/TensorFlow/gan.py", line 41, in <module>
  3. DgL = discriminator(G_sample, reuse=True)
  4. File "D:/Development_Avector/PycharmProjects/TensorFlow/gan.py", line 31, in discriminator
  5. Z1 = tf.layers.conv2d(x, kernel_size=5,filters=64, strides=2, padding='SAME')
  6. File "D:Development_AvectoAnacondaenvstensorflowlibsite-packagestensorflowpythonlayersconvolutional.py", line 551, in conv2d
  7. return layer.apply(inputs)
  8. File "D:Development_AvectoAnacondaenvstensorflowlibsite-packagestensorflowpythonlayersbase.py", line 492, in apply
  9. return self.__call__(inputs, *args, **kwargs)
  10. File "D:Development_AvectoAnacondaenvstensorflowlibsite-packagestensorflowpythonlayersbase.py", line 428, in __call__
  11. self._assert_input_compatibility(inputs)
  12. File "D:Development_AvectoAnacondaenvstensorflowlibsite-packagestensorflowpythonlayersbase.py", line 540, in _assert_input_compat
  13. ibility
  14. str(x.get_shape().as_list()))
  15. ValueError: Input 0 of layer conv2d_5 is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: [None, 6]
  16.  
  17. # The part that discriminates
  18. X = tf.placeholder(tf.float32, shape=[None, 299, 299, 3], name='X')
  19.  
  20.  
  21. # The part that generates
  22. Z = tf.placeholder(tf.float32, shape=[None, 299, 299, 3], name='Z')
  23.  
  24.  
  25.  
  26. def generator(z,reuse=False):
  27. with tf.variable_scope('generator',reuse=reuse):
  28. #z = tf.reshape(z, shape=[-1, 299, 299, 64])
  29. Z1 = tf.layers.conv2d(z,kernel_size=5,filters=64, strides=2, padding='SAME')
  30. A1 = tf.nn.relu(Z1)
  31. Z2 = tf.layers.conv2d(A1,kernel_size=5, filters=64, strides=1, padding='SAME')
  32. A2 = tf.nn.relu(Z2)
  33. P2 = tf.contrib.layers.flatten(A2)
  34. Z3 = tf.contrib.layers.fully_connected(P2, 6, activation_fn=None)
  35. return Z3
  36.  
  37.  
  38. def discriminator(x,reuse=False):
  39. with tf.variable_scope('discriminator',reuse=reuse):
  40. #x = tf.reshape(x, shape=[-1, 299, 299, 3])
  41. Z1 = tf.layers.conv2d(x, kernel_size=5,filters=64, strides=2, padding='SAME')
  42. A1 = tf.nn.relu(Z1)
  43. Z2 = tf.layers.conv2d(A1, kernel_size=5,filters=64, strides=1, padding='SAME')
  44. A2 = tf.nn.relu(Z2)
  45. P2 = tf.contrib.layers.flatten(A2)
  46. Z3 = tf.contrib.layers.fully_connected(P2, 6, activation_fn=None)
  47. return Z3
  48.  
  49. G_sample = generator(Z)
  50. DxL = discriminator(X)
  51. DgL = discriminator(G_sample, reuse=True)
  52. print (DxL)
  53. D_Disc_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = DxL, labels = tf.ones_like(DxL)))
  54. D_Disc_loss1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = DgL, labels = tf.ones_like(DgL)))
  55. D_MainLoss = D_Disc_loss + D_Disc_loss1
  56. G_Generate_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = DgL, labels = tf.ones_like(DgL)))
  57.  
  58. D_loss = tf.summary.scalar("Discriminator Loss", D_MainLoss)
  59. G_loss = tf.summary.scalar("Generator Loss", G_Generate_loss)
  60. merge = tf.summary.merge_all()
  61.  
  62. variables = tf.trainable_variables()
  63. dvariables = [var for var in variables if var.name.startswith("discriminator")]
  64. print (dvariables)
  65. gvariables = [var for var in variables if var.name.startswith("generator")]
  66. print (gvariables)
  67.  
  68. D_optimizer = tf.train.AdamOptimizer().minimize(D_Disc_loss, var_list=dvariables)
  69. G_optimizer = tf.train.AdamOptimizer().minimize(G_Generate_loss, var_list=gvariables)
  70.  
  71. def train():
  72. filenames = tf.train.string_input_producer(
  73. tf.train.match_filenames_once("D:/Development_Avecto/TensorFlow/resizedimages/*.png"))
  74. reader = tf.WholeFileReader()
  75. _, input = reader.read(filenames)
  76. input = tf.Print(input,[input,tf.shape(input),"Input shape"])
  77. input = tf.image.decode_png(input, channels=3)
  78. input.set_shape([299, 299, 3])
  79.  
  80. batch = tf.train.batch([input],
  81. batch_size=2)
  82.  
  83. init = (tf.global_variables_initializer(), tf.local_variables_initializer())
  84.  
  85. with tf.Session() as sess:
  86. sess.run(init)
  87. coord = tf.train.Coordinator()
  88. threads = tf.train.start_queue_runners(coord=coord)
  89. train_writer = tf.summary.FileWriter('D:/Development_Avecto/TensorFlow/logs/1/train', sess.graph)
  90.  
  91.  
  92. for it in range(50):
  93. _, X_batch = sess.run([input,batch])
  94. summary,_, DiscriminatorLoss = sess.run([merge,D_optimizer, D_Disc_loss], feed_dict={X: X_batch})
  95. summary,_, GeneratorLoss = sess.run([merge,G_optimizer, G_Generate_loss])
  96.  
  97. train_writer.add_summary(summary, it)
  98. train_writer.flush()
  99.  
  100. train_writer.close()
  101. coord.request_stop()
  102. coord.join(threads)
  103. sess.close()
  104.  
#x = tf.reshape(x, shape=[-1, 299, 299, 3])

# NOTE(review): dangling fragment — `x` is not defined at module scope here.
# Presumably this was meant to live inside discriminator() (it mirrors the
# commented-out reshape there); confirm and relocate or delete before running.
dim = x.get_shape().as_list()
x = tf.reshape(x, shape = [-1, *dim[1:]])
Add Comment
Please, Sign In to add comment