Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- Traceback (most recent call last):
- File "D:/Development_Avector/PycharmProjects/TensorFlow/gan.py", line 41, in <module>
- DgL = discriminator(G_sample, reuse=True)
- File "D:/Development_Avector/PycharmProjects/TensorFlow/gan.py", line 31, in discriminator
- Z1 = tf.layers.conv2d(x, kernel_size=5,filters=64, strides=2, padding='SAME')
- File "D:\Development_Avecto\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\layers\convolutional.py", line 551, in conv2d
- return layer.apply(inputs)
- File "D:\Development_Avecto\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\layers\base.py", line 492, in apply
- return self.__call__(inputs, *args, **kwargs)
- File "D:\Development_Avecto\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\layers\base.py", line 428, in __call__
- self._assert_input_compatibility(inputs)
- File "D:\Development_Avecto\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\layers\base.py", line 540, in _assert_input_compatibility
- str(x.get_shape().as_list()))
- ValueError: Input 0 of layer conv2d_5 is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: [None, 6]
# Real-image input for the discriminator: batches of 299x299 RGB images.
X = tf.placeholder(tf.float32, shape=[None, 299, 299, 3], name='X')
# Generator input.  NOTE(review): it is image-shaped here rather than a flat
# noise vector — confirm that is intentional.
Z = tf.placeholder(tf.float32, shape=[None, 299, 299, 3], name='Z')
def generator(z, reuse=False):
    """Generator network: maps `z` to an image-shaped tensor.

    BUG FIX: the original flattened and projected down to a 6-unit dense
    output, producing a [None, 6] tensor.  Feeding that into
    `discriminator` (whose first conv2d needs a rank-4 input) raised the
    "expected ndim=4, found ndim=2. Full shape received: [None, 6]"
    ValueError shown in the traceback above.  This version keeps the
    output image-shaped so `discriminator(G_sample, reuse=True)` works.

    Args:
        z: float32 tensor of shape [None, 299, 299, 3].
        reuse: whether to reuse variables in the 'generator' scope.

    Returns:
        float32 tensor of shape [None, 299, 299, 3] with values in [-1, 1].
    """
    with tf.variable_scope('generator', reuse=reuse):
        # strides=1 + SAME padding preserves the 299x299 spatial size.
        Z1 = tf.layers.conv2d(z, kernel_size=5, filters=64, strides=1, padding='SAME')
        A1 = tf.nn.relu(Z1)
        # Project back down to 3 channels so the output is image-shaped.
        Z2 = tf.layers.conv2d(A1, kernel_size=5, filters=3, strides=1, padding='SAME')
        # tanh bounds pixel values, the conventional choice for GAN outputs.
        return tf.nn.tanh(Z2)
def discriminator(x, reuse=False):
    """Discriminator network.

    Two 5x5 convolutions with ReLU, flattened into a 6-unit linear layer
    (raw logits, no activation).

    Args:
        x: rank-4 float32 tensor [None, 299, 299, 3]; conv2d rejects any
           input that is not 4-D.
        reuse: whether to reuse variables in the 'discriminator' scope.

    Returns:
        [None, 6] tensor of logits.
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        conv1 = tf.layers.conv2d(x, kernel_size=5, filters=64, strides=2, padding='SAME')
        act1 = tf.nn.relu(conv1)
        conv2 = tf.layers.conv2d(act1, kernel_size=5, filters=64, strides=1, padding='SAME')
        act2 = tf.nn.relu(conv2)
        flat = tf.contrib.layers.flatten(act2)
        logits = tf.contrib.layers.fully_connected(flat, 6, activation_fn=None)
        return logits
# --- Graph construction ------------------------------------------------------
G_sample = generator(Z)
DxL = discriminator(X)                     # logits for real images
DgL = discriminator(G_sample, reuse=True)  # logits for fakes (shared weights)
print(DxL)

# Discriminator loss: push real-image logits toward 1 ...
D_Disc_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=DxL, labels=tf.ones_like(DxL)))
# ... and fake-image logits toward 0.  BUG FIX: the original used
# ones_like here, which rewarded the discriminator for calling fakes real.
D_Disc_loss1 = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=DgL, labels=tf.zeros_like(DgL)))
D_MainLoss = D_Disc_loss + D_Disc_loss1

# Non-saturating generator loss: fakes labelled as real.
G_Generate_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=DgL, labels=tf.ones_like(DgL)))

D_loss = tf.summary.scalar("Discriminator Loss", D_MainLoss)
G_loss = tf.summary.scalar("Generator Loss", G_Generate_loss)
merge = tf.summary.merge_all()

# Each optimizer must only update its own network's variables.
variables = tf.trainable_variables()
dvariables = [var for var in variables if var.name.startswith("discriminator")]
print(dvariables)
gvariables = [var for var in variables if var.name.startswith("generator")]
print(gvariables)
# BUG FIX: minimize the combined discriminator loss (real + fake terms),
# not just D_Disc_loss — otherwise the fake branch never trains D, even
# though the summary above logs D_MainLoss.
D_optimizer = tf.train.AdamOptimizer().minimize(D_MainLoss, var_list=dvariables)
G_optimizer = tf.train.AdamOptimizer().minimize(G_Generate_loss, var_list=gvariables)
def train():
    """Build the PNG input pipeline and run 50 alternating D/G steps.

    Reads 299x299 RGB PNGs via a TF1 queue-runner pipeline, batches them,
    and alternates one discriminator step and one generator step per
    iteration, logging merged summaries to TensorBoard.
    """
    import numpy as np  # local import: used only to sample generator noise

    filenames = tf.train.string_input_producer(
        tf.train.match_filenames_once("D:/Development_Avecto/TensorFlow/resizedimages/*.png"))
    reader = tf.WholeFileReader()
    # Renamed from `input` so the builtin is not shadowed.
    _, png_bytes = reader.read(filenames)
    png_bytes = tf.Print(png_bytes, [png_bytes, tf.shape(png_bytes), "Input shape"])
    image = tf.image.decode_png(png_bytes, channels=3)
    image.set_shape([299, 299, 3])
    batch = tf.train.batch([image], batch_size=2)

    init = (tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        train_writer = tf.summary.FileWriter(
            'D:/Development_Avecto/TensorFlow/logs/1/train', sess.graph)
        for it in range(50):
            # Fetch only `batch`: fetching the single decoded image in the
            # same run would dequeue extra records from the pipeline.
            X_batch = sess.run(batch)
            # BUG FIX: `merge` bundles both summaries, which depend on the
            # X *and* Z placeholders, yet the original fed only X on the D
            # step and nothing on the G step — both runs would die with a
            # "must feed a value for placeholder" error.  Feed both.
            Z_batch = np.random.uniform(-1.0, 1.0,
                                        size=[2, 299, 299, 3]).astype('float32')
            feed = {X: X_batch, Z: Z_batch}
            summary, _, DiscriminatorLoss = sess.run(
                [merge, D_optimizer, D_Disc_loss], feed_dict=feed)
            summary, _, GeneratorLoss = sess.run(
                [merge, G_optimizer, G_Generate_loss], feed_dict=feed)
            train_writer.add_summary(summary, it)
            train_writer.flush()
        # Close the writer and stop queue threads once, after the loop
        # (the original's lost indentation suggested close() ran per-step).
        train_writer.close()
        coord.request_stop()
        coord.join(threads)
        # No explicit sess.close(): the `with` block closes the session.
# NOTE(review): stray fix-it snippet appended after the script — `x` is not
# defined at this point, so these lines do not run as-is.  It demonstrates
# reshaping a tensor to its own static trailing dimensions; for a [None, 6]
# tensor `dim[1:]` is just [6], so this alone would NOT resolve the
# "expected ndim=4" error from the traceback above.
#x = tf.reshape(x, shape=[-1, 299, 299, 3])
dim = x.get_shape().as_list()
x = tf.reshape(x, shape = [-1, *dim[1:]])
Add Comment
Please sign in to add a comment.