Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
- saving model weights...done
- 2018-09-21 10:47:53.688368: E tensorflow/stream_executor/cuda/cuda_dnn.cc:81]
- Exception in thread Thread-1:
- Traceback (most recent call last):
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py", line 1278, in _do_call
- return fn(*args)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py", line 1263, in _run_fn
- options, feed_dict, fetch_list, target_list, run_metadata)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py", line 1350, in _call_tf_sessionrun
- run_metadata)
- tensorflow.python.framework.errors_impl.InternalError: cuDNN Backward Filter function launch failure : input shape([16,128,128,128]) filter shape([3,3,128,256])
- [[Node: training/Adam/gradients/model_2/conv2d_9/convolution_grad/Conv2DBackpropFilter = Conv2DBackpropFilter[T=DT_FLOAT, _class=["loc:@train...kpropInput"], data_format="NCHW", dilations=[1, 1, 1, 1], padding="SAME", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true, _device="/job:localhost/replica:0/task:0/device:GPU:0"](training/Adam/gradients/model_2/conv2d_9/convolution_grad/Conv2DBackpropFilter-0-TransposeNHWCToNCHW-LayoutOptimizer, ConstantFolding/training/Adam/gradients/model_2/conv2d_9/convolution_grad/ShapeN-matshapes-1, training/Adam/gradients/AddN_2)]]
- During handling of the above exception, another exception occurred:
- Traceback (most recent call last):
- File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
- self.run()
- File "/usr/lib/python3.6/threading.py", line 864, in run
- self._target(*self._args, **self._kwargs)
- File "/content/faceswap/scripts/train.py", line 97, in process_thread
- raise err
- File "/content/faceswap/scripts/train.py", line 89, in process_thread
- self.run_training_cycle(model, trainer)
- File "/content/faceswap/scripts/train.py", line 124, in run_training_cycle
- trainer.train_one_step(iteration, viewer)
- File "/content/faceswap/plugins/Model_OriginalHighRes/Trainer.py", line 38, in train_one_step
- loss_A = self.model.autoencoder_A.train_on_batch(warped_A, target_A)
- File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 1883, in train_on_batch
- outputs = self.train_function(ins)
- File "/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py", line 2482, in __call__
- **self.session_kwargs)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py", line 877, in run
- run_metadata_ptr)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py", line 1100, in _run
- feed_dict_tensor, options, run_metadata)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py", line 1272, in _do_run
- run_metadata)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py", line 1291, in _do_call
- raise type(e)(node_def, op, message)
- tensorflow.python.framework.errors_impl.InternalError: cuDNN Backward Filter function launch failure : input shape([16,128,128,128]) filter shape([3,3,128,256])
- [[Node: training/Adam/gradients/model_2/conv2d_9/convolution_grad/Conv2DBackpropFilter = Conv2DBackpropFilter[T=DT_FLOAT, _class=["loc:@train...kpropInput"], data_format="NCHW", dilations=[1, 1, 1, 1], padding="SAME", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true, _device="/job:localhost/replica:0/task:0/device:GPU:0"](training/Adam/gradients/model_2/conv2d_9/convolution_grad/Conv2DBackpropFilter-0-TransposeNHWCToNCHW-LayoutOptimizer, ConstantFolding/training/Adam/gradients/model_2/conv2d_9/convolution_grad/ShapeN-matshapes-1, training/Adam/gradients/AddN_2)]]
- Caused by op 'training/Adam/gradients/model_2/conv2d_9/convolution_grad/Conv2DBackpropFilter', defined at:
- File "/usr/lib/python3.6/threading.py", line 884, in _bootstrap
- self._bootstrap_inner()
- File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
- self.run()
- File "/usr/lib/python3.6/threading.py", line 864, in run
- self._target(*self._args, **self._kwargs)
- File "/content/faceswap/scripts/train.py", line 89, in process_thread
- self.run_training_cycle(model, trainer)
- File "/content/faceswap/scripts/train.py", line 124, in run_training_cycle
- trainer.train_one_step(iteration, viewer)
- File "/content/faceswap/plugins/Model_OriginalHighRes/Trainer.py", line 38, in train_one_step
- loss_A = self.model.autoencoder_A.train_on_batch(warped_A, target_A)
- File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 1882, in train_on_batch
- self._make_train_function()
- File "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py", line 992, in _make_train_function
- loss=self.total_loss)
- File "/usr/local/lib/python3.6/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
- return func(*args, **kwargs)
- File "/usr/local/lib/python3.6/dist-packages/keras/optimizers.py", line 445, in get_updates
- grads = self.get_gradients(loss, params)
- File "/usr/local/lib/python3.6/dist-packages/keras/optimizers.py", line 78, in get_gradients
- grads = K.gradients(loss, params)
- File "/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py", line 2519, in gradients
- return tf.gradients(loss, variables, colocate_gradients_with_ops=True)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gradients_impl.py", line 596, in gradients
- gate_gradients, aggregation_method, stop_gradients)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gradients_impl.py", line 779, in _GradientsHelper
- lambda: grad_fn(op, *out_grads))
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gradients_impl.py", line 398, in _MaybeCompile
- return grad_fn() # Exit early
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gradients_impl.py", line 779, in <lambda>
- lambda: grad_fn(op, *out_grads))
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_grad.py", line 529, in _Conv2DGrad
- data_format=data_format)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_nn_ops.py", line 1091, in conv2d_backprop_filter
- dilations=dilations, name=name)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
- op_def=op_def)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py", line 454, in new_func
- return func(*args, **kwargs)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 3155, in create_op
- op_def=op_def)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 1717, in __init__
- self._traceback = tf_stack.extract_stack()
- ...which was originally created as op 'model_2/conv2d_9/convolution', defined at:
- File "/usr/lib/python3.6/threading.py", line 884, in _bootstrap
- self._bootstrap_inner()
- [elided 1 identical lines from previous traceback]
- File "/usr/lib/python3.6/threading.py", line 864, in run
- self._target(*self._args, **self._kwargs)
- File "/content/faceswap/scripts/train.py", line 86, in process_thread
- model = self.load_model()
- File "/content/faceswap/scripts/train.py", line 102, in load_model
- model = PluginLoader.get_model(self.trainer_name)(model_dir, self.args.gpus)
- File "/content/faceswap/plugins/Model_OriginalHighRes/Model.py", line 101, in __init__
- self.initModel()
- File "/content/faceswap/plugins/Model_OriginalHighRes/Model.py", line 109, in initModel
- self.autoencoder_A = KerasModel(x, self.decoder_A(self.encoder(x)))
- File "/usr/local/lib/python3.6/dist-packages/keras/engine/topology.py", line 619, in __call__
- output = self.call(inputs, **kwargs)
- File "/usr/local/lib/python3.6/dist-packages/keras/engine/topology.py", line 2085, in call
- output_tensors, _, _ = self.run_internal_graph(inputs, masks)
- File "/usr/local/lib/python3.6/dist-packages/keras/engine/topology.py", line 2235, in run_internal_graph
- output_tensors = _to_list(layer.call(computed_tensor, **kwargs))
- File "/usr/local/lib/python3.6/dist-packages/keras/layers/convolutional.py", line 168, in call
- dilation_rate=self.dilation_rate)
- File "/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py", line 3341, in conv2d
- data_format=tf_data_format)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py", line 780, in convolution
- return op(input, filter)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py", line 868, in __call__
- return self.conv_op(inp, filter)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py", line 520, in __call__
- return self.call(inp, filter)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py", line 204, in __call__
- name=self.name)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_nn_ops.py", line 956, in conv2d
- data_format=data_format, dilations=dilations, name=name)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
- op_def=op_def)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py", line 454, in new_func
- return func(*args, **kwargs)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 3155, in create_op
- op_def=op_def)
- File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 1717, in __init__
- self._traceback = tf_stack.extract_stack()
- InternalError (see above for traceback): cuDNN Backward Filter function launch failure : input shape([16,128,128,128]) filter shape([3,3,128,256])
- [[Node: training/Adam/gradients/model_2/conv2d_9/convolution_grad/Conv2DBackpropFilter = Conv2DBackpropFilter[T=DT_FLOAT, _class=["loc:@train...kpropInput"], data_format="NCHW", dilations=[1, 1, 1, 1], padding="SAME", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true, _device="/job:localhost/replica:0/task:0/device:GPU:0"](training/Adam/gradients/model_2/conv2d_9/convolution_grad/Conv2DBackpropFilter-0-TransposeNHWCToNCHW-LayoutOptimizer, ConstantFolding/training/Adam/gradients/model_2/conv2d_9/convolution_grad/ShapeN-matshapes-1, training/Adam/gradients/AddN_2)]]
Advertisement
Add Comment
Please sign in to add a comment
Advertisement