FailedPreconditionError: Attempting to use uninitialized value Variable
[[Node: Variable/read = Identity[T=DT_FLOAT, _class=["loc:@Variable"], _device="/job:localhost/replica:0/task:0/device:GPU:0"](Variable)]]
[[Node: GradientDescent/update/_18 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_487_GradientDescent/update", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
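In TF 1.x graph mode this error fires whenever sess.run touches a variable whose initializer op was never run in that session. A minimal sketch of the same failure, independent of the model below (assumes TensorFlow 1.x):

    import tensorflow as tf

    v = tf.Variable(1.0)
    with tf.Session() as sess:
        # sess.run(tf.global_variables_initializer())  # without this line...
        sess.run(v)  # ...this raises FailedPreconditionError: uninitialized value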
batch_x (train_X[0:2]), the two input sequences fed in below, zero-padded to length 10:

[[1, 18, 8, 19, 9, 20, 21, 0, 0, 0],
 [1, 22, 10, 0, 0, 0, 0, 0, 0, 0]]

batch_y (train_y[0:2]), the corresponding targets:

[[18, 8, 19, 9, 20, 21, 2, 0, 0, 0],
 [22, 10, 2, 0, 0, 0, 0, 0, 0, 0]]
import numpy as np
import tensorflow as tf


class Model(object):

    def __init__(self, is_training, batch_size, seq_max_length, hidden_size, vocab_size,
                 num_layers, dropout=0.5, init_scale=0.05):
        self.is_training = is_training
        self.batch_size = batch_size
        self.seq_max_length = seq_max_length
        self.inputs = tf.placeholder(name='inputs', shape=[batch_size, seq_max_length], dtype=tf.int32)
        self.targets = tf.placeholder(name='targets', shape=[batch_size, seq_max_length], dtype=tf.int32)
        self.hidden_size = hidden_size

        # Map token ids to dense vectors.
        embedding = tf.get_variable('embedding', shape=[vocab_size, self.hidden_size], dtype=tf.float32)
        inputs = tf.nn.embedding_lookup(embedding, self.inputs)

        if is_training and dropout < 1:
            # Note: tf.nn.dropout's second argument is the KEEP probability in TF 1.x.
            inputs = tf.nn.dropout(inputs, dropout)

        cell = tf.contrib.rnn.LSTMCell(self.hidden_size)

        if num_layers > 1:
            # Each layer needs its own cell instance; reusing one LSTMCell object
            # across layers triggers variable-sharing errors.
            cell = tf.contrib.rnn.MultiRNNCell(
                [tf.contrib.rnn.LSTMCell(self.hidden_size) for _ in range(num_layers)],
                state_is_tuple=True)

        output, self.state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
        output = tf.reshape(output, [-1, hidden_size])  # flatten time steps for the projection

        softmax_w = tf.Variable(tf.random_uniform([self.hidden_size, vocab_size], -init_scale, init_scale))
        softmax_b = tf.Variable(tf.random_uniform([vocab_size], -init_scale, init_scale))

        logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
        logits = tf.reshape(logits, [self.batch_size, self.seq_max_length, vocab_size])

        # Per-timestep cross-entropy, averaged over the batch; summing over time
        # gives a scalar cost.
        loss = tf.contrib.seq2seq.sequence_loss(
            logits,
            self.targets,
            tf.ones([self.batch_size, self.seq_max_length], dtype=tf.float32),
            average_across_timesteps=False,
            average_across_batch=True)
        self.cost = tf.reduce_sum(loss)

        if not is_training:
            return

        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), 5)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1)

        self.train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.train.get_or_create_global_step())


tf.reset_default_graph()
# BUG: these initializer ops are created while the graph is still empty, so they
# cover no variables at all...
init_global = tf.global_variables_initializer()
init_local = tf.local_variables_initializer()

with tf.Session() as sess:
    sess.run(init_global)
    sess.run(init_local)

    # ...and the variables built here afterwards are never initialized, which is
    # exactly what raises the FailedPreconditionError above.
    model = Model(is_training=True, batch_size=2, seq_max_length=10,
                  hidden_size=100, vocab_size=72, num_layers=1)
    batch_x = np.array(train_X[0:2])
    batch_y = np.array(train_y[0:2])
    cost_value, _ = sess.run([model.cost, model.train_op],
                             feed_dict={model.inputs: batch_x, model.targets: batch_y})
    print(cost_value)
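One way to make the run above succeed (a sketch of the presumed intent, not part of the original paste): build the Model first so its variables exist in the graph, then create and run the initializers. train_X and train_y are the padded id sequences shown at the top.

    import numpy as np
    import tensorflow as tf

    tf.reset_default_graph()

    # Build the graph first...
    model = Model(is_training=True, batch_size=2, seq_max_length=10,
                  hidden_size=100, vocab_size=72, num_layers=1)

    # ...so these initializer ops actually cover the model's variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    with tf.Session() as sess:
        sess.run(init_op)
        batch_x = np.array(train_X[0:2])
        batch_y = np.array(train_y[0:2])
        cost_value, _ = sess.run([model.cost, model.train_op],
                                 feed_dict={model.inputs: batch_x,
                                            model.targets: batch_y})
        print(cost_value)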