FailedPreconditionError: Attempting to use uninitialized value Variable
	 [[Node: Variable/read = Identity[T=DT_FLOAT, _class=["loc:@Variable"], _device="/job:localhost/replica:0/task:0/device:GPU:0"](Variable)]]
	 [[Node: GradientDescent/update/_18 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_487_GradientDescent/update", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
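
In TF 1.x, tf.global_variables_initializer() returns an op that covers only the variables that already exist in the graph at the moment the op is created, and it must be run after those variables are built. Creating and running it before the model is constructed (as the script below does) leaves every model variable uninitialized, which produces exactly this error. A minimal sketch of the failing pattern and the fix (the variable v is hypothetical):

import tensorflow as tf

tf.reset_default_graph()
init = tf.global_variables_initializer()  # created first: covers NO variables yet
v = tf.Variable(0.0)                      # hypothetical variable, created after init
with tf.Session() as sess:
    sess.run(init)                        # runs fine but initializes nothing
    # sess.run(v)                         # would raise FailedPreconditionError
    sess.run(tf.global_variables_initializer())  # a fresh init op now covers v
    print(sess.run(v))                    # prints 0.0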
The first two training examples: train_X[0:2] is fed as model.inputs and train_y[0:2] as model.targets (token 1 appears to mark sequence start, 2 sequence end, and 0 padding to seq_max_length=10):

train_X[0:2]:
[[1, 18, 8, 19, 9, 20, 21, 0, 0, 0],
 [1, 22, 10, 0, 0, 0, 0, 0, 0, 0]]

train_y[0:2]:
[[18, 8, 19, 9, 20, 21, 2, 0, 0, 0],
 [22, 10, 2, 0, 0, 0, 0, 0, 0, 0]]
import numpy as np
import tensorflow as tf


class Model(object):
    def __init__(self, is_training, batch_size, seq_max_length, hidden_size, vocab_size,
                 num_layers, dropout=0.5, init_scale=0.05):
        self.is_training = is_training
        self.batch_size = batch_size
        self.seq_max_length = seq_max_length
        self.hidden_size = hidden_size
        self.inputs = tf.placeholder(name='inputs', shape=[batch_size, seq_max_length], dtype=tf.int32)
        self.targets = tf.placeholder(name='targets', shape=[batch_size, seq_max_length], dtype=tf.int32)
        embedding = tf.get_variable('embedding', shape=[vocab_size, self.hidden_size], dtype=tf.float32)
        inputs = tf.nn.embedding_lookup(embedding, self.inputs)
        if is_training and dropout < 1:
            # Second argument of tf.nn.dropout in TF 1.x is the KEEP probability.
            inputs = tf.nn.dropout(inputs, dropout)

        # Build a fresh LSTMCell per layer: reusing one cell object for every
        # layer of a MultiRNNCell fails in TF >= 1.1 because all layers would
        # share the same weights.
        def make_cell():
            return tf.contrib.rnn.LSTMCell(self.hidden_size)

        if num_layers > 1:
            cell = tf.contrib.rnn.MultiRNNCell([make_cell() for _ in range(num_layers)],
                                               state_is_tuple=True)
        else:
            cell = make_cell()

        output, self.state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
        output = tf.reshape(output, [-1, hidden_size])
        softmax_w = tf.Variable(tf.random_uniform([self.hidden_size, vocab_size], -init_scale, init_scale))
        softmax_b = tf.Variable(tf.random_uniform([vocab_size], -init_scale, init_scale))
        logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
        logits = tf.reshape(logits, [self.batch_size, self.seq_max_length, vocab_size])
        loss = tf.contrib.seq2seq.sequence_loss(
            logits,
            self.targets,
            tf.ones([self.batch_size, self.seq_max_length], dtype=tf.float32),
            average_across_timesteps=False,
            average_across_batch=True)
        self.cost = tf.reduce_sum(loss)
        if not is_training:
            return
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), 5)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1)
        self.train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.train.get_or_create_global_step())
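
A separate point from the initialization error: because the batches are zero-padded (see the sample arrays above), the all-ones weight tensor passed to sequence_loss also counts the padded timesteps in the cost. A minimal variant, assuming token id 0 is reserved for padding, would replace the tf.ones(...) argument inside __init__ with:

# 1.0 for real tokens, 0.0 for padding positions (assumes pad id == 0).
weights = tf.cast(tf.not_equal(self.targets, 0), tf.float32)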
tf.reset_default_graph()
# Build the whole graph BEFORE creating the initializer ops: an init op only
# covers variables that exist when it is created, so creating and running it
# before Model() left every model variable uninitialized, which is what
# triggered the FailedPreconditionError above.
model = Model(is_training=True, batch_size=2, seq_max_length=10, hidden_size=100,
              vocab_size=72, num_layers=1)
init_global = tf.global_variables_initializer()
init_local = tf.local_variables_initializer()
with tf.Session() as sess:
    sess.run(init_global)
    sess.run(init_local)
    batch_x = np.array(train_X[0:2])
    batch_y = np.array(train_y[0:2])
    cost_value, _ = sess.run([model.cost, model.train_op],
                             feed_dict={model.inputs: batch_x, model.targets: batch_y})
    print(cost_value)
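
To confirm the fix, or to debug similar failures, TF 1.x provides tf.report_uninitialized_variables(), which returns the names of all variables the session has not initialized yet; a short sketch:

with tf.Session() as sess:
    # Before running the init ops this prints every variable's name;
    # after running them it prints an empty array.
    print(sess.run(tf.report_uninitialized_variables()))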