import numpy as np
import tensorflow as tf

# hyperparameters and the pretrained embedding matrix are assumed to be
# defined elsewhere, for example:
#   vocab_size, wdim, hdim = 10000, 300, 128
#   word_embedding_matrix = np.random.randn(vocab_size, wdim)

tf.reset_default_graph()

# define placeholders
sentences = tf.placeholder(tf.int32, [None, None], name='sentences')
# dropout keep probability; defaults to 1.0 (no dropout) at eval time
keep_prob = tf.placeholder_with_default(1.0, shape=[], name='keep_prob')

# embedding matrix, initialized from the pretrained vectors and fine-tuned
word_embedding = tf.get_variable(name='word_embedding',
                                 shape=[vocab_size, wdim],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(word_embedding_matrix),
                                 trainable=True)

# embedding_lookup: map every token id to its embedding vector
embedding_lookup = tf.nn.embedding_lookup(word_embedding, sentences)

# actual sequence length before padding, e.g. for the padded sentence
# [1, 3, 4, 0, 0, 0] the seq_len is 3 (the 0's are excluded)
sequence_len = tf.count_nonzero(sentences, axis=-1)

# bi-directional rnn over the embedded tokens
with tf.variable_scope('encoder'):
    _, (fsf, fsb) = tf.nn.bidirectional_dynamic_rnn(
        tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(hdim),
                                      output_keep_prob=keep_prob),
        tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(hdim),
                                      output_keep_prob=keep_prob),
        inputs=embedding_lookup,
        sequence_length=sequence_len,
        dtype=tf.float32)

# since it's bidirectional, concat the final forward and backward cell states
state_output = tf.concat([fsf.c, fsb.c], axis=-1)
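
# For reference, a minimal sketch of running the encoder end to end,
# assuming the hypothetical vocab_size/wdim/hdim and word_embedding_matrix
# named above; the batch values below are illustrative only.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # two zero-padded sentences of token ids (batch_size=2, max_len=4)
    batch = np.array([[1, 3, 4, 0],
                      [2, 5, 0, 0]])
    # keep_prob defaults to 1.0, so dropout is disabled for this run
    encoded = sess.run(state_output, feed_dict={sentences: batch})
    print(encoded.shape)  # (2, 2 * hdim): forward + backward cell states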