# TensorFlow 1.x: embed padded sentences and encode them with a bidirectional LSTM.
# vocab_size, wdim (embedding size), hdim (LSTM size) and the pre-trained embedding
# matrix `pretrained_embeddings` (a [vocab_size, wdim] numpy array) are assumed to be
# defined elsewhere.
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn

tf.reset_default_graph()

# define placeholders
# batch of padded word-id sequences, shape [batch_size, max_len]; id 0 is padding
sentences = tf.placeholder(tf.int32, [None, None], name='sentences')

# embedding matrix, initialized from the pre-trained embeddings and fine-tuned during training
word_embedding = tf.get_variable(name='word_embedding',
                                 shape=[vocab_size, wdim],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(np.array(pretrained_embeddings)),
                                 trainable=True)

# embedding lookup: [batch_size, max_len, wdim]
embedding_lookup = tf.nn.embedding_lookup(word_embedding, sentences)

# actual sequence length before padding, e.g. for the padded sentence [1, 3, 4, 0, 0, 0]
# seq_len is 3 (the trailing 0s are excluded)
sequence_len = tf.count_nonzero(sentences, axis=-1)

# bi-directional RNN encoder
with tf.variable_scope('encoder'):
    _, (fsf, fsb) = tf.nn.bidirectional_dynamic_rnn(
        rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(hdim), dtype=tf.float32),
        rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(hdim), dtype=tf.float32),
        inputs=embedding_lookup,
        sequence_length=sequence_len,
        dtype=tf.float32)

# the RNN is bidirectional, so concatenate the forward and backward final cell states
state_output = tf.concat([fsf.c, fsb.c], axis=-1)  # [batch_size, 2 * hdim]
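
# Usage note: a minimal sketch of how the graph above might be run. The hyperparameter
# values, the random stand-in for the pre-trained embedding matrix, and the toy batch
# below are hypothetical; only `sentences` and `state_output` come from the code above.

# hypothetical hyperparameters and a random stand-in for the pre-trained matrix
vocab_size, wdim, hdim = 10000, 100, 128
pretrained_embeddings = np.random.uniform(-0.1, 0.1, (vocab_size, wdim)).astype(np.float32)

# ... build the graph above, then:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # two padded sentences; id 0 marks padding
    batch = np.array([[1, 3, 4, 0, 0, 0],
                      [5, 2, 7, 7, 0, 0]], dtype=np.int32)
    encoded = sess.run(state_output, feed_dict={sentences: batch})
    print(encoded.shape)  # (2, 2 * hdim) == (2, 256)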