# LSTM language model over note tokens (TensorFlow 1.x API), with a sampling
# routine for generating new sequences.

# Imports
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import time, os

class RNN():
    def __init__(self, batch_size, time_steps, num_classes, num_layers, layer_size, alpha):
        self.batch_size = batch_size
        self.time_steps = time_steps
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.layer_size = layer_size
        self.alpha = alpha  # learning rate

        self.x = tf.placeholder(tf.int32, [None, None], name='input')
        self.targets = tf.placeholder(tf.int32, [self.batch_size, self.time_steps], name='targets')

        # Output projection from hidden states to class scores
        W = tf.get_variable('Weight', [self.layer_size, self.num_classes])
        b = tf.get_variable('Bias', [num_classes], initializer=tf.constant_initializer(0.0))

        # Embed integer note indices as dense vectors
        embeddings = tf.get_variable('embedding_matrix', [self.num_classes, self.layer_size])
        rnn_inputs = tf.nn.embedding_lookup(embeddings, self.x)

        # Build one LSTM cell per layer; reusing a single cell object across
        # layers would make every layer share the same weights.
        def make_cell():
            cell = tf.nn.rnn_cell.LSTMCell(self.layer_size, state_is_tuple=True)
            return tf.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=0.95, output_keep_prob=0.95)  # dropout to reduce overfitting
        self.cell = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(self.num_layers)], state_is_tuple=True)

        self.initial = self.cell.zero_state(self.batch_size, tf.float32)
        self.rnn_outputs, self.final_state = tf.nn.dynamic_rnn(self.cell, rnn_inputs, initial_state=self.initial)

        # Flatten [batch, time, layer_size] -> [batch * time, layer_size] so every
        # time step gets its own softmax over the classes
        self.rnn_outputs = tf.reshape(self.rnn_outputs, [-1, self.layer_size])
        flat_targets = tf.reshape(self.targets, [-1])

        self.logits = tf.matmul(self.rnn_outputs, W) + b
        self.distribution = tf.nn.softmax(self.logits)

        # Cross-entropy over the projected logits (not the raw RNN outputs)
        self.cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=flat_targets))
        self.optimizer = tf.train.AdamOptimizer(self.alpha).minimize(self.cost)
        '''Optional gradient clipping instead of the one-step minimize above:
        optimizer = tf.train.AdamOptimizer(self.alpha)
        gradient_vectors = optimizer.compute_gradients(self.cost)
        capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var)
                      for grad, var in gradient_vectors if grad is not None]
        self.train_op = optimizer.apply_gradients(capped_gvs)'''

    def generate_notes(self, sess, num_notes, notes_to_ind, ind_to_notes):
        # Assumes this model instance was built with batch_size=1, so the fed
        # state matches the shape of self.initial.
        state = sess.run(self.cell.zero_state(1, tf.float32))
        note = 'X'
        notes = []
        for n in range(num_notes):
            x = np.zeros((1, 1), dtype=np.int32)
            x[0, 0] = notes_to_ind[note]
            input_feed = {self.x: x, self.initial: state}
            probs, state = sess.run([self.distribution, self.final_state], input_feed)
            # Sample the next note from the predicted distribution
            p = probs[0].astype(np.float64)
            note = ind_to_notes[np.random.choice(self.num_classes, p=p / p.sum())]
            notes.append(note)
        return notes
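
# A minimal usage sketch: build the graph with batch_size=1, run one
# illustrative training step on random data, then sample a short sequence.
# The vocabulary size, hyperparameters, and the notes_to_ind / ind_to_notes
# mappings below are placeholder assumptions; real values would come from the
# caller's preprocessing.
if __name__ == '__main__':
    num_classes = 88  # assumed token vocabulary size
    model = RNN(batch_size=1, time_steps=50, num_classes=num_classes,
                num_layers=2, layer_size=128, alpha=1e-3)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # One training step on random integer sequences, just to show the feed
        x = np.random.randint(num_classes, size=(1, 50))
        y = np.random.randint(num_classes, size=(1, 50))
        loss, _ = sess.run([model.cost, model.optimizer],
                           {model.x: x, model.targets: y})
        print('training loss:', loss)
        # Toy vocabulary; 'X' is the seed token used by generate_notes
        ind_to_notes = {i: 'note_%d' % i for i in range(num_classes)}
        notes_to_ind = {v: k for k, v in ind_to_notes.items()}
        notes_to_ind['X'] = 0
        print(model.generate_notes(sess, 20, notes_to_ind, ind_to_notes))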