Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # Imports
- import numpy as np
- import tensorflow as tf
- import matplotlib.pyplot as plt
- import time, os
class RNN():
    """Character/note-level LSTM language model (TensorFlow 1.x graph mode).

    Builds: token embedding -> stacked dropout-wrapped LSTM ->
    softmax projection, with sparse cross-entropy loss and an Adam
    training op. Attributes exposed for training/generation:
    `x`, `targets`, `initial`, `final_state`, `distribution`,
    `cost`, `optimizer`.
    """

    def __init__(self, batch_size, time_steps, num_classes, num_layers, layer_size, alpha):
        """
        Args:
            batch_size: sequences per training batch.
            time_steps: unrolled sequence length used for training targets.
            num_classes: vocabulary size (distinct token indices).
            num_layers: number of stacked LSTM layers.
            layer_size: hidden units per layer (also the embedding dim).
            alpha: Adam learning rate.
        """
        self.batch_size = batch_size
        self.time_steps = time_steps
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.layer_size = layer_size
        self.alpha = alpha

        # `x` keeps both dims dynamic so generation can feed (1, 1) steps.
        self.x = tf.placeholder(tf.int32, [None, None], name='input')
        # BUG FIX: was name='input', colliding with the placeholder above
        # (TF would silently uniquify it to 'input_1').
        self.targets = tf.placeholder(tf.int32, [self.batch_size, self.time_steps], name='targets')

        # Softmax projection and token embedding table.
        W = tf.get_variable('Weight', [self.layer_size, self.num_classes])
        b = tf.get_variable('Bias', [num_classes], initializer=tf.constant_initializer(0.0))
        embeddings = tf.get_variable('embedding_matrix', [self.num_classes, self.layer_size])
        rnn_inputs = tf.nn.embedding_lookup(embeddings, self.x)

        # BUG FIX: the original built ONE cell object and did
        # [cell] * num_layers, which makes every layer share the same
        # weights. Build a fresh dropout-wrapped cell per layer instead.
        def make_cell():
            cell = tf.nn.rnn_cell.LSTMCell(self.layer_size, state_is_tuple=True)
            # dropout to reduce overfitting
            return tf.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=0.95, output_keep_prob=0.95)

        self.cell = tf.nn.rnn_cell.MultiRNNCell(
            [make_cell() for _ in range(self.num_layers)], state_is_tuple=True)

        self.initial = self.cell.zero_state(self.batch_size, tf.float32)
        self.rnn_outputs, self.final_state = tf.nn.dynamic_rnn(self.cell, rnn_inputs, initial_state=self.initial)

        # Flatten (batch, time, hidden) -> (batch*time, hidden) so one
        # matmul projects every timestep.
        self.rnn_outputs = tf.reshape(tf.concat(self.rnn_outputs, 1), [-1, layer_size])
        flat_targets = tf.reshape(self.targets, [-1])

        self.logits = tf.matmul(self.rnn_outputs, W) + b
        self.distribution = tf.nn.softmax(self.logits)
        # BUG FIX: loss was computed on the raw RNN outputs instead of the
        # projected logits, so W and b were never trained and the loss did
        # not measure the model's actual predictions.
        self.cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=flat_targets))
        self.optimizer = tf.train.AdamOptimizer(self.alpha).minimize(self.cost)
        # (Removed dead commented-out gradient-clipping code that referenced
        # an undefined `optimizer` local; reintroduce via compute_gradients/
        # apply_gradients if clipping is needed.)
    def generate_notes(self, sess, num_notes, notes_to_ind, ind_to_notes):
        """Run the model one token at a time from a zero state, printing the
        softmax distribution at each step.

        Args:
            sess: live tf.Session with trained variables.
            num_notes: number of generation steps to run.
            notes_to_ind: mapping from note symbol to vocabulary index.
            ind_to_notes: mapping from index to note symbol — unused in the
                visible code; presumably meant for decoding sampled indices.

        NOTE(review): `note` is never updated inside the loop, so every step
        feeds the seed 'X' (only the recurrent state evolves), and nothing is
        returned or sampled — this method looks truncated; confirm against
        the full source.
        """
        # Fresh zero state for a generation batch of one.
        state = sess.run(self.cell.zero_state(1, tf.float32))
        note = 'X'  # seed symbol; assumed to be a key of notes_to_ind
        for n in range(num_notes):
            # Feed the current token as a (1, 1) batch.
            x = np.zeros((1, 1))
            x[0, 0] = notes_to_ind[note]
            input_feed = {self.x: x, self.initial: state}
            # One step: next-token distribution plus the updated LSTM state,
            # which is fed back in on the next iteration.
            probs, state = sess.run([self.distribution, self.final_state], input_feed)
            print(probs)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement