Advertisement
Guest User

Untitled

a guest
Jul 31st, 2016
120
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 5.56 KB | None | 0 0
  1. import numpy as np
  2. import tensorflow as tf
  3. import matplotlib
  4. matplotlib.use('Agg')
  5. import matplotlib.pyplot as plt
  6.  
# Number of time steps unrolled in the evaluation loop of testrnn.__init__.
SEQ_LENGTH = 10
  8.  
class testrnn:
    """LSTM regression model built with the legacy (pre-1.0) TensorFlow graph
    API (``tf.split(split_dim, num_split, value)``, ``tf.nn.rnn``, ...).

    Builds a training graph that maps a sequence of scalars to the next-step
    sequence via one BasicLSTMCell followed by a two-layer fully connected
    head, trained with Adam on an MSE + L2 loss.

    NOTE(review): class name should be PascalCase per PEP 8; left unchanged so
    existing callers keep working.
    """

    def __init__(self, config):
        """Construct the full graph.

        config: object exposing MaxNumSteps, batch_size, hidden_size,
                FC_Units, num_layers, ... (see TestConfig in this file).
        """
        self.config = config
        # Start from a clean default graph so repeated construction in the
        # same process does not accumulate stale nodes.
        tf.reset_default_graph()
        with tf.variable_scope("rnnlch") as scope:
            # Training input/target: one scalar per time step per example.
            self.input_data = tf.placeholder(tf.float32,[None,config.MaxNumSteps])
            self.eval_input_data = tf.placeholder(tf.float32,[1,3])#create eval node, 3 time steps
            self.eval_target = tf.Variable(tf.constant(0.0,shape=[SEQ_LENGTH]))
            self.target = tf.placeholder(tf.float32,[None,config.MaxNumSteps])
            # Loss accumulator; non-trainable so the optimizer ignores it.
            loss = tf.Variable(0.,trainable=False)
            # Legacy split signature: tf.split(split_dim, num_split, value) —
            # splits the batch into per-example rows.
            x_split = tf.split(0,config.batch_size,self.input_data)
            y_split = tf.split(0,config.batch_size,self.target)
            x_eval_split = tf.split(0,1,self.eval_input_data)# split the evaluation input by the number of time steps

            # Two-layer fully connected head applied to each LSTM output.
            w = tf.Variable(tf.random_normal([config.hidden_size,config.FC_Units],stddev=0.1),trainable=True)
            b = tf.Variable(tf.constant(0.0,shape=[config.FC_Units]),trainable=True)
            w_2 = tf.Variable(tf.random_normal([config.FC_Units,config.MaxNumSteps],stddev=0.1),trainable=True)
            b_2 = tf.Variable(tf.constant(0.0,shape=[config.MaxNumSteps]),trainable=True)
            #Initialize basic lstm cell
            lstm = tf.nn.rnn_cell.BasicLSTMCell(config.hidden_size,state_is_tuple=True)
            ops, states = tf.nn.rnn(lstm,x_split,dtype=tf.float32)
            # NOTE(review): multi-layer variant left disabled by the author.
            #lstm_multi = tf.nn.rnn_cell.MultiRNNCell([lstm]*config.num_layers,state_is_tuple=True)
            #ops, states = tf.nn.rnn(lstm_multi,x_split,dtype=tf.float32)
            self.output = []
            #Compute loss
            for op,target in zip(ops,y_split):
                transform = tf.nn.elu(tf.matmul(op,w)+b)
                drop_out = tf.nn.dropout(transform,keep_prob=0.6)
                fc_layer2 = tf.nn.elu(tf.matmul(drop_out,w_2)+b_2)
                self.output.append(fc_layer2)
                # NOTE(review): the loss is computed on `transform` (first FC
                # layer) while `fc_layer2` is what is stored in self.output —
                # confirm this asymmetry is intentional.
                loss += self.loss_function(transform,target,w,b)

            #Use the variables above to also unravel the eval node
            self.loss = loss
            # Learning rate lives in a variable so assign_lr can change it
            # at run time without rebuilding the graph.
            self.lr = tf.Variable(0.0, trainable=False)
            # NOTE(review): dead gradient-clipping variant kept as a string
            # literal by the author; superseded by the minimize() call below.
            '''
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),
            config.max_grad_norm)
            #optimizer = tf.train.GradientDescentOptimizer(self.lr)
            #optimizer = tf.train.MomentumOptimizer(learning_rate=self.lr,momentum=0.5)
            optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
            self.train_op = optimizer.apply_gradients(zip(grads, tvars))
            '''
            self.train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss)
            #Eval network
            # Reuse the training weights for the evaluation unroll.
            scope.reuse_variables()
            for tstep in range(SEQ_LENGTH):
                if tstep < 3:
                    # NOTE(review): debugging leftover — drops into an
                    # interactive shell during graph construction.
                    import IPython; IPython.embed()
                    # NOTE(review): BasicLSTMCell.__call__ expects
                    # (input, (c, h) state); tf.zeros([1,1]) is not a valid
                    # state here, and item-assignment into a tf.Variable
                    # (below) is not supported — this eval unroll looks
                    # unfinished and likely raises at build time. Confirm.
                    tmp = lstm(x_eval_split,tf.zeros([1,1]))
                    self.eval_target[tstep] = tmp
                else:
                    self.eval_target[tstep] = lstm(self.eval_target[tstep])

    def loss_function(self,ip,op,w,b):
        """Mean squared error between ip and op plus L2 penalties on w and b."""
        return tf.reduce_mean((ip-op)**2) + tf.nn.l2_loss(w) + tf.nn.l2_loss(b)

    def generate_data(self,num=None):
        """Generate `num` sine-wave sequences.

        Returns (x_data_list, y_data_list), each of shape
        (num, MaxNumSteps + 1); every row uses the same linspace over
        [0, 4*pi], y = transform(x).
        """
        x_data_list = np.zeros((num,self.config.MaxNumSteps+1))
        y_data_list = np.zeros((num,self.config.MaxNumSteps+1))
        for j in range(0,num):
            #generate randome sequence equal to number of points
            x_data = np.array([np.linspace(0,4*np.pi,num = self.config.MaxNumSteps+1)] )
            #x_data = np.random.rand(BATCH_SIZE,TOTAL_NUM_POINTS)
            y_data = self.transform(x_data)
            #y_data = np.reshape(y_data,[BATCH_SIZE,TOTAL_NUM_POINTS])
            x_data_list[j,:] = x_data
            y_data_list[j,:] = y_data

        return x_data_list,y_data_list

    def transform(self,x):
        """Target function: scaled sine plus one small random scalar offset."""
        return 0.5*np.sin(x) + np.random.rand()/100.
        #return 0.5*np.sin(x) + 0.25

    def assign_lr(self, session, lr_value):
        """Set the learning-rate variable to lr_value in the given session."""
        session.run(tf.assign(self.lr, lr_value))

    def plot_data(self,data):
        """Plot each row of `data` as a line; return the matplotlib figure."""
        fig = plt.figure()
        for ii in range(data.shape[0]):
            plt.plot(data[ii,:])
            # NOTE(review): plt.hold was deprecated/removed in modern
            # matplotlib; overlaying is the default behavior now.
            plt.hold(True)
        return fig
  99.  
  100.  
  101.  
  102.  
  103. def run_model(sess,m,data,eval_op,verbose=True):
  104. cost,_ = sess.run([m.loss,eval_op],{m.input_data: data[:,:-1],m.target: data[:,1:]})
  105. return cost
  106.  
  107.  
  108. class TestConfig(object):
  109. """Tiny config, for testing."""
  110. learning_rate = 1e-3
  111. max_grad_norm = 0.1
  112. num_layers = 2
  113. MaxNumSteps = 20
  114. feat_dims = 1
  115. hidden_size = 10
  116. max_epoch = 1
  117. lr_decay = 0.
  118. batch_size = 20
  119. num_layers = 2
  120. FC_Units = 20
  121.  
  122. if __name__ == "__main__":
  123. config = TestConfig()
  124. m = testrnn(config)
  125. cost_lst = []
  126. with tf.Session() as sess:
  127. sess.run(tf.initialize_all_variables())
  128. for ii in range(500):
  129. m.assign_lr(sess,config.learning_rate)
  130. ind,data = m.generate_data(m.config.batch_size)
  131. cost = run_model(sess,m,data,m.train_op)
  132. cost_lst.append(cost)
  133. if np.mod(ii,100) == 0:
  134. print("cost is {}".format(cost))
  135. plt.plot(cost_lst)
  136. plt.title('Cost vs iterations')
  137. plt.savefig('RNN_train_1.png')
  138. import IPython; IPython.embed()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement