Guest User

Untitled

a guest
Jul 16th, 2018
72
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 6.73 KB | None | 0 0
  1. # Neuron Network for FFR data
  2. import os
  3. os.environ["TF_CPP_MIN_LOG_LEVEL"]="2" #ignore warnings
  4. import tensorflow as tf
  5. import numpy as np
  6. import matplotlib.pyplot as plt
  7. import xlrd
  8. import pandas
  9. import csv
  10. import xlsxwriter
  11. from pandas import ExcelWriter
  12.  
# Global dtype for every TF variable/placeholder built in this script (64-bit floats).
data_type=tf.float64

  15. # Normalized function
  16. def normalize_cols(m):
  17. col_max=m.max(axis=0)
  18. col_min=m.min(axis=0)
  19. return (m-col_min)/(col_max-col_min)
  20. # Initialized function
  21. def init_weight(shape,st_dev):
  22. weight=tf.Variable(tf.cast(tf.random_normal(shape,stddev=st_dev),data_type))
  23. return (weight)
  24. def init_bias(shape,st_dev):
  25. bias=tf.Variable(tf.cast(tf.random_normal(shape,stddev=st_dev),data_type))
  26. return (bias)
  27. # Activation function
  28. def fully_connected(input_layer,weights,biases,activation=True, name = None):
  29. if name:
  30. linear_layer=tf.add(tf.matmul(input_layer,weights),biases, name = name)
  31. else:
  32. linear_layer=tf.add(tf.matmul(input_layer,weights),biases)
  33. if activation: # Using Sigmoid func.
  34. return (tf.nn.sigmoid(linear_layer))
  35. else: # Linear func. (Note: only for output layer)
  36. return linear_layer
  37.  
  38. # No. features
  39. input_num_units=4
  40. output_num_units=2
  41. # Read dataset
  42. file_location = "D:/TensorFlow_ANN/ML-3DPrinterData/ML-3DPrinterData/TrainingData.xlsx"
  43. workbook=xlrd.open_workbook(file_location)
  44. sheet=workbook.sheet_by_index(0)
  45. data_array=[[0 for j in range(sheet.ncols)] for i in range(sheet.nrows)]
  46. for row in range(sheet.nrows):
  47. for col in range(sheet.ncols):
  48. data_array[row][col]=float(sheet.cell_value(row,col))
  49.  
# Split each spreadsheet row into inputs (first 4 cols) and targets (next 2).
x_vals=np.array([[x[col] for col in range(input_num_units)] for x in data_array])
y_vals=np.array([[y[col] for col in range(input_num_units,input_num_units+output_num_units)] for y in data_array])

# No. samples
data_size=x_vals.shape[0]

#for i in range(data_size):
# print(x_vals[i],y_vals[i])

# Per-feature min/max of the raw (un-normalized) inputs.
# NOTE(review): x_max/x_min are never read again in this file — presumably
# kept for de-normalizing new inputs elsewhere; confirm before removing.
x_max=[x_vals.max(axis=0)[i] for i in range(input_num_units)]
x_min=[x_vals.min(axis=0)[i] for i in range(input_num_units)]

# Min-max scale all inputs into [0, 1]; nan_to_num zeroes any constant
# column (whose 0/0 division produces NaN).
x_vals=np.nan_to_num(normalize_cols(x_vals))

# Set seed number for random generator
seed=10
tf.set_random_seed(seed)
np.random.seed(seed)
# Standard deviation of Normal distribution
norm_st_dev=np.sqrt(2.0/float(input_num_units+1)) # Xavier initialization

# Split dataset into training (80%) and test (20%) set
train_size=round(data_size*0.80)
test_size=data_size-train_size
# No. samples in a batch (to compute the derivative of loss func.)
batch_size=int(train_size*1) #large size to reduce the noise of loss func and error.

# Get randomly training and test dataset (split sampled WITHOUT replacement;
# the test set is the complement of the training indices).
train_indices=np.random.choice(data_size,train_size,replace=False)
test_indices=np.array(list(set(range(data_size))-set(train_indices)))
x_vals_train=x_vals[train_indices]
y_vals_train=y_vals[train_indices]
x_vals_test=x_vals[test_indices]
y_vals_test=y_vals[test_indices]
  86.  
# Evaluate Error
# Keep the un-normalized targets for percentage-error reporting below.
actuals=y_vals
train_actuals=actuals[train_indices]
test_actuals=actuals[test_indices]

# Normalize input values
# NOTE(review): x_vals was already normalized above, so this re-scales each
# split a SECOND time using the split's own min/max. The test inputs thus
# end up scaled with different statistics than the training inputs — likely
# unintended; the usual approach applies the training-set min/max to both
# splits, exactly once. Left as-is because changing it alters the trained
# model's numerics.
x_vals_train=np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test=np.nan_to_num(normalize_cols(x_vals_test))
  95.  
# Declare placeholder
# x_data carries an explicit name (presumably so the tensor can be fetched
# by name after the saved model is restored — confirm against the loader).
x_data=tf.placeholder(shape=[None,input_num_units],dtype=data_type, name = 'x_data')
y_target=tf.placeholder(shape=[None,output_num_units],dtype=data_type)

# Hidden-layer widths; the trailing comments record earlier, larger values.
hiddien1_num_units = 16 #32
hiddien2_num_units = 8 #16
hiddien3_num_units = 4 #4
  104.  
# 1st layer: input (4) -> 16 hidden units, sigmoid activation.
weight_1 = init_weight(shape=[input_num_units, hiddien1_num_units], st_dev = norm_st_dev)
bias_1 = init_bias(shape =[hiddien1_num_units], st_dev = norm_st_dev)
layer_1 = fully_connected(x_data, weight_1, bias_1)

# 2nd layer: 16 -> 8, sigmoid.
weight_2 = init_weight(shape =[hiddien1_num_units,hiddien2_num_units], st_dev = norm_st_dev)
bias_2 = init_bias(shape = [hiddien2_num_units], st_dev = norm_st_dev)
layer_2 = fully_connected(layer_1, weight_2, bias_2)

# 3rd layer: 8 -> 4, sigmoid.
weight_3 = init_weight(shape = [hiddien2_num_units, hiddien3_num_units], st_dev = norm_st_dev)
bias_3 = init_bias(shape = [hiddien3_num_units], st_dev = norm_st_dev)
layer_3 = fully_connected(layer_2, weight_3, bias_3)

# Output layer: 4 -> 2, linear (activation=False); named "final_output"
# (presumably for retrieval by name after restore — confirm).
weight_4 = init_weight(shape = [hiddien3_num_units,output_num_units], st_dev = norm_st_dev)
bias_4 = init_bias(shape = [output_num_units], st_dev = norm_st_dev)
final_output = fully_connected(layer_3, weight_4, bias_4,activation=False, name = "final_output")
  124.  
  125. # Loss function
  126. loss=tf.reduce_mean(tf.square(y_target-final_output))
  127.  
  128. # Optimizer method
  129. learning_rate=0.050
  130. my_opt=tf.train.GradientDescentOptimizer(learning_rate)
  131. train_step=my_opt.minimize(loss)
  132.  
  133. # Initialize
  134. init=tf.initialize_all_variables()
  135. #
  136. # initialize the loss vectors
  137. loss_vec = []
  138. test_loss = []
  139.  
# Training
# NOTE(review): the `if True:` wrapper is a no-op (likely a leftover toggle
# for skipping the training phase); kept as-is.
if True:
    with tf.Session() as sess:
        sess.run(init)

        # Per-generation history: RMSE losses and mean absolute
        # percentage errors for each split.
        train_loss=[]
        test_loss=[]
        train_perr=[]
        test_perr=[]

        # Start training
        generation_size=2000
        for i in range(generation_size):
            # Choose random indices for batch selection
            # (sampled WITH replacement — np.random.choice's default —
            # unlike the train/test split above).
            rand_index=np.random.choice(train_size,size=batch_size)
            # Get random batch
            rand_x=x_vals_train[rand_index]
            rand_y=y_vals_train[rand_index]
            # Run the training step
            sess.run(train_step,feed_dict={x_data:rand_x,y_target:rand_y})
            # Get and store the train loss (sqrt of MSE -> RMSE)
            temp_train_loss=sess.run(loss,feed_dict={x_data:rand_x,y_target:rand_y})
            train_loss.append(np.sqrt(temp_train_loss))
            # Get and store the test loss
            temp_test_loss=sess.run(loss,feed_dict={x_data:x_vals_test,y_target:y_vals_test})
            test_loss.append(np.sqrt(temp_test_loss))
            # Get and store the percentage error: full-split predictions are
            # recomputed every generation (O(generations * data_size) work).
            train_preds=[y for y in sess.run(final_output,feed_dict={x_data:x_vals_train})]
            test_preds =[y for y in sess.run(final_output,feed_dict={x_data:x_vals_test})]
            # Mean absolute percentage error over the training split.
            # NOTE(review): divides by the raw target value, so any
            # zero-valued actual produces inf/nan in the reported error.
            temp_train_perr=0.0
            for j in range(train_size):
                tmp=0.0
                for k in range(output_num_units):
                    tmp=tmp+np.abs((train_preds[j][k]-train_actuals[j][k])/train_actuals[j][k])
                temp_train_perr = temp_train_perr + tmp/output_num_units
            temp_train_perr=temp_train_perr*100.0/train_size
            train_perr.append(temp_train_perr)
            # Same percentage-error computation for the test split.
            temp_test_perr=0.0
            for j in range(test_size):
                tmp=0.0
                for k in range(output_num_units):
                    tmp=tmp+np.abs((test_preds[j][k]-test_actuals[j][k])/test_actuals[j][k])
                temp_test_perr = temp_test_perr + tmp/output_num_units
            temp_test_perr=temp_test_perr*100.0/test_size
            test_perr.append(temp_test_perr)

            # Progress report every 100 generations.
            if (i+1)%(100)==0:
                print("Generation: "+str(i+1)+". Loss= "+str(train_loss[-1]))
                print("Train Error: %f%% Test Error: %f%%" %(train_perr[-1],test_perr[-1]))
                print()

        # Persist the trained graph; saver.save needs the live session,
        # so this must stay inside the `with` block.
        model_path="D:/TensorFlow_ANN/ML-3DPrinterData/ML-3DPrinterData/tmp/trained"
        saver=tf.train.Saver()
        save_path=saver.save(sess,model_path)

        # Plot the percentage-error curves for both splits.
        plt.subplot(212)
        plt.plot(train_perr,"k-",label="Train")
        plt.plot(test_perr,"r--",label="Test")
        plt.title("Error%")
        plt.xlabel("Generation")
        plt.ylabel("Error%")
        plt.ylim(0,100)
        plt.yticks(range(0, 101, 10))
        plt.legend(loc="upper right")
        plt.show()
        plt.close()
Add Comment
Please, Sign In to add comment