Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Neuron Network for FFR data
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="2" #ignore warnings (suppress TF C++ INFO/WARNING logs)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import xlrd
import pandas
import csv
import xlsxwriter
from pandas import ExcelWriter
# Single precision setting used for every tensor in the graph below.
data_type=tf.float64
# Normalized function
def normalize_cols(m):
    """Min-max normalize each column of *m* to the [0, 1] range.

    Parameters
    ----------
    m : np.ndarray
        2-D array, rows = samples, columns = features.

    Returns
    -------
    np.ndarray
        Same shape as *m*, each column rescaled so its minimum maps to 0
        and its maximum maps to 1.  Constant columns (max == min) map to
        0 instead of NaN; this matches the np.nan_to_num post-processing
        the callers in this file already apply, while avoiding the
        divide-by-zero RuntimeWarning of the original.
    """
    col_max = m.max(axis=0)
    col_min = m.min(axis=0)
    col_range = col_max - col_min
    # Guard: substitute 1 for zero ranges so the division is always defined;
    # the numerator is 0 for those columns, so the result is exactly 0.
    col_range = np.where(col_range == 0, 1, col_range)
    return (m - col_min) / col_range
# Initialized function
def init_weight(shape, st_dev):
    """Create a trainable weight Variable drawn from N(0, st_dev**2),
    cast to the module-wide `data_type` precision."""
    return tf.Variable(
        tf.cast(tf.random_normal(shape, stddev=st_dev), data_type))
def init_bias(shape, st_dev):
    """Create a trainable bias Variable drawn from N(0, st_dev**2),
    cast to the module-wide `data_type` precision."""
    return tf.Variable(
        tf.cast(tf.random_normal(shape, stddev=st_dev), data_type))
# Activation function
def fully_connected(input_layer, weights, biases, activation=True, name=None):
    """Apply one dense layer: input_layer @ weights + biases.

    Parameters
    ----------
    input_layer : tf.Tensor, shape [batch, in_units]
    weights     : tf.Variable, shape [in_units, out_units]
    biases      : tf.Variable, shape [out_units]
    activation  : bool
        Apply a sigmoid non-linearity when True; return the affine
        output unchanged when False (intended for the output layer).
    name        : str or None
        Optional name for the affine op.

    Returns
    -------
    tf.Tensor of shape [batch, out_units].
    """
    # tf.add's `name` argument defaults to None, so the original
    # `if name:` / `else` duplication was redundant; pass it straight through.
    linear_layer = tf.add(tf.matmul(input_layer, weights), biases, name=name)
    if activation:  # Using Sigmoid func.
        return tf.nn.sigmoid(linear_layer)
    # Linear func. (Note: only for output layer)
    return linear_layer
# No. features
input_num_units=4    # features per sample (first 4 spreadsheet columns)
output_num_units=2   # targets per sample (next 2 spreadsheet columns)
# Read dataset from the first worksheet of the training workbook.
file_location = "D:/TensorFlow_ANN/ML-3DPrinterData/ML-3DPrinterData/TrainingData.xlsx"
workbook=xlrd.open_workbook(file_location)
sheet=workbook.sheet_by_index(0)
# Copy every cell into a plain 2-D Python list of floats.
data_array=[[0 for j in range(sheet.ncols)] for i in range(sheet.nrows)]
for row in range(sheet.nrows):
    for col in range(sheet.ncols):
        data_array[row][col]=float(sheet.cell_value(row,col))
# Get inputs: columns [0, input_num_units) are the features,
# columns [input_num_units, input_num_units+output_num_units) are the targets.
x_vals=np.array([[x[col] for col in range(input_num_units)] for x in data_array])
y_vals=np.array([[y[col] for col in range(input_num_units,input_num_units+output_num_units)] for y in data_array])
# No. samples
data_size=x_vals.shape[0]
#for i in range(data_size):
# print(x_vals[i],y_vals[i])
#
# Per-feature min/max of the RAW inputs (kept for later de-normalization use).
x_max=[x_vals.max(axis=0)[i] for i in range(input_num_units)]
x_min=[x_vals.min(axis=0)[i] for i in range(input_num_units)]
# Min-max normalize all inputs; nan_to_num maps constant columns (0/0) to 0.
# NOTE(review): the train/test slices are normalized AGAIN further below --
# confirm the double normalization is intentional.
x_vals=np.nan_to_num(normalize_cols(x_vals))
# Set seed number for random generator (reproducible split and initialization)
seed=10
tf.set_random_seed(seed)
np.random.seed(seed)
# Standard deviation of Normal distribution for the weight/bias initializers.
# NOTE(review): sqrt(2/(fan_in+1)) is a Xavier-style heuristic; classic Glorot
# uses sqrt(2/(fan_in+fan_out)) -- confirm this variant is intended.
norm_st_dev=np.sqrt(2.0/float(input_num_units+1)) # Xavier initialization
# Split dataset into training (80%) and test (20%) set
train_size=round(data_size*0.80)
test_size=data_size-train_size
# No. samples in a batch (to compute the derivative of loss func.)
batch_size=int(train_size*1) #large size to reduce the noise of loss func and error.
# Get randomly training and test dataset (disjoint index sets)
train_indices=np.random.choice(data_size,train_size,replace=False)
test_indices=np.array(list(set(range(data_size))-set(train_indices)))
x_vals_train=x_vals[train_indices]
y_vals_train=y_vals[train_indices]
x_vals_test=x_vals[test_indices]
y_vals_test=y_vals[test_indices]
# Evaluate Error: keep the raw (un-normalized) targets for percentage error
actuals=y_vals
train_actuals=actuals[train_indices]
test_actuals=actuals[test_indices]
# Normalize input values
# NOTE(review): x_vals was already normalized above, so each split is
# re-normalized here with its own (split-local) min/max; the test split
# therefore uses statistics different from the training split -- confirm
# this is intentional (it differs from the usual "fit on train only" rule).
x_vals_train=np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test=np.nan_to_num(normalize_cols(x_vals_test))
# Declare placeholders: rows = samples (batch dim left open), cols = features/targets.
x_data=tf.placeholder(shape=[None,input_num_units],dtype=data_type, name = 'x_data')
y_target=tf.placeholder(shape=[None,output_num_units],dtype=data_type)
# Hidden-layer widths (previously tried sizes kept in trailing comments).
hiddien1_num_units = 16 #32
hiddien2_num_units = 8 #16
hiddien3_num_units = 4 #4
# 1st layer (sigmoid activation via fully_connected's default)
weight_1 = init_weight(shape=[input_num_units, hiddien1_num_units], st_dev = norm_st_dev)
bias_1 = init_bias(shape =[hiddien1_num_units], st_dev = norm_st_dev)
layer_1 = fully_connected(x_data, weight_1, bias_1)
# 2nd layer
weight_2 = init_weight(shape =[hiddien1_num_units,hiddien2_num_units], st_dev = norm_st_dev)
bias_2 = init_bias(shape = [hiddien2_num_units], st_dev = norm_st_dev)
layer_2 = fully_connected(layer_1, weight_2, bias_2)
# 3rd layer
weight_3 = init_weight(shape = [hiddien2_num_units, hiddien3_num_units], st_dev = norm_st_dev)
bias_3 = init_bias(shape = [hiddien3_num_units], st_dev = norm_st_dev)
layer_3 = fully_connected(layer_2, weight_3, bias_3)
# Output layer: linear (no activation); named so it can be recovered after restore.
weight_4 = init_weight(shape = [hiddien3_num_units,output_num_units], st_dev = norm_st_dev)
bias_4 = init_bias(shape = [output_num_units], st_dev = norm_st_dev)
final_output = fully_connected(layer_3, weight_4, bias_4,activation=False, name = "final_output")
# Loss function: mean squared error over the batch and both outputs.
loss=tf.reduce_mean(tf.square(y_target-final_output))
# Optimizer method
learning_rate=0.050
my_opt=tf.train.GradientDescentOptimizer(learning_rate)
train_step=my_opt.minimize(loss)
# Initialize all variables.
# Fix: tf.initialize_all_variables() has been deprecated since TF 0.12;
# tf.global_variables_initializer() is the supported TF 1.x equivalent.
init=tf.global_variables_initializer()
#
# initialize the loss vectors
loss_vec = []   # NOTE(review): never appended to below; train_loss (in-session) holds train losses
test_loss = []  # re-initialized inside the session block before use
# Training (the `if True:` acts as a manual on/off switch for this phase)
if True:
    with tf.Session() as sess:
        sess.run(init)
        # Initialize arrays
        train_loss=[]   # RMSE on the current batch, one entry per generation
        test_loss=[]    # RMSE on the whole test set, one entry per generation
        train_perr=[]   # mean absolute percentage error (train), per generation
        test_perr=[]    # mean absolute percentage error (test), per generation
        # Start training
        generation_size=2000
        for i in range(generation_size):
            # Choose random indices for batch selection
            # (sampling WITH replacement; batch_size == train_size, so each
            # generation trains on a bootstrap sample of the training set)
            rand_index=np.random.choice(train_size,size=batch_size)
            # Get random batch
            rand_x=x_vals_train[rand_index]
            rand_y=y_vals_train[rand_index]
            # Run the training step
            sess.run(train_step,feed_dict={x_data:rand_x,y_target:rand_y})
            # Get and store the train loss (sqrt of MSE -> RMSE)
            temp_train_loss=sess.run(loss,feed_dict={x_data:rand_x,y_target:rand_y})
            train_loss.append(np.sqrt(temp_train_loss))
            # Get and store the test loss
            temp_test_loss=sess.run(loss,feed_dict={x_data:x_vals_test,y_target:y_vals_test})
            test_loss.append(np.sqrt(temp_test_loss))
            # Get and store the percentage error (predictions on the full splits)
            train_preds=[y for y in sess.run(final_output,feed_dict={x_data:x_vals_train})]
            test_preds =[y for y in sess.run(final_output,feed_dict={x_data:x_vals_test})]
            # Mean absolute percentage error over all training samples/outputs.
            # NOTE(review): divides by the raw target values -- any zero target
            # would blow this up; confirm the targets are always non-zero.
            temp_train_perr=0.0
            for j in range(train_size):
                tmp=0.0
                for k in range(output_num_units):
                    tmp=tmp+np.abs((train_preds[j][k]-train_actuals[j][k])/train_actuals[j][k])
                temp_train_perr = temp_train_perr + tmp/output_num_units
            temp_train_perr=temp_train_perr*100.0/train_size
            train_perr.append(temp_train_perr)
            # Same percentage-error computation for the test split.
            temp_test_perr=0.0
            for j in range(test_size):
                tmp=0.0
                for k in range(output_num_units):
                    tmp=tmp+np.abs((test_preds[j][k]-test_actuals[j][k])/test_actuals[j][k])
                temp_test_perr = temp_test_perr + tmp/output_num_units
            temp_test_perr=temp_test_perr*100.0/test_size
            test_perr.append(temp_test_perr)
            # Progress report every 100 generations.
            if (i+1)%(100)==0:
                print("Generation: "+str(i+1)+". Loss= "+str(train_loss[-1]))
                print("Train Error: %f%% Test Error: %f%%" %(train_perr[-1],test_perr[-1]))
                print()
        # Persist the trained variables to disk (TF1 checkpoint format).
        model_path="D:/TensorFlow_ANN/ML-3DPrinterData/ML-3DPrinterData/tmp/trained"
        saver=tf.train.Saver()
        save_path=saver.save(sess,model_path)
        # Plot the train/test percentage-error curves over generations.
        plt.subplot(212)
        plt.plot(train_perr,"k-",label="Train")
        plt.plot(test_perr,"r--",label="Test")
        plt.title("Error%")
        plt.xlabel("Generation")
        plt.ylabel("Error%")
        plt.ylim(0,100)
        plt.yticks(range(0, 101, 10))
        plt.legend(loc="upper right")
        plt.show()
        plt.close()
Add Comment
Please, Sign In to add comment