xor.py-vatsal

a guest
Oct 29th, 2017
137
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 4.57 KB | None | 0 0
#!/usr/bin/env python3
import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from sklearn.model_selection import train_test_split
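
# Assumed input format (the CSV itself is not part of this paste):
# "xor_data.csv" with two binary feature columns and a label column
# named 'xor', e.g.
#
#   a,b,xor
#   0,0,0
#   0,1,1
#   1,0,1
#   1,1,0
#
# Note: the script targets the TensorFlow 1.x API (tf.placeholder,
# tf.Session, tf.contrib); it will not run unmodified on TensorFlow 2.x.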

def read_and_split_training_data():
    dataset = pd.read_csv("xor_data.csv")  # read the data set
    print(dataset['xor'].count())  # print the number of examples
    print(dataset.head())  # inspect the first five rows
    num_columns = len(dataset.columns)  # feature columns plus the label
    X = dataset.iloc[:, 0:num_columns - 1].values  # feature columns
    y = dataset.iloc[:, num_columns - 1].values  # label column
    y = y.reshape(y.shape[0], 1)  # make y an explicit column vector
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.30, random_state=43)  # 70/30 train/test split
    # Transpose so that examples are stored column-wise
    return X_train.T, X_test.T, y_train.T, y_test.T
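
# Shape convention (inferred from the transposes above): examples are stored
# column-wise, so X_train has shape (2, m_train) and Y_train has shape
# (1, m_train).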

def create_placeholders(n_x, n_y):
    # The None dimension lets the placeholders accept any number of examples
    X = tf.placeholder(tf.float32, (n_x, None), name='X')  # input layer
    Y = tf.placeholder(tf.float32, (n_y, None), name='Y')  # output layer
    return X, Y

def initialize_parameters():
    # Create weights and biases for the hidden layer and the output layer
    W1 = tf.get_variable("W1", [2, 2], initializer=tf.contrib.layers.xavier_initializer())
    b1 = tf.get_variable("b1", [2, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [1, 2], initializer=tf.contrib.layers.xavier_initializer())
    b2 = tf.get_variable("b2", [1, 1], initializer=tf.zeros_initializer())
    parameters = {
        "W1": W1,
        "b1": b1,
        "W2": W2,
        "b2": b2
    }
    return parameters

def forward_propagation(X, parameters):

    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]

    Z1 = tf.add(tf.matmul(W1, X), b1)   # hidden layer pre-activation
    A1 = tf.nn.relu(Z1)                 # hidden layer activation
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # output layer logits
    # Return the raw logits: sigmoid_cross_entropy_with_logits applies the
    # sigmoid itself, and rounding here would make the gradient zero
    # almost everywhere.
    return Z2

def compute_cost(Z2, Y):

    logits = tf.transpose(Z2)
    labels = tf.transpose(Y)
    # Mean sigmoid cross-entropy over the batch; the op expects raw logits
    cost = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    return cost

def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500):

    ops.reset_default_graph()
    (n_x, m) = X_train.shape
    n_y = Y_train.shape[0]
    costs = []
    X, Y = create_placeholders(n_x, n_y)
    parameters = initialize_parameters()
    Z2 = forward_propagation(X, parameters)
    cost = compute_cost(Z2, Y)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(init)
        for epoch in range(num_epochs):
            _, epoch_cost = session.run([optimizer, cost],
                                        feed_dict={X: X_train, Y: Y_train})
            costs.append(epoch_cost)
        parameters = session.run(parameters)
        # Threshold the sigmoid output at 0.5 for hard 0/1 predictions;
        # tf.argmax over a single-row tensor is always 0, so comparing
        # argmaxes would report perfect accuracy no matter what.
        predictions = tf.round(tf.sigmoid(Z2))
        correct_prediction = tf.equal(predictions, Y)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("Training Accuracy is {0} %...".format(
            100 * accuracy.eval({X: X_train, Y: Y_train})))
        print("Test Accuracy is {0} %...".format(
            100 * accuracy.eval({X: X_test, Y: Y_test})))
    return parameters

def accuracy_check(X_t, Y_t, parameters):
    ops.reset_default_graph()
    (n_x, m) = X_t.shape
    n_y = Y_t.shape[0]
    X, Y = create_placeholders(n_x, n_y)
    Z2 = forward_propagation(X, parameters)
    init = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init)
        A = session.run(tf.sigmoid(Z2), feed_dict={X: X_t})
    A = (A >= 0.5).astype(np.float64)  # threshold probabilities into 0/1 labels
    count = 0
    tot = A.shape[1]
    print(A.shape, Y_t.shape)
    for i in range(A.shape[0]):
        for j in range(A.shape[1]):
            # compare with ==; an identity test ("is 0") on floats never holds
            if A[i][j] == Y_t[i][j]:
                count += 1
    return count * 100 / tot

def main():
    X_train, X_test, Y_train, Y_test = read_and_split_training_data()
    parameters = model(X_train, Y_train, X_test, Y_test)

if __name__ == "__main__":
    main()
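
# Hypothetical wiring (not in the paste as written): accuracy_check is
# defined but never called; it could be applied to the trained parameters
# returned by model(), e.g.
#
#   params = model(X_train, Y_train, X_test, Y_test)
#   print("accuracy_check: {0} %".format(accuracy_check(X_test, Y_test, params)))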