Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #!/usr/bin/env python3
- import numpy as np
- import pandas as pd
- # import matplotlib.pyplot as plt
- import tensorflow as tf
- from tensorflow.python.framework import ops
- from sklearn.model_selection import train_test_split
def read_and_split_training_data():
    """Load xor_data.csv and split it into train/test sets.

    Returns (X_train, X_test, y_train, y_test), each transposed so that
    examples are columns: X_* has shape (n_features, m), y_* has shape (1, m),
    matching the column-major layout the TF graph expects.

    NOTE(review): assumes the label column named 'xor' is the last column of
    the CSV — confirm against the data file.
    """
    dataset = pd.read_csv("xor_data.csv")  # Read data set
    print(dataset['xor'].count())          # Count of examples
    print(dataset.head())                  # Sanity-check first five rows
    num_examples = dataset['xor'].count()
    num_classes = len(dataset.count())     # Number of columns
    # All columns but the last are features; the last column is the label.
    X = dataset.iloc[:, [0, num_classes - 2]].values
    y = dataset.iloc[:, num_classes - 1].values
    y = y.reshape(y.shape[0], 1)  # Column vector (m, 1)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.30, random_state=43)
    # The original called np.squeeze(y_train) / np.squeeze(y_test) and
    # discarded the results — pure no-ops, removed. np.asmatrix was also
    # dropped: it is deprecated and redundant after the explicit reshape.
    return X_train.T, X_test.T, y_train.T, y_test.T
def create_placeholders(n_x, n_y):
    """Return float32 graph placeholders for the inputs and labels.

    The batch dimension is left as None so the same graph serves any
    number of examples (train set, test set, or single queries).
    """
    inputs = tf.placeholder(tf.float32, shape=(n_x, None), name='X')
    labels = tf.placeholder(tf.float32, shape=(n_y, None), name='Y')
    return inputs, labels
def initialize_parameters():
    """Create the trainable variables for the 2-2-1 XOR network.

    Weights use Xavier initialization; biases start at zero. Shapes are
    (out, in) so layers compute W @ x + b on column-vector inputs.
    """
    xavier = tf.contrib.layers.xavier_initializer()
    zeros = tf.zeros_initializer()
    W1 = tf.get_variable("W1", [2, 2], initializer=xavier)  # hidden layer weights
    b1 = tf.get_variable("b1", [2, 1], initializer=zeros)   # hidden layer bias
    W2 = tf.get_variable("W2", [1, 2], initializer=xavier)  # output layer weights
    b2 = tf.get_variable("b2", [1, 1], initializer=zeros)   # output layer bias
    return {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
def forward_propogation(X, parameters):
    """Forward pass of the 2-layer network: ReLU hidden layer, linear output.

    Args:
        X: input placeholder/tensor of shape (n_x, m).
        parameters: dict with keys "W1", "b1", "W2", "b2".

    Returns:
        Z2: raw output logits of shape (1, m).

    Fixes vs. the original:
    - Returned tf.round(tf.sigmoid(Z2)); tf.round has zero gradient, so the
      optimizer could never update the weights, and compute_cost feeds this
      value to sigmoid_cross_entropy_with_logits, which expects raw logits —
      we now return Z2 directly.
    - tf.reshape(A2, [1, 1]) crashed at runtime for any batch with m != 1.
    - Two tf.squeeze(...) calls discarded their results (no-ops) and debug
      prints were left in; all removed.
    """
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    Z1 = tf.add(tf.matmul(W1, X), b1)  # hidden pre-activation, (2, m)
    A1 = tf.nn.relu(Z1)                # hidden activation
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # output logits, (1, m)
    return Z2
def compute_cost(A, Y):
    """Return the scalar mean sigmoid cross-entropy between logits and labels.

    Args:
        A: logits tensor of shape (1, m) — raw, un-squashed outputs.
        Y: labels tensor of shape (1, m) with values in {0, 1}.

    The original returned the per-example loss tensor; a scalar loss is what
    the optimizer and training-loop reporting expect, so we reduce with
    tf.reduce_mean.
    """
    logits = tf.transpose(A)
    labels = tf.transpose(Y)
    cost = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    return cost
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, num_epochs = 1500):
    """Build, train, and evaluate the XOR network; return trained parameters.

    Args:
        X_train, X_test: feature arrays of shape (n_x, m).
        Y_train, Y_test: label arrays of shape (1, m).
        learning_rate: Adam step size.
        num_epochs: number of full-batch training iterations.

    Returns:
        dict of trained parameter values (numpy arrays) from the session.
    """
    ops.reset_default_graph()
    (n_x, m) = X_train.shape
    n_y = Y_train.shape[0]
    X, Y = create_placeholders(n_x, n_y)
    parameters = initialize_parameters()
    A2 = forward_propogation(X, parameters)
    cost = compute_cost(A2, Y)
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
    init = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init)
        for epoch in range(num_epochs):
            # Full-batch gradient step on the whole training set.
            _, epoch_cost = session.run([optimizer, cost],
                                        feed_dict = {X : X_train, Y : Y_train})
        parameters = session.run(parameters)
        # BUG FIX: the original compared tf.argmax(A2) with tf.argmax(Y);
        # argmax over single-row tensors is always 0, so accuracy was always
        # 1.0 regardless of predictions. Compare thresholded predictions
        # elementwise instead, and scale by 100 to match the "%" message.
        predictions = tf.round(tf.sigmoid(A2))
        correct_prediction = tf.equal(predictions, Y)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("Training Accuracy is {0} %...".format(
            100 * accuracy.eval({X : X_train, Y : Y_train})))
        print("Test Accuracy is {0} %...".format(
            100 * accuracy.eval({X : X_test, Y : Y_test})))
    return parameters
def accuracy_check(X_t, Y_t, parameters):
    """Run a fresh forward pass and return the accuracy (0-100) on (X_t, Y_t).

    Args:
        X_t: feature array of shape (n_x, m).
        Y_t: label array of shape (1, m) with values in {0, 1}.
        parameters: dict of learned parameter values (numpy arrays), as
            returned by model().

    Returns:
        Percentage of predictions matching Y_t.
    """
    ops.reset_default_graph()
    (n_x, m) = X_t.shape
    n_y = Y_t.shape[0]
    X, Y = create_placeholders(n_x, n_y)
    A2 = forward_propogation(X, parameters)
    init = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init)
        A = session.run(A2, feed_dict = {X : X_t, Y : Y_t})
    # BUG FIX: the original loop `for i in A: for j in i: if j >= 0.5: j = 1`
    # only rebound the local name j — a complete no-op. Threshold the array.
    A = (A >= 0.5).astype(np.float64)
    tot = A.shape[1]
    # BUG FIX: `A[i][j] - Y_t[i][j] is 0` used identity comparison on numpy
    # scalars, which is incorrect (and a SyntaxWarning on modern Python).
    # Count equal entries with a vectorized comparison instead.
    count = int(np.sum(A == np.asarray(Y_t)))
    return count * 100 / tot
def main():
    """Script entry point: load the XOR data, then train and evaluate."""
    X_train, X_test, Y_train, Y_test = read_and_split_training_data()
    model(X_train, Y_train, X_test, Y_test)


if __name__ == "__main__":
    main()
Add Comment
Please, Sign In to add comment