import glob
import numpy as np
from PIL import Image
import tensorflow as tf
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
img_dir_path = 'train_test_data/*.jpg'
train_test_filelist = glob.glob(img_dir_path)
#y_train_test_list = [0 if 'Y' in files else 1 for files in train_test_filelist]
X_train_test = np.array([np.array(Image.open(fname)) for fname in train_test_filelist])
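# Sketch (assumption, not in the original): a quick sanity check on the loaded
# images. Scaling uint8 pixels to [0, 1] float32 is a common extra step; it is
# left commented out so the original pipeline is unchanged.
print('loaded images:', X_train_test.shape, X_train_test.dtype)
#X_train_test = X_train_test.astype(np.float32) / 255.0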
learning_rate = 0.01
num_steps = 500
batch_size = 128
n_hidden_1 = 6000  # 1st layer number of neurons
n_hidden_2 = 600   # 2nd layer number of neurons
num_input = 34560  # flattened image size for this dataset (not MNIST's 28*28)
num_classes = 2    # binary classification (not MNIST's 10 digits)
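# Sketch: num_input is hard-coded above; checking it against the loaded data
# makes a shape mismatch fail early rather than at the reshape calls below.
assert X_train_test[0].size == num_input, 'flattened image size != num_input'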
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def model(X, w_h, w_o):
    h = tf.nn.sigmoid(tf.matmul(X, w_h))  # a basic MLP: two stacked logistic regressions
    return tf.matmul(h, w_o)
def neural_net(x):
    # Hidden fully connected layer with n_hidden_1 (6000) neurons.
    # Note: no activation is applied, so the two stacked layers stay linear.
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    # Hidden fully connected layer with n_hidden_2 (600) neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # Output fully connected layer with a neuron for each class
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
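# Sketch of a dropout variant (assumption: this is what the otherwise unused
# keep_prob placeholder defined below was intended for). It also adds ReLU
# nonlinearities between the layers; not wired into the graph, shown only for
# illustration.
def neural_net_dropout(x, keep_prob):
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    layer_1 = tf.nn.dropout(layer_1, keep_prob)  # TF1: second arg is the keep probability
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))
    layer_2 = tf.nn.dropout(layer_2, keep_prob)
    return tf.matmul(layer_2, weights['out']) + biases['out']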
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([num_classes]))
}
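# Sketch: stddev-1.0 random_normal on a 34560-wide input produces very large
# initial logits; the weight_variable/bias_variable helpers above give a scaled
# truncated-normal alternative. Commented out so the original init stays in effect.
#weights = {'h1': weight_variable([num_input, n_hidden_1]),
#           'h2': weight_variable([n_hidden_1, n_hidden_2]),
#           'out': weight_variable([n_hidden_2, num_classes])}
#biases = {'b1': bias_variable([n_hidden_1]),
#          'b2': bias_variable([n_hidden_2]),
#          'out': bias_variable([num_classes])}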
array = []
with open('train_test_data_labels.txt') as fileobj:
    for line in fileobj:
        for ch in line:
            array.append(ch)
y_train_test_list = [a for a in array if a != '\n']
y_train_test = np.asarray(y_train_test_list, dtype=np.int32)  # cast digit characters to ints; tf.one_hot below requires integer indices
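# Sketch: an equivalent one-liner for the character-by-character read above,
# assuming the labels file contains only digit characters and newlines.
#y_train_test = np.array(list(open('train_test_data_labels.txt').read().replace('\n', '')), dtype=np.int32)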
X = tf.placeholder("float", [None, num_input])
#Y = tf.placeholder("float", [None, 2])
Y = tf.placeholder("float", [None, num_classes])
w_h = init_weights([34560, 10000])  # weights for the single-hidden-layer model() above
w_o = init_weights([10000, 2])
py_x = model(X, w_h, w_o)  # note: py_x is never used below; neural_net() drives training
logits = neural_net(X)
prediction = tf.nn.softmax(logits)
X_train, X_test = X_train_test[:7950], X_train_test[7950:9940]
y_train, y_test = y_train_test[:7950], y_train_test[7950:9940]
#X_train, y_train = unison_shuffled_copies(X_train, y_train)
#X_test, y_test = unison_shuffled_copies(X_test, y_test)
X_train_reshaped = X_train.reshape(7950, 34560)
X_test_reshaped = X_test.reshape(1990, 34560)
#X_train = np.expand_dims(X_train, axis=4)  # unused below; left over from a CNN-style experiment
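# Sketch: the 7950/9940 indices are hard-coded; the already-imported
# train_test_split gives a shuffled split with derived sizes (assumption: a
# random 80/20 split is acceptable here). Commented out to keep the ordered split.
#X_train_reshaped, X_test_reshaped, y_train, y_test = train_test_split(
#    X_train_test.reshape(len(X_train_test), num_input), y_train_test,
#    test_size=0.2, random_state=42)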
keep_prob = tf.placeholder(tf.float32)  # fed below, but no dropout op consumes it in this graph
train_len = len(y_train)
test_len = len(y_test)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))
#optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
#train_op = optimizer.minimize(loss_op)
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)  # construct an optimizer
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
onehot_label_data_train = tf.one_hot(y_train, 2)
onehot_label_data_test = tf.one_hot(y_test, 2)
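# Sketch: the same one-hot encoding can be done in NumPy up front, which avoids
# the extra sess.run calls at the top of the session below.
#onehot_label_data_train = np.eye(num_classes, dtype=np.float32)[y_train]
#onehot_label_data_test = np.eye(num_classes, dtype=np.float32)[y_test]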
with tf.Session(config=config) as sess:
    onehot_label_data_train = sess.run(onehot_label_data_train)
    onehot_label_data_test = sess.run(onehot_label_data_test)
    sess.run(tf.global_variables_initializer())
    train_batch_count = 0
    test_batch_count = 0
    tot_train_acc = 0
    tot_test_acc = 0
    for i in range(50):
        for j in range(0, train_len, batch_size):
            train_batch_count += 1
            train_step.run(feed_dict={X: X_train_reshaped[j:j+batch_size], Y: onehot_label_data_train[j:j+batch_size], keep_prob: 0.5})
            train_accuracy = sess.run(accuracy, feed_dict={X: X_train_reshaped[j:j+batch_size, :], Y: onehot_label_data_train[j:j+batch_size, :], keep_prob: 1.0})
            tot_train_acc = tot_train_acc + train_accuracy
        avg_train_acc = tot_train_acc / train_batch_count  # was train_batch_count+1: off by one, since the count is incremented once per batch
        test_accuracy = sess.run(accuracy, feed_dict={X: X_test_reshaped, Y: onehot_label_data_test, keep_prob: 1.0})
        print('Epoch:%g Train accuracy %g Test accuracy %g' % (i, avg_train_acc, test_accuracy))
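# Sketch: the commented-out unison_shuffled_copies calls earlier suggest
# per-epoch shuffling was intended; a minimal version of that (hypothetical)
# helper would be:
#def unison_shuffled_copies(a, b):
#    perm = np.random.permutation(len(a))
#    return a[perm], b[perm]
# Inside the epoch loop it would be called as:
#    X_train_reshaped, onehot_label_data_train = unison_shuffled_copies(
#        X_train_reshaped, onehot_label_data_train)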