mlp_tf_v3.py [draft; not learning; has unnecessary code]

Jan 19th, 2018
import glob

import numpy as np
import tensorflow as tf
from PIL import Image


img_dir_path = 'train_test_data/*.jpg'


# Collect all training/test images and stack them into one array.
train_test_filelist = glob.glob(img_dir_path)
X_train_test = np.array([np.array(Image.open(fname)) for fname in train_test_filelist])
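
# Not in the original draft, but a likely fix for "not learning": raw uint8
# pixels (0-255) against unit-variance weights saturate the activations
# immediately. Casting to float32 and scaling to [0, 1] is a cheap,
# standard first step.
X_train_test = X_train_test.astype(np.float32) / 255.0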

learning_rate = 0.01
num_steps = 500
batch_size = 128

n_hidden_1 = 6000    # 1st hidden layer width
n_hidden_2 = 600     # 2nd hidden layer width
num_input = 34560    # flattened size of one input image
num_classes = 2      # binary problem, two output classes

# Two-layer MLP. The draft stacked the layers with no nonlinearity, so
# the whole network collapsed into one linear map; ReLUs between the
# layers are the usual fix and a likely reason it was not learning.
def neural_net(x):
    # Hidden fully connected layer, n_hidden_1 units
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    # Hidden fully connected layer, n_hidden_2 units
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))
    # Output fully connected layer, one logit per class
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
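
# Sketch only, not in the original: the draft creates a keep_prob
# placeholder but never applies dropout. If regularization is wanted,
# the hidden layers could be routed through tf.nn.dropout, e.g.:
#
#     layer_1 = tf.nn.dropout(layer_1, keep_prob)
#     layer_2 = tf.nn.dropout(layer_2, keep_prob)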

# Small-stddev initial weights: unit-variance draws over a 34560-wide
# input give huge pre-activations and stall training.
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1], stddev=0.01)),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=0.01)),
    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes], stddev=0.01))
}
biases = {
    'b1': tf.Variable(tf.zeros([n_hidden_1])),
    'b2': tf.Variable(tf.zeros([n_hidden_2])),
    'out': tf.Variable(tf.zeros([num_classes]))
}
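
# Alternative sketch, not in the original: TF 1.x also ships a
# Glorot/Xavier initializer that scales variance to layer width
# automatically, e.g.:
#
#     glorot = tf.glorot_uniform_initializer()
#     weights = {'h1': tf.Variable(glorot([num_input, n_hidden_1])), ...}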

# Labels are stored one character per example; read them and convert to
# integers so tf.one_hot gets a numeric dtype (the draft kept them as
# '0'/'1' strings, which tf.one_hot cannot encode).
array = []
with open('train_test_data_labels.txt') as fileobj:
    for line in fileobj:
        for ch in line:
            if ch != '\n':
                array.append(ch)

y_train_test = np.asarray(array, dtype=np.int32)

X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder("float", [None, num_classes])

logits = neural_net(X)
prediction = tf.nn.softmax(logits)

# Fixed split: first 7950 images train, the next 1990 test.
X_train, X_test = X_train_test[:7950], X_train_test[7950:9940]
y_train, y_test = y_train_test[:7950], y_train_test[7950:9940]

# Flatten each image into one 34560-long feature vector.
X_train_reshaped = X_train.reshape(7950, 34560)
X_test_reshaped = X_test.reshape(1990, 34560)
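
# Sketch of the unison_shuffled_copies helper the draft referenced in
# commented-out code but never defined. A fixed, ordered split risks
# concentrating one class in the test slice; shuffling data and labels
# with the same permutation avoids that.
def unison_shuffled_copies(a, b):
    assert len(a) == len(b)
    perm = np.random.permutation(len(a))
    return a[perm], b[perm]

# X_train_reshaped, y_train = unison_shuffled_copies(X_train_reshaped, y_train)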

# Fed in every feed_dict but consumed by no op; see the dropout sketch
# after neural_net for how it would be wired in.
keep_prob = tf.placeholder(tf.float32)

train_len = len(y_train)
test_len = len(y_test)

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))

# Adam at lr=0.01 is aggressive for a model this wide; if the loss
# oscillates, a smaller rate such as 1e-4 is worth trying.
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

config = tf.ConfigProto()
config.gpu_options.allow_growth = True

# One-hot encode the integer labels for softmax cross-entropy.
onehot_label_data_train = tf.one_hot(y_train, num_classes)
onehot_label_data_test = tf.one_hot(y_test, num_classes)

with tf.Session(config=config) as sess:
    # Materialize the one-hot label tensors once, up front.
    onehot_label_data_train = sess.run(onehot_label_data_train)
    onehot_label_data_test = sess.run(onehot_label_data_test)
    sess.run(tf.global_variables_initializer())

    for i in range(50):
        # Reset the accumulators each epoch; the draft carried them
        # across epochs and divided by batch_count + 1, skewing the
        # reported average.
        train_batch_count = 0
        tot_train_acc = 0.0

        for j in range(0, train_len, batch_size):
            train_batch_count += 1
            train_step.run(feed_dict={X: X_train_reshaped[j:j + batch_size],
                                      Y: onehot_label_data_train[j:j + batch_size],
                                      keep_prob: 0.5})
            train_accuracy = sess.run(accuracy,
                                      feed_dict={X: X_train_reshaped[j:j + batch_size],
                                                 Y: onehot_label_data_train[j:j + batch_size],
                                                 keep_prob: 1.0})
            tot_train_acc += train_accuracy

        avg_train_acc = tot_train_acc / train_batch_count
        test_accuracy = sess.run(accuracy,
                                 feed_dict={X: X_test_reshaped,
                                            Y: onehot_label_data_test,
                                            keep_prob: 1.0})

        print('Epoch:%d Train accuracy %g Test accuracy %g'
              % (i, avg_train_acc, test_accuracy))
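
    # Added sketch, not in the original: per-class prediction counts
    # expose a classifier that always predicts the majority class,
    # which a single accuracy number can hide.
    preds = sess.run(tf.argmax(prediction, 1),
                     feed_dict={X: X_test_reshaped, keep_prob: 1.0})
    print('Test prediction counts per class:', np.bincount(preds, minlength=num_classes))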