import numpy as np
import tensorflow as tf
from os import listdir
from PIL import Image

# Network and training hyperparameters.
nodes_l1 = 500
nodes_l2 = 100
nodes_l3 = 500
num_batches = 20
num_epochs = 50

# Build (path, one-hot label) pairs: [1, 0] = human, [0, 1] = cucumber.
human_file_array = [['human/' + f, [1, 0]] for f in listdir('human/')]
cucumber_file_array = [['cucumber/' + f, [0, 1]] for f in listdir('cucumber/')]
file_array_shuffled = human_file_array + cucumber_file_array
np.random.shuffle(file_array_shuffled)

# Same pairing for the held-out test images.
htest_file_array = [['human_test/' + f, [1, 0]] for f in listdir('human_test/')]
ctest_file_array = [['cucumber_test/' + f, [0, 1]] for f in listdir('cucumber_test/')]
test_file_array = ctest_file_array + htest_file_array
np.random.shuffle(test_file_array)

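# Optional sanity check: shuffling permutes whole entries, so each path should
# still sit next to its own label, e.g. ('human/img_007.jpg', [1, 0]) -- the
# filename here is hypothetical.
for sample_path, sample_key in file_array_shuffled[:3]:
    print(sample_path, sample_key)
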
# Flattened 250x250 RGB image in, one-hot [human, cucumber] label out.
input_data = tf.placeholder(tf.float32, [None, 250 * 250 * 3])
output_data = tf.placeholder(tf.float32, [None, 2])

hl1_vars = {
    'weight': tf.Variable(tf.random_normal([250 * 250 * 3, nodes_l1])),
    'bias': tf.Variable(tf.random_normal([nodes_l1]))
}

hl2_vars = {
    'weight': tf.Variable(tf.random_normal([nodes_l1, nodes_l2])),
    'bias': tf.Variable(tf.random_normal([nodes_l2]))
}

hl3_vars = {
    'weight': tf.Variable(tf.random_normal([nodes_l2, nodes_l3])),
    'bias': tf.Variable(tf.random_normal([nodes_l3]))
}

output_layer_vars = {
    'weight': tf.Variable(tf.random_normal([nodes_l3, 2])),
    'bias': tf.Variable(tf.random_normal([2]))
}

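# With unit-variance normals feeding a 187500-wide input, first-layer
# pre-activations come out on the order of sqrt(187500) ~ 433 times the input
# scale, which saturates the network. A smaller stddev is one common remedy
# (a sketch, not tuned for this data):
#   tf.Variable(tf.random_normal([250 * 250 * 3, nodes_l1], stddev=0.01))
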
# ReLU keeps hidden activations unbounded; softmax here would squash every
# hidden layer onto a probability simplex and stall training.
layer1 = tf.nn.relu(tf.add(tf.matmul(input_data, hl1_vars['weight']), hl1_vars['bias']))
layer2 = tf.nn.relu(tf.add(tf.matmul(layer1, hl2_vars['weight']), hl2_vars['bias']))
layer3 = tf.nn.relu(tf.add(tf.matmul(layer2, hl3_vars['weight']), hl3_vars['bias']))

# Keep the raw logits for the loss; softmax is applied only for readout.
logits = tf.add(tf.matmul(layer3, output_layer_vars['weight']), output_layer_vars['bias'])
output = tf.nn.softmax(logits)

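# Shape walk-through for one batch of N images:
#   input_data: (N, 187500) -> layer1: (N, 500) -> layer2: (N, 100)
#   -> layer3: (N, 500) -> logits/output: (N, 2)
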
def convert_image(path):
    # Resize to the fixed network input and force 3 channels
    # (drops alpha, expands grayscale).
    with Image.open(path) as img:
        img = img.resize((250, 250))
        img = img.convert('RGB')
        return img

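# Example usage with a hypothetical file: whatever the source size or mode
# (RGBA, grayscale), the converted array always has shape (250, 250, 3).
#   arr = np.array(convert_image('human/img_007.jpg'))
#   assert arr.shape == (250, 250, 3)
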
def train_network():
    # softmax_cross_entropy_with_logits expects unscaled logits, so the loss is
    # wired to `logits` rather than the softmaxed `output`.
    cost = tf.reduce_sum(
        tf.nn.softmax_cross_entropy_with_logits(labels=output_data, logits=logits))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        for epoch in range(num_epochs):
            epoch_error = 0
            batch_size = len(file_array_shuffled) // num_batches
            for i in range(num_batches):
                path_var = []
                key_var = []
                img_var = []
                # Slice this batch of (path, label) pairs out of the shuffled list.
                batch_file_array = file_array_shuffled[batch_size * i:batch_size * (i + 1)]
                for batch_val in batch_file_array:
                    path_var.append(batch_val[0])
                    key_var.append(batch_val[1])
                # path_var and key_var now share indexes -- do not reshuffle them.

                # Load each image and flatten it to a 250*250*3 vector, scaled
                # to [0, 1] so the first matmul does not immediately saturate.
                for path in path_var:
                    img = convert_image(path)
                    img_var.append(np.reshape(np.array(img), 250 * 250 * 3) / 255.0)
                # img_var: (batch, 250*250*3); key_var: the [human, cucumber] keys.

                _, c = sess.run([optimizer, cost],
                                feed_dict={input_data: img_var, output_data: key_var})
                epoch_error += c
            print("Epoch", epoch + 1, "completed out of", num_epochs, "\tError", epoch_error)
            save_path = saver.save(sess, "./model.ckpt")

train_network()

def use_network():
    with tf.Session() as sess:
        saver = tf.train.Saver()
        # restore() loads every saved variable, so no separate initialization
        # pass is needed here.
        saver.restore(sess, "./model.ckpt")

        for test_file in test_file_array:
            img = np.reshape(np.array(convert_image(test_file[0])), 250 * 250 * 3) / 255.0
            result = output.eval(feed_dict={input_data: [img]})
            # np.argmax avoids adding a fresh argmax op to the graph on every
            # iteration, unlike tf.argmax(...).eval() in a loop.
            print(result, np.argmax(result, 1), test_file[1])

use_network()
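
# A fuller evaluation might tally accuracy over the whole test set instead of
# printing each prediction; a minimal sketch reusing the graph above:
def test_accuracy():
    with tf.Session() as sess:
        saver = tf.train.Saver()
        saver.restore(sess, "./model.ckpt")
        correct = 0
        for path, key in test_file_array:
            img = np.reshape(np.array(convert_image(path)), 250 * 250 * 3) / 255.0
            result = output.eval(feed_dict={input_data: [img]})
            correct += int(np.argmax(result, 1)[0] == np.argmax(key))
        print("accuracy:", correct / len(test_file_array))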