import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler

# Read data from file 'photodata.csv'
# (in the same directory that your python process is based in)
# Control delimiters, rows, column names with read_csv (see the pandas docs)
df = pd.read_csv("photodata.csv")

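# Assumed layout of photodata.csv (not documented here): an integer 'emotion' label,
# a 'pixels' column of 2304 space-separated grayscale values (one 48x48 image),
# and a 'Usage' column with values such as 'Training' and 'PublicTest',
# i.e. the usual FER2013-style CSV format.
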
# Keep only the first two emotion classes; split train/test by the Usage column
trainBase = df[(df.emotion < 2) & (df.Usage == 'Training')]
testBase = df[(df.emotion < 2) & (df.Usage == 'PublicTest')]

# Labels keep only 'emotion'; features keep only the 'pixels' strings
classTrain = trainBase.drop(columns=["pixels", "Usage"])
pixelsTrain = trainBase.drop(columns=["emotion", "Usage"])

# Parse every space-separated pixel string into a list of floats
c = []
for element in pixelsTrain['pixels']:
    b = [float(x) for x in element.split()]
    c.append(b)

#pixelsTrain = pixelsTrain.applymap(lambda element: [float(x) for x in element.split()])
#classTrain = classTrain.to_list()
#classTrain = map(float, classTrain.split())
train_y = np.array(classTrain)
train_x = np.array(c)

# Same preprocessing for the test split
classTest = testBase.drop(columns=["pixels", "Usage"])
pixelsTest = testBase.drop(columns=["emotion", "Usage"])
#pixelsTest = pixelsTest.applymap(lambda element: [float(x) for x in element.split()])
c = []
for element in pixelsTest['pixels']:
    b = [float(x) for x in element.split()]
    c.append(b)

test_y = np.array(classTest)
test_x = np.array(c)

#######################################################

# Standardization of the data (mean to zero and standard deviation to 1)
# StandardScaler (mean to zero and standard deviation to 1) *works best with neural networks
# MinMaxScaler (the values are between 0 and 1)
scaler = StandardScaler()
scaler.fit(train_x)  # estimates the mean/std parameters used by StandardScaler()
train_x = scaler.transform(train_x)
train = np.append(train_x, train_y, axis=1)  # stack features and label into one matrix

# Scale the test set with the scaler fitted on the training data (do not refit on test)
test_x = scaler.transform(test_x)

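# Alternative scaling (sketch, not used in this run): MinMaxScaler, imported above,
# would squash each pixel feature into [0, 1] instead of zero mean / unit variance:
#   mm = MinMaxScaler()
#   train_x = mm.fit_transform(train_x)
#   test_x = mm.transform(test_x)
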
n_class = 2
n_train_samples = len(train)

tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, 2304])  # 2304 pixels per image
X_img = tf.reshape(X, [-1, 48, 48, 1])        # n images of 48x48, 1 channel (grayscale)
Y = tf.placeholder(tf.int32, [None, 1])

# Layer one
# input = 48x48 image
W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))  # 3x3 filter, 1 channel (grayscale), 32 filters
L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')  # stride 1, 'SAME' padding
L1 = tf.nn.relu(L1)  # ReLU activation function
L1 = tf.nn.avg_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # after pooling the image is 24x24
# dimensions are cut in half: 24x24x32

# L2 ImgIn shape=(?, 24, 24, 32)
# depth after the first layer is 32; the new depth is 64
W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
L2 = tf.nn.relu(L2)  # ReLU activation function
L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # -> (?, 12, 12, 64)

# L3 ImgIn shape=(?, 12, 12, 64)
W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')
L3 = tf.nn.relu(L3)  # ReLU activation function
L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # -> (?, 6, 6, 128)

# L4 ImgIn shape=(?, 6, 6, 128); no pooling here, so the spatial size stays 6x6
W4 = tf.Variable(tf.random_normal([3, 3, 128, 256], stddev=0.01))
L4 = tf.nn.conv2d(L3, W4, strides=[1, 1, 1, 1], padding='SAME')
L4 = tf.nn.relu(L4)  # ReLU activation function
L4 = tf.reshape(L4, [-1, 6 * 6 * 256])  # flatten for the fully connected layer

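# Why 6*6*256: each of the three 2x2 poolings halves the 48x48 input
# (48 -> 24 -> 12 -> 6) and the last conv layer has 256 filters, so every
# image flattens to 6 * 6 * 256 = 9216 features.
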
# Final fully connected layer: 6*6*256 inputs -> 2 outputs
W5 = tf.get_variable("W5", shape=[6 * 6 * 256, 2], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.random_normal([2]))
logits = tf.matmul(L4, W5) + b
H = tf.nn.softmax(logits)  # class probabilities

Y_onehot = tf.one_hot(Y, n_class)
Y_onehot = tf.reshape(Y_onehot, [-1, n_class])

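# Note: tf.one_hot maps label 0 -> [1, 0] and 1 -> [0, 1]; since Y has shape
# [None, 1], one_hot returns [None, 1, 2], and the reshape above flattens it to
# [None, 2] so it lines up with the 2-way logits.
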
# Manual cross-entropy on the softmax output (kept as an unused alternative)
cross_entropy = -tf.reduce_sum(Y_onehot * tf.log(tf.clip_by_value(H, 1e-10, 1.0)))
#cost = tf.reduce_mean(cross_entropy)

# softmax_cross_entropy_with_logits_v2 expects the raw logits, not the softmax output
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y_onehot))
optimizer = tf.train.AdamOptimizer().minimize(cost)

is_correct = tf.equal(tf.argmax(H, 1), tf.argmax(Y_onehot, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

n_epoch = 100
batch_size = 100
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epoch):
        np.random.shuffle(train)  # reshuffle the combined features+label matrix each epoch
        train_x = train[:, :-1]
        train_y = train[:, [-1]]
        avg_cost = 0
        total_batch = int(n_train_samples / batch_size)
        for i in range(total_batch):
            start = i * batch_size
            end = (i + 1) * batch_size
            batch_cost, _ = sess.run([cost, optimizer],
                                     feed_dict={X: train_x[start:end, :], Y: train_y[start:end, :]})
            avg_cost += batch_cost / total_batch
        print('Epoch:', '%04d' % (epoch + 1), 'cost=', avg_cost)
        print("Train Accuracy: ", sess.run([accuracy], feed_dict={X: train_x, Y: train_y}))
        print("Test Accuracy: ", sess.run([accuracy], feed_dict={X: test_x, Y: test_y}))
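
# Optional sketch: inside the with-block above, per-image class predictions could
# also be read from the softmax output, e.g.
#   preds = sess.run(tf.argmax(H, 1), feed_dict={X: test_x})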