import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# Read data from file 'photodata.csv'
# (in the same directory as your Python process)
# Delimiters, rows, and column names can be controlled with read_csv (see below)
df = pd.read_csv("photodata.csv")
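# A sketch of the read_csv controls mentioned above (assumed layout; adjust the
# separator/header arguments if photodata.csv differs):
# df = pd.read_csv("photodata.csv", sep=",", header=0, usecols=["emotion", "pixels", "Usage"])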
trainBase = df[(df.emotion < 2) & (df.Usage == 'Training')]
testBase = df[(df.emotion < 2) & (df.Usage == 'PublicTest')]
classTrain = trainBase.drop(columns=["pixels", "Usage"])
pixelsTrain = trainBase.drop(columns=["emotion", "Usage"])
c = []
for element in pixelsTrain['pixels']:
    b = [float(x) for x in element.split()]
    c.append(b)
train_y = np.array(classTrain)
train_x = np.array(c)
classTest = testBase.drop(columns=["pixels", "Usage"])
pixelsTest = testBase.drop(columns=["emotion", "Usage"])
c = []
for element in pixelsTest['pixels']:
    b = [float(x) for x in element.split()]
    c.append(b)
test_y = np.array(classTest)
test_x = np.array(c)
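# A vectorized alternative to the parsing loops above (a sketch; should produce
# the same arrays, assuming every 'pixels' entry is a space-separated string):
# train_x = np.stack(trainBase['pixels'].map(lambda s: np.asarray(s.split(), dtype=np.float64)))
# test_x = np.stack(testBase['pixels'].map(lambda s: np.asarray(s.split(), dtype=np.float64)))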
#######################################################
# Standardization of the data
# StandardScaler: mean to zero and standard deviation to 1 (works best with neural networks)
# MinMaxScaler: rescales values to between 0 and 1
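# A minimal sketch of the MinMaxScaler alternative described above
# (commented out; swap in for StandardScaler to rescale pixels into [0, 1]):
# scaler = MinMaxScaler(feature_range=(0, 1))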
scaler = StandardScaler()
scaler.fit(train_x)  # learns the per-feature mean and standard deviation from the training data
train_x = scaler.transform(train_x)
train = np.append(train_x, train_y, axis=1)
test_x = scaler.transform(test_x)  # reuse the training-set statistics; do not refit on test data
n_class = 2
n_train_samples = len(train)
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, 2304])  # 2304 = 48*48 pixels per image
X_img = tf.reshape(X, [-1, 48, 48, 1])  # n images, 48x48, 1 channel (grayscale)
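# e.g. a batch of 100 flattened rows of shape (100, 2304) is reshaped to (100, 48, 48, 1)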
Y = tf.placeholder(tf.int32, [None, 1])
# layer one
# input: 48x48 image
W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))  # 3x3 filter, 1 input channel (grayscale), 32 filters
L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')  # stride 1, 'SAME' padding keeps 48x48
L1 = tf.nn.relu(L1)  # ReLU activation
L1 = tf.nn.avg_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # 2x2 pooling halves the map to 24x24
# dimensions are cut in half: 24x24x32
# layer two: input shape (?, 24, 24, 32)
# depth grows from 32 after the first layer to 64
W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
L2 = tf.nn.relu(L2)  # ReLU activation
L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # -> 12x12x64
W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')
L3 = tf.nn.relu(L3)  # ReLU activation
L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # -> 6x6x128
W4 = tf.Variable(tf.random_normal([3, 3, 128, 256], stddev=0.01))
L4 = tf.nn.conv2d(L3, W4, strides=[1, 1, 1, 1], padding='SAME')
L4 = tf.nn.relu(L4)  # ReLU activation; no pooling here, so the map stays 6x6x256
L4 = tf.reshape(L4, [-1, 6 * 6 * 256])  # flatten to a 9216-dimensional feature vector
# last conv output is 6x6x256
# final fully connected layer: 6*6*256 = 9216 inputs -> 2 outputs
W5 = tf.get_variable("W5", shape=[6 * 6 * 256, 2], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.random_normal([2]))
logits = tf.matmul(L4, W5) + b
H = tf.nn.softmax(logits)  # class probabilities; keep the raw logits for the loss below
Y_onehot = tf.one_hot(Y, n_class)
Y_onehot = tf.reshape(Y_onehot, [-1, n_class])
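# e.g. with n_class = 2, a label of 1 becomes the one-hot row [0., 1.] after one_hot + reshape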
# manual cross-entropy, kept for reference:
# cross_entropy = -tf.reduce_sum(Y_onehot * tf.log(tf.clip_by_value(H, 1e-10, 1.0)))
# cost = tf.reduce_mean(cross_entropy)
# note: pass the raw logits here, not the softmax output H, or softmax is applied twice
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y_onehot))
optimizer = tf.train.AdamOptimizer().minimize(cost)
is_correct = tf.equal(tf.argmax(H, 1), tf.argmax(Y_onehot, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
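# e.g. probabilities [0.9, 0.1] -> argmax 0; one-hot label [1., 0.] -> argmax 0 -> counted as correct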
n_epoch = 100
batch_size = 100
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epoch):
        np.random.shuffle(train)  # reshuffle features and labels together each epoch
        train_x = train[:, :-1]
        train_y = train[:, [-1]]
        avg_cost = 0
        total_batch = int(n_train_samples / batch_size)
        for i in range(total_batch):
            start = i * batch_size  # renamed from a/b to avoid shadowing the bias variable b
            end = (i + 1) * batch_size
            batch_cost, _ = sess.run([cost, optimizer],
                                     feed_dict={X: train_x[start:end, :], Y: train_y[start:end, :]})
            avg_cost += batch_cost / total_batch
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', avg_cost)
    print("Train Accuracy:", sess.run(accuracy, feed_dict={X: train_x, Y: train_y}))
    print("Test Accuracy:", sess.run(accuracy, feed_dict={X: test_x, Y: test_y}))