Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import os

# Restrict TensorFlow to the first GPU only.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Load the raw spreadsheet into a DataFrame.
df = pd.read_excel(r"C:/Users/ggmah/Desktop/HMM Data updated.xlsx")
tf.logging.set_verbosity(tf.logging.INFO)

# BUG FIX: the original `OneHotEncoder(df)` passed the DataFrame as a
# constructor argument, which configures the encoder instead of encoding
# anything. The actual one-hot encoding is done with pd.get_dummies below,
# so only an unfitted encoder instance is kept here.
dff = OneHotEncoder()
dfg = pd.get_dummies(df)

# Map column index -> column name for later label lookups.
label_dict = dict(enumerate(df.columns.values))

# Training hyperparameters.
training_iters = 220
learning_rate = 0.002
batch_size = 16
n_input = 59
n_classes = 11

# Graph inputs: x is a batch of 60x11 single-channel "images", y the one-hot
# labels. NOTE(review): the 60x11 placeholder shape does not obviously match
# n_input = 59 — confirm the intended input dimensions.
x = tf.placeholder("float", [None, 60, 11, 1])
y = tf.placeholder("float", [None, n_classes])
def conv2d(x, W, b, strides=1):
    """2-D convolution with SAME padding, bias add, and ReLU activation.

    Args:
        x: input tensor in NHWC layout.
        W: convolution kernel variable.
        b: bias variable, one value per output channel.
        strides: spatial stride applied on both height and width.

    Returns:
        The activated feature map.
    """
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)
def maxpool2d(x, k=2):
    """Max-pool `x` with a k x k window and stride k (SAME padding), halving
    each spatial dimension for k=2 (rounding up at odd sizes)."""
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')
# Trainable parameters, Xavier-initialized.
# Kernel shapes are (height, width, in_channels, out_channels).
weights = {
    'wc1': tf.get_variable('W0', shape=(3, 3, 1, 32), initializer=tf.contrib.layers.xavier_initializer()),
    'wc2': tf.get_variable('W1', shape=(3, 3, 32, 64), initializer=tf.contrib.layers.xavier_initializer()),
    'wc3': tf.get_variable('W2', shape=(3, 3, 64, 128), initializer=tf.contrib.layers.xavier_initializer()),
    'wd1': tf.get_variable('W3', shape=(4 * 4 * 128, 128), initializer=tf.contrib.layers.xavier_initializer()),
    'out': tf.get_variable('W6', shape=(128, n_classes), initializer=tf.contrib.layers.xavier_initializer()),
}
# BUG FIX: the 'out' bias was shape (10) while the logits layer produces
# n_classes (= 11) units, so tf.nn.bias_add on the output layer would fail
# with a shape mismatch; it is now tied to n_classes.
# NOTE(review): Xavier initialization for biases is unusual (zeros is the
# common choice) — kept as-is to preserve behavior, but worth confirming.
biases = {
    'bc1': tf.get_variable('B0', shape=(32), initializer=tf.contrib.layers.xavier_initializer()),
    'bc2': tf.get_variable('B1', shape=(64), initializer=tf.contrib.layers.xavier_initializer()),
    'bc3': tf.get_variable('B2', shape=(128), initializer=tf.contrib.layers.xavier_initializer()),
    'bd1': tf.get_variable('B3', shape=(128), initializer=tf.contrib.layers.xavier_initializer()),
    'out': tf.get_variable('B4', shape=(n_classes), initializer=tf.contrib.layers.xavier_initializer()),
}
# Features are the first ten attribute columns; the target is Att11.
X = df[['Att1', 'Att2', 'Att3', 'Att4', 'Att5',
        'Att6', 'Att7', 'Att8', 'Att9', 'Att10']]
Y = df[['Att11']]
# 88/12 train/test split; fixed random_state makes the split reproducible.
train_X, test_X, train_y, test_y = train_test_split(
    X, Y, train_size=0.88, random_state=5)
def conv_net(x, weights, biases):
    """Forward pass: three conv+maxpool stages, one fully connected layer,
    and a final linear layer producing raw class logits (no softmax here —
    the loss applies it).

    Args:
        x: input batch, shape (batch, 60, 11, 1).
        weights: dict of kernel/matrix variables ('wc1'..'wc3', 'wd1', 'out').
        biases: dict of bias variables ('bc1'..'bc3', 'bd1', 'out').

    Returns:
        Logits tensor of shape (batch, n_classes).
    """
    # Stage 1: conv2d = convolution + bias + ReLU; maxpool2d with k=2 roughly
    # halves each spatial dimension.
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    conv1 = maxpool2d(conv1, k=2)
    # Stage 2.
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)
    # Stage 3.
    conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
    conv3 = maxpool2d(conv3, k=2)
    # Flatten to (batch, 4*4*128) to match 'wd1'. NOTE(review): a 60x11 input
    # after three k=2 SAME pools is 8x2x128 = 2048 features, which happens to
    # equal 4*4*128, so the reshape works — but confirm the intended geometry.
    fc1 = tf.reshape(conv3, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Output layer: multiply the fully connected activations by the output
    # weights and add the bias to get per-class logits.
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
# Build the training graph: logits, softmax cross-entropy loss averaged over
# the batch, and an Adam optimizer minimizing that loss.
pred = conv_net(x, weights, biases)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# A prediction is correct when the index of the maximum logit equals the
# index of the 1 in the one-hot label; both argmax results are vectors of
# length batch_size.
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Accuracy = fraction of correct predictions, averaged across the batch.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement