import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

df = pd.read_excel(r"C:/Users/ggmah/Desktop/HMM Data updated.xlsx")
tf.logging.set_verbosity(tf.logging.INFO)
# pd.get_dummies one-hot encodes the DataFrame's categorical columns directly
# (sklearn's OneHotEncoder would have to be fit on the data; it cannot be
# handed the frame in its constructor).
dfg = pd.get_dummies(df)
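# Toy illustration of what pd.get_dummies produces (example frame, not the
# HMM data): each categorical column expands into one indicator column per
# level, so 'c' below becomes the two columns c_a and c_b.
demo = pd.DataFrame({'c': ['a', 'b', 'a']})
print(pd.get_dummies(demo))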

# Map column indices to column names.
o = list(df.columns.values)
label_dict = dict()
for i, value in enumerate(o):
    label_dict[i] = value

# Hyperparameters.
training_iters = 220
learning_rate = 0.002
batch_size = 16
n_input = 59  # note: the placeholder below is built with a hard-coded 60x11 shape
n_classes = 11

# Each sample is treated as a 60x11 single-channel "image"; labels are one-hot.
x = tf.placeholder("float", [None, 60, 11, 1])
y = tf.placeholder("float", [None, n_classes])

def conv2d(x, W, b, strides=1):
    # Convolution with SAME padding, followed by bias add and ReLU.
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    # k x k max pooling with stride k (SAME padding, so odd sizes round up).
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')

# After three rounds of 2x2 pooling with SAME padding, the 60x11 input shrinks
# to 8x2, so conv3 flattens to 8*2*128 = 2048 values -- numerically the same
# as the 4*4*128 written below.
weights = {
    'wc1': tf.get_variable('W0', shape=(3, 3, 1, 32), initializer=tf.contrib.layers.xavier_initializer()),
    'wc2': tf.get_variable('W1', shape=(3, 3, 32, 64), initializer=tf.contrib.layers.xavier_initializer()),
    'wc3': tf.get_variable('W2', shape=(3, 3, 64, 128), initializer=tf.contrib.layers.xavier_initializer()),
    'wd1': tf.get_variable('W3', shape=(4*4*128, 128), initializer=tf.contrib.layers.xavier_initializer()),
    'out': tf.get_variable('W6', shape=(128, n_classes), initializer=tf.contrib.layers.xavier_initializer()),
}
biases = {
    'bc1': tf.get_variable('B0', shape=(32), initializer=tf.contrib.layers.xavier_initializer()),
    'bc2': tf.get_variable('B1', shape=(64), initializer=tf.contrib.layers.xavier_initializer()),
    'bc3': tf.get_variable('B2', shape=(128), initializer=tf.contrib.layers.xavier_initializer()),
    'bd1': tf.get_variable('B3', shape=(128), initializer=tf.contrib.layers.xavier_initializer()),
    # Must match n_classes (11); a shape of (10) would fail to add to the
    # [batch, 11] logits tensor.
    'out': tf.get_variable('B4', shape=(n_classes), initializer=tf.contrib.layers.xavier_initializer()),
}

# Attributes 1-10 are the features; Att11 is the target.
X = df[['Att1', 'Att2', 'Att3', 'Att4', 'Att5', 'Att6', 'Att7', 'Att8', 'Att9', 'Att10']]
Y = df[['Att11']]
train_X, test_X, train_y, test_y = train_test_split(X, Y, train_size=0.88, random_state=5)

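# The placeholder x expects inputs of shape [batch, 60, 11, 1], but train_X
# holds flat rows of 10 attributes, and the paste never shows the conversion.
# A hypothetical sketch of one possible bridge, assuming 60 consecutive rows
# of an 11-column array (e.g. the one-hot frame dfg) form one sample; the
# helper name make_windows and this layout are illustrative assumptions, not
# part of the original code.
def make_windows(arr, window=60):
    # Stack non-overlapping blocks of `window` rows into [n, window, cols, 1].
    n = len(arr) // window
    return arr[:n * window].reshape(n, window, arr.shape[1], 1).astype(np.float32)
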
def conv_net(x, weights, biases):
    # Here we call the conv2d function defined above, passing the input
    # image x, weights wc1 and bias bc1.
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max pooling (down-sampling): takes the max of each 2x2 window, halving
    # both spatial dimensions (60x11 -> 30x6 with SAME padding; the original
    # comment's "14*14" figure does not apply to this input size).
    conv1 = maxpool2d(conv1, k=2)

    # Second convolution layer + pooling.
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)

    # Third convolution layer + pooling.
    conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
    conv3 = maxpool2d(conv3, k=2)

    # Flatten conv3 and apply the fully connected layer.
    fc1 = tf.reshape(conv3, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Output, class prediction: finally we multiply the fully connected layer
    # by the output weights and add a bias term.
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out

pred = conv_net(x, weights, biases)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Check whether the index of the maximum value in the prediction equals the
# index of the actual label; both argmax results are column vectors.
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))

# Calculate accuracy across all the given images and average it out.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
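
# The paste builds the graph but never runs it. A minimal training-loop
# sketch, assuming the features have already been converted to numpy arrays
# of shape [n, 60, 11, 1] (train_data/test_data) with matching one-hot labels
# (train_labels/test_labels); those array names and the next_batch helper are
# illustrative assumptions, not part of the original code.
def next_batch(features, labels, size):
    # Yield successive mini-batches of the given size.
    for i in range(0, len(features), size):
        yield features[i:i + size], labels[i:i + size]

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(training_iters):
        for batch_x, batch_y in next_batch(train_data, train_labels, batch_size):
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y})
        print("Iter {}: loss={:.4f}, batch accuracy={:.4f}".format(step, loss, acc))
    # Evaluate once on the held-out split.
    print("Test accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_labels}))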