%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
import time
from datetime import timedelta
import math

# Convolutional Layer 1.
filter_size1 = 5   # Convolution filters are 5 x 5 pixels.
num_filters1 = 16  # There are 16 of these filters.

# Convolutional Layer 2.
filter_size2 = 5   # Convolution filters are 5 x 5 pixels.
num_filters2 = 36  # There are 36 of these filters.

# Fully-connected layer.
fc_size = 128

# Load Data
import imageio
import glob

try:
    from scipy import misc
except ImportError:
    !pip install scipy
    from scipy import misc
# (scipy.misc is not actually used below; the images are read with imageio.)

training_size = 720
img_size = 384 * 284
train_images = np.empty(shape=(training_size, img_size))

i = 0
for filename in glob.glob('D:/HKPU/HKPU60CLS/Train/*.bmp'):
    image = imageio.imread(filename)
    print(image.shape)
    train_images[i] = image.reshape(-1)
    i += 1

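# Optional sketch (not in the original script): the BMP files are read as
# 0-255 intensities, and rescaling them to [0, 1] is a common preprocessing
# step for this kind of network. Assuming the loading loop above, it could
# look like this (the test images would need the same treatment):
# train_images = train_images / 255.0
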
# Training labels: 60 classes (one per person), 12 training images per class,
# assumed to be in the same order as the training files are read.
a = list(np.repeat(np.arange(60), 12))

from sklearn.preprocessing import OneHotEncoder
train_labels = OneHotEncoder(sparse=False).fit_transform(np.asarray(a).reshape(-1, 1))
print(train_labels)

# Test
test_size = 480
test_images = np.empty(shape=(test_size, img_size))

i = 0
for filename in glob.glob('D:/HKPU/HKPU60CLS/Test/*.bmp'):
    image = imageio.imread(filename)
    print(image.shape)
    test_images[i] = image.reshape(-1)
    i += 1

# Test labels: 60 classes, 8 test images per class,
# assumed to be in the same order as the test files are read.
c = list(np.repeat(np.arange(60), 8))

test_labels = OneHotEncoder(sparse=False).fit_transform(np.asarray(c).reshape(-1, 1))
print(test_labels)

print("Size of:")
print("- Training-set:\t\t{}".format(len(train_labels)))
print("- Test-set:\t\t{}".format(len(test_labels)))
#print("- Validation-set:\t{}".format(len(data.validation.labels)))

test_cls = np.argmax(test_labels, axis=1)

# Data dimensions
# The images are 284 x 384 pixels.
img_size1 = 284
img_size2 = 384

# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size1 * img_size2

# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size1, img_size2)

# Number of colour channels for the images: 1 channel for gray-scale.
num_channels = 1

# Number of classes, one class for each of the 60 persons.
num_classes = 60

# Helper-function for plotting images
def plot_images(images, cls_true, cls_pred=None):
    assert len(images) == len(cls_true) == 9

    # Create figure with 3x3 sub-plots.
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)

    for i, ax in enumerate(axes.flat):
        # Plot image.
        ax.imshow(images[i].reshape(img_shape), cmap='binary')

        # Show true and predicted classes.
        if cls_pred is None:
            xlabel = "True: {0}".format(cls_true[i])
        else:
            xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])

        ax.set_xlabel(xlabel)

        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])

# Plot a few images to see if data is correct
# Get the first images from the test-set.
images = test_images[0:9]

# Get the true classes for those images.
cls_true = test_cls[0:9]

# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)

# Helper-functions for creating new variables
def new_weights(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.05))

def new_biases(length):
    return tf.Variable(tf.constant(0.05, shape=[length]))

# Helper-function for creating a new Convolutional Layer
def new_conv_layer(input,               # The previous layer.
                   num_input_channels,  # Num. channels in prev. layer.
                   filter_size,         # Width and height of each filter.
                   num_filters,         # Number of filters.
                   use_pooling=True):   # Use 2x2 max-pooling.

    # Shape of the filter-weights for the convolution.
    # This format is determined by the TensorFlow API.
    shape = [filter_size, filter_size, num_input_channels, num_filters]

    # Create new weights aka. filters with the given shape.
    weights = new_weights(shape=shape)

    # Create new biases, one for each filter.
    biases = new_biases(length=num_filters)

    # Create the TensorFlow operation for convolution.
    # Note the strides are set to 1 in all dimensions.
    # The first and last stride must always be 1,
    # because the first is for the image-number and
    # the last is for the input-channel.
    # But e.g. strides=[1, 2, 2, 1] would mean that the filter
    # is moved 2 pixels across the x- and y-axis of the image.
    # The padding is set to 'SAME' which means the input image
    # is padded with zeroes so the size of the output is the same.
    layer = tf.nn.conv2d(input=input,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')

    # Add the biases to the results of the convolution.
    # A bias-value is added to each filter-channel.
    layer += biases

    # Use pooling to down-sample the image resolution?
    if use_pooling:
        # This is 2x2 max-pooling, which means that we
        # consider 2x2 windows and select the largest value
        # in each window. Then we move 2 pixels to the next window.
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')

    # Rectified Linear Unit (ReLU).
    # It calculates max(x, 0) for each input pixel x.
    # This adds some non-linearity to the formula and allows us
    # to learn more complicated functions.
    layer = tf.nn.relu(layer)

    # Note that ReLU is normally executed before the pooling,
    # but since relu(max_pool(x)) == max_pool(relu(x)) we can
    # save 75% of the relu-operations by max-pooling first.

    # We return both the resulting layer and the filter-weights
    # because we will plot the weights later.
    return layer, weights

# Helper-function for flattening a layer
def flatten_layer(layer):
    # Get the shape of the input layer.
    layer_shape = layer.get_shape()

    # The shape of the input layer is assumed to be:
    # layer_shape == [num_images, img_height, img_width, num_channels]

    # The number of features is: img_height * img_width * num_channels
    # We can use a function from TensorFlow to calculate this.
    num_features = layer_shape[1:4].num_elements()

    # Reshape the layer to [num_images, num_features].
    # Note that we just set the size of the second dimension
    # to num_features and the size of the first dimension to -1
    # which means the size in that dimension is calculated
    # so the total size of the tensor is unchanged from the reshaping.
    layer_flat = tf.reshape(layer, [-1, num_features])

    # The shape of the flattened layer is now:
    # [num_images, img_height * img_width * num_channels]

    # Return both the flattened layer and the number of features.
    return layer_flat, num_features

# Helper-function for creating a new Fully-Connected Layer
def new_fc_layer(input,           # The previous layer.
                 num_inputs,      # Num. inputs from prev. layer.
                 num_outputs,     # Num. outputs.
                 use_relu=True):  # Use Rectified Linear Unit (ReLU)?

    # Create new weights and biases.
    weights = new_weights(shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)

    # Calculate the layer as the matrix multiplication of
    # the input and weights, and then add the bias-values.
    layer = tf.matmul(input, weights) + biases

    # Use ReLU?
    if use_relu:
        layer = tf.nn.relu(layer)

    return layer

# Placeholder variables
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size1, img_size2, num_channels])
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, axis=1)

# Convolutional Layer 1
layer_conv1, weights_conv1 = \
    new_conv_layer(input=x_image,
                   num_input_channels=num_channels,
                   filter_size=filter_size1,
                   num_filters=num_filters1,
                   use_pooling=True)
layer_conv1

# Convolutional Layer 2
layer_conv2, weights_conv2 = \
    new_conv_layer(input=layer_conv1,
                   num_input_channels=num_filters1,
                   filter_size=filter_size2,
                   num_filters=num_filters2,
                   use_pooling=True)
layer_conv2

# Flatten Layer
layer_flat, num_features = flatten_layer(layer_conv2)
layer_flat
num_features

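# Rough shape check (illustrative, assuming the dimensions defined above):
# each conv layer keeps the 284 x 384 spatial size ('SAME' padding), and each
# 2x2 max-pooling halves it, so 284 x 384 -> 142 x 192 -> 71 x 96.
# With num_filters2 = 36 feature maps, num_features should come out to
# 71 * 96 * 36 = 245,376 per image.
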
layer_fc1 = new_fc_layer(input=layer_flat,
                         num_inputs=num_features,
                         num_outputs=fc_size,
                         use_relu=True)
layer_fc1

layer_fc2 = new_fc_layer(input=layer_fc1,
                         num_inputs=fc_size,
                         num_outputs=num_classes,
                         use_relu=False)
layer_fc2

# Predicted class
y_pred = tf.nn.softmax(layer_fc2)
y_pred_cls = tf.argmax(y_pred, axis=1)

# Cost-function to be optimized
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
                                                        labels=y_true)
cost = tf.reduce_mean(cross_entropy)

# Optimization method
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)

# Performance measures
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Create TensorFlow session and initialize variables
session = tf.Session()
session.run(tf.global_variables_initializer())

def next_batch(num, data, labels):
    '''
    Return a total of `num` random samples and labels.
    '''
    idx = np.arange(0, len(data))
    np.random.shuffle(idx)
    idx = idx[:num]
    data_shuffle = [data[i] for i in idx]
    labels_shuffle = [labels[i] for i in idx]

    return np.asarray(data_shuffle), np.asarray(labels_shuffle)

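# Illustrative usage of next_batch (assumes the arrays loaded above): draw a
# random batch of 10 images together with their one-hot labels.
# x_sample, y_sample = next_batch(10, train_images, train_labels)
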
train_batch_size = 10

# Counter for total number of iterations performed so far.
total_iterations = 0

def optimize(num_iterations):
    # Ensure we update the global variable rather than a local copy.
    global total_iterations

    # Start-time used for printing time-usage below.
    start_time = time.time()

    for i in range(total_iterations,
                   total_iterations + num_iterations):

        # Get a batch of training examples.
        # x_batch now holds a batch of images and
        # y_true_batch are the true labels for those images.
        x_batch, y_true_batch = next_batch(train_batch_size, train_images, train_labels)

        # Put the batch into a dict with the proper names
        # for placeholder variables in the TensorFlow graph.
        feed_dict_train = {x: x_batch,
                           y_true: y_true_batch}

        # Run the optimizer using this batch of training data.
        # TensorFlow assigns the variables in feed_dict_train
        # to the placeholder variables and then runs the optimizer.
        session.run(optimizer, feed_dict=feed_dict_train)

        # Print status every 100 iterations.
        if i % 100 == 0:
            # Calculate the accuracy on the training-set.
            acc = session.run(accuracy, feed_dict=feed_dict_train)

            # Message for printing.
            msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"

            # Print it.
            print(msg.format(i + 1, acc))

    # Update the total number of iterations performed.
    total_iterations += num_iterations

    # Ending time.
    end_time = time.time()

    # Difference between start and end-times.
    time_dif = end_time - start_time

    # Print the time-usage.
    print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))

# Helper-function to plot example errors
def plot_example_errors(cls_pred, correct):
    # This function is called from print_test_accuracy() below.

    # cls_pred is an array of the predicted class-number for
    # all images in the test-set.

    # correct is a boolean array whether the predicted class
    # is equal to the true class for each image in the test-set.

    # Negate the boolean array.
    incorrect = (correct == False)

    # Get the images from the test-set that have been
    # incorrectly classified.
    images = test_images[incorrect]

    # Get the predicted classes for those images.
    cls_pred = cls_pred[incorrect]

    # Get the true classes for those images.
    cls_true = test_cls[incorrect]

    # Plot the first 9 images.
    plot_images(images=images[0:9],
                cls_true=cls_true[0:9],
                cls_pred=cls_pred[0:9])

# Helper-function to plot confusion matrix
def plot_confusion_matrix(cls_pred):
    # This is called from print_test_accuracy() below.

    # cls_pred is an array of the predicted class-number for
    # all images in the test-set.

    # Get the true classifications for the test-set.
    cls_true = test_cls

    # Get the confusion matrix using sklearn.
    cm = confusion_matrix(y_true=cls_true,
                          y_pred=cls_pred)

    # Print the confusion matrix as text.
    print(cm)

    # Plot the confusion matrix as an image.
    plt.matshow(cm)

    # Make various adjustments to the plot.
    plt.colorbar()
    tick_marks = np.arange(num_classes)
    plt.xticks(tick_marks, range(num_classes))
    plt.yticks(tick_marks, range(num_classes))
    plt.xlabel('Predicted')
    plt.ylabel('True')

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

# Helper-function for showing the performance
# Split the test-set into smaller batches of this size.
test_batch_size = 10

def print_test_accuracy(show_example_errors=False,
                        show_confusion_matrix=False):

    # Number of images in the test-set.
    num_test = len(test_images)

    # Allocate an array for the predicted classes which
    # will be calculated in batches and filled into this array.
    cls_pred = np.zeros(shape=num_test, dtype=int)

    # Now calculate the predicted classes for the batches.
    # We will just iterate through all the batches.
    # There might be a more clever and Pythonic way of doing this.

    # The starting index for the next batch is denoted i.
    i = 0

    while i < num_test:
        # The ending index for the next batch is denoted j.
        j = min(i + test_batch_size, num_test)

        # Get the images from the test-set between index i and j.
        images = test_images[i:j, :]

        # Get the associated labels.
        labels = test_labels[i:j, :]

        # Create a feed-dict with these images and labels.
        feed_dict = {x: images,
                     y_true: labels}

        # Calculate the predicted class using TensorFlow.
        cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)

        # Set the start-index for the next batch to the
        # end-index of the current batch.
        i = j

    # Convenience variable for the true class-numbers of the test-set.
    cls_true = test_cls

    # Create a boolean array whether each image is correctly classified.
    correct = (cls_true == cls_pred)

    # Calculate the number of correctly classified images.
    # When summing a boolean array, False means 0 and True means 1.
    correct_sum = correct.sum()

    # Classification accuracy is the number of correctly classified
    # images divided by the total number of images in the test-set.
    acc = float(correct_sum) / num_test

    # Print the accuracy.
    msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
    print(msg.format(acc, correct_sum, num_test))

    # Plot some examples of mis-classifications, if desired.
    if show_example_errors:
        print("Example errors:")
        plot_example_errors(cls_pred=cls_pred, correct=correct)

    # Plot the confusion matrix, if desired.
    if show_confusion_matrix:
        print("Confusion Matrix:")
        plot_confusion_matrix(cls_pred=cls_pred)

# Performance before any optimization
print_test_accuracy()
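
# Illustrative next step (not part of the original paste): the network above
# is only evaluated before training. A typical follow-up, assuming the
# helpers defined earlier, would look something like this; the iteration
# count is an arbitrary example value.
# optimize(num_iterations=1000)
# print_test_accuracy(show_example_errors=True,
#                     show_confusion_matrix=True)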