from __future__ import print_function # Use a function definition from future version (say 3.x from 2.7 interpreter)
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import sys
import os

import cntk as C
import cntk.tests.test_utils
cntk.tests.test_utils.set_device_from_pytest_env() # (only needed for our build system)
C.cntk_py.set_fixed_random_seed(1) # fix a random seed for CNTK components


# Read a CTF formatted text file using the CTF deserializer
def create_reader(path, is_training, input_dim, num_label_classes):
    return C.io.MinibatchSource(C.io.CTFDeserializer(path, C.io.StreamDefs(
        labels = C.io.StreamDef(field='labels', shape=num_label_classes, is_sparse=False),
        features = C.io.StreamDef(field='features', shape=input_dim, is_sparse=False)
    )), randomize = is_training, max_sweeps = C.io.INFINITELY_REPEAT if is_training else 1)
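
# For reference (an assumption about the data files, not stated in this script):
# each line of the CTF files pairs a one-hot label with 784 pixel values, e.g.
#   |labels 0 0 0 0 0 1 0 0 0 0 |features 0 0 ... 255 ...
# which the 'labels' and 'features' StreamDefs above deserialize.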

def create_model(features):
    with C.layers.default_options(init = C.layers.glorot_uniform(), activation = C.ops.relu):
        h = features
        for _ in range(num_hidden_layers):
            h = C.layers.Dense(hidden_layers_dim)(h)
        r = C.layers.Dense(num_output_classes, activation = None)(h)
        return r
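
# With the settings used below (num_hidden_layers = 2, hidden_layers_dim = 400),
# this builds a 784 -> 400 -> 400 -> 10 network with ReLU hidden layers and a
# linear output layer; the softmax is applied later inside the loss function.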

# Define a utility function to compute the moving average.
# A more efficient implementation is possible with the np.cumsum() function
def moving_average(a, w=5):
    if len(a) < w:
        return a[:]    # Need to send a copy of the array
    return [val if idx < w else sum(a[(idx-w):idx])/w for idx, val in enumerate(a)]
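
# Worked example: moving_average([1, 2, 3, 4, 5, 6]) returns [1, 2, 3, 4, 5, 3.0];
# the first w values pass through unchanged, later ones are w-point averages.
# A sketch of the np.cumsum() variant mentioned above (an assumption, not part
# of the original script); it matches moving_average() only for indices >= w:
def moving_average_cumsum(a, w=5):
    c = np.cumsum(np.insert(np.asarray(a, dtype=float), 0, 0.0))
    return (c[w:] - c[:-w]) / w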


# Defines a utility that prints the training progress
def print_training_progress(trainer, mb, frequency, verbose=1):
    training_loss = "NA"
    eval_error = "NA"

    if mb % frequency == 0:
        training_loss = trainer.previous_minibatch_loss_average
        eval_error = trainer.previous_minibatch_evaluation_average
        if verbose:
            print("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%".format(mb, training_loss, eval_error*100))

    return mb, training_loss, eval_error

# Define the data dimensions
input_dim = 784
num_output_classes = 10

# Ensure the training and test data is generated and available for this tutorial.
# We look for the cached MNIST data set in data_dir.
data_found = False
data_dir = 'C:/Users/benke/Desktop/AI/CNTK'
train_file = os.path.join(data_dir, "Train-28x28_cntk_text.txt")
test_file = os.path.join(data_dir, "Test-28x28_cntk_text.txt")
if os.path.isfile(train_file) and os.path.isfile(test_file):
    data_found = True
if not data_found:
    raise ValueError("Please download the data first")
print("Data directory is {0}".format(data_dir))
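
# Note (an assumption based on the file names above): Train-28x28_cntk_text.txt
# and Test-28x28_cntk_text.txt are the CTF files produced by CNTK's MNIST
# data-loader tutorial; point data_dir at wherever they were saved.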


num_hidden_layers = 2
hidden_layers_dim = 400

input = C.input_variable(input_dim)
label = C.input_variable(num_output_classes)
z = create_model(input/255.0) # create the neural network; scale pixel values to [0, 1]


loss = C.cross_entropy_with_softmax(z, label)
label_error = C.classification_error(z, label)

# Instantiate the trainer object to drive the model training
learning_rate = 0.2
lr_schedule = C.learning_parameter_schedule(learning_rate)
learner = C.sgd(z.parameters, lr_schedule)
trainer = C.Trainer(z, (loss, label_error), [learner])
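
# A possible variant (not part of the original script): plain SGD could be
# swapped for momentum SGD, which often converges faster on MNIST, e.g.
#   learner = C.momentum_sgd(z.parameters, lr_schedule, C.momentum_schedule(0.9))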

# Create the reader for the training data set
reader_train = create_reader(train_file, True, input_dim, num_output_classes)

# Map the data streams to the input and labels.
input_map = {
    label : reader_train.streams.labels,
    input : reader_train.streams.features
}


# Initialize the parameters for the trainer
minibatch_size = 64
num_samples_per_sweep = 60000
num_sweeps_to_train_with = 10
num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size
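# 60000 samples/sweep * 10 sweeps / 64 samples/minibatch = 9375 minibatches.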


# Run the trainer and perform model training
training_progress_output_freq = 500

plotdata = {"batchsize":[], "loss":[], "error":[]}

for i in range(0, int(num_minibatches_to_train)):

    # Read a mini batch from the training data file
    data = reader_train.next_minibatch(minibatch_size, input_map = input_map)

    trainer.train_minibatch(data)
    batchsize, loss, error = print_training_progress(trainer, i, training_progress_output_freq, verbose=1)

    if not (loss == "NA" or error == "NA"):
        plotdata["batchsize"].append(batchsize)
        plotdata["loss"].append(loss)
        plotdata["error"].append(error)

# Compute the moving average loss to smooth out the noise in SGD
plotdata["avgloss"] = moving_average(plotdata["loss"])
plotdata["avgerror"] = moving_average(plotdata["error"])

# Draw both subplots, then call plt.show() once so they appear in a single
# figure; calling it between the subplots blocks on the first plot alone when
# run as a plain script.
plt.figure(1)
plt.subplot(211)
plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--')
plt.xlabel('Minibatch number')
plt.ylabel('Loss')
plt.title('Minibatch run vs. Training loss')

plt.subplot(212)
plt.plot(plotdata["batchsize"], plotdata["avgerror"], 'r--')
plt.xlabel('Minibatch number')
plt.ylabel('Label Prediction Error')
plt.title('Minibatch run vs. Label Prediction Error')
plt.show()


#>>>>>>>>>>>>>>>> Testing data >>>>>>>>>>>>>>
# Read the testing data
reader_test = create_reader(test_file, False, input_dim, num_output_classes)

test_input_map = {
    label : reader_test.streams.labels,
    input : reader_test.streams.features,
}

# Test data for trained model
test_minibatch_size = 512
num_samples = 10000
num_minibatches_to_test = num_samples // test_minibatch_size
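# 10000 // 512 = 19 minibatches, i.e. 19 * 512 = 9728 of the 10000 test
# samples are evaluated; the remainder is dropped by the integer division.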
test_result = 0.0

for i in range(num_minibatches_to_test):

    # We are loading test data in batches specified by test_minibatch_size
    # Each data point in the minibatch is a MNIST digit image of 784 dimensions
    # with one pixel per dimension that we will encode / decode with the
    # trained model.
    data = reader_test.next_minibatch(test_minibatch_size,
                                      input_map = test_input_map)

    eval_error = trainer.test_minibatch(data)
    test_result = test_result + eval_error

# Average of evaluation errors of all test minibatches
print("Average test error: {0:.2f}%".format(test_result*100 / num_minibatches_to_test))

# Check a single sample
out = C.softmax(z)
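# z emits raw scores: the training criterion (cross_entropy_with_softmax)
# applies softmax internally, so to get per-sample class probabilities the
# model output is wrapped in an explicit softmax here.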

# Read the data for evaluation
reader_eval = create_reader(test_file, False, input_dim, num_output_classes)

eval_minibatch_size = 25
# The map must include the label stream because data[label] is read below
eval_input_map = {
    label : reader_eval.streams.labels,
    input : reader_eval.streams.features
}

data = reader_eval.next_minibatch(eval_minibatch_size, input_map = eval_input_map)

img_label = data[label].asarray()
img_data = data[input].asarray()
predicted_label_prob = [out.eval(img_data[i]) for i in range(len(img_data))]


# Find the index with the maximum value for both predicted as well as the ground truth
pred = [np.argmax(predicted_label_prob[i]) for i in range(len(predicted_label_prob))]
gtlabel = [np.argmax(img_label[i]) for i in range(len(img_label))]

print("Label    :", gtlabel[:25])
print("Predicted:", pred)

# Plot one sample image
sample_number = 5
plt.imshow(img_data[sample_number].reshape(28,28), cmap="gray_r")
plt.axis('off')
plt.show()

img_gt, img_pred = gtlabel[sample_number], pred[sample_number]
print("Image Label: ", img_gt)
print("Predicted  : ", img_pred)