Advertisement
Guest User

Untitled

a guest
Dec 13th, 2019
86
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 5.35 KB | None | 0 0
  1. from __future__ import print_function # Use a function definition from future version (say 3.x from 2.7 interpreter)
  2. import matplotlib.image as mpimg
  3. import matplotlib.pyplot as plt
  4. import numpy as np
  5. import sys
  6. import os
  7. import time
  8.  
  9. import cntk as C
  10. import cntk.tests.test_utils
  11. cntk.tests.test_utils.set_device_from_pytest_env() # (only needed for our build system)
  12. C.cntk_py.set_fixed_random_seed(1) # fix a random seed for CNTK components
  13.  
  14. # Read a CTF formatted text (as mentioned above) using the CTF deserializer from a file
  15. def create_reader(path, is_training, input_dim, num_label_classes):
  16.     ctf = C.io.CTFDeserializer(path,C.io.StreamDefs(
  17.         labels = C.io.StreamDef(field='labels', shape=num_label_classes, is_sparse=False),
  18.         features = C.io.StreamDef(field='features', shape=input_dim, is_sparse=False)))
  19.     return C.io.MinibatchSource(ctf, randomize=is_training)
  20.  
  21. def create_model(features):
  22.     with C.layers.default_options(init=C.glorot_uniform(),activation=C.relu):
  23.         h = features
  24.         h = C.layers.Convolution2D(filter_shape=(3,3),
  25.                                    num_filters=32,
  26.                                    strides=(2,2),
  27.                                    pad=True, name='first_conv')(h)
  28.  
  29.         h = C.layers.AveragePooling(filter_shape=(2,2),strides=(2,2),
  30.                                 name="first_max")(h)
  31.  
  32.         h = C.layers.Convolution2D(filter_shape=(3,3),
  33.                                    num_filters=16,
  34.                                    strides=(2,2),
  35.                                    pad=True, name='second_conv')(h)
  36.        
  37.         h = C.layers.AveragePooling(filter_shape=(3,3),strides=(3,3),
  38.                                 name="second_max")(h)
  39.  
  40.         r = C.layers.Dense(num_output_classes, activation=None, name='classify')(h)
  41.         return r
  42.  
  43. def create_criterion_function(model, labels):
  44.     loss = C.cross_entropy_with_softmax(model,labels)
  45.     errs = C.classification_error(model,labels)
  46.     return loss, errs
  47.  
  48. # Define a utility function to compute the moving average sum.
  49. # A more efficient implementation is possible with np.cumsum() function
  50. def moving_average(a, w=5):
  51.     if len(a) < w:
  52.         return a[:]    # Need to send a copy of the array
  53.     return [val if idx < w else sum(a[(idx-w):idx])/w for idx, val in enumerate(a)]
  54.  
  55. # Defines a utility that prints the training progress
  56. def print_training_progress(trainer, mb, frequency, verbose=1):
  57.     training_loss = "NA"
  58.     eval_error = "NA"
  59.  
  60.     if mb%frequency == 0:
  61.         training_loss = trainer.previous_minibatch_loss_average
  62.         eval_error = trainer.previous_minibatch_evaluation_average
  63.         if verbose:
  64.             print ("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%".format(mb, training_loss, eval_error*100))
  65.  
  66.     return mb, training_loss, eval_error
  67.  
  68. def train_test(train_reader, test_reader, model_func, num_sweeps_to_train_with=10):
  69.     model = model_func(x/255)
  70.     loss, label_error = create_criterion_function(model,y)
  71.     learning_rate = 0.1
  72.     lr_schedule = C.learning_parameter_schedule(learning_rate)
  73.     learner = C.sgd(z.parameters, lr_schedule)
  74.     trainer = C.Trainer(z, (loss, label_error),[learner])
  75.  
  76.     minibatch_size = 64
  77.     num_samples_per_sweep = 60000
  78.     num_sweeps_to_train_with = 10
  79.     num_minibatches_to_train = (num_samples_per_sweep*num_sweeps_to_train_with)/minibatch_size
  80.  
  81.     input_map = {y : train_reader.streams.labels,
  82.                  x : train_reader.streams.features}
  83.  
  84.     training_progress_output_freq = 250
  85.     start = time.time()
  86.  
  87.     for i in range(0, int(num_minibatches_to_train)):
  88.         data = train_reader.next_minibatch(minibatch_size, input_map = input_map)
  89.         trainer.train_minibatch(data)
  90.         print_training_progress(trainer, i, training_progress_output_freq, verbose=1)
  91.  
  92.     print("Training time {:.1f} sec.".format(time.time()-start))
  93.  
  94.     #Testing part
  95.     test_input_map = {y : test_reader.streams.labels,
  96.                       x : test_reader.streams.features}
  97.     test_minibatch_size = 512
  98.     num_samples = 10000
  99.     num_minibatches_to_test = num_samples // test_minibatch_size
  100.  
  101.     test_result = 0.0
  102.     for i in range(num_minibatches_to_test):
  103.         data = test_reader.next_minibatch(test_minibatch_size, input_map=test_input_map)
  104.         eval_error = trainer.test_minibatch(data)
  105.         test_result = test_result + eval_error
  106.    
  107.     print("Average test error: {0:.2f}%".format(test_result*100/num_minibatches_to_test))
  108.  
  109. def do_train_test():
  110.     global z
  111.     z = create_model(x)
  112.     reader_train = create_reader(train_file, True, input_dim,num_output_classes)
  113.     reader_test = create_reader(test_file, True, input_dim,num_output_classes)
  114.     train_test(reader_train, reader_test, z)
  115.  
  116.  
  117.  
  118.  
  119.  
# --- Script configuration and entry point -------------------------------

# Input geometry: 1 channel x 28 x 28 pixels (MNIST images).
input_dim_model = (1, 28, 28)
# Flattened pixel count; matches the 'features' stream width in the CTF files.
input_dim = 28*28
# Digits 0-9.
num_output_classes = 10

data_found = False
# NOTE(review): hard-coded local path — adjust for your machine.
data_dir = 'E:/Darbas/kita/vtk/robotistai/2019/MNIST/'
train_file = os.path.join(data_dir,"Train-28x28_cntk_text.txt")
test_file = os.path.join(data_dir,"Test-28x28_cntk_text.txt")
if os.path.isfile(train_file) and os.path.isfile(test_file):
    data_found = True
if data_found == False:
    # Message is Lithuanian for "No such data" (runtime string, left unchanged).
    raise ValueError("Tokiu duomenu nera")

# Network input variables: image tensor and one-hot label vector.
x = C.input_variable(input_dim_model)
y = C.input_variable(num_output_classes)

#z = create_model(x)

# number of parameters  (translated from Lithuanian "parametru skaicius")
#C.logging.log_number_of_parameters(z)

do_train_test()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement