Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
from __future__ import print_function  # Python 2/3 compatible print

import os
import sys
import time

import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np

import cntk as C
import cntk.tests.test_utils

# Device selection hook (only needed for the CNTK build/test system).
cntk.tests.test_utils.set_device_from_pytest_env()
# Fix the random seed of CNTK components so runs are reproducible.
C.cntk_py.set_fixed_random_seed(1)
# Reader for CTF-formatted text files (one 'labels' + 'features' record per line).
def create_reader(path, is_training, input_dim, num_label_classes):
    """Return a CNTK MinibatchSource over the CTF file at *path*.

    path -- CTF text file containing 'labels' and 'features' fields.
    is_training -- when True the source randomizes the sample order.
    input_dim -- dimensionality of the dense feature vectors.
    num_label_classes -- dimensionality of the dense one-hot labels.
    """
    stream_defs = C.io.StreamDefs(
        labels=C.io.StreamDef(field='labels', shape=num_label_classes, is_sparse=False),
        features=C.io.StreamDef(field='features', shape=input_dim, is_sparse=False),
    )
    deserializer = C.io.CTFDeserializer(path, stream_defs)
    return C.io.MinibatchSource(deserializer, randomize=is_training)
def create_model(features):
    """Two convolution + average-pooling stages feeding a dense classifier.

    Reads the module-level ``num_output_classes`` for the output width and
    returns the un-normalized (pre-softmax) CNTK output function.
    """
    # Glorot init and ReLU activation are the defaults for every layer below.
    with C.layers.default_options(init=C.glorot_uniform(), activation=C.relu):
        net = C.layers.Convolution2D(filter_shape=(3, 3), num_filters=32,
                                     strides=(2, 2), pad=True,
                                     name='first_conv')(features)
        net = C.layers.AveragePooling(filter_shape=(2, 2), strides=(2, 2),
                                      name="first_max")(net)
        net = C.layers.Convolution2D(filter_shape=(3, 3), num_filters=16,
                                     strides=(2, 2), pad=True,
                                     name='second_conv')(net)
        net = C.layers.AveragePooling(filter_shape=(3, 3), strides=(3, 3),
                                      name="second_max")(net)
        # No activation here: the softmax lives inside the loss function.
        return C.layers.Dense(num_output_classes, activation=None,
                              name='classify')(net)
def create_criterion_function(model, labels):
    """Return the (loss, error) pair for *model* against one-hot *labels*."""
    return (C.cross_entropy_with_softmax(model, labels),
            C.classification_error(model, labels))
# Utility to compute a trailing moving average of a sequence.
# A more efficient implementation is possible with np.cumsum().
def moving_average(a, w=5):
    """Return a list where every element from index w onward is the mean of
    the previous *w* values of *a*; earlier elements pass through unchanged.

    If *a* has fewer than *w* elements a shallow copy of it is returned.
    """
    if len(a) < w:
        return a[:]  # copy, so the caller's list is never aliased
    # float(w) forces true division even under a Python 2 interpreter
    # (the file imports only print_function from __future__, not division),
    # so integer inputs no longer floor-divide.
    return [val if idx < w else sum(a[idx - w:idx]) / float(w)
            for idx, val in enumerate(a)]
# Utility that reports the training progress every `frequency` minibatches.
def print_training_progress(trainer, mb, frequency, verbose=1):
    """Return (mb, loss, error) for minibatch *mb*.

    On minibatches where mb is not a multiple of *frequency*, no trainer
    state is read and the string "NA" is returned for loss and error.
    When due and *verbose* is truthy, a one-line summary is printed.
    """
    if mb % frequency != 0:
        return mb, "NA", "NA"
    loss = trainer.previous_minibatch_loss_average
    error = trainer.previous_minibatch_evaluation_average
    if verbose:
        print("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%".format(mb, loss, error * 100))
    return mb, loss, error
def train_test(train_reader, test_reader, model_func, num_sweeps_to_train_with=10):
    """Train on *train_reader* for the requested number of sweeps, then
    report the average classification error over *test_reader*.

    Relies on module-level input variables ``x``/``y`` and the model ``z``.
    """
    # Scale raw pixel values from [0, 255] into [0, 1] before the network.
    model = model_func(x / 255)
    loss, label_error = create_criterion_function(model, y)

    learning_rate = 0.1
    lr_schedule = C.learning_parameter_schedule(learning_rate)
    learner = C.sgd(z.parameters, lr_schedule)
    trainer = C.Trainer(z, (loss, label_error), [learner])

    minibatch_size = 64
    num_samples_per_sweep = 60000  # MNIST training-set size
    # BUG FIX: the parameter was previously clobbered here by a hard-coded
    # `num_sweeps_to_train_with = 10`, so callers could never change it.
    num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size

    input_map = {y: train_reader.streams.labels,
                 x: train_reader.streams.features}
    training_progress_output_freq = 250

    start = time.time()
    for i in range(0, int(num_minibatches_to_train)):
        data = train_reader.next_minibatch(minibatch_size, input_map=input_map)
        trainer.train_minibatch(data)
        print_training_progress(trainer, i, training_progress_output_freq, verbose=1)
    print("Training time {:.1f} sec.".format(time.time() - start))

    # Testing part
    test_input_map = {y: test_reader.streams.labels,
                      x: test_reader.streams.features}
    test_minibatch_size = 512
    num_samples = 10000  # MNIST test-set size
    num_minibatches_to_test = num_samples // test_minibatch_size
    test_result = 0.0
    for i in range(num_minibatches_to_test):
        data = test_reader.next_minibatch(test_minibatch_size, input_map=test_input_map)
        eval_error = trainer.test_minibatch(data)
        test_result = test_result + eval_error
    print("Average test error: {0:.2f}%".format(test_result * 100 / num_minibatches_to_test))
def do_train_test():
    """Build the model, create the readers, and run one full train/test pass."""
    global z
    z = create_model(x)
    reader_train = create_reader(train_file, True, input_dim, num_output_classes)
    # BUG FIX: the test reader must not shuffle — it was created with
    # is_training=True, which randomizes the evaluation data.
    reader_test = create_reader(test_file, False, input_dim, num_output_classes)
    train_test(reader_train, reader_test, z)
# ---- Script configuration and entry point ----
input_dim_model = (1, 28, 28)   # image shape: 1 channel, 28x28 pixels
input_dim = 28 * 28             # flattened feature vector size
num_output_classes = 10         # ten digit classes

data_dir = 'E:/Darbas/kita/vtk/robotistai/2019/MNIST/'
train_file = os.path.join(data_dir, "Train-28x28_cntk_text.txt")
test_file = os.path.join(data_dir, "Test-28x28_cntk_text.txt")

# Both CTF files must exist before training can start.
data_found = os.path.isfile(train_file) and os.path.isfile(test_file)
if not data_found:
    raise ValueError("Tokiu duomenu nera")

x = C.input_variable(input_dim_model)
y = C.input_variable(num_output_classes)
#z = create_model(x)
# parameter count:
#C.logging.log_number_of_parameters(z)
do_train_test()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement