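# Train a fully connected feedforward classifier on synthetic two-class data
# with CNTK, plot the smoothed training loss/error curves, and evaluate the
# trained network on a held-out minibatch.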

import matplotlib.pyplot as plt
import numpy as np

import cntk as C
import cntk.tests.test_utils

# Pick CPU/GPU from the test environment and fix both random seeds so that
# runs are reproducible.
cntk.tests.test_utils.set_device_from_pytest_env()
C.cntk_py.set_fixed_random_seed(1)
np.random.seed(0)

# Network topology for the synthetic two-class problem.
input_dim = 2
num_output_classes = 2
num_hidden_layers = 2
hidden_layer_dim = 50

def generate_random_data_sample(sample_size, feature_dim, num_classes):
    # Draw integer class labels, then features whose mean scales with the
    # label so the two classes are noisily separable.
    Y = np.random.randint(size=(sample_size, 1), low=0, high=num_classes)
    X = (np.random.randn(sample_size, feature_dim) + 3) * (Y + 1)
    X = X.astype(np.float32)
    # Convert the integer labels to one-hot rows.
    class_ind = [Y == class_number for class_number in range(num_classes)]
    Y = np.asarray(np.hstack(class_ind), dtype=np.float32)
    return X, Y
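
# Illustrative call (kept commented out so the fixed NumPy seed stream used
# for training below is not disturbed):
#   X, Y = generate_random_data_sample(4, input_dim, num_output_classes)
#   X.shape == (4, 2)   float32 features
#   Y.shape == (4, 2)   one-hot float32 labels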

def linear_layer(input_var, output_dim):
    # Affine transform: input_var @ weight + bias.
    input_dim = input_var.shape[0]
    weight = C.parameter(shape=(input_dim, output_dim))
    bias = C.parameter(shape=(output_dim,))
    return C.times(input_var, weight) + bias

def dense_layer(input_var, output_dim, nonlinearity):
    # Affine layer followed by a pointwise nonlinearity.
    l = linear_layer(input_var, output_dim)
    return nonlinearity(l)

def fully_connected_classifier_net(input_var, num_output_classes,
                                   hidden_layer_dim, num_hidden_layers,
                                   nonlinearity):
    # Stack num_hidden_layers dense layers, then a linear output layer;
    # the softmax is applied inside the loss below.
    h = dense_layer(input_var, hidden_layer_dim, nonlinearity)
    for _ in range(1, num_hidden_layers):
        h = dense_layer(h, hidden_layer_dim, nonlinearity)
    return linear_layer(h, num_output_classes)

def create_model(features):
    # The same network expressed with the CNTK layers library instead of the
    # hand-rolled layers above.
    with C.layers.default_options(init=C.layers.glorot_uniform(),
                                  activation=C.sigmoid):
        h = features
        for _ in range(num_hidden_layers):
            h = C.layers.Dense(hidden_layer_dim)(h)
        last_layer = C.layers.Dense(num_output_classes, activation=None)
        return last_layer(h)

def moving_average(a, w=10):
    # Trailing moving average with window w; entries before the window has
    # filled pass through unchanged.
    if len(a) < w:
        return a[:]
    return [val if idx < w
            else sum(a[(idx - w):idx]) / w
            for idx, val in enumerate(a)]
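
# Worked example of the smoothing above:
#   moving_average([1, 2, 3, 4, 5, 6], w=3)
#   -> [1, 2, 3, 2.0, 3.0, 4.0]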

def print_training_progress(trainer, mb, frequency, verbose=1):
    # Report the most recent minibatch loss/error every `frequency` minibatches.
    training_loss = "NA"
    eval_error = "NA"
    if mb % frequency == 0:
        training_loss = trainer.previous_minibatch_loss_average
        eval_error = trainer.previous_minibatch_evaluation_average
        if verbose:
            print("Minibatch: {}, Train Loss: {}, Train Error: {}".format(
                mb, training_loss, eval_error))
    return mb, training_loss, eval_error

input = C.input_variable(input_dim, name="input")
label = C.input_variable(num_output_classes, name="label")

# Two ways to build the same network; the second assignment overwrites the
# first, so only the layers-library model is actually trained.
z = fully_connected_classifier_net(
    input, num_output_classes,
    hidden_layer_dim, num_hidden_layers, C.sigmoid)
z = create_model(input)

# Softmax cross-entropy loss and classification error, optimized with plain SGD.
loss = C.cross_entropy_with_softmax(z, label)
eval_error = C.classification_error(z, label)

learning_rate = 0.5
lr_schedule = C.learning_parameter_schedule(learning_rate)
learner = C.sgd(z.parameters, lr_schedule)
trainer = C.Trainer(z, (loss, eval_error), [learner])

minibatch_size = 25
num_samples = 20000
num_minibatches_to_train = num_samples // minibatch_size
training_progress_output_freq = 20
plotdata = {"batchsize": [], "loss": [], "error": []}

for i in range(num_minibatches_to_train):
    # Fresh synthetic minibatch each iteration, fed by input variable.
    features, labels = generate_random_data_sample(
        minibatch_size, input_dim, num_output_classes)
    trainer.train_minibatch({input: features, label: labels})
    batchsize, loss, error = print_training_progress(
        trainer, i, training_progress_output_freq, verbose=0)
    if not (loss == "NA" or error == "NA"):
        plotdata["batchsize"].append(batchsize)
        plotdata["loss"].append(loss)
        plotdata["error"].append(error)

# Save the trained network, then smooth the recorded curves for plotting.
z.save("mydnn.dnn")
plotdata["avgloss"] = moving_average(plotdata["loss"])
plotdata["avgerror"] = moving_average(plotdata["error"])

plt.figure(1)
plt.subplot(211)
plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--')
plt.xlabel('Minibatch number')
plt.ylabel('Loss')
plt.title('Minibatch run vs. Training loss')

plt.subplot(212)
plt.plot(plotdata["batchsize"], plotdata["avgerror"], 'r--')
plt.xlabel('Minibatch number')
plt.ylabel('Label Prediction Error')
plt.title('Minibatch run vs. Label Prediction Error')
plt.show()

# Evaluate on a fresh random minibatch; test_minibatch returns the average
# classification error.
test_minibatch_size = 25
features, labels = generate_random_data_sample(
    test_minibatch_size, input_dim, num_output_classes)
trainer.test_minibatch({input: features, label: labels})
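
# A minimal sketch of reloading the saved model for inference, assuming the
# "mydnn.dnn" file written above; the variable names here are illustrative.
model = C.Function.load("mydnn.dnn")
predictions = model.eval({model.arguments[0]: features})
print("Predicted classes:", np.argmax(predictions, axis=-1))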