import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import cntk as C
import cntk.tests.test_utils

# Pick the compute device (CPU/GPU) from the test environment and fix the
# random seeds so that runs are reproducible.
cntk.tests.test_utils.set_device_from_pytest_env()
C.cntk_py.set_fixed_random_seed(1)

np.random.seed(0)

# Model configuration: 2-dimensional inputs, 2 output classes,
# and 2 hidden layers of 50 units each.
input_dim = 2
num_output_classes = 2
num_hidden_layers = 2
hidden_layer_dim = 50


def generate_random_data_sample(sample_size, feature_dim, num_classes):
    # Draw random integer class labels, then generate features from a
    # class-dependent Gaussian so that the classes are separable.
    Y = np.random.randint(size=(sample_size, 1), low=0, high=num_classes)
    X = (np.random.randn(sample_size, feature_dim) + 3) * (Y + 1)
    X = X.astype(np.float32)

    # Convert the integer labels to one-hot encoded vectors.
    class_ind = [Y == class_number for class_number in range(num_classes)]
    Y = np.asarray(np.hstack(class_ind), dtype=np.float32)
    return X, Y


def linear_layer(input_var, output_dim):
    # Affine transformation: input * W + b.
    input_dim = input_var.shape[0]
    weight = C.parameter(shape=(input_dim, output_dim))
    bias = C.parameter(shape=(output_dim))

    return C.times(input_var, weight) + bias


def dense_layer(input_var, output_dim, nonlinearity):
    # Affine transformation followed by a nonlinearity.
    l = linear_layer(input_var, output_dim)
    return nonlinearity(l)


def fully_connected_classifier_net(
        input_var, num_output_classes, hidden_layer_dim,
        num_hidden_layers, nonlinearity):
    # Stack of dense layers built from the primitives above, topped by a
    # linear output layer (the softmax is applied inside the loss function).
    h = dense_layer(input_var, hidden_layer_dim, nonlinearity)
    for i in range(1, num_hidden_layers):
        h = dense_layer(h, hidden_layer_dim, nonlinearity)
    return linear_layer(h, num_output_classes)


def create_model(features):
    # Equivalent model built with the higher-level CNTK Layers library.
    with C.layers.default_options(init=C.layers.glorot_uniform(),
                                  activation=C.sigmoid):
        h = features
        for _ in range(num_hidden_layers):
            h = C.layers.Dense(hidden_layer_dim)(h)
        last_layer = C.layers.Dense(num_output_classes, activation=None)
        return last_layer(h)


def moving_average(a, w=10):
    # Simple w-point moving average used to smooth the plotted curves.
    if len(a) < w:
        return a[:]
    return [val if idx < w
            else sum(a[(idx - w):idx]) / w for idx, val in enumerate(a)]


def print_training_progress(trainer, mb, frequency, verbose=1):
    # Report loss and error for every `frequency`-th minibatch.
    training_loss = "NA"
    eval_error = "NA"

    if mb % frequency == 0:
        training_loss = trainer.previous_minibatch_loss_average
        eval_error = trainer.previous_minibatch_evaluation_average
        if verbose:
            print("Minibatch: {}, Train Loss: {}, Train Error: {}".format(
                mb, training_loss, eval_error))

    return mb, training_loss, eval_error


# Input and label variables of the network.
input = C.input_variable(input_dim, name="input")
label = C.input_variable(num_output_classes, name="label")

# Build the network once with the hand-rolled layer functions...
z = fully_connected_classifier_net(
    input, num_output_classes,
    hidden_layer_dim, num_hidden_layers, C.sigmoid)

# ...and then replace it with the equivalent Layers-library model,
# which is the one that actually gets trained below.
z = create_model(input)

loss = C.cross_entropy_with_softmax(z, label)
eval_error = C.classification_error(z, label)

# Plain SGD learner and a trainer tying model, criteria and learner together.
learning_rate = 0.5
lr_schedule = C.learning_parameter_schedule(learning_rate)
learner = C.sgd(z.parameters, lr_schedule)
trainer = C.Trainer(z, (loss, eval_error), [learner])


# Training configuration: 20000 samples processed in minibatches of 25.
minibatch_size = 25
num_samples = 20000
num_minibatches_to_train = num_samples / minibatch_size

training_progress_output_freq = 20

plotdata = {"batchsize": [], "loss": [], "error": []}

for i in range(0, int(num_minibatches_to_train)):
    features, labels = generate_random_data_sample(
        minibatch_size, input_dim, num_output_classes)

    # Bind the freshly generated data to the input and label variables and train.
    trainer.train_minibatch({input: features, label: labels})
    batchsize, loss, error = print_training_progress(
        trainer, i, training_progress_output_freq, verbose=0)

    # Record progress only for minibatches where it was actually reported.
    if not (loss == "NA" or error == "NA"):
        plotdata["batchsize"].append(batchsize)
        plotdata["loss"].append(loss)
        plotdata["error"].append(error)

z.save("mydnn.dnn")

plotdata["avgloss"] = moving_average(plotdata["loss"])
plotdata["avgerror"] = moving_average(plotdata["error"])

plt.figure(1)
plt.subplot(211)
plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--')
plt.xlabel('Minibatch number')
plt.ylabel('Loss')
plt.title('Minibatch run vs. Training loss')

plt.subplot(212)
plt.plot(plotdata["batchsize"], plotdata["avgerror"], 'r--')
plt.xlabel('Minibatch number')
plt.ylabel('Label Prediction Error')
plt.title('Minibatch run vs. Label Prediction Error')
plt.show()

# Evaluate the average classification error on a freshly generated test minibatch.
test_minibatch_size = 25
features, labels = generate_random_data_sample(
    test_minibatch_size, input_dim, num_output_classes)

trainer.test_minibatch({input: features, label: labels})
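

# Not part of the original paste: a minimal sketch of how the model saved above
# could be reloaded and used for prediction. It assumes CNTK's load_model API
# and reuses generate_random_data_sample and the dimensions defined earlier;
# the sample size of 10 is arbitrary.
loaded = C.load_model("mydnn.dnn")
out = C.softmax(loaded)  # turn the raw network outputs into class probabilities

new_features, new_labels = generate_random_data_sample(
    10, input_dim, num_output_classes)
predicted_classes = np.argmax(out.eval({out.arguments[0]: new_features}), axis=1)
true_classes = np.argmax(new_labels, axis=1)
print("Predicted classes:", predicted_classes)
print("Ground-truth classes:", true_classes)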