# Image classifier using the linear classification method with softmax on the CIFAR-10 dataset

import numpy as np
import pickle
import matplotlib.pyplot as plt

# load one pickled CIFAR-10 batch file from disk
def get_batches(filename):
    with open(filename, 'rb') as file:
        data = pickle.load(file, encoding='bytes')

    return data
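
# Each batch file unpickles to a dict whose b'data' entry is a (10000, 3072)
# uint8 array of flattened 32x32x3 images (1024 red, then 1024 green, then
# 1024 blue values per row) and whose b'labels' entry is a list of 10000
# integers in 0..9.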

# activation function
def softmax(values):
    # subtract the max to prevent overflow in exp
    values = values - np.max(values)
    return np.exp(values) / np.sum(np.exp(values))

# cross-entropy loss (cost) function
def cross_entropy_loss(target_output, estimated_output):
    # clip to avoid log(0)
    estimated_output = np.clip(estimated_output, 0.00001, 0.99999)
    return -np.mean(target_output * np.log(estimated_output) +
                    (1 - target_output) * np.log(1 - estimated_output))
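
# Note: the loss above is binary cross-entropy averaged over the 10 output
# units. With a softmax output, the usual pairing is categorical
# cross-entropy; a minimal sketch for comparison (not called by train()):
def categorical_cross_entropy(target_output, estimated_output):
    # only the log-probability of the true class contributes to the sum
    estimated_output = np.clip(estimated_output, 0.00001, 0.99999)
    return -np.sum(target_output * np.log(estimated_output))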

# train on the five CIFAR-10 training batches
def train():
    # initialize weight and bias randomly
    W = np.random.rand(10, 3072)
    b = np.random.rand(10)

    # file name format
    file_prefix = 'cifar-10-batches-py/data_batch_{0}'

    for i in range(1, 6):
        # load one training batch
        batches = get_batches(file_prefix.format(i))

        for k in range(10000):
            # get a single training image
            x = batches[b'data'][k]
            # forward pass: linear layer (weight and bias), then softmax
            output = softmax(np.matmul(W, x) + b)

            # build the one-hot target vector
            answer = np.zeros(10)
            answer[batches[b'labels'][k]] = 1

            # loss between target and output (computed for monitoring only)
            loss = cross_entropy_loss(answer, output)

            # update weight & bias

            # prevent "divide by zero" in the derivative below
            output_dc = np.clip(output, 0.00001, 0.99999)

            # dc/do : derivative of the cross-entropy loss
            # (the /10 matches the mean over the 10 units in the loss)
            dc = (-answer / output_dc + (1 - answer) / (1 - output_dc)) / 10
            # do/dz : derivative of the softmax (diagonal Jacobian terms only)
            do = output_dc * (1 - output_dc)
            # dz/dw : derivative of z with respect to each weight
            # z1 = w11 * x1 + w12 * x2 + ...
            # dz1/dw11 = x1
            dz = np.copy(x)

            # broadcast dc, do, dz to the (10, 3072) shape of W
            do_w = np.repeat(do, 3072).reshape(10, 3072)
            dc_w = np.repeat(dc, 3072).reshape(10, 3072)
            dz_w = np.tile(dz, 10).reshape(10, 3072)

            # update weight (partial derivative of the cost with respect to W)
            W -= dc_w * do_w * dz_w
            # update bias (partial derivative of the cost with respect to b)
            b -= dc * do * 1

    # return trained weight and bias
    return W, b
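
# For reference: with categorical cross-entropy on a softmax output, the
# gradient of the cost with respect to z simplifies to (output - answer),
# so one update can be written without the elementwise derivatives used in
# train(). A minimal sketch (hypothetical helper, not called anywhere; the
# learning rate lr is an assumed hyperparameter, not part of the original):
def sgd_step(W, b, x, answer, lr=0.0001):
    # forward pass: same linear layer and softmax as train()
    output = softmax(np.matmul(W, x) + b)
    # dC/dz for softmax combined with categorical cross-entropy
    dz = output - answer
    # dC/dW is the outer product of dC/dz and the input pixels
    W -= lr * np.outer(dz, x)
    # dC/db equals dC/dz, since dz/db = 1
    b -= lr * dz
    return W, b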

# predict the class of a single image
def predict(W, x, b):
    # forward pass through the trained network
    output = softmax(np.matmul(W, x) + b)
    # the index of the largest output is the predicted class
    return np.argmax(output)
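
# Since softmax is monotonic, the argmax of the logits equals the argmax of
# the softmax output, so the whole test set can also be scored in a single
# matrix multiplication. A minimal vectorized sketch (hypothetical helper,
# not called below):
def predict_batch(W, data, b):
    # data: (N, 3072) array of images; logits: (N, 10)
    logits = np.matmul(data, W.T) + b
    return np.argmax(logits, axis=1)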

if __name__ == "__main__":
    # get trained weight and bias
    W, b = train()

    # per-image prediction results (True/False)
    result = []

    # get test data
    batches = get_batches('cifar-10-batches-py/test_batch')
    for i in range(10000):
        # get a single test image and its label
        x = batches[b'data'][i]
        y = batches[b'labels'][i]

        # get the predicted class from the network
        y_ = predict(W, x, b)

        # record whether the prediction was correct
        result.append(y_ == y)

    # accuracy over the test set
    print("Accuracy : %f" % np.mean(np.array(result, dtype='float32')))

    # visualize the learned weights for each class
    meta = get_batches('cifar-10-batches-py/batches.meta')

    for i in range(10):
        a = plt.subplot(2, 5, i + 1)
        a.set_title(meta[b'label_names'][i].decode('utf-8'))
        # split class i's weights into the three colour channels and pack
        # them into a single value per pixel for display
        I = np.copy(W[i]).reshape([-1, 1024]) + b[i]
        plt.imshow((I[0] * (2**16) + I[1] * (2**8) + I[2]).reshape(32, 32))

    plt.show()
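
    # An alternative view (a sketch, not part of the original output): each
    # row of W holds 3 x 1024 values, i.e. the three colour channels of a
    # 32x32 image, so a class's weights can also be rendered as RGB after
    # rescaling to [0, 1]:
    #   img = W[i].reshape(3, 32, 32).transpose(1, 2, 0)
    #   img = (img - img.min()) / (img.max() - img.min())
    #   plt.imshow(img)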

# result
# Accuracy : 0.237800