import numpy as np
import random
import math

# INITIALIZATION
m = 1000  # number of training samples (also reused as the epoch count below)
n = 100   # number of test samples

alpha = 0.0001  # learning rate

W = np.zeros(2)  # weights, one per feature
b = 0.0          # bias

# Data generator: points on the integer grid [-2, 2]^2, labeled 1 when
# x1 + x2 > 0 and 0 otherwise (boundary points with x1 + x2 == 0 get label 0)
def generate_random_data(size):
    X = []
    Y = []
    for i in range(size):
        x1 = random.randint(-2, 2)
        x2 = random.randint(-2, 2)
        if x1 + x2 > 0:
            Y.append(1)
        else:
            Y.append(0)
        X.append(np.array([x1, x2]))
    return X, Y

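# The labels are deterministic given the point: of the 25 grid points, 10 have
# a positive coordinate sum, so roughly 40% of generated labels are 1. A quick
# hand check (illustrative, not part of the original script): [2, 1] sums to 3
# and gets label 1; [-1, 1] sums to 0 and gets label 0.
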
# Sigmoid function
def sigmoid(val):
    return 1/(1+math.exp(-val))

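# math.exp(-val) overflows once -val exceeds roughly 709; the weights in this
# script stay tiny, so the plain form above is fine. A numerically stable
# variant (a sketch, not used by the script) branches on the sign so the exp
# argument is never positive:
def sigmoid_stable(val):
    if val >= 0:
        return 1.0 / (1.0 + math.exp(-val))
    e = math.exp(val)  # val < 0, so e is in (0, 1) and cannot overflow
    return e / (1.0 + e)
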
# Per-sample cross-entropy loss for logistic regression:
# L(a, y) = -(y*log(a) + (1-y)*log(1-a))
def L(a, y):
    return -(y*math.log(a) + (1-y)*math.log(1-a))

# Training function: one full-batch gradient-descent step.
# Despite the name, it loops over samples; np.dot is the only vectorized part.
def train_vectorized(X, Y):
    global W, b
    batch_dW = np.zeros(2)
    batch_db = 0.0
    for Xi, Yi in zip(X, Y):
        z = np.dot(W, Xi) + b
        a = sigmoid(z)
        # dL/dz simplifies to a - y: with da = -y/a + (1-y)/(1-a) and
        # dz = da*a*(1-a), the a and (1-a) factors cancel. Using the simplified
        # form also avoids division by zero when a saturates at 0 or 1.
        dz = a - Yi
        dW = Xi*dz
        db = dz
        batch_dW = batch_dW + dW/len(X)
        batch_db += db/len(X)
    W = W - alpha*batch_dW
    b -= alpha*batch_db

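# For reference, a fully vectorized full-batch step (a sketch implementing the
# same update rule; the names below are illustrative, not from the original):
def train_fully_vectorized(X, Y):
    global W, b
    X_mat = np.stack(X)                 # shape (batch, 2)
    Y_vec = np.asarray(Y, dtype=float)  # shape (batch,)
    A = 1.0 / (1.0 + np.exp(-(X_mat @ W + b)))  # sigmoid over the whole batch
    dZ = A - Y_vec                      # same dL/dz simplification as above
    W = W - alpha * (X_mat.T @ dZ) / len(X)
    b = b - alpha * dZ.mean()
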
def forward(Xi):
    z = np.dot(W, Xi) + b
    a = sigmoid(z)
    # Clamp the prediction away from 0 and 1 so the log-loss stays finite.
    MIN_VAL = 1e-10
    a = max(a, MIN_VAL)
    a = min(a, 1 - MIN_VAL)
    return a

def loss_with_vectorization(X, Y):
    batch_loss = 0.0
    for i in range(len(X)):
        pred_y = forward(X[i])
        batch_loss += L(pred_y, Y[i])  # per-sample cross-entropy defined above
    batch_loss /= len(X)
    return batch_loss

def accuracy_with_vectorization(X, Y):
    num_correct = 0
    for i in range(len(X)):
        # Predict class 1 when the clamped probability rounds to 1
        if Y[i] == round(forward(X[i])):
            num_correct += 1
    return num_correct/len(X)

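# Matching vectorized metrics (a sketch; applies the same clamp as forward()):
def metrics_fully_vectorized(X, Y):
    X_mat = np.stack(X)
    Y_vec = np.asarray(Y, dtype=float)
    A = 1.0 / (1.0 + np.exp(-(X_mat @ W + b)))
    A = np.clip(A, 1e-10, 1 - 1e-10)
    loss = -(Y_vec*np.log(A) + (1 - Y_vec)*np.log(1 - A)).mean()
    acc = (np.round(A) == Y_vec).mean()
    return loss, acc
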
def print_vectorized_w_b():
    print('w1: {}, w2: {}, b: {}'.format(W[0], W[1], b))

if __name__ == '__main__':
    train_X, train_Y = generate_random_data(m)
    test_X, test_Y = generate_random_data(n)
    # Run m full-batch epochs (the sample count m doubles as the epoch count)
    for i in range(m):
        train_vectorized(train_X, train_Y)
        # print('Iteration: {}'.format(i))
    print_vectorized_w_b()
    print('train_vectorized loss: {}, accuracy: {}'.format(loss_with_vectorization(train_X, train_Y), accuracy_with_vectorization(train_X, train_Y)))
    print('test loss: {}, accuracy: {}'.format(loss_with_vectorization(test_X, test_Y), accuracy_with_vectorization(test_X, test_Y)))
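
# Sanity check on the learned parameters: since the true boundary is
# x1 + x2 = 0 with boundary points labeled 0, any weights w1 = w2 = c > 0 with
# bias b in (-c, 0) classify the integer grid perfectly. After training, w1 and
# w2 should therefore come out roughly equal and positive, with a small
# negative bias.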