
Untitled

a guest
Apr 23rd, 2017
import numpy as np, scipy as sp, sklearn as sl
from scipy import special as ss
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.datasets import make_classification
import theano
import theano.tensor as T

def lossf(w, X, y, l1, l2):
    """L1/L2-regularized logistic loss; labels y are expected in {-1, +1}."""
    w = w.reshape((w.shape[0], 1))
    y = y.reshape((y.shape[0], 1))

    # log1p(1 + expm1(z)) == log(1 + exp(z)) with z = -y * Xw, i.e. the logistic loss
    lossf1 = np.sum(ss.log1p(1 + ss.expm1(-y * np.dot(X, w))))
    lossf2 = l2 * np.dot(w.T, w)
    lossf3 = l1 * np.sum(np.abs(w))
    return float(lossf1 + lossf2 + lossf3)

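# Quick sanity check (my addition, not in the original paste): the log1p/expm1
# chain above is algebraically log(1 + exp(z)), which numpy also exposes as the
# more standard np.logaddexp(0, z). The random vector is assumed just for the demo.
_z = np.random.RandomState(0).randn(5, 1)            # stands in for -y * X.dot(w)
assert np.allclose(ss.log1p(1 + ss.expm1(_z)), np.logaddexp(0, _z))
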
def gradf(w, X, y, l1, l2):
    """Gradient of lossf with respect to w (returned as a flat vector)."""
    w = w.reshape((w.shape[0], 1))
    y = y.reshape((y.shape[0], 1))

    gradw1 = l2 * 2 * w                     # gradient of the L2 term
    gradw2 = l1 * np.sign(w)                # subgradient of the L1 term
    z = -y * np.dot(X, w)
    # d/dw log(1 + exp(z)) = -y * x * exp(z) / (1 + exp(z)),
    # written with expm1: exp(z) = 1 + expm1(z) and 1 + exp(z) = 2 + expm1(z)
    gradw3 = -y * (1 + ss.expm1(z)) / (2 + ss.expm1(z))
    gradw3 = np.sum(gradw3 * X, axis=0).reshape((-1, 1))
    gradw = gradw1 + gradw2 + gradw3
    return gradw.reshape((gradw.shape[0],))

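# Optional gradient check (my addition; the tiny random problem is assumed only
# for the check, with l1 = 0 so the objective stays differentiable):
from scipy.optimize import check_grad
_rng = np.random.RandomState(0)
_Xc, _yc, _w0 = _rng.randn(20, 5), np.sign(_rng.randn(20)), _rng.randn(5)
print("grad check error:", check_grad(lossf, gradf, _w0, _Xc, _yc, 0.0, 0.1))
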
class LR(ClassifierMixin, BaseEstimator):
    def __init__(self, lr=0.0001, l1=0.1, l2=0.1, num_iter=100, verbose=0):
        self.l1 = l1
        self.l2 = l2
        self.w = None
        self.lr = lr
        self.verbose = verbose
        self.num_iter = num_iter

    def fit(self, X, y):
        n, d = X.shape
        self.w = np.zeros(shape=(d,))
        for i in range(self.num_iter):
            g = gradf(self.w, X, y, self.l1, self.l2)
            self.w = self.w - self.lr * g    # plain gradient step scaled by lr
            if self.verbose:
                print("Loss:", lossf(self.w, X, y, self.l1, self.l2))
        return self

    def predict_proba(self, X):
        # 1 / (2 + expm1(-Xw)) == 1 / (1 + exp(-Xw)), the logistic sigmoid
        probs = 1 / (2 + ss.expm1(np.dot(-X, self.w)))
        return probs

    def predict(self, X):
        probs = self.predict_proba(X)
        preds = np.sign(2 * probs - 1)       # +1 where P(y=+1) > 0.5, else -1
        return preds.reshape((preds.shape[0],))

X, y = make_classification(n_features=100, n_samples=100)
y = 2 * (y - 0.5)                            # map labels {0, 1} -> {-1, +1}
clf = LR(lr=0.000001, l1=0.1, l2=0.1, num_iter=10, verbose=0)
clf = clf.fit(X, y)
yp = clf.predict(X)
accuracy = np.mean(y == yp)                  # fraction of correctly classified samples

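# Aside (my addition): predict_proba is the standard sigmoid, so it agrees with
# scipy.special.expit applied to the raw scores X.dot(clf.w).
assert np.allclose(clf.predict_proba(X), ss.expit(np.dot(X, clf.w)))
print("train accuracy:", accuracy)
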
w, X, y = T.matrices("w", "X", "y")          # note: rebinds the numpy X, y above as symbolic matrices
logloss = T.sum(T.log1p(1 + T.expm1(-y * T.dot(X, w))))
get_gradw3 = theano.function([w, X, y], T.grad(logloss, w).reshape(w.shape))
# get_gradw3 replaces the hand-derived gradw3 term; call it with concrete float
# matrices rather than the symbolic variables, e.g. gradw3 = get_gradw3(w_num, X_num, y_num)

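# Example call (my addition, a minimal sketch; data and shapes are assumptions):
# the compiled function expects 2-D float arrays, with w and y as column matrices.
Xn, yn = make_classification(n_features=20, n_samples=50)
Xn = Xn.astype(theano.config.floatX)
yn = (2.0 * (yn - 0.5)).reshape(-1, 1).astype(theano.config.floatX)
wn = np.zeros((20, 1), dtype=theano.config.floatX)
gradw3_num = get_gradw3(wn, Xn, yn)          # (20, 1) gradient of the data-fit term
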
def lossf(w, X, y, l1, l2):
    """Revised loss: nan_to_num guards expm1 against overflow for large margins."""
    w = w.reshape((w.shape[0], 1))
    y = y.reshape((y.shape[0], 1))

    lossf1 = np.sum(ss.log1p(1 + np.nan_to_num(ss.expm1(-y * np.dot(X, w)))))
    lossf2 = l2 * np.dot(w.T, w)
    lossf3 = l1 * np.sum(np.abs(w))
    return float(lossf1 + lossf2 + lossf3)

def gradf(w, X, y, l1, l2):
    """Revised gradient: same overflow guard applied to the logistic factor."""
    w = w.reshape((w.shape[0], 1))
    y = y.reshape((y.shape[0], 1))

    gradw1 = l2 * 2 * w
    gradw2 = l1 * np.sign(w)
    z = -y * np.dot(X, w)
    gradw3 = -y * (1 + np.nan_to_num(ss.expm1(z))) / (2 + np.nan_to_num(ss.expm1(z)))
    gradw3 = np.sum(gradw3 * X, axis=0).reshape((-1, 1))
    gradw = gradw1 + gradw2 + gradw3
    return gradw.reshape((gradw.shape[0],))

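# Why nan_to_num (my addition): for large positive margins expm1 overflows to inf
# and the ratio becomes inf/inf = nan; clamping to the largest finite float keeps
# the logistic factor at ~1, which is the correct limit.
_z = np.array([800.0])
print((1 + ss.expm1(_z)) / (2 + ss.expm1(_z)))       # [nan]
_e = np.nan_to_num(ss.expm1(_z))
print((1 + _e) / (2 + _e))                           # [1.]
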
class LR(ClassifierMixin, BaseEstimator):
    def __init__(self, lr=0.000001, l1=0.1, l2=0.1, num_iter=100, verbose=0):
        self.l1 = l1
        self.l2 = l2
        self.w = None
        self.lr = lr
        self.verbose = verbose
        self.num_iter = num_iter

    def fit(self, X, y):
        n, d = X.shape
        self.w = np.zeros(shape=(d,))
        for i in range(self.num_iter):
            if self.verbose:
                print("\nIteration", i)
            g = gradf(self.w, X, y, self.l1, self.l2)
            self.w = self.w - self.lr * g
            if self.verbose:
                print("Loss:", lossf(self.w, X, y, self.l1, self.l2))
        return self

    def predict_proba(self, X):
        # logistic sigmoid of the scores X.dot(w)
        probs = 1 / (2 + ss.expm1(np.dot(-X, self.w)))
        return probs

    def predict(self, X):
        probs = self.predict_proba(X)
        preds = np.sign(2 * probs - 1)
        return preds.reshape((preds.shape[0],))
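
# Usage sketch for the revised class (my addition; data and hyper-parameters are
# assumptions mirroring the driver code above).
X2, y2 = make_classification(n_features=100, n_samples=100)
y2 = 2 * (y2 - 0.5)
clf2 = LR(lr=0.000001, l1=0.1, l2=0.1, num_iter=10, verbose=1).fit(X2, y2)
print("train accuracy:", np.mean(clf2.predict(X2) == y2))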