__author__ = 'Shaf'
import DB
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# X = (CGPA, attendance) and y = GPA, read from the DB module. The values are
# assumed to arrive pre-normalized to [0, 1] (CGPA/4, attendance/100, GPA/4),
# matching the scaling used for predictions further down.
X = np.array(DB.INlist, dtype=float)
y = np.array(DB.OUTlist, dtype=float)

# Index of the 90/10 train/test split; int() so it can be used as a slice bound.
div_val = int(0.9 * len(X))

def remove_duplicates(values):
    # Preserve first-seen order while dropping duplicates.
    output = []
    seen = set()
    for value in values:  # iterate over the argument, not a global list
        if value not in seen:
            output.append(value)
            seen.add(value)
    return output

values = DB.futrestd
result = remove_duplicates(values)
result = np.array(result, dtype=int)

train_set_input = X[:div_val]
test_set_input = X[div_val:]
train_set_output = y[:div_val]
test_set_output = y[div_val:]
test_set_IDs = result[div_val:]

# Weights saved to foo.txt by a previous run, read back in as strings
# (see restore_params below for one way to apply them).
with open('foo.txt', 'r') as f:
    found = [line.strip() for line in f]

class Neural_Network(object):
    def __init__(self):
        # Define hyperparameters
        self.inputLayerSize = 2
        self.outputLayerSize = 1
        self.hiddenLayerSize = 3

        # Weights (parameters)
        self.W1 = np.array([[438.48462036, -0.156290411005, -207.851889587],
                            [1029.60759116, 0.0840455612294, -367.446844544]])
        self.W2 = np.array([[95.9665139077], [-194.535164675], [-296.164925476]])

    def forward(self, X):
        # Propagate inputs through the network
        self.z2 = np.dot(X, self.W1)
        self.a2 = self.sigmoid(self.z2)
        self.z3 = np.dot(self.a2, self.W2)
        yHat = self.sigmoid(self.z3)
        return yHat

    def sigmoid(self, z):
        # Apply sigmoid activation function to a scalar, vector, or matrix
        return 1/(1 + np.exp(-z))

    def sigmoidPrime(self, z):
        # Gradient of the sigmoid
        return np.exp(-z)/((1 + np.exp(-z))**2)

    def costFunction(self, X, y):
        # Compute cost for given X, y, using the weights already stored in the class.
        self.yHat = self.forward(X)
        J = 0.5 * np.sum((y - self.yHat)**2)
        return J

    def costFunctionPrime(self, X, y):
        # Compute derivatives with respect to W1 and W2 for a given X and y:
        self.yHat = self.forward(X)

        delta3 = np.multiply(-(y - self.yHat), self.sigmoidPrime(self.z3))
        dJdW2 = np.dot(self.a2.T, delta3)

        delta2 = np.dot(delta3, self.W2.T) * self.sigmoidPrime(self.z2)
        dJdW1 = np.dot(X.T, delta2)

        return dJdW1, dJdW2

    # Helper functions for interacting with other classes:
    def getParams(self):
        # Get W1 and W2 unrolled into a single vector:
        params = np.concatenate((self.W1.ravel(), self.W2.ravel()))
        return params

    def setParams(self, params):
        # Set W1 and W2 from a single parameter vector.
        W1_start = 0
        W1_end = self.hiddenLayerSize * self.inputLayerSize
        self.W1 = np.reshape(params[W1_start:W1_end], (self.inputLayerSize, self.hiddenLayerSize))
        W2_end = W1_end + self.hiddenLayerSize * self.outputLayerSize
        self.W2 = np.reshape(params[W1_end:W2_end], (self.hiddenLayerSize, self.outputLayerSize))

    def computeGradients(self, X, y):
        dJdW1, dJdW2 = self.costFunctionPrime(X, y)
        return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))

def computeNumericalGradient(N, X, y):
    paramsInitial = N.getParams()
    numgrad = np.zeros(paramsInitial.shape)
    perturb = np.zeros(paramsInitial.shape)
    e = 1e-4

    for p in range(len(paramsInitial)):
        # Set perturbation vector
        perturb[p] = e
        N.setParams(paramsInitial + perturb)
        loss2 = N.costFunction(X, y)

        N.setParams(paramsInitial - perturb)
        loss1 = N.costFunction(X, y)

        # Compute the numerical gradient via central differences
        numgrad[p] = (loss2 - loss1) / (2*e)

        # Return the perturbed entry to zero:
        perturb[p] = 0

    # Return params to their original values:
    N.setParams(paramsInitial)

    return numgrad

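# Optional sanity check (hypothetical helper, not part of the original paste):
# compare the analytic gradients from computeGradients against the numerical
# estimates above on a tiny random batch. A relative difference on the order
# of 1e-8 indicates costFunctionPrime is consistent with the cost function.
def check_gradients():
    net = Neural_Network()
    # Use small random weights; the hard-coded weights saturate the sigmoids,
    # which drives both gradients to ~0 and makes the ratio meaningless.
    net.setParams(0.1 * np.random.randn(net.getParams().size))
    Xs = np.random.rand(3, 2)
    ys = np.random.rand(3, 1)
    grad = net.computeGradients(Xs, ys)
    numgrad = computeNumericalGradient(net, Xs, ys)
    return np.linalg.norm(grad - numgrad) / np.linalg.norm(grad + numgrad)

# e.g. print(check_gradients())  # expect something on the order of 1e-8
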
## ----------------------- Part 6 ---------------------------- ##
from scipy import optimize


class trainer(object):
    def __init__(self, N):
        # Make a local reference to the network:
        self.N = N

    def callbackF(self, params):
        self.N.setParams(params)
        self.J.append(self.N.costFunction(self.X, self.y))

    def costFunctionWrapper(self, params, X, y):
        self.N.setParams(params)
        cost = self.N.costFunction(X, y)
        grad = self.N.computeGradients(X, y)
        return cost, grad

    def train(self, X, y):
        # Make internal variables for the callback function:
        self.X = X
        self.y = y

        # Make an empty list to store costs:
        self.J = []

        params0 = self.N.getParams()

        options = {'maxiter': 200, 'disp': True}
        _res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='BFGS',
                                 args=(X, y), options=options, callback=self.callbackF)

        self.N.setParams(_res.x)
        self.optimizationResults = _res

predicted_output = []

NN = Neural_Network()
T = trainer(NN)
T.train(train_set_input, train_set_output)

for test in test_set_input:
    # Network outputs are on a 0-1 scale; multiply by 4 to get back to a GPA.
    predicted_output.append(4 * NN.forward(test))

print(predicted_output)
lost = NN.getParams()

# Save the trained weights so a later run can read them back from foo.txt:
with open("foo.txt", "w") as fo:
    for item in lost:
        fo.write("%s\n" % item)

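# Hypothetical helper (not in the original paste): apply the weights read back
# into `found` at the top of the script instead of retraining from scratch.
def restore_params(net, lines):
    net.setParams(np.array(lines, dtype=float))

# e.g. restore_params(NN, found)
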
gpa = float(DB.INPUTGPA)
hours = int(DB.INPUTATD)

#print("Your predicted GPA is: ")
#print(4 * NN.forward([gpa / 4.0, hours / 100.0]))

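# A small convenience wrapper (hypothetical, not in the original paste):
# normalize a raw (CGPA, attendance) pair the same way as the commented-out
# lines above, then rescale the single sigmoid output to the 0-4 GPA range.
def predict_gpa(net, gpa, attendance):
    yhat = net.forward(np.array([gpa / 4.0, attendance / 100.0]))
    return 4 * yhat.item()

# e.g. print("Your predicted GPA is:", predict_gpa(NN, gpa, hours))
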
# Test the network for various combinations of CGPA/ATD
ATD = np.linspace(0, 100, 100)
CGPA = np.linspace(0, 4, 100)

# Normalize the data the same way as the training inputs
ATDnorm = ATD/100
CGPAnorm = CGPA/4

# Create 2D versions of the inputs
a, b = np.meshgrid(ATDnorm, CGPAnorm)

# Join into a single matrix. Column order follows the commented-out prediction
# above, which assumes DB.INlist stores normalized CGPA first, attendance second.
allInputs = np.zeros((a.size, 2))
allInputs[:, 0] = b.ravel()  # normalized CGPA
allInputs[:, 1] = a.ravel()  # normalized ATD
allOutputs = NN.forward(allInputs)
yy = np.dot(CGPA.reshape(100, 1), np.ones((1, 100)))
xx = np.dot(ATD.reshape(100, 1), np.ones((1, 100))).T
#print(xx)
#print(yy)
#print(4 * allOutputs.reshape(100, 100))

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')  # fig.gca(projection='3d') was removed in newer Matplotlib
surf = ax.plot_surface(xx, yy, 4 * allOutputs.reshape(100, 100), cmap=plt.cm.jet)
ax.set_xlabel('ATD')
ax.set_ylabel('CGPA')
ax.set_zlabel('GPA')
plt.show()