import numpy as np

# This function takes a value x and two thetas that determine which side
# of the sigmoid curve x falls on; it returns a probability between 0 and 1.
def sigmoid(x, theta0, theta1):
    output = 1 / (1 + np.exp(-(theta0 + theta1 * x)))
    return output
# Rounds a probability to a hard 0/1 class label.
# Note: this shadows Python's built-in round().
def round(num):
    if num > .5:
        return 1
    else:
        return 0
# Sums the error term (sigmoid(x) - y) over all of X, optionally scaled by x.
# Scaled by x this is the partial derivative of the cost with respect to
# theta1; unscaled it is the partial derivative with respect to theta0.
def sumofSigmoid(X, Y, theta0, theta1, use_x=True):
    total = 0
    for i in range(len(X)):
        error = sigmoid(X[i], theta0, theta1) - Y[i]
        total += error * X[i] if use_x else error
    return np.squeeze(total)
# Minimises theta0 and theta1 using one step of gradient descent, sounds like descending grades :/
def gradient(X, Y, theta0, theta1, learning_rate):
    temp0 = theta0 - (learning_rate * sumofSigmoid(X, Y, theta0, theta1, use_x=False))
    temp1 = theta1 - (learning_rate * sumofSigmoid(X, Y, theta0, theta1, use_x=True))
    return temp0, temp1
# This function repeats the gradient update for the given number of iterations.
def gradeDescent(X, Y, iterations, learning_rate):
    tempa = -4
    tempb = 1
    # Normalize X to zero mean and unit variance.
    X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
    for i in range(iterations):
        tempa, tempb = gradient(X, Y, tempa, tempb, learning_rate)
        print(cost(X, Y, tempa, tempb))
    return tempa, tempb
# Returns the total cross-entropy cost of X and Y based on the thetas.
def cost(X, Y, theta0, theta1):
    step1 = 0
    step2 = 0
    # For each X and Y in the dataset, accumulate the cost for the 1 and 0 cases.
    for i in range(len(X)):
        temp = sigmoid(X[i], theta0, theta1)
        step1 += Y[i] * np.log(temp)
        step2 += (1 - Y[i]) * np.log(1 - temp)
    # Combine the two terms to finish computing the cost and return it.
    total = -step1 - step2
    return np.mean(total)
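
# A minimal usage sketch, not part of the original paste: train on a tiny
# made-up 1-D dataset and classify a new point. The data values, iteration
# count, and learning rate below are illustrative assumptions only.
if __name__ == "__main__":
    X = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    Y = np.array([0, 0, 0, 1, 1, 1])
    theta0, theta1 = gradeDescent(X, Y, iterations=100, learning_rate=0.1)
    # gradeDescent normalizes X internally, so apply the same scaling to new points
    # before pushing them through the sigmoid and thresholding the probability.
    x_new = (4.5 - np.mean(X)) / np.std(X)
    print(round(sigmoid(x_new, theta0, theta1)))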