Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- import random
def MinMax(x, Min, Max, sMin, sMax):
    """Linearly rescale *x* from the range [Min, Max] onto [sMin, sMax].

    Raises ZeroDivisionError when Max == Min (unchanged contract).
    """
    fraction = (x - Min) / (Max - Min)
    return fraction * (sMax - sMin) + sMin
class NeuralNetwork():
    """Small fully-connected neural network.

    Two parallel parameter sets coexist (kept from the original design):

    * ``weightsInputToHidden`` / ``weightsHiddenTooutput`` + ``biosH`` /
      ``biosO`` — a single hidden layer trained by :meth:`Train` and used
      by :meth:`Predict`.
    * ``hiddenLArr`` / ``hiddenBArr`` — an optional stack of hidden layers
      used only by :meth:`Predict2` and evolved by :meth:`mutation`
      (neuro-evolution style; never gradient-trained).
    """

    def __init__(self, inputs, hidden, outputs, rate=0.1, hiddenLayer=1, func="sigmoid"):
        """Randomly initialise all parameters.

        Args:
            inputs: number of input features.
            hidden: neurons per hidden layer.
            outputs: number of output neurons.
            rate: learning rate for Train.
            hiddenLayer: hidden-layer count for the Predict2 stack.
            func: "sigmoid" (default), "relu" or "tanh" — activation used
                by Predict2; anything else falls back to sigmoid.
        """
        # Single-hidden-layer parameters (used by Train / Predict).
        self.weightsInputToHidden = np.random.rand(inputs, hidden)
        self.weightsHiddenTooutput = np.random.rand(hidden, outputs)

        # Multi-layer stack (used by Predict2 / mutation).  Layer 0 maps
        # inputs->hidden, subsequent layers map hidden->hidden.
        self.hiddenLArr = []
        self.hiddenBArr = []
        self.hiddenLayer = hiddenLayer
        for i in range(hiddenLayer):
            in_dim = inputs if i == 0 else hidden
            self.hiddenLArr.append(np.random.rand(in_dim, hidden))
            self.hiddenBArr.append(np.random.rand(1, hidden))

        if func == "relu":
            self.ActiveFunction = self.ReLU
        elif func == "tanh":
            self.ActiveFunction = self.TanH
        else:
            self.ActiveFunction = self.Sigmoid

        self.biosH = np.random.rand(1, hidden)
        # NOTE(review): only the output bias gets a Xavier-style scale
        # while every other parameter is plain uniform — inconsistent,
        # but preserved because it defines the network's behavior.
        self.biosO = np.random.rand(1, outputs) * np.sqrt(2 / (inputs + outputs))
        self.rate = rate

    def SoftMax(self, output):
        """Return the softmax of *output* as a plain Python list.

        The maximum is subtracted before exponentiating for numerical
        stability; this is mathematically identical to exp(x)/sum(exp(x))
        (the original overflowed for large logits).
        """
        arr = np.asarray(output, dtype=float)
        exps = np.exp(arr - arr.max())
        return list(exps / exps.sum())

    def SetLearningRate(self, x):
        """Set the learning rate used by Train."""
        self.rate = x

    def TanH(self, x):
        """Hyperbolic tangent via 2/(1+e^(-2x)) - 1.

        The original wrapped this in try/except returning 0, but np.exp
        signals overflow with inf plus a warning — never an exception —
        so that handler was dead code that could only mask real errors.
        """
        return (2.0 / (1.0 + np.exp(-2 * x))) - 1

    def ReLU(self, x):
        """Rectified linear unit: max(x, 0), element-wise."""
        return x * (x > 0)

    def Sigmoid(self, x):
        """Logistic sigmoid 1/(1+e^(-x))."""
        return 1.0 / (1 + np.exp(-x))

    def DSigmoid(self, x):
        """Sigmoid derivative expressed in terms of the *activation* x:
        s'(z) = s(z) * (1 - s(z)), so callers pass the sigmoid output."""
        return x * (1 - x)

    def Train(self, inputs, targets, iter):
        """Stochastic training: `iter` single-sample forward/backward
        passes through the single hidden layer (always sigmoid,
        regardless of the `func` chosen in __init__ — original behavior).

        Args:
            inputs: sequence of input vectors.
            targets: sequence of target vectors, aligned with inputs.
            iter: number of random single-sample updates (name kept for
                API compatibility although it shadows the builtin).
        """
        for _ in range(iter):
            index = random.randint(0, len(inputs) - 1)
            sample = np.array(inputs[index]).reshape(1, -1)

            # Forward pass.
            hidden = self.Sigmoid(np.dot(sample, self.weightsInputToHidden) + self.biosH)
            output = self.Sigmoid(np.dot(hidden, self.weightsHiddenTooutput) + self.biosO)

            error = targets[index] - output
            # Output-layer delta, pre-scaled by the learning rate.
            cost = self.DSigmoid(output) * error * self.rate

            # BUGFIX: back-propagate the error through the CURRENT
            # output weights BEFORE updating them.  The original updated
            # weightsHiddenTooutput first and then used the new weights
            # to compute the hidden-layer error.
            hiddenError = np.dot(error, np.transpose(self.weightsHiddenTooutput))

            self.weightsHiddenTooutput += np.dot(np.transpose(hidden), cost)
            self.biosO += cost

            hiddenCost = self.DSigmoid(hidden) * hiddenError * self.rate
            self.weightsInputToHidden += np.dot(np.transpose(sample), hiddenCost)
            self.biosH += hiddenCost

    def Predict(self, inputs, func="sigmoid"):
        """Forward pass through the single hidden layer, softmax output.

        Args:
            inputs: one input vector.
            func: "sigmoid", "relu" or "tanH" (note the capital H — this
                mismatch with __init__'s "tanh" is preserved from the
                original).  BUGFIX: an unknown value now falls back to
                sigmoid; the original left `output` unbound and raised
                UnboundLocalError.

        Returns:
            Softmax probabilities as a Python list.
        """
        activation = {"relu": self.ReLU, "tanH": self.TanH}.get(func, self.Sigmoid)
        x = np.array(inputs).reshape(1, -1)
        hidden = activation(np.dot(x, self.weightsInputToHidden) + self.biosH)
        output = activation(np.dot(hidden, self.weightsHiddenTooutput) + self.biosO)
        return self.SoftMax(output.tolist()[0])

    def Predict2(self, inputs, softMax=True):
        """Forward pass through the multi-layer stack (hiddenLArr), then
        the shared output layer, using the activation chosen in __init__.

        Args:
            inputs: one input vector.
            softMax: when True return softmax probabilities, otherwise
                the raw activated output row.
        """
        acts = np.array(inputs).reshape(1, -1)
        for weights, bias in zip(self.hiddenLArr, self.hiddenBArr):
            acts = self.ActiveFunction(np.dot(acts, weights) + bias)
        output = self.ActiveFunction(np.dot(acts, self.weightsHiddenTooutput) + self.biosO)
        if softMax:
            return self.SoftMax(output.tolist()[0])
        return output.tolist()[0]

    def mutation(self, n, mutaion=0.1):
        """Copy parameters from network *n*, perturbing each element with
        probability `mutaion` (parameter name's typo kept for keyword-
        caller compatibility) by uniform noise in [-0.5, 0.5).

        Vectorized with numpy masks instead of the original triple Python
        loops (same distribution of results).  As in the original, only
        the Predict2 stack, the output weights and the output bias are
        copied/mutated — weightsInputToHidden and biosH are untouched.
        """
        def _mutate_into(dst, src):
            # Copy src into dst, adding noise where the coin flip hits.
            mask = np.random.uniform(0, 1, size=src.shape) < mutaion
            noise = np.random.uniform(-0.5, 0.5, size=src.shape)
            dst[...] = src + mask * noise

        for i in range(n.hiddenLayer):
            _mutate_into(self.hiddenLArr[i], n.hiddenLArr[i])
            _mutate_into(self.hiddenBArr[i], n.hiddenBArr[i])
        _mutate_into(self.weightsHiddenTooutput, n.weightsHiddenTooutput)
        _mutate_into(self.biosO, n.biosO)
        self.rate = n.rate
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement