import numpy as np
import matplotlib.pyplot as plt
class Linear:
    """Fully connected layer: out = x @ W + b."""
    def __init__(self, m, n):
        # scale the random initialisation by the fan-in m
        self.W, self.b = np.random.randn(m, n) / m, np.random.randn(1, n) / m
        self.dW, self.db = None, None

    def forward(self, x):
        self.x = x  # cache the input for the backward pass
        return np.dot(x, self.W) + self.b

    def backward(self, dout):
        dx = np.dot(dout, self.W.T)
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)
        return dx
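# Not in the original paste: a finite-difference sanity check of
# Linear.backward, a minimal sketch for verifying the layer above.
# The shapes, tolerance, and helper name are arbitrary assumptions.
def _check_linear_grad():
    layer = Linear(3, 2)
    x = np.random.randn(4, 3)
    dout = np.random.randn(4, 2)
    layer.forward(x)
    layer.backward(dout)  # analytic gradient: dW = x.T @ dout
    eps = 1e-6
    num_dW = np.zeros_like(layer.W)
    for i in range(layer.W.shape[0]):
        for j in range(layer.W.shape[1]):
            layer.W[i, j] += eps
            up = np.sum(layer.forward(x) * dout)
            layer.W[i, j] -= 2 * eps
            down = np.sum(layer.forward(x) * dout)
            layer.W[i, j] += eps
            num_dW[i, j] = (up - down) / (2 * eps)
    assert np.allclose(num_dW, layer.dW, atol=1e-5)
# _check_linear_grad()  # uncomment to run the check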
class ReLU:
    def forward(self, x):
        self.mask = (x <= 0)  # remember which units were clipped
        out = x.copy()        # copy so the caller's array is not mutated
        out[self.mask] = 0
        return out

    def backward(self, dout):
        dx = dout.copy()
        dx[self.mask] = 0  # gradient is zero where the input was <= 0
        return dx
class Sigmoid:
    def forward(self, x):
        out = 1 / (1 + np.exp(-x))
        self.o = out  # cache the output for the backward pass
        return out

    def backward(self, dout):
        return dout * self.o * (1 - self.o)
class Tanh:
    def forward(self, x):
        # np.tanh is equivalent to (e^x - e^-x)/(e^x + e^-x) but does not
        # overflow for large |x|
        out = np.tanh(x)
        self.o = out  # cache the output for the backward pass
        return out

    def backward(self, dout):
        return dout * (1 - self.o ** 2)
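# Both activations above cache their forward output o because their
# derivatives can be written in terms of it alone:
# sigmoid'(x) = o*(1 - o) and tanh'(x) = 1 - o**2,
# so nothing extra needs to be stored for the backward pass.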
class Loss:
    """Sum-of-squares loss: L = sum((y - ybar)**2)."""
    def forward(self, y, ybar):
        # cache both arrays; the original kept only ybar, so backward
        # referenced an undefined y
        self.y, self.ybar = y, ybar
        return np.sum((y - ybar) ** 2)

    def backward(self, dout):
        return dout * (-2 * (self.y - self.ybar))  # dL/dybar = -2(y - ybar)
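# Not in the original paste: a small numeric illustration of the loss
# gradient. With L = sum((y - ybar)**2), dL/dybar = -2*(y - ybar),
# which is what Loss.backward returns when dout = 1:
# loss = Loss()
# print(loss.forward(np.array([[1.0]]), np.array([[0.25]])))  # 0.5625
# print(loss.backward(1))                                     # [[-1.5]]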
class TwoLayer:
    """Linear -> ReLU -> Linear -> Sigmoid, trained with squared error."""
    def __init__(self, m, n, o):
        self.linear1 = Linear(m, n)
        self.relu = ReLU()
        self.linear2 = Linear(n, o)
        self.sigmoid = Sigmoid()
        self.loss = Loss()
        # previous update steps, for the momentum term
        self.last_dW1, self.last_db1 = 0, 0
        self.last_dW2, self.last_db2 = 0, 0

    def forward(self, x):
        x = self.linear1.forward(x)
        x = self.relu.forward(x)
        x = self.linear2.forward(x)
        self.ybar = self.sigmoid.forward(x)
        return self.ybar

    def backward(self, y):
        self.L = self.loss.forward(y, self.ybar)
        g = self.loss.backward(1)
        g = self.sigmoid.backward(g)
        g = self.linear2.backward(g)
        g = self.relu.backward(g)
        g = self.linear1.backward(g)

    def update(self, eta, alpha):
        # heavy-ball momentum: v <- alpha*v - eta*grad, then W <- W + v
        self.last_dW1 = alpha * self.last_dW1 - eta * self.linear1.dW
        self.last_db1 = alpha * self.last_db1 - eta * self.linear1.db
        self.linear1.W = self.linear1.W + self.last_dW1
        self.linear1.b = self.linear1.b + self.last_db1
        self.last_dW2 = alpha * self.last_dW2 - eta * self.linear2.dW
        self.last_db2 = alpha * self.last_db2 - eta * self.linear2.db
        self.linear2.W = self.linear2.W + self.last_dW2
        self.linear2.b = self.linear2.b + self.last_db2
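# Not in the original paste: a minimal sketch showing TwoLayer on XOR.
# The hidden width, eta, alpha, and epoch count are arbitrary assumptions
# for illustration, not values from the paste:
# xor_X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# xor_y = np.array([[0], [1], [1], [0]])
# net = TwoLayer(2, 8, 1)
# for _ in range(5000):
#     net.forward(xor_X)
#     net.backward(xor_y)
#     net.update(0.05, 0.5)
# print(np.round(net.ybar))  # should approach [[0], [1], [1], [0]]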
class FiveLayer:
    """Five Linear layers; each gets its own activation instance, since a
    shared one would overwrite the cached state its backward pass needs."""
    def __init__(self, m, n, o, p, q, r):
        self.linear1 = Linear(m, n)
        self.relu1 = ReLU()
        self.linear2 = Linear(n, o)
        self.sigmoid2 = Sigmoid()
        self.linear3 = Linear(o, p)
        self.relu3 = ReLU()
        self.linear4 = Linear(p, q)
        self.relu4 = ReLU()
        self.linear5 = Linear(q, r)
        self.sigmoid5 = Sigmoid()
        self.loss = Loss()
        self.last_dW = [0] * 5  # previous momentum steps, one per layer
        self.last_db = [0] * 5

    def forward(self, x):
        x = self.relu1.forward(self.linear1.forward(x))
        x = self.sigmoid2.forward(self.linear2.forward(x))
        x = self.relu3.forward(self.linear3.forward(x))
        x = self.relu4.forward(self.linear4.forward(x))
        self.ybar = self.sigmoid5.forward(self.linear5.forward(x))
        return self.ybar

    def backward(self, y):
        self.L = self.loss.forward(y, self.ybar)
        g = self.loss.backward(1)
        g = self.linear5.backward(self.sigmoid5.backward(g))
        g = self.linear4.backward(self.relu4.backward(g))
        g = self.linear3.backward(self.relu3.backward(g))
        g = self.linear2.backward(self.sigmoid2.backward(g))
        g = self.linear1.backward(self.relu1.backward(g))

    def update(self, eta, alpha):
        # heavy-ball momentum: v <- alpha*v - eta*grad, then W <- W + v
        layers = [self.linear1, self.linear2, self.linear3,
                  self.linear4, self.linear5]
        for i, layer in enumerate(layers):
            self.last_dW[i] = alpha * self.last_dW[i] - eta * layer.dW
            self.last_db[i] = alpha * self.last_db[i] - eta * layer.db
            layer.W = layer.W + self.last_dW[i]
            layer.b = layer.b + self.last_db[i]
class NLayer:
    """A configurable stack: Linear layers each followed by a named activation."""
    def __init__(self, Neuronin, activations, numofNeurons):
        self.function = []
        self.linear = []
        self.last_dW = []
        self.last_db = []
        self.neuron = [Neuronin] + numofNeurons  # layer widths, input first
        self.loss = Loss()
        self.length = len(activations)
        for i in range(self.length):
            self.linear.append(Linear(self.neuron[i], self.neuron[i + 1]))
            self.last_dW.append(0)
            self.last_db.append(0)
            if activations[i] == 'relu':
                self.function.append(ReLU())
            elif activations[i] == 'sigmoid':
                self.function.append(Sigmoid())
            elif activations[i] == 'tanh':
                self.function.append(Tanh())
            else:
                raise ValueError('unknown activation: %s' % activations[i])
    def forward(self, x):
        for i in range(self.length):
            x = self.linear[i].forward(x)
            x = self.function[i].forward(x)
        self.ybar = x
        return self.ybar

    def backward(self, y):
        self.L = self.loss.forward(y, self.ybar)
        g = self.loss.backward(1)
        for i in range(self.length - 1, -1, -1):  # layers in reverse order
            g = self.function[i].backward(g)
            g = self.linear[i].backward(g)
    def update(self, eta, alpha):
        # heavy-ball momentum: v <- alpha*v - eta*grad, then W <- W + v
        for i in range(self.length):
            self.last_dW[i] = alpha * self.last_dW[i] - eta * self.linear[i].dW
            self.last_db[i] = alpha * self.last_db[i] - eta * self.linear[i].db
            self.linear[i].W = self.linear[i].W + self.last_dW[i]
            self.linear[i].b = self.linear[i].b + self.last_db[i]
def graph(lossm, epoch):
    # 'seaborn-talk' was renamed 'seaborn-v0_8-talk' in matplotlib 3.6
    style = 'seaborn-talk' if 'seaborn-talk' in plt.style.available else 'seaborn-v0_8-talk'
    plt.style.use(style)
    plt.figure()
    plt.plot(epoch, lossm, label='Training_Error')
    plt.xlabel('Epochs')
    plt.ylabel('Training Error')
    plt.legend()
    plt.show()
# Build the 8-bit parity dataset: every byte 0..255 becomes a list of
# bits, labelled 1 if the number of set bits is odd and 0 otherwise.
x1 = []
y1 = []
lossm = []
epoch = []
for i in range(256):
    bits = [int(b) for b in format(i, '08b')]  # zero-padded binary digits
    x1.append(bits)
    y1.append([sum(bits) % 2])  # parity label: even -> 0, odd -> 1
X = np.array(x1)
y = np.array(y1)
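# Not in the original paste: a quick sanity check of the encoding.
# 0b11110100 = 244 has five set bits, so its parity label should be 1:
# print(X[244])  # [1 1 1 1 0 1 0 0]
# print(y[244])  # [1]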
model = NLayer(8, ['relu', 'tanh', 'sigmoid'], [64, 256, 1])
max_epochs, chk_epochs = 10000, 1000
eta, alpha = 0.03, 0.6
for e in range(max_epochs):
    model.forward(X)
    model.backward(y)
    model.update(eta, alpha)
    lossm.append(model.L)  # record the training curve for graph()
    epoch.append(e + 1)
    if (e + 1) % chk_epochs == 0:
        # threshold the sigmoid output at 0.5 to score accuracy
        accuracy = np.mean((model.ybar > 0.5) == y)
        print('Epoch %3d: loss = %6f, accuracy = %3f' % (e + 1, model.L, accuracy))
#print("test data is '['1,1,1,1,0,1,0,0']' ")
model.forward(X)
print(model.ybar.T)
graph(lossm, epoch)