Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- from scipy.stats import truncnorm
def ReLU(diff, x):
    """Rectified linear unit, applied elementwise.

    Args:
        diff: if truthy, return the derivative (1 where x > 0, else 0)
            instead of the activation itself.
        x: scalar or array-like input.

    Returns:
        ndarray with max(0, x) elementwise when ``diff`` is falsy,
        otherwise the elementwise derivative.  ReLU'(0) is taken to be 0,
        matching the original implementation.
    """
    # Native ufunc operations replace the original @np.vectorize wrapper,
    # which looped over elements in Python; results are identical.
    x = np.asarray(x, dtype=float)
    if diff:
        # Subgradient convention: derivative at x == 0 is 0.
        return (x > 0).astype(float)
    return np.maximum(0.0, x)
def sigmoid(diff, x):
    """Logistic sigmoid, applied elementwise.

    Args:
        diff: if truthy, ``x`` is interpreted as a sigmoid OUTPUT s
            (this is how the network's train loop calls it — it passes
            stored activations) and the derivative s * (1 - s) is
            returned; otherwise the activation 1 / (1 + e**-x) is
            computed from the raw input x.
        x: scalar or array-like input.

    Returns:
        ndarray of the activation or its derivative, elementwise.
    """
    x = np.asarray(x, dtype=float)
    if diff:
        # The original computed func(inv_func(x)) * (1 - func(inv_func(x))),
        # i.e. it logit-inverted x and re-applied the sigmoid.  That is
        # algebraically x * (1 - x); computing it directly avoids the
        # pointless log/exp round trip and the NaN it produced at
        # x in {0, 1}.
        return x * (1.0 - x)
    return 1.0 / (1.0 + np.exp(-x))
def truncated_normal(mean, sd, low, upp):
    """Build a frozen truncated-normal distribution.

    The distribution has the given ``mean`` and standard deviation ``sd``
    and is clipped to the interval [``low``, ``upp``].
    """
    # truncnorm wants its bounds in standard-deviation units relative to
    # the location, so standardize them first.
    a = (low - mean) / sd
    b = (upp - mean) / sd
    return truncnorm(a, b, loc=mean, scale=sd)
class NeuralNet():
    """Simple fully connected feed-forward network trained with
    per-example backpropagation.

    Layer sizes come from ``nodes``; one weight matrix is created per
    layer transition, with entries drawn from a truncated normal bounded
    by +/- 1/sqrt(fan_in).
    """

    def __init__(self, nodes, learning_rate, act_func, bias):
        # nodes: layer sizes; the code reads .size, so a numpy array is
        # expected — TODO confirm with callers.
        self.nodes = nodes
        self.learning_rate = learning_rate
        # act_func(diff, x): activation when diff is falsy, its
        # derivative when truthy (ReLU / sigmoid above fit this contract).
        self.act_func = act_func
        # bias: truthy -> a constant-1 bias node is appended to layer
        # activations before they feed the next weight matrix.
        self.bias = bias
        self.gen_rand_weight_matricies()

    def gen_rand_weight_matricies(self):
        """Create one random weight matrix per layer transition.

        weight_dict[i] maps layer i's activation (plus the optional bias
        node) to layer i+1's pre-activation.
        """
        bias_node = 1 if self.bias else 0
        self.weight_dict = {}
        for i in range(0, (self.nodes.size - 1)):
            self.weight_dict[i] = self.rnd_weights(self.nodes[i], self.nodes[i + 1], bias_node)

    def rnd_weights(self, nd_1, nd_2, bias_node):
        """Return a (nd_2, nd_1 + bias_node) matrix of truncated-normal
        weights bounded by +/- 1/sqrt(nd_1 + bias_node)."""
        ran = 1/np.sqrt(nd_1 + bias_node)
        dist = truncated_normal(mean=0, sd=1, low=-ran, upp = ran )
        return dist.rvs((nd_2, nd_1 + bias_node))

    def convert_vec(self, vec):
        # Reshape a 1-d sequence into a 2-d column vector (n, 1).
        return np.array(vec, ndmin=2).T

    def train(self, input_vec, target_vec):
        """Run one forward/backward pass and update the weights in place.

        input_vec / target_vec: 1-d sequences sized to the first and
        last entries of self.nodes.
        """
        if self.bias:
            bias_node = 1
            # input_vec is still 1-d here, so appending [1] is fine.
            input_vec = np.concatenate( (input_vec, [1]) )
        else:
            bias_node = 0
        input_vec, target_vec = self.convert_vec(input_vec), self.convert_vec(target_vec)
        # train_dict maps layer index -> activation column vector, keyed
        # in REVERSE: the input sits at the highest key and the network
        # output at key 0.
        self.train_dict = {}
        val = self.nodes.size - 1  # highest layer key, i.e. the input's slot
        self.train_dict[val]= input_vec
        # Forward pass.  weight_dict iterates in insertion order 0..n-2,
        # so train_dict[i+1] is always written before it is read here.
        for key in self.weight_dict:
            i = val - (key + 1)
            self.train_dict[i] = self.act_func(False, np.dot(self.weight_dict[key], self.train_dict[i+1]))
            if self.bias:
                # NOTE(review): train_dict[i] is a 2-d (n, 1) column at
                # this point while [1] is 1-d, so np.concatenate raises on
                # mismatched ranks; the bias node is also appended to the
                # OUTPUT layer (i == 0), which would break the error
                # subtraction below.  Suspect the bias=True path is broken
                # — confirm before relying on it.
                self.train_dict[i] = np.concatenate( (self.train_dict[i], [1] ) )
        output_errors = target_vec - self.train_dict[0]
        val = self.nodes.size - 1
        # Backward pass: key walks the stored activations from the output
        # (key 0) toward the input, while i walks the weight matrices from
        # the last transition back to the first.
        for key in self.weight_dict:
            i = val - (key + 1)
            """
            print("layer {0}".format(i))
            print("output errors: {0}".format(output_errors))
            print("input vectors {0}".format(self.train_dict[key]))
            a = input("")
            """
            # Delta: error times activation derivative at this layer
            # (act_func receives the stored activation, not the raw input).
            tmp = output_errors * self.act_func(True, self.train_dict[key])
            if (self.bias and key > 0):
                # [:-1,:] drops the last row of the gradient — presumably
                # the entry corresponding to the appended bias node, so the
                # update matches weight_dict[i]'s shape; confirm.
                x = self.learning_rate * np.dot(tmp, self.train_dict[key+1].T)[:-1,:]
            else:
                x = self.learning_rate * np.dot(tmp, self.train_dict[key+1].T)
            self.weight_dict[i] += x
            #print(self.weight_dict[i])
            #print(output_errors)
            #a = input("")
            # Propagate the error back through the (already updated)
            # weights for the next, shallower iteration.
            output_errors = np.dot(self.weight_dict[i].T, output_errors)

    def run(self, input_vec):
        """Forward-propagate input_vec and return the network's output
        column vector (no weight updates)."""
        if self.bias:
            # input_vec is 1-d here, so this concatenate is well-formed.
            input_vec = np.concatenate( (input_vec, [1] ))
        temp_vec = self.convert_vec(input_vec)
        #print(input_vec)
        for key in self.weight_dict:
            #print(self.weight_dict[key])
            temp_vec = self.act_func(False, np.dot(self.weight_dict[key], temp_vec))
            if self.bias:
                # NOTE(review): same rank mismatch as in train() — temp_vec
                # is (n, 1) while [1] is 1-d — and the bias is appended even
                # after the FINAL layer, so the returned vector would carry
                # an extra element; confirm the bias=True path.
                temp_vec = np.concatenate( (temp_vec, [1]) )
        #print(temp_vec)
        return temp_vec
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement