Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #!/usr/bin/env python3
- # -*- coding: utf-8 -*-
- """
- Created on Sun Oct 21 18:15:05 2018
- @author: dsu
- """
- import numpy as np
- import matplotlib.pyplot as plt
class Dataset():
    """Container for a 2-D sample matrix with (de)normalisation utilities.

    Each row of *data* is one example; each column is one feature.
    ``tune_up`` must be called before any (de)normalisation method.
    """

    def __init__(self, data, dtype=np.float64):
        self._data = np.array(data, dtype=dtype)
        self._size = len(self._data)                  # number of examples
        self._example_size = len(self._data[0])       # features per example
        self._status = "pure"
        self._range = None
        self._expert = None
        self._percentage = None
        self._max_elems = None
        self._min_elems = None
        self._centers = None

    def tune_up(self, expert=None, percentage=None):
        """Compute per-feature min/max and range centers.

        expert     -- optional per-feature (min, max) pairs overriding the
                      data-driven bounds; a falsy entry keeps the data bounds.
        percentage -- optional per-feature factor widening the range on both
                      sides by percentage[i] * (max - min).
        """
        self._max_elems = np.zeros((self._example_size))
        self._min_elems = np.zeros((self._example_size))
        self._centers = np.zeros((self._example_size))
        for i in range(self._example_size):
            if expert and expert[i]:
                # BUG FIX: original assigned both expert bounds to
                # _min_elems, leaving _max_elems at 0 for these features.
                self._min_elems[i] = expert[i][0]
                self._max_elems[i] = expert[i][1]
            else:
                self._max_elems[i] = max(self._data[:, i])
                self._min_elems[i] = min(self._data[:, i])
        if percentage:
            for i in range(self._example_size):
                if percentage[i]:
                    # BUG FIX: compute the margin from the ORIGINAL range once;
                    # the original widened _max_elems first, so the _min_elems
                    # margin was derived from an already-widened range.
                    margin = percentage[i] * (self._max_elems[i]
                                              - self._min_elems[i])
                    self._max_elems[i] += margin
                    self._min_elems[i] -= margin
        for i in range(self._example_size):
            self._centers[i] = (self._max_elems[i] + self._min_elems[i]) / 2

    def normalisation_linear(self, rng):
        """Linearly rescale the data in place.

        rng -- target range; a truthy rng[0] (e.g. [-1, 1]) maps each feature
               to [-1, 1], otherwise (e.g. [0, 1]) to [0, 1].
        """
        if rng[0]:
            for i in range(self._size):
                for j in range(self._example_size):
                    numerator = 2 * (self._data[i][j] - self._min_elems[j])
                    denominator = self._max_elems[j] - self._min_elems[j]
                    self._data[i][j] = numerator / denominator - 1
        else:
            for i in range(self._size):
                for j in range(self._example_size):
                    numerator = self._data[i][j] - self._min_elems[j]
                    denominator = self._max_elems[j] - self._min_elems[j]
                    self._data[i][j] = numerator / denominator

    def normalisation_nonlinear(self, rng, alfa):
        """Nonlinearly squash the data in place around the feature centers.

        Truthy rng[0]: tanh-like map onto (-1, 1); otherwise sigmoid onto (0, 1).
        alfa -- per-feature steepness coefficients.
        """
        if rng[0]:
            for i in range(self._size):
                for j in range(self._example_size):
                    # BUG FIX: original read `self.data` (AttributeError);
                    # the attribute is `self._data`.
                    degree = -alfa[j] * (self._data[i][j] - self._centers[j])
                    self._data[i][j] = (np.exp(degree) - 1) / (np.exp(degree) + 1)
        else:
            for i in range(self._size):
                for j in range(self._example_size):
                    degree = -alfa[j] * (self._data[i][j] - self._centers[j])
                    # BUG FIX: original computed 1/exp(degree) + 1, which is
                    # not a sigmoid; the logistic form is 1/(exp(degree) + 1).
                    self._data[i][j] = 1 / (np.exp(degree) + 1)

    def denormalisation_linear(self, rng, example):
        """Map one normalised example back to the original scale.

        Returns the denormalised example (BUG FIX: original never returned).
        """
        # BUG FIX: np.zeroes does not exist; the function is np.zeros.
        out = np.zeros((self._example_size))
        if rng[0]:
            for i in range(self._example_size):
                substraction = self._max_elems[i] - self._min_elems[i]
                out[i] = self._min_elems[i] + (example[i] + 1) * substraction / 2
        else:
            for i in range(self._example_size):
                substraction = self._max_elems[i] - self._min_elems[i]
                out[i] = self._min_elems[i] + example[i] * substraction
        return out

    def denormalisation_nonlinear(self, rng, alfa, example):
        """Invert normalisation_nonlinear for one example.

        Returns the denormalised example (BUG FIX: original never returned).
        BUG FIX: the two branches are now exact inverses of the matching
        normalisation_nonlinear branches; the original had the formulas
        swapped and read self._centers where the normalised value belongs.
        """
        out = np.zeros((self._example_size))
        if rng[0]:
            # Inverse of y = (e^d - 1)/(e^d + 1), d = -alfa*(x - c):
            # x = c - (1/alfa) * ln((1 + y)/(1 - y))
            for i in range(self._example_size):
                ratio = (1 + example[i]) / (1 - example[i])
                out[i] = self._centers[i] - (1 / alfa[i]) * np.log(ratio)
        else:
            # Inverse of y = 1/(e^d + 1), d = -alfa*(x - c):
            # x = c - (1/alfa) * ln(1/y - 1)
            for i in range(self._example_size):
                out[i] = self._centers[i] - (1 / alfa[i]) * np.log(1 / example[i] - 1)
        return out

    def get_status(self):
        return self._status

    def example_size(self):
        return self._example_size

    def __getitem__(self, key):
        return self._data[key]

    def __len__(self):
        return self._size

    def __iter__(self):
        return (example for example in self._data)
class TanH:
    """Tanh-style activation: (e^z - 1)/(e^z + 1) with z = alfa*(state - t).

    alfa -- steepness coefficient; t -- threshold (horizontal shift).
    """

    def __init__(self, alfa, t):
        self.alfa = alfa
        self.t = t

    def __call__(self, state):
        # Evaluate the exponential once and reuse it in both numerator
        # and denominator; algebraically (e^z - 1)/(e^z + 1) == tanh(z/2).
        exp_term = np.exp(self.alfa * (state - self.t))
        return (exp_term - 1) / (exp_term + 1)
class Sigmoid:
    """Logistic activation: 1 / (1 + e^(-alfa*(state - t))).

    alfa -- steepness coefficient; t -- threshold (horizontal shift).
    """

    def __init__(self, alfa, t):
        self.alfa = alfa
        self.t = t

    def __call__(self, state):
        # BUG FIX: the threshold `t` was stored but never used, unlike the
        # sibling TanH activation; shift the state by t before scaling.
        # Behavior is unchanged for t == 0.
        return 1 / (1 + np.exp(-self.alfa * (state - self.t)))
class Percep:
    """Single-layer perceptron with a pluggable activation function.

    Weights and biases start as uniform random values in [0, 1).
    """

    def __init__(self, inputs_cnt, outs_cnt, activation_func):
        self.inputs_cnt = inputs_cnt
        self.outs_cnt = outs_cnt
        self.biases = np.random.rand(outs_cnt)
        self.weights = np.random.rand(inputs_cnt, outs_cnt)
        self.activation_func = activation_func

    def predict(self, example):
        """Return activation(example @ weights + biases)."""
        state = np.dot(example, self.weights) + self.biases
        return self.activation_func(state)

    def train(self, dataset, epoches_cnt, tr_speed):
        """Delta-rule training, in place.

        dataset     -- (x_train, y_train) pair of equal-length sequences
        epoches_cnt -- number of full passes over the training data
        tr_speed    -- learning rate
        """
        x_train, y_train = dataset
        # BUG FIX: the original nested loops both bound `i`, so the example
        # index shadowed the epoch index; use distinct loop variables.
        for _epoch in range(epoches_cnt):
            for x_ex, y_req in zip(x_train, y_train):
                deltas = y_req - self.predict(x_ex)
                # Outer product of input column and delta row updates weights.
                self.weights += tr_speed * deltas * np.array(x_ex)[:, np.newaxis]
                self.biases += tr_speed * deltas
def RMSE(req_out, nn_out):
    """Root-mean-square error over a batch of required vs produced vectors.

    req_out, nn_out -- equal-length sequences of equally-sized output vectors.
    """
    total = 0
    for idx, required in enumerate(req_out):
        total += sum((required - nn_out[idx]) ** 2)
    element_count = len(req_out) * len(req_out[0])
    return (total / element_count) ** 0.5
# ---------------------------------------------------------------------------
# Experiment: train four single-layer perceptrons at learning rates
# 1/1..1/4 and plot how many epochs each needs to reach RMSE < 0.06.
# ---------------------------------------------------------------------------

# Target functions to learn: two outputs computed from three inputs.
y1 = lambda x1,x2: x1+x2
y2 = lambda x1,x2,x3: x3 - 2*x1 + x2

CNT_EX = 200   # total number of generated examples
TR_PRC = 0.5   # fraction of examples held out for the test split

# Random integer inputs in [-10, 10), then linearly normalised to [-1, 1].
x = Dataset([[np.random.randint(-10,10), np.random.randint(-10,10),np.random.randint(-10,10)] for i in range(CNT_EX)])
x.tune_up()
x.normalisation_linear([-1,1])
# NOTE(review): y is built from x AFTER x has been normalised in place, so the
# targets are functions of the normalised inputs, not the raw ones — confirm
# this is intended.
y = Dataset([[y1(x[i][0],x[i][1]), y2(x[i][0], x[i][1], x[i][2])] for i in range(CNT_EX)])
y.tune_up()
y.normalisation_linear([-1,1])

# Train/test split: first `end` examples train, the rest test (100/100 here).
end = int(CNT_EX - CNT_EX*TR_PRC)
x_tr = x[:end]
x_tst = x[end:]
y_tr = y[:end]
y_tst = y[end:]

# Plot setup; labels are Russian for "Dependence of epoch count on speed",
# "Learning speed", "Epoch count".
fig, ax = plt.subplots()
ax.set_title("Зависимость количества эпох от скорости")
ax.set_xlabel("Скорость обучения")
ax.set_ylabel("Количество эпох")
ax_x = [x for x in range(100, 1100, 100)]   # NOTE(review): never used
nets = []
tr_err = []        # NOTE(review): never used
tst_err = []       # NOTE(review): never used
epoches_cnt = []   # epochs-to-converge per net
tr_speed = []      # learning rate per net
for i in range(4):
    # Net i uses learning rate 1/(i+1); train one epoch at a time until the
    # training RMSE drops below 0.06, counting the epochs taken.
    nets.append(Percep(3,2, TanH(0.001,0)))
    cnt = 0
    out = [nets[i].predict(ex) for ex in x_tr]
    while(RMSE(y_tr, out) >= 0.06):
        # NOTE(review): no iteration cap — this loop never terminates if the
        # error never reaches 0.06.
        nets[i].train((x_tr,y_tr), 1, 1/(i+1))
        out = [nets[i].predict(ex) for ex in x_tr]
        cnt+=1
    epoches_cnt.append(cnt)
    tr_speed.append(1/(i+1))
ax.plot(tr_speed,epoches_cnt, label ="Error = 0.06")
ax.legend()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement