Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- import reservoir as r
- import equalization as e
- import matplotlib.pyplot as plt
def train_lin_reg(states, targets, l_train, l_test, l_tot):
    """Standard batch offline readout training for reservoir computing.

    Fits linear readout weights by ridge-free least squares on the first
    ``l_train`` columns of ``states``, then evaluates symbol error rates on
    the train and test segments.  (Port of a MATLAB reference routine;
    ridge regression could be added via a ``+ ridge*eye(N)`` term on ``R``.)

    Parameters
    ----------
    states : ndarray, shape (N, l_tot)
        Reservoir state matrix, one column per time step.
    targets : array_like, shape (l_tot,)
        Target symbols; presumably drawn from the odd-integer 4-PAM grid
        {-3, -1, 1, 3} -- TODO(review): confirm against the data generator.
    l_train, l_test, l_tot : int
        Lengths of the training segment, test segment, and their sum.

    Returns
    -------
    (W, rcouts, rcouts_mch, err_train, err_test)
        Readout weights (shape (N,)), raw reservoir outputs, outputs
        quantized to the nearest odd integer, and the two error rates.
    """
    targets = np.asarray(targets)

    # --- compute readout weights (normal equations + pseudo-inverse) ---
    X = states[:, :l_train]                 # reservoir states for training
    R = X.dot(X.T)                          # correlation matrix (N x N)
    P = X.dot(targets[:l_train])            # cross-correlation vector (N,)
    W = P.dot(np.linalg.pinv(R))            # readout weights (N,)

    # --- evaluate system performance ---
    rcouts = W.dot(states)                  # raw reservoir outputs
    # Quantize each output to the nearest odd integer: round(y/2 + 1.5)
    # yields an integer k, and 2k - 3 spans ..., -3, -1, 1, 3, ...
    rcouts_mch = (np.round(rcouts / 2 + 1.5) - 1.5) * 2

    # Symbol error rates on the two segments.
    err_train = np.sum(rcouts_mch[:l_train] != targets[:l_train]) / l_train
    err_test = np.sum(rcouts_mch[l_train:l_tot] != targets[l_train:l_tot]) / l_test

    return W, rcouts, rcouts_mch, err_train, err_test
# ---------------------------------------------------------------------------
# Experiment driver: nonlinear channel equalization with a reservoir computer.
# Ported from a MATLAB reference implementation (P. Antonik, Feb 2018).
# ---------------------------------------------------------------------------

# Sequence lengths, in time steps ("chauffe" = warm-up).
l_chauffe1 = 100   # idle warm-up; NOTE(review): the undriven warm-up loop was
                   # disabled in the original, so these columns remain zero --
                   # confirm this is intended before relying on results.
l_chauffe2 = 200   # driven warm-up, discarded before training
l_train = 1000     # training segment length
l_test = 10000     # test segment length
l_tot = l_train + l_test

# Reservoir hyper-parameters.
N = 100            # reservoir size
spectralRadius = 300
alpha = 0.5        # input scaling
beta = 0.17        # feedback scaling (original note: up to ~1.92 a priori)

W = r.generateMatrix(N, spectralRadius)   # reservoir connectivity matrix
nbrIt = l_chauffe1 + l_chauffe2 + l_tot + 1

# State history: one column per time step, initialized from PHI at t = 0.
PHI = r.initializePHI(N)
allPHIs = np.zeros((len(PHI), nbrIt))
allPHIs[:, 0] = PHI

# Generate the nonlinear-channel equalization task data.
datatargets, datainputs = e.genNLchEQ(l_chauffe2 + l_tot)

# Drive the reservoir: feedback map plus scaled input at each step.
for i in range(l_chauffe1, l_chauffe1 + l_chauffe2 + l_tot):
    allPHIs[:, i + 1] = (r.phiMap(allPHIs[:, i], W, beta, N)
                         + r.dataInput(alpha, datainputs[i - l_chauffe1], N))

# Keep only the l_tot columns after both warm-up phases.
states = allPHIs[:, 1 + l_chauffe1 + l_chauffe2 : l_chauffe1 + l_chauffe2 + l_tot + 1]

# Train the linear readout and report error rates.  The readout weights get
# their own name so they do not shadow the reservoir matrix W above.
W_out, rcouts, rcouts_mch, err_train, err_test = train_lin_reg(
    states, datatargets[l_chauffe2:], l_train, l_test, l_tot)
print("err_train = " + str(err_train))
print("err_test = " + str(err_test))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement