Not a member of Pastebin yet? Sign up — it unlocks many cool features!
# Group the flattened training images by class: D[c] holds the 5 images of
# class c. Assumes train_X is ordered class-by-class, 5 images per class
# (40 classes x 5 images = 200 rows) -- TODO confirm against the loader.
# Comprehension replaces the original nested append loops.
D = [[train_X[class_number * 5 + img] for img in range(5)]
     for class_number in range(40)]
print(np.shape(D))
# Per-class mean image (one vector per class) and the global mean.
means = [np.mean(current_class, axis=0) for current_class in D]
print(np.shape(means))
# keepdims keeps the global mean 2-D, shape (1, d), so that differences
# against it below stay row vectors (same as the original np.array([...])).
overall_mean = np.mean(train_X, axis=0, keepdims=True)
print(np.shape(overall_mean))
nk = 5  # images per class
# Between-class scatter: Sb = sum_k nk * (mu_k - mu)(mu_k - mu)^T
Sb = np.zeros((10304, 10304))
for class_mean in means:
    diff = np.ravel(class_mean - overall_mean)
    Sb += nk * np.outer(diff, diff)
print(np.shape(Sb))
# Center every class on its own mean. NumPy broadcasting subtracts mu from
# each of the 5 rows directly, replacing the original np.ones((5, 1)) * mu
# trick (which built an explicit tiled copy of the mean).
Z = np.array([Di - mu for mu, Di in zip(means, D)])
print(np.shape(Z))
# Within-class scatter: S = sum_k Z_k^T Z_k
S = np.zeros((10304, 10304))
for z in Z:
    S += np.dot(z.T, z)
print(np.shape(S))
# S is 10304x10304 but is built from only ~200 sample vectors, so its rank
# is far below its size: it is singular and np.linalg.inv would fail or
# return garbage. Use the Moore-Penrose pseudo-inverse instead.
S_inv = np.linalg.pinv(S)
print(np.shape(S_inv))
A = np.dot(S_inv, Sb)
print(np.shape(A))
# A = S^+ Sb is NOT symmetric in general. np.linalg.eigh assumes a
# Hermitian matrix (it reads only one triangle) and silently returns wrong
# results here -- use the general eig and keep the real parts (any
# imaginary parts are numerical noise for this problem).
eigen_values, eigen_vectors = np.linalg.eig(A)
eigen_values = np.real(eigen_values)
eigen_vectors = np.real(eigen_vectors)
print(np.shape(eigen_values))
print(np.shape(eigen_vectors))
# Sort eigenpairs by decreasing eigenvalue. Eigenvectors are the COLUMNS
# of eigen_vectors, so reorder columns. (The original transposed first and
# then still indexed with [:, idx], which permuted vector components
# instead of reordering the vectors.)
idx = np.argsort(eigen_values)[::-1]
eigen_values = eigen_values[idx]
eigen_vectors = eigen_vectors[:, idx]
# Keep the top 39 discriminant directions, one per row (C - 1 = 39 is the
# maximum number of non-trivial LDA directions for 40 classes). A slice
# replaces the original zip(range(39), ...) append loop.
dominant_eigen_vectors = np.array(eigen_vectors.T[:39])
print(np.shape(dominant_eigen_vectors))
from sklearn.neighbors import KNeighborsClassifier

neighbors = np.array([1, 3, 5, 7])
accuracy = []
# Project both splits onto the 39 discriminant directions.
projected_train = np.dot(train_X, dominant_eigen_vectors.T)
projected_test = np.dot(test_X, dominant_eigen_vectors.T)
for neighbor_num in neighbors:
    print(f'neighbors = {neighbor_num}')
    # Distance-weighted KNN in the projected (LDA) space.
    neigh = KNeighborsClassifier(n_neighbors=neighbor_num, weights='distance')
    neigh.fit(projected_train, train_y)
    # Score the test split once and reuse it; the original called
    # neigh.score(projected_test, ...) twice per iteration.
    test_acc = neigh.score(projected_test, test_y)
    accuracy.append(test_acc)
    print(test_acc, neigh.score(projected_train, train_y), '\n\n')
Add Comment
Please sign in to add a comment.