import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from random import random
def activation_function(x, beta=1.0):
    return 1 / (1 + np.exp(-beta * x))  # Sigmoid function with slope parameter beta
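# Its derivative, which the weight updates in Perceptron.fit below rely on:
# d/dx sigmoid(beta * x) = beta * sigmoid(beta * x) * (1 - sigmoid(beta * x))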
class Perceptron:
    def __init__(self, learning_rate=0.003, n_iters=15041, eps=0.1, return_stop=False, beta=1.0):
        self.lr = learning_rate
        self.n_iters = n_iters
        self.weights = None
        self.bias = None
        self.eps = eps                    # tolerance: |target - output| > eps counts as a miss
        self.return_stop = return_stop    # if True, fit() returns the stopping epoch
        self.beta = beta
        self.activation_function = lambda x: activation_function(x, self.beta)
        self.misclassified_counts = []    # misses per epoch, for plotting

    def fit(self, X, Y):
        Y = np.asarray(Y).ravel()         # accept (n, 1) targets as well as (n,)
        n_samples, n_features = X.shape
        self.weights = np.array([random() for _ in range(n_features)])
        self.bias = random()
        for iter_ in range(self.n_iters):
            n_misclassified = 0
            for index, x_i in enumerate(X):
                linear_output = np.dot(x_i, self.weights) + self.bias
                y_predicted = self.activation_function(linear_output)
                error = Y[index] - y_predicted
                # Delta rule with the sigmoid derivative beta * y * (1 - y)
                self.weights += self.lr * error * y_predicted * (1 - y_predicted) * x_i * self.beta
                self.bias += self.lr * error * y_predicted * (1 - y_predicted) * self.beta
                # Check the stopping condition
                if np.abs(error) > self.eps:
                    n_misclassified += 1
            self.misclassified_counts.append(n_misclassified)
            if n_misclassified == 0:
                print(f"Training stopped at epoch {iter_}")
                if self.return_stop:
                    return iter_
                break
        if self.return_stop:
            return iter_

    def predict(self, x):
        linear_output = np.dot(x, self.weights) + self.bias
        y_predicted = self.activation_function(linear_output)
        return y_predicted
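# Added sketch: a quick sanity check on a tiny linearly separable problem
# (logical OR), which the class should learn easily. The names or_inputs,
# or_targets and sanity_model are illustrative, not part of the original script.
or_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
or_targets = np.array([0, 1, 1, 1])
sanity_model = Perceptron(learning_rate=1.0, n_iters=10000, eps=0.3)
sanity_model.fit(or_inputs, or_targets)
print("OR predictions:", np.round(sanity_model.predict(or_inputs), 2))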
N = 100   # number of samples
M = 10    # number of binary features
data = np.random.randint(0, 2, size=(N, M))
# Binary label: 1 if the majority of the M bits are ones, else 0
target = np.where(np.mean(data, axis=1) < 0.5, 0, 1)
bias = np.ones((N, 1))   # explicit bias column (the model also keeps its own bias term)
data_with_bias = np.hstack((data, bias))
df = pd.DataFrame(data_with_bias, columns=[f'x{i}' for i in range(1, M + 1)] + ['bias'])
df['target'] = target    # binary majority label defined above
print(df)
x_train, x_test, y_train, y_test = train_test_split(df.iloc[:, :-1], df[['target']], test_size=0.2, random_state=42)
model = Perceptron(learning_rate=0.01, n_iters=1000, eps=0.1, beta=2)
model.fit(X=x_train.to_numpy(), Y=y_train.to_numpy())
def get_results(model, x, y):
    y_pred = model.predict(x.to_numpy())
    preds_df = y.copy()
    preds_df['preds'] = y_pred
    preds_df['error'] = np.abs(preds_df['preds'] - preds_df['target'])
    return preds_df

y_pred = model.predict(x_test.to_numpy())
y_pred_train = model.predict(x_train.to_numpy())
preds_train_df = get_results(model, x_train, y_train)
preds_df = get_results(model, x_test, y_test)
print(f"Err in train = {len(preds_train_df.loc[preds_train_df['error'] > 0.1])}")
print(f"Err in test = {len(preds_df.loc[preds_df['error'] > 0.1])}")
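# Added sketch: since the targets are binary, the imported accuracy_score can
# report thresholded accuracy alongside the eps-based error counts above.
# The 0.5 threshold is an assumption, not part of the original script.
print(f"Train accuracy = {accuracy_score(y_train['target'], (y_pred_train > 0.5).astype(int))}")
print(f"Test accuracy = {accuracy_score(y_test['target'], (y_pred > 0.5).astype(int))}")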
print(preds_train_df)
print(preds_df)
print(preds_train_df.describe())
print(preds_df.describe())
plt.figure(figsize=(10, 5))
plt.title(f'Misclassification count vs. training epoch (lr={model.lr})')
plt.plot(np.arange(1, len(model.misclassified_counts) + 1), model.misclassified_counts, '-')
plt.xlabel('Epoch number')
plt.ylabel('Misclassification count')
plt.grid()
plt.show()
learning_rates = np.linspace(0.001, 6, 500)
epochs_needed = []
for lr in learning_rates:
    print(lr)  # progress output
    model = Perceptron(learning_rate=lr, n_iters=500, eps=0.1, beta=1, return_stop=True)
    epochs = model.fit(X=x_train.to_numpy(), Y=y_train.to_numpy())
    epochs_needed.append(epochs)
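# Note: a run that never reaches zero misclassifications falls through to the
# final return in fit(), so a value pinned at n_iters - 1 (here 499) means
# "did not converge within the budget", not "converged at epoch 499".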
plt.figure(figsize=(10, 6))
plt.plot(learning_rates, epochs_needed)
plt.xlabel("Learning rate")
plt.ylabel("Number of epochs")
plt.title("Epochs to convergence vs. learning rate")
plt.grid(True)
plt.show()