Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # -*- coding: utf-8 -*-
- """Перцептрон.ipynb
- Automatically generated by Colaboratory.
- Original file is located at
- https://colab.research.google.com/drive/1B6_exDJbrp6sk0ABTZtsPHiIluxq0iLL
- """
# Name of the dataframe column used as the regression target by the
# training script at the bottom of this file.
target = "Price"
import numpy as np
import pandas as pd
class Layer:
    """A fully connected (dense) layer: outputs = inputs @ weights + bias."""

    def __init__(self, input_size, output_size):
        # Weights drawn from a standard normal; bias starts at zero.
        self.weights = np.random.randn(input_size, output_size)
        self.bias = np.zeros((1, output_size))
        # Caches filled in by forward()/backward().
        self.inputs = None
        self.outputs = None
        self.d_weights = None
        self.d_bias = None

    def forward(self, inputs):
        """Affine forward pass; caches the inputs for the backward pass."""
        self.inputs = inputs
        self.outputs = np.dot(inputs, self.weights) + self.bias
        return self.outputs

    def backward(self, d_outputs, learning_rate):
        """Backpropagate d_outputs, take one SGD step, return d_inputs.

        The gradient w.r.t. the inputs is computed with the *pre-update*
        weights, then the parameters are updated in place.
        """
        d_inputs = np.dot(d_outputs, self.weights.T)
        # FIX: the original used self.inputs.reshape(-1, 1), which is only
        # correct for a single (0-/1-D) sample; for a 2-D batch it yields a
        # (B*in, 1) matrix and a wrong gradient.  atleast_2d(...).T is
        # identical for the single-sample case and sums gradients over the
        # batch axis for 2-D input.
        self.d_weights = np.dot(np.atleast_2d(self.inputs).T, d_outputs)
        self.d_bias = np.sum(d_outputs, axis=0, keepdims=True)
        # SGD update of weights and bias.
        self.weights -= learning_rate * self.d_weights
        self.bias -= learning_rate * self.d_bias
        return d_inputs
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^(-x))."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def sigmoid_derivative(x):
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x)).

    The sigmoid is evaluated once and reused; the original called
    sigmoid(x) twice, doubling the exp() work.
    """
    s = 1 / (1 + np.exp(-x))  # sigmoid(x), inlined so it is computed once
    return s * (1 - s)
class Activation:
    """Element-wise activation wrapper pairing a function with its derivative."""

    def __init__(self, activation_func, activation_derivative):
        self.activation_func = activation_func
        self.activation_derivative = activation_derivative

    def forward(self, inputs):
        # Remember the pre-activation inputs for use in backward().
        self.inputs = inputs
        activated = self.activation_func(inputs)
        return activated

    def backward(self, d_outputs):
        # Chain rule: scale the upstream gradient by f'(inputs).
        local_grad = self.activation_derivative(self.inputs)
        return d_outputs * local_grad
class NeuralNetwork:
    """A sequential stack of layers/activations trained with per-sample SGD."""

    def __init__(self, layers):
        self.layers = layers

    def forward_propagation(self, X):
        """Push X through every layer in order and return the final output."""
        activation = X
        for layer in self.layers:
            activation = layer.forward(activation)
        return activation

    def back_propagation(self, d_output, learning_rate):
        """Propagate the output gradient backwards through the stack."""
        for layer in reversed(self.layers):
            if isinstance(layer, Layer):
                # Trainable layers also take the learning rate for their update.
                d_output = layer.backward(d_output, learning_rate)
            else:
                d_output = layer.backward(d_output)

    def mse(self, y_true, y_pred):
        """Mean squared error between targets and predictions."""
        return np.mean((y_true - y_pred) ** 2)

    def fit(self, X, y, learning_rate, epochs):
        """Train with stochastic gradient descent, one sample at a time."""
        for epoch in range(epochs):
            total_error = 0
            for sample, expected in zip(X, y):
                outputs = self.forward_propagation(sample)
                total_error += self.mse(expected, outputs)
                # Loss gradient at the output (MSE up to a constant factor).
                self.back_propagation(outputs - expected, learning_rate)
            mean_error = total_error / len(X)
            print(f'Epoch {epoch + 1}/{epochs}, Mean Squared Error: {mean_error}')

    def guess(self, x):
        """Run a forward pass on x and return the network's prediction."""
        output = self.forward_propagation(x)
        return output
- # input_size = 2
- # hidden_size = 2
- # output_size = 1
- # layer1 = Layer(input_size, hidden_size)
- # activation1 = Activation(sigmoid, sigmoid_derivative)
- # layer2 = Layer(hidden_size, output_size)
- # neural_network = NeuralNetwork([layer1, activation1, layer2])
- # x = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [0.5, 0.5]])
- # y = np.array([[0], [1], [1], [0], [0]])
- # learning_rate = 0.1
- # epochs = 1000
- # neural_network.fit(x, y, learning_rate, epochs)
- # # Making a prediction
- # new_sample = np.array([[1, 0]])
- # prediction = neural_network.guess(new_sample)
- # print("Prediction for", new_sample, ":", prediction)
import numpy as np

# Toy regression problem: learn sin(x) for x in [0, 2*pi].
x = np.linspace(0, 2*np.pi, 100)   # 100 evenly spaced input values
y = np.sin(x).reshape(-1, 1)       # matching sine values as a column vector

# Architecture: 1 input -> 2 hidden (sigmoid) -> 1 output.
input_size, hidden_size, output_size = 1, 2, 1

layer1 = Layer(input_size, hidden_size)
activation1 = Activation(sigmoid, sigmoid_derivative)
layer2 = Layer(hidden_size, output_size)
neural_network = NeuralNetwork([layer1, activation1, layer2])

learning_rate = 0.1
epochs = 1000
neural_network.fit(x, y, learning_rate, epochs)

# Making a prediction: the sine of pi/2 (true value: 1.0).
new_sample = np.array([[np.pi/2]])
prediction = neural_network.guess(new_sample)
print("Prediction for", new_sample, ":", prediction)
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
# Read the raw dataset from disk.
df = pd.read_csv("data.csv")
# One-hot encode the categorical 'Brand' column (dropping the first level
# to avoid collinearity).  NOTE(review): the Brand dummies are created but
# never used below -- only Storage_Capacity feeds the model; confirm intent.
encoded_data = pd.get_dummies(df, columns=['Brand'], prefix='Brand', drop_first=True)
# Split the data into the feature matrix X and the target vector y.
X = encoded_data[["Storage_Capacity"]].values
scaler = StandardScaler()
X = scaler.fit_transform(X)  # standardize the feature to zero mean / unit variance
print(X)
y = encoded_data[target].values  # `target` is the module-level column name ("Price")
# Layer sizes: 1 input -> 8 -> 16 -> 8 -> 1 output.
input_size = X.shape[1]
hidden_size1 = 8
hidden_size2 = 16
hidden_size3 = 8
output_size = 1
# Build the layers and their sigmoid activations.
layer1 = Layer(input_size, hidden_size1)
activation1 = Activation(sigmoid, sigmoid_derivative)
layer2 = Layer(hidden_size1, hidden_size2)
activation2 = Activation(sigmoid, sigmoid_derivative)
layer3 = Layer(hidden_size2, hidden_size3)
activation3 = Activation(sigmoid, sigmoid_derivative)
layer4 = Layer(hidden_size3, output_size)
# Assemble the network from the stacked layers and activations.
neural_network = NeuralNetwork([layer1, activation1, layer2, activation2, layer3, activation3, layer4])
# Train the network with per-sample SGD.
learning_rate = 0.1
epochs = 1000
neural_network.fit(X, y, learning_rate, epochs)
res = neural_network.guess(X[0])
print(res)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement