K1SR

zad

Dec 8th, 2023
import numpy as np
import matplotlib.pyplot as plt

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)
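
# Note (added): sigmoid_derivative expects the *output* of sigmoid (the activation
# value), not the pre-activation input, since sigma'(z) = sigma(z) * (1 - sigma(z)).
# For example, sigmoid_derivative(sigmoid(0.0)) == 0.25, the slope of the sigmoid at z = 0.
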
class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.weights_input_hidden = np.random.rand(input_size, hidden_size)
        self.biases_hidden = np.zeros((1, hidden_size))
        self.weights_hidden_output = np.random.rand(hidden_size, output_size)
        self.biases_output = np.zeros((1, output_size))

        # Initialization of the variables for Adam
        self.m_input_hidden = np.zeros_like(self.weights_input_hidden)
        self.v_input_hidden = np.zeros_like(self.weights_input_hidden)
        self.m_biases_hidden = np.zeros_like(self.biases_hidden)
        self.v_biases_hidden = np.zeros_like(self.biases_hidden)

        self.m_hidden_output = np.zeros_like(self.weights_hidden_output)
        self.v_hidden_output = np.zeros_like(self.weights_hidden_output)
        self.m_biases_output = np.zeros_like(self.biases_output)
        self.v_biases_output = np.zeros_like(self.biases_output)

        # Parameters for the Adam algorithm
        self.beta1 = 0.83
        self.beta2 = 0.94
        self.epsilon = 1e-8
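
    # Note (added): the common Adam defaults are beta1 = 0.9 and beta2 = 0.999; the
    # smaller values above make the moving averages react faster but are a
    # non-standard choice. This implementation also keeps no timestep counter, so the
    # usual bias-correction terms are omitted (see the sketch after the class).
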
    def forward(self, inputs):
        self.hidden_layer_input = np.dot(inputs, self.weights_input_hidden) + self.biases_hidden
        self.hidden_layer_output = sigmoid(self.hidden_layer_input)

        self.output_layer_input = np.dot(self.hidden_layer_output, self.weights_hidden_output) + self.biases_output
        self.predicted_output = sigmoid(self.output_layer_input)

        return self.predicted_output
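
    # Example (added, assumed shapes): with input_size=1, hidden_size=5 and
    # output_size=1, forward(np.array([[0.3]])) returns a (1, 1) array whose value
    # lies in (0, 1), because the output layer also uses a sigmoid activation.
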
    def backward(self, inputs, targets, learning_rate):
        error = targets - self.predicted_output

        output_delta = error * sigmoid_derivative(self.predicted_output)
        hidden_error = output_delta.dot(self.weights_hidden_output.T)
        hidden_delta = hidden_error * sigmoid_derivative(self.hidden_layer_output)

        # Adam updates
        self.m_input_hidden = self.beta1 * self.m_input_hidden + (1 - self.beta1) * inputs.T.dot(hidden_delta)
        self.v_input_hidden = self.beta2 * self.v_input_hidden + (1 - self.beta2) * (inputs.T.dot(hidden_delta) ** 2)
        self.weights_input_hidden += (learning_rate * self.m_input_hidden) / (np.sqrt(self.v_input_hidden) + self.epsilon)

        self.m_biases_hidden = self.beta1 * self.m_biases_hidden + (1 - self.beta1) * np.sum(hidden_delta, axis=0, keepdims=True)
        self.v_biases_hidden = self.beta2 * self.v_biases_hidden + (1 - self.beta2) * (np.sum(hidden_delta, axis=0, keepdims=True) ** 2)
        self.biases_hidden += (learning_rate * self.m_biases_hidden) / (np.sqrt(self.v_biases_hidden) + self.epsilon)

        self.m_hidden_output = self.beta1 * self.m_hidden_output + (1 - self.beta1) * self.hidden_layer_output.T.dot(output_delta)
        self.v_hidden_output = self.beta2 * self.v_hidden_output + (1 - self.beta2) * (self.hidden_layer_output.T.dot(output_delta) ** 2)
        self.weights_hidden_output += (learning_rate * self.m_hidden_output) / (np.sqrt(self.v_hidden_output) + self.epsilon)

        self.m_biases_output = self.beta1 * self.m_biases_output + (1 - self.beta1) * np.sum(output_delta, axis=0, keepdims=True)
        self.v_biases_output = self.beta2 * self.v_biases_output + (1 - self.beta2) * (np.sum(output_delta, axis=0, keepdims=True) ** 2)
        self.biases_output += (learning_rate * self.m_biases_output) / (np.sqrt(self.v_biases_output) + self.epsilon)
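
    # Note (added): because error = targets - predicted_output, the deltas above are
    # the *negative* gradients of the 0.5 * (targets - predicted)^2 loss, which is
    # why the parameters are updated with '+=' rather than '-='.
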
    def train(self, inputs, targets, epochs, learning_rate):
        loss_history = []

        for epoch in range(epochs):
            predicted_output = self.forward(inputs)

            loss = np.mean(0.5 * (targets - predicted_output) ** 2)
            loss_history.append(loss)

            self.backward(inputs, targets, learning_rate)

            if epoch % 100 == 0:
                print(f"Epoch {epoch}, Loss: {loss}")

        return loss_history
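
# A minimal sketch (added, not part of the original code) of a standard Adam step
# with bias correction, for comparison with the updates inside backward(). The
# function name adam_step and its arguments are illustrative only.
def adam_step(param, grad, m, v, t, lr, beta1=0.9, beta2=0.999, eps=1e-8):
    # grad is assumed to be the negative gradient, matching the '+=' convention above
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    m_hat = m / (1 - beta1 ** t)   # bias correction of the first moment
    v_hat = v / (1 - beta2 ** t)   # bias correction of the second moment
    param = param + lr * m_hat / (np.sqrt(v_hat) + eps)
    return param, m, v
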

np.random.seed(42)
X = 2 * np.random.rand(100, 1)
y = 2 * X**2 + 3 * X + np.random.randn(100, 1)

# Data normalization
X_normalized = (X - X.mean()) / X.std()
y_normalized = (y - y.mean()) / y.std()

# Hyperparameters of the NN, i.e., the parameters of its architecture
input_size = 1
hidden_size = 5
learning_rate = 0.2
output_size = 1
epochs = 600

# Building the network model
model = NeuralNetwork(input_size, hidden_size, output_size)
loss_history = model.train(X_normalized, y_normalized, epochs, learning_rate)

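# Usage note (added): loss_history holds one loss value per epoch, so the final
# training loss can be inspected directly, e.g.:
# print(f"Final training loss: {loss_history[-1]:.6f}")
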
# Display of how the output error changed during the training process
plt.plot(loss_history)
plt.title('Adam Training Loss')
plt.show()

# Testing the network on data the network "has not seen", i.e., data it was not trained on
X_test = np.linspace(X_normalized.min(), X_normalized.max(), 100).reshape(-1, 1)
y_pred_gd = model.forward(X_test)

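# Note (added): the predictions above are on the normalized scale. Assuming the same
# statistics used for normalization, they can be mapped back to the original units
# of y like this (illustrative, not in the original script):
# y_pred_original = y_pred_gd * y.std() + y.mean()
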
# Side-by-side display of the true output data and what the network predicted
plt.figure(figsize=(10, 6))
plt.scatter(X_normalized, y_normalized, label='True Data')
plt.plot(X_test, y_pred_gd, label='Adam NN prediction', linewidth=2)

plt.title('Neural Network Regression')
plt.xlabel('Normalized X')
plt.ylabel('Normalized y')
plt.legend()
plt.show()