Neural Network Showcase | Alien_Algorithms
Posted by xmd79, Oct 14th, 2023
//@version=5
indicator("Neural Network Showcase | Alien_Algorithms", overlay=true, shorttitle="Neural Showcase | Alien_Algorithms")

lr = input.float(title='Learning Rate', defval=0.1, minval=0.00001)
epochs = input.int(title='Epochs', defval=60, minval=10, maxval=1000)
use_simple_backprop = input(defval=false, title='Simple Backpropagation')

plot_loss_curve = input(true, 'Plot Loss Curve', group='Statistics')
chart_scaling = input.int(title='Chart Scaling Factor', defval=1, minval=1, group='Statistics')
horizontal_offset = input.int(title='Chart Horizontal Offset', defval=100, group='Statistics')
vertical_offset = input.int(title='Chart Vertical Offset (percentage)', defval=-2, group='Statistics')
vertical_offset_pct = 1 + (vertical_offset / 100)

// Track the maximum price seen so far, to be used in normalization
var max_scale = 0.0
max_scale := high > max_scale ? high : max_scale

// Declare the weight matrices; they are filled with random values on the first bar
var w1 = matrix.new<float>(2, 2, 0.0)
var w2 = matrix.new<float>(1, 2, 0.0)

// Fill each element of a matrix with random values, while maintaining reproducibility with a seed.
// This is needed because matrix.new() can only set the same value for the entire matrix.
// The parameter is named `m` rather than `matrix`, so it does not shadow the built-in namespace.
fillMatrixRandomly(m, rows, cols) =>
    seed = 1337 // Any fixed number as a seed will do
    for i = 0 to rows - 1
        for j = 0 to cols - 1
            // The seed is altered for each matrix element to ensure different values
            // while still being repeatable on every run
            matrix.set(m, i, j, math.random(0, 1, seed + i + j * rows))

// Fill w1 and w2 with random values.
// It is important that the weights are not all initialized to the same value: the asymmetry
// gives each weight a different gradient update, which lets the network use all of its weights effectively.
if barstate.isfirst
    fillMatrixRandomly(w1, 2, 2)
    fillMatrixRandomly(w2, 1, 2)

// Sigmoid activation function
sigmoid(x) =>
    1 / (1 + math.exp(-x))
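
// For reference: the sigmoid derivative has the convenient closed form
//   d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)),
// which is why backpropagation_verbose() below can evaluate it from the stored
// hidden activations as hidden_val * (1 - hidden_val) without calling math.exp() again.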

// Mean Squared Error loss function
mse_loss(predicted, actual) =>
    math.pow(predicted - actual, 2)

// Normalize the data between 0 and 1
normalize(data) =>
    data / max_scale

// Revert the data back to the original form
standardize(data) =>
    data * max_scale
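
// Worked example with hypothetical numbers: if max_scale is 100.0, then
// normalize(25.0) = 0.25 and standardize(0.25) = 25.0, i.e. standardize() is the
// exact inverse of normalize(). Keeping values in [0, 1] also matters because
// sigmoid() saturates for large |x|, which would shrink the gradients toward zero.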

// Feed forward through the neural network.
// This process passes the input data through the network and obtains an output.
feedforward(input) =>
    hidden_out = array.new_float(0)

    // Push through the first layer
    for i = 0 to 1
        sum = 0.0
        for j = 0 to 1
            sum := sum + w1.get(i, j) * array.get(input, j)
        array.push(hidden_out, sigmoid(sum))

    // Push through the second layer
    output = 0.0
    for i = 0 to 1
        output := output + w2.get(0, i) * array.get(hidden_out, i)
    output := sigmoid(output)
    [output, hidden_out]
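
// In equation form, the forward pass above computes (no bias terms are used):
//   h_i = sigmoid(sum_j w1[i][j] * x[j])   for i = 0, 1
//   y   = sigmoid(sum_i w2[0][i] * h_i)
// i.e. a fixed 2-input / 2-hidden-neuron / 1-output network.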

// Backpropagation compares the predicted value (obtained from the feedforward) with the actual value,
// and adjusts the weights along the gradient that lowers the error (loss).
backpropagation_simple(input, actual_output, predicted_output, hidden_out) =>

    // Update weights of the second layer
    for i = 0 to 1
        w2.set(0, i, w2.get(0, i) - lr * 2 * (predicted_output - actual_output) * array.get(hidden_out, i))

    // Update weights of the first layer
    for i = 0 to 1
        for j = 0 to 1
            w1.set(i, j, w1.get(i, j) - lr * 2 * (predicted_output - actual_output) * w2.get(0, i) * array.get(input, j))
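
// Note: the "simple" variant above drops every sigmoid-derivative term from the chain
// rule, so it follows only a rough approximation of the true gradient. It is cheaper
// per step and often still converges; the "verbose" variant below restores the
// hidden-layer derivative, though both omit the derivative of the output sigmoid.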

backpropagation_verbose(input, actual_output, predicted_output, hidden_out) =>
    // Derivative of the MSE loss with respect to the output
    float d_loss_d_output = 2 * (predicted_output - actual_output)

    // Update weights of the second layer
    for i = 0 to 1
        float hidden_val = array.get(hidden_out, i)
        float d_loss_d_w2 = d_loss_d_output * hidden_val
        w2.set(0, i, w2.get(0, i) - lr * d_loss_d_w2)

    // Update weights of the first layer.
    // Note: w2 was already updated above, so these gradients read the new second-layer weights.
    for i = 0 to 1
        for j = 0 to 1
            float input_val = array.get(input, j)
            float w2_val = w2.get(0, i)
            float hidden_val = array.get(hidden_out, i)
            float sigmoid_derivative = hidden_val * (1 - hidden_val)
            float d_loss_d_w1 = d_loss_d_output * w2_val * sigmoid_derivative * input_val
            w1.set(i, j, w1.get(i, j) - lr * d_loss_d_w1)
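
// The chain rule applied above for a first-layer weight w1[i][j] expands to:
//   dL/dw1[i][j] = dL/dy * dy/dh_i * dh_i/dz_i * dz_i/dw1[i][j]
//                = 2 * (y - target) * w2[0][i] * h_i * (1 - h_i) * x[j]
// where z_i is the pre-activation sum of hidden neuron i (the output-layer
// sigmoid derivative y * (1 - y) is left out, as noted above).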

// A wrapper function that trains the neural network with the set parameters (Learning Rate, Epochs)
train_nn(input, actual_output) =>
    loss_curve = array.new<float>(0)

    for epoch = 1 to epochs
        // Predicting
        [predicted_output, hidden_out] = feedforward(input)
        loss = mse_loss(predicted_output, actual_output)

        // Metrics
        log.warning("~~~~ Epoch {0} ~~~~", epoch)
        log.info("Loss: {0}", str.tostring(loss))
        array.push(loss_curve, loss)

        // Weight adjustment (training)
        if use_simple_backprop
            backpropagation_simple(input, actual_output, predicted_output, hidden_out)
        else
            backpropagation_verbose(input, actual_output, predicted_output, hidden_out)

    loss_curve
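
// Note: each call to train_nn() fits the network to a single (input, target) pair,
// so "epochs" here means repeated gradient steps on one sample, not passes over a
// dataset. Per-epoch loss is written to the Pine Logs pane via log.warning()/log.info().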

// Define the input and output variables that the network will use
float[] input = array.new_float(0)
array.push(input, normalize(close[1]))
array.push(input, normalize(close[2]))
actual_output = normalize(close)
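
// The learning task: given the two previous normalized closes, predict the current
// normalized close. Inputs and target share the same max_scale divisor, so the
// network operates entirely in [0, 1] space.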

// Perform training only on the last confirmed bar to save resources
float predicted_output = na
if barstate.islastconfirmedhistory
    // Training updates all the weights and returns the loss for each epoch in an array, which can then be visualized
    loss_curve = train_nn(input, actual_output)

    // Get the neural network output
    [predicted_output_normalized, _] = feedforward(input)
    predicted_output := standardize(predicted_output_normalized)

    // Log the final trained weights
    log.error(str.tostring(w1))
    log.error(str.tostring(w2))

    // ~~~~~~~~~~~~~ Plot the neural network output ~~~~~~~~~~~~~
    label.new(bar_index + 40, predicted_output, text='Predicted Output', style=label.style_label_lower_left, color=color.purple, textcolor=color.white, tooltip='The price which the neural network predicted')
    line.new(bar_index, predicted_output, bar_index + 40, predicted_output, color=color.purple, width=4)

    label.new(bar_index + 40, standardize(actual_output), text='Actual Output', style=label.style_label_lower_left, color=color.green, textcolor=color.black, tooltip='The price which the neural network aims to predict')
    line.new(bar_index, standardize(actual_output), bar_index + 40, standardize(actual_output), color=color.green, width=4)

    // Midpoint between the predicted and actual lines, used to anchor the loss label
    mid = math.abs(standardize(actual_output) - predicted_output) / 2
    mid := predicted_output > standardize(actual_output) ? predicted_output - mid : predicted_output + mid

    label.new(bar_index + 10, mid, text='MSE Loss: ' + str.tostring(array.get(loss_curve, epochs - 1)), color=color.rgb(195, 195, 195), style=label.style_label_left, textcolor=color.black, tooltip='The rate of error between the prediction and the actual value')
    line.new(bar_index + 10, predicted_output, bar_index + 10, standardize(actual_output), color=color.rgb(177, 177, 177), style=line.style_dashed, width=1)

    if plot_loss_curve
        size = array.size(loss_curve)
        float max_loss = array.max(loss_curve)
        float min_loss = array.min(loss_curve)
        float loss_range = max_loss - min_loss

        float chart_range = (high - low) / chart_scaling
        float scaling_factor = chart_range / loss_range

        var points = array.new<chart.point>()
        for i = 0 to size - 1
            float normalized_loss = (array.get(loss_curve, i) - min_loss) / loss_range // Normalize to [0, 1]
            float scaled_loss = normalized_loss * scaling_factor // Scale to the chart range
            float shifted_loss = scaled_loss + (close * vertical_offset_pct) // Shift to match chart values
            // polyline.new() defaults to xloc.bar_index, so the point's index field drives its x position
            point = chart.point.new(time + i + horizontal_offset, bar_index + i + horizontal_offset, shifted_loss)
            points.push(point)

        float first_loss = (array.get(loss_curve, 0) - min_loss) / loss_range * scaling_factor + (close * vertical_offset_pct)
        float last_loss = (array.get(loss_curve, size - 1) - min_loss) / loss_range * scaling_factor + (close * vertical_offset_pct)

        label.new(bar_index + horizontal_offset + size, last_loss - 0.01 * scaling_factor, text='Loss Curve', style=label.style_label_upper_left, color=color.rgb(87, 87, 87), textcolor=color.rgb(255, 255, 255), tooltip='The MSE Loss (y-axis) plotted over the epoch iterations (x-axis)')
        box.new(bar_index + horizontal_offset, first_loss, bar_index + horizontal_offset + size, last_loss - 0.01 * scaling_factor, bgcolor=color.rgb(120, 123, 134, 81), border_width=3)
        polyline.new(points, curved=true, line_color=color.rgb(194, 208, 0), line_width=1)