Hexadroid

node 1

Sep 13th, 2020
866
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. #pragma hdrstop
  2. #pragma argsused
  3.  
  4. #ifdef _WIN32
  5. #include <tchar.h>
  6. #else
  7.   typedef char _TCHAR;
  8.   #define _tmain main
  9. #endif
  10.  
  11. #include <stdio.h>
  12. #include <math.h>
  13.  
  14.  
/* Shared mutable state for the whole program: every node's input, weight,
   pre-activation ("part"), output, and the per-layer cost/gradient terms
   live at file scope and are read/written by _tmain's training loop. */
int i;                                   /* global loop counter; also reused by the final scanf */
const long double version = 0.000000000000002;   /* informal version tag; not used by the visible code */
const double learning_rate = 0.1;        /* step size for all weight updates */
double bias = 0.025;                     /* single bias value added to every node's weighted input */
/* cost value, its derivative, and per-node gradients used by backpropagation */
double node_c_cost_function, node_c_cost_function_derivative,node_c_cost_function_gradient ,node_b_cost_function_gradient, node_a_cost_function_gradient;
double node_a_input, node_b_input, node_c_input;
double node_a_weight, node_b_weight, node_c_weight;
double node_a_output, node_b_output, node_c_output, node_u_cost_function_gradient, node_w_cost_function_gradient;
double node_u_input, node_w_input, node_u_weight, node_w_weight, node_u_output, node_w_output;
/* "*_part" = a node's weighted input before the activation function is applied */
double node_c_desired_output, node_u_part, node_b_part, node_w_part, node_c_part;

/* per-layer cost terms; each is 2*(desired - pre-activation) as computed in the loop */
double layer1_C_node_a, layer1_C_node_u, layer2_C_node_w, layer2_C_node_b, layer1_C, layer2_C, layer3_C;

/* output-node cost gradient, recomputed per layer in the loop body */
double node_c_cost_function_gradient_layer3, node_c_cost_function_gradient_layer2,node_c_cost_function_gradient_layer1 ;

double node_a_part, layer3_C_node_c;

//                                                "node b"
//NETWORK SIMPLE         "node a"  input->O-----------O------------\
//                                             x                    O "node c" --> output
//NETWORK SIMPLE         "node u"  input->O-----------O------------/
//                                                "node w"

//               x:the output of node a also goes to node w, the output of node u also goes to node b.
//               in this case, both the 2 outputs of "node u" are exactly the same. will be another test to see what happens in case there are 2 different outputs per input-node.

//               code 'seems' to be working, but sometimes this is an illusion ; in many cases some parameters are not perfectly set or mixed up, so testing is necessary.
  43.  
  44. double another_node_function (double x)
  45. {
  46.   return exp(x)-exp(-x)/(exp(x)+exp(-x));
  47. }
  48.  
  49. double another_node_function_derivative (double x)
  50. {
  51.     return 4/pow((exp(x)+exp(-x)),2);
  52. }
  53.  
  54.  
  55. double node_x_function(double x)
  56. {
  57.     return 1 / (1 + exp(-(x)));
  58. }
  59.  
  60. double node_x_function_derivative(double x)
  61. {
  62.      return x * (1 - x);
  63. }
  64.  
/*
 * Entry point: trains the tiny 5-node network (a,u -> b,w -> c) on one
 * fixed (input, target) example for ~1.73 million iterations, printing
 * node c's output after iteration 1690.
 *
 * All state is in file-scope globals; each loop pass does one forward
 * pass through the three layers, then one heuristic backward pass that
 * nudges every weight by learning_rate * (its gradient term).
 */
int _tmain(int argc, _TCHAR* argv[])
{
    /* Fixed training example: constant inputs and a constant target. */
    node_a_input = 0.02;
    node_u_input = 0.09;
    /* Hand-picked initial weights for the five nodes. */
    node_a_weight = 0.32; node_b_weight = 0.15; node_c_weight = 0.45;node_u_weight = 0.8; node_w_weight = 0.55;
    node_c_desired_output = 0.005;

  /* Training loop: one forward + one backward pass per iteration. */
  for (i=1; i < 1730000; i++) {

                        /* ---- forward pass, layer 1 (input nodes a and u) ---- */
                        //node_a_input is constantly the same, example.
                        node_a_part = node_a_input*node_a_weight + bias;
                        node_a_output = node_x_function(node_a_part);

                        //node_u_input is constantly the same, example.
                        node_u_part =  node_u_input*node_u_weight + bias;
                        node_u_output = node_x_function(node_u_part);

                                /* layer-1 cost terms: 2*(target - pre-activation).
                                   NOTE(review): these compare the target against the
                                   *_part values (pre-activation), not the node outputs —
                                   confirm that is intentional. */
                                layer1_C_node_a = 2*(node_c_desired_output - node_a_part);
                                layer1_C_node_u = 2*(node_c_desired_output - node_u_part);
                                layer1_C        = layer1_C_node_a + layer1_C_node_u;

                        /* ---- forward pass, layer 2 (hidden nodes b and w) ----
                           Both hidden nodes receive the same fan-in: a's output + u's output
                           (the "x" crossover in the diagram at the top of the file). */
                        node_b_input = node_a_output+ node_u_output;
                        node_b_part  = node_b_input*node_b_weight + bias;
                        node_b_output = node_x_function(node_b_part);

                        node_w_input = node_a_output+node_u_output;
                        node_w_part  =  node_w_input*node_w_weight + bias;
                        node_w_output = another_node_function(node_w_part);   //use another function for w-node

                                layer2_C_node_w = 2*(node_c_desired_output - node_w_part);//pow(node_c_desired_output - node_w_part,2);
                                layer2_C_node_b = 2*(node_c_desired_output - node_b_part);//pow(node_c_desired_output - node_b_part,2);
                                layer2_C    = layer2_C_node_w + layer2_C_node_b;

                        /* ---- forward pass, layer 3 (output node c) ---- */
                        node_c_input =  node_b_output+node_w_output;
                        node_c_part =  node_c_input*node_c_weight + bias;
                        node_c_output = node_x_function(node_c_part);
                                layer3_C_node_c = 2*(node_c_desired_output - node_c_part);
                                layer3_C = layer3_C_node_c; // +layer3_C_node_x +layer3_C_node_y etc..

                        //needs forward propagation in here, tbd.

                        //

                        //multi layer 'network' , backward propagation test phase
                        /* squared-error cost at the output and its derivative wrt c's output */
                        node_c_cost_function = pow(node_c_desired_output - node_c_output,2);
                        node_c_cost_function_derivative = 2*(node_c_desired_output - node_c_output);

                        /* per-layer gradient terms at node c.
                           NOTE(review): the layer2/layer1 variants feed the *summed layer
                           cost* (layer2_C / layer1_C) back into 2*(target - ...), i.e. a
                           cost-of-a-cost — this matches the file's "test phase" comment but
                           is not standard backprop; verify before reuse. */
                        node_c_cost_function_gradient_layer3 = node_x_function_derivative(node_c_output) * node_c_cost_function_derivative;
                        node_c_cost_function_gradient_layer2 = node_x_function_derivative(node_c_output) * 2*(node_c_desired_output-layer2_C);
                        node_c_cost_function_gradient_layer1 = node_x_function_derivative(node_c_output) * 2*(node_c_desired_output-layer1_C) ;

                        /* hidden-layer gradients, chained through node c's weight.
                           NOTE(review): another_node_function_derivative is given
                           node_w_input (the fan-in), not node_w_part (the value the
                           activation actually received) — likely a mix-up to confirm. */
                        node_b_cost_function_gradient =  node_x_function_derivative(node_b_output) *node_c_weight * node_c_cost_function_gradient_layer2;
                        node_w_cost_function_gradient =  another_node_function_derivative(node_w_input)  *node_c_weight * node_c_cost_function_gradient_layer2;

                        /* input-layer gradients, chained through the layer-2 and layer-3 weights.
                           NOTE(review): node_x_function_derivative expects an activation
                           OUTPUT (y*(1-y)) but is passed the raw inputs here — another
                           suspected mix-up flagged by the file's own "testing is
                           necessary" comment. */
                        node_a_cost_function_gradient =  node_x_function_derivative(node_a_input) *node_b_weight*   node_c_weight * node_c_cost_function_gradient_layer1;
                        node_u_cost_function_gradient =  node_x_function_derivative(node_u_input) *node_w_weight* node_c_weight *  node_c_cost_function_gradient_layer1;

                        /* weight updates: each gradient term already carries the sign of
                           (target - output), so ADDING learning_rate * gradient moves the
                           output toward the target. */
                        node_c_weight = node_c_weight + learning_rate * node_c_cost_function_gradient_layer3;

                        node_b_weight = node_b_weight + learning_rate * node_b_cost_function_gradient ;
                        node_w_weight = node_w_weight + learning_rate * node_w_cost_function_gradient ;

                        node_a_weight = node_a_weight + learning_rate * node_a_cost_function_gradient ;
                        node_u_weight = node_u_weight + learning_rate * node_u_cost_function_gradient;

                           /* progress dump: prints every iteration after the 1690th
                              (~1.7 million lines of output over the full run). */
                           if (i>1690) {
                            //printf("\nout: %.15lf %.15lf %.15lf %.15lf %.15lf", node_a_input, node_b_input, node_a_output, node_b_output, node_c_desired_output);
                            //printf("\nout: %.15lf %.15lf", node_c_cost_function, node_c_cost_function_derivative);
                            printf("\noutput %d %.15lf",i, node_c_output);
                           }
                      }

    /* blocks so the console window stays open (IDE-run convenience). */
    scanf("%d",&i);
    return 0;
}
RAW Paste Data