  1. // https://learn.sparkfun.com/tutorials/efficient-arduino-programming-with-arduino-cli-and-visual-studio-code/all
  2. // https://www.arduino.cc/en/Hacking/libraryTutorial
  3.  
  4. /*
  5.   NeuralNetwork.h - Library for MLP Neural Networks.
  6.   Created by George Chousos, April 11, 2019. *0
  7.   Released into the public domain.
  8.  
  9.   *0: Based On https://www.youtube.com/watch?v=L_PByyJ9g-I
  10. */
  11.  
  12. /*
  13. [On some Arduino boards, the UNO for example, double is the same size as float.]
  14.  
  15.   To be on the safe side, I decided to use float instead of double,
  16.   because the weights may be stored in PROGMEM and pgm_read_float()
  17.   is what reliably works on an Arduino Uno. So, at least for now,
  18.   everything is kept as float rather than double. If you want to use
  19.   double and your microcontroller supports double precision, just
  20.   replace every float with double and use pgm_read_dword instead of
  21.   pgm_read_float.
  22.   Thanks for reading. (:
  23.  
  24. - https://www.arduino.cc/reference/en/language/variables/data-types/double/
  25. - https://forum.arduino.cc/index.php?topic=613873.0
  26. */
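/*
  A minimal sketch of what the note above means in practice (illustrative only;
  the array name myWeights is hypothetical, not part of the library):

      const float myWeights[] PROGMEM = {0.5, -0.25, 0.125};

      void setup() {
          Serial.begin(9600);
          // pgm_read_float() takes the address of a float stored in flash
          // and returns its value as a normal (SRAM) float.
          float w0 = pgm_read_float(&myWeights[0]);
          Serial.println(w0, 6);
      }

      void loop() {}
*/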
  27.  
  28. /*
  29. [Error #777]
  30.    
  31.  NeuralNetwork(const unsigned int *layer_, const float *default_Weights, const float *default_Bias, const unsigned int &NumberOflayers , bool isProgmem)  
  32.  
  33.  Because someone might want to have default or pretrained weights and biases in SRAM rather than in PROGMEM.
  34.  
  35. - https://stackoverflow.com/questions/56024569/arduino-compiler-takes-into-account-const-progmem-type-as-equal-to-const-type
  36. - https://forum.arduino.cc/index.php?topic=614438.0
  37. */
  38.  
  39. // It would be nice to have a list of microcontrollers, or a way to determine the SRAM size etc. with defined(..).
  40. // Defines the ATtiny series of microcontrollers as As__AVR_ATtinyX__
  41. #if defined(__AVR_ATtiny2313__) || defined(__AVR_ATtiny4313__) || defined(__AVR_ATtiny24__) || defined(__AVR_ATtiny44__) || defined(__AVR_ATtiny84__) || defined(__AVR_ATtiny25__) || defined(__AVR_ATtiny45__) || defined(__AVR_ATtiny85__)
  42.     #define As__AVR_ATtinyX__
  43.  
  44.     #if defined(__AVR_ATtiny85__)
  45.         #warning [⚠] Last time I tried backpropagation on an ATtiny85 I had issues [...]
  46.     #endif
  47. #endif
  48.  
  49. // Defines the microcontroller series that (at the moment) have no common Serial support as As__No_Common_Serial_Support
  50. #if defined(As__AVR_ATtinyX__) // or etc.
  51. #define As__No_Common_Serial_Support
  52. #endif
  53.  
  54.  
  55. // - This prevents problems if someone accidentally #includes the library twice.
  56. #ifndef NeuralNetwork_h
  57. #define NeuralNetwork_h
  58.  
  59.  
  60. // - That gives you access to the standard types and constants of the Arduino language.
  61. #include "Arduino.h"
  62. //#include <math.h>
  63.  
  64. // - And code goes here...
  65.  
  66. #if defined(_1_OPTIMIZE)
  67.     #if ((_1_OPTIMIZE bitor B01111111) == B11111111)
  68.         #define USE_PROGMEM
  69.         #define NO_BACKPROP
  70.     #endif
  71.     #if ((_1_OPTIMIZE bitor B10111111) == B11111111)
  72.         #define REDUCE_RAM_DELETE_OUTPUTS
  73.         #define NO_BACKPROP
  74.     #endif  
  75.    
  76.     #if ((_1_OPTIMIZE bitor B11101111) == B11111111)
  77.         #define REDUCE_RAM_WEIGHTS_COMMON
  78.         #define REDUCE_RAM_WEIGHTS_LVL2
  79.         //#warning [⚠] Backpropagating more than once after a FeedForward [...]
  80.     #elif ((_1_OPTIMIZE bitor B11011111) == B11111111)
  81.         #warning [⚠] 00100000 is not implemented yet.
  82.         //#define REDUCE_RAM_WEIGHTS_COMMON
  83.         //#define REDUCE_RAM_WEIGHTS_LVL1
  84.     #endif
  85.  
  86.      #if ((_1_OPTIMIZE bitor B11110111) == B11111111)
  87.         #define REDUCE_RAM_DELETE_PREVIOUS_LAYER_GAMMA
  88.         #warning [⚠] 00001000 is always enabled; not switchable yet.
  89.      #endif
  90.  
  91. #endif
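/*
  A rough map of the _1_OPTIMIZE bit-flags above, based on my reading of the
  #if tests (treat it as an assumption, not a spec):

      bit 7  B10000000  USE_PROGMEM               (also defines NO_BACKPROP)
      bit 6  B01000000  REDUCE_RAM_DELETE_OUTPUTS (also defines NO_BACKPROP)
      bit 5  B00100000  (not implemented yet)
      bit 4  B00010000  REDUCE_RAM_WEIGHTS_LVL2
      bit 3  B00001000  REDUCE_RAM_DELETE_PREVIOUS_LAYER_GAMMA

  For example, a sketch that wants PROGMEM weights plus the flat-weights RAM
  reduction would combine the flags *before* including this header:

      #define _1_OPTIMIZE B10010000
      #include "NeuralNetwork.h"
*/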
  92.  
  93. #define STR_HELPER(x) #x
  94. #define STR(x) STR_HELPER(x)
  95.  
  96.  #define ACT1 0
  97.  #define ACT2 0
  98.  #define ACT3 0
  99.  #define ACT4 0
  100.  
  101.  #if !defined(ACTIVATION_FUNCTION_PER_LAYER)
  102.     //DEFAULT
  103.     // TODO: add a mechanism (bit operations?) to raise an #error if more than one activation is defined.
  104.     #define ACTIVATION // Sigmoid is the default; to use more than one you must declare it.
  105.     #define Sigmoid Sigmoid
  106.     #define ACTIVEATION_FUNCTION Sigmoid
  107.     // ACTIVATION_FUNCTION_PER_LAYER will hold the activation per neuron [2D Array/Matrix] or per layer [1D Array/Vector]
  108. #elif defined(Sigmoid)
  109.     #define ACT1 1
  110.     #define ACTIVATION // Sigmoid is the default; to use more than one you must declare it.
  111.     #define Sigmoid Sigmoid
  112.     #define ACTIVEATION_FUNCTION Sigmoid
  113. #endif
  114.  
  115. #if defined(Tanh)
  116.     #define ACT2 1
  117.     #define ACTIVATION
  118.     #define ACTIVEATION_FUNCTION Tanh
  119.     #define Tanh Tanh
  120. #endif
  121.  
  122. #if defined(ReLU)
  123.     #define ACT3 1
  124.     #define ACTIVATION
  125.     #define ACTIVEATION_FUNCTION ReLU
  126.     #define ReLU ReLU
  127.     #define SUPPORTS_CLIPPING // "supports" as in: usually needs clipping
  128. #endif
  129.  
  130. #if defined(ELU)
  131.     #define ACT4 1
  132.     #define ACTIVATION
  133.     #define ACTIVEATION_FUNCTION ELU
  134.     #define ELU ELU
  135.     #define SUPPORTS_CLIPPING // "supports" as in: usually needs clipping
  136. #endif
  137.  
  138. #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  139.     #if !defined(ACTIVATION)
  140.         #error Define At least 2 Activation Functions e.g. "#define Sigmoid".
  141.     #endif
  142. #endif
  143.  
  144. #define NUM_OF_USED_ACTIVATION_FUNCTIONS (ACT1 + ACT2 + ACT3 + ACT4)
  145. //#pragma message "content : " STR(NUM_OF_USED_ACTIVATION_FUNCTIONS)
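/*
  A rough sketch of how the activation #defines above appear to be chosen from
  the sketch side (my reading of the preprocessor logic; treat it as an
  assumption, not a spec):

      // single activation for the whole network (Sigmoid is the default):
      #define Tanh
      #include "NeuralNetwork.h"

      // per-layer activations (still marked work-in-progress elsewhere in this
      // file) would need at least two activations plus the per-layer switch,
      // and an index array passed to the constructor; indices follow the order
      // in which the defined activations appear in activation_Function_ptrs:
      //
      //     #define Sigmoid
      //     #define ReLU
      //     #define ACTIVATION_FUNCTION_PER_LAYER
      //     #include "NeuralNetwork.h"
      //     byte actPerLayer[] = {1, 0};  // ReLU for layer 0, Sigmoid for layer 1
*/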
  146.  
  147.  
  148. #define MAKE_FUN_NAME1(actname,value) actname(value)
  149. #define ACTIVATE_WITH(actname,value) MAKE_FUN_NAME1(actname,value)
  150.  
  151. #define MAKE_FUN_NAME2(actname,value) actname ## Der(value)
  152. #define DERIVATIVE_OF(actname,value) MAKE_FUN_NAME2(actname,value)
  153.  
  154.  
  155. class NeuralNetwork
  156. {
  157. private:
  158.     // Assumes BackProp won't be called more than once right after a FeedForward call IF REDUCE_RAM_WEIGHTS_LVL2 is defined; otherwise a temporary weights-size variable (or similar) would be needed.
  159.     // TODO: maybe add a function for those who want to switch/redirect the pointer to a different weights array.
  160.     #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  161.         static float *weights; //                              pointer to sketch's        Array of Weights.    #(used if     #REDUCE_RAM_WEIGHTS_LVL2 defined)
  162.         static  int i_j;
  163.     #endif  
  164.  
  165.     static float sumSquaredError;// #3
  166.  
  167.     int Individual_Input = 0;
  168.     bool FIRST_TIME_FDFp = false; // determines whether stale data is left in the last outputs.
  169.     const float *_inputs;         // Pointer to primary/first Inputs Array from Sketch    .
  170.                                   // (Used for backpropagation)                           .
  171.    
  172.     class Layer
  173.     {
  174.     public:
  175.         unsigned int _numberOfInputs;  // # of neurons in the previous layer.
  176.         unsigned int _numberOfOutputs; // # of neurons in the current  layer.
  177.  
  178.         float *bias;         // bias    of this     layer
  179.         float *outputs;      // outputs of this     layer  [1D Array] pointers.
  180.        
  181.         //#if defined(REDUCE_RAM_WEIGHTS_LVL1)
  182.         //    float *weights;      // weights of this     layer  [1D Array] pointers.                           #(used if     #REDUCE_RAM_WEIGHTS_LVL1   defined)        
  183.         //#endif
  184.         #if !defined(REDUCE_RAM_WEIGHTS_COMMON)
  185.             float **weights;     // weights of this     layer  [2D Array] pointers.                             #(used if NOT #REDUCE_RAM_WEIGHTS_COMMON defined)
  186.         #endif
  187.         float *preLgamma;    // gamma   of previous layer  [1D Array] pointers.
  188.  
  189.         // Default Constructor                                                        .
  190.         //      #0 Constructor                                                        .
  191.         //      #1 Constructor with default (probably pretrained) weights and biases  .
  192.         Layer();
  193.         Layer(const unsigned int &NumberOfInputs, const unsigned int &NumberOfOutputs);                                              // #0  
  194.         Layer(const unsigned int &NumberOfInputs, const unsigned int &NumberOfOutputs, float *default_Bias); //                             #(used if     #REDUCE_RAM_WEIGHTS_LVL2 defined)
  195.         Layer(const unsigned int &NumberOfInputs, const unsigned int &NumberOfOutputs, float *default_Weights, float *default_Bias); // #1  #(used if NOT #REDUCE_RAM_WEIGHTS_LVL2 defined)
  196.      
  197.         void FeedForward_Individual(const float &input, const int &j);
  198.         void FdF_Individual_PROGMEM(const float &input, const int &j);
  199.        
  200.         void FeedForward(const float *inputs); // Calculates the outputs() of layer.
  201.         void FdF_PROGMEM(const float *inputs);
  202.  
  203.         void BackPropOutput(const float *_expected_, const float *inputs);
  204.         void BackPropHidden(const Layer *frontLayer, const float *inputs);
  205.  
  206.  
  207.         // If I actually end up using other, more complicated activation functions, I might need to rethink the before_Activation_output array [...]
  208.         float Sigmoid   (const float &x ); // Sigmoid Activation Function 1/(1+e^(-x)) .
  209.         float SigmoidDer(const float &fx); // Derivative of Sigmoid Activation Function.
  210.        
  211.         float Tanh   (const float &x );
  212.         float TanhDer(const float &fx);
  213.  
  214.         float ReLU   (const float &x );
  215.         float ReLUDer(const float &fx); // for ReLU, fx works in place of x here
  216.  
  217.         float ELU   (const float &x ); // α = 1
  218.         float ELUDer(const float &fx);
  219.        
  220.         // Maybe use https://stackoverflow.com/a/42264773/11465149
  221.  
  222.         void print_PROGMEM();
  223.         void print();  
  224.         void test();  
  225.     };    
  226.     // Just like "static float *weights" [...] there might be a function to switch this too?
  227.     #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  228.         static byte *ActFunctionPerLayer; // byte is enough here; let's be realistic. xD
  229.         static unsigned int AtlayerIndex; // who is going to build a network with more than 255 layers?! Using unsigned int for now; a byte variant may be added later, behind a property definition, for maximum optimization.
  230.  
  231.         typedef float (Layer::*method_function) (const float &);
  232.         static const method_function (activation_Function_ptrs)[NUM_OF_USED_ACTIVATION_FUNCTIONS] = {
  233.             #if defined(Sigmoid)
  234.                 &Layer::Sigmoid,
  235.             #endif
  236.             #if defined(Tanh)
  237.                 &Layer::Tanh,
  238.             #endif
  239.             #if defined(ReLU)
  240.                 &Layer::ReLU,
  241.             #endif
  242.             #if defined(ELU)
  243.                 &Layer::ELU,
  244.             #endif
  245.         };
  246.         #if !defined(NO_BACKPROP)
  247.             static const method_function (derivative_Function_ptrs)[NUM_OF_USED_ACTIVATION_FUNCTIONS] = {
  248.                 #if defined(Sigmoid)
  249.                     &Layer::SigmoidDer,
  250.                 #endif
  251.                 #if defined(Tanh)
  252.                     &Layer::TanhDer,
  253.                 #endif
  254.                 #if defined(ReLU)
  255.                     &Layer::ReLUDer,
  256.                 #endif
  257.                 #if defined(ELU)
  258.                     &Layer::ELUDer,
  259.                 #endif
  260.             };
  261.         #endif  
  262.         //https://stackoverflow.com/a/31708674/11465149
  263.         //http://www.cs.technion.ac.il/users/yechiel/c++-faq/array-memfnptrs.html // ??? [x]
  264.     #endif
  265.    
  266.  
  267. public:
  268.  
  269.     Layer *layers;               // layers in the network [1D Array].
  270.     unsigned int numberOflayers; // Number of layers.
  271.  
  272.     // unsigned float doesn't exist..? lol
  273.     static float LearningRateOfWeights; // Learning Rate of Weights.
  274.     static float LearningRateOfBiases;  // Learning Rate of Biases .
  275.     static float MeanSqrdError;// #3
  276.  
  277.     ~NeuralNetwork(); // Destructor.
  278.  
  279.     NeuralNetwork();
  280.     NeuralNetwork(const unsigned int *layer_, const unsigned int &NumberOflayers, byte *ActFunctionPerLayer = nullptr);                                              // #0
  281.     NeuralNetwork(const unsigned int *layer_, const unsigned int &NumberOflayers, const float &LRw, const float &LRb, byte *ActFunctionPerLayer = nullptr);          // #0
  282.     NeuralNetwork(const unsigned int *layer_, float *default_Weights, float *default_Bias, const unsigned int &NumberOflayers, byte *ActFunctionPerLayer = nullptr); // #1
  283.  
  284.    // NeuralNetwork(const unsigned int *layer_, const PROGMEM float *default_Weights, const PROGMEM float *default_Bias, const unsigned int &NumberOflayers , bool isProgmem); // isProgmem (because of the Error #777) ? i get it in a way but ..
  285.    
  286.     void  reset_Individual_Input_Counter();
  287.     float *FeedForward_Individual(const float &input);
  288.     float *FeedForward(const float *inputs); // Moves Calculated outputs as inputs to next layer.
  289.    
  290.     float GetMeanSqrdError(unsigned int inputsPerEpoch); //#3
  291.     void BackProp(const float *expected);    // BackPropopagation - (error, delta-weights, etc.).
  292.     void print();
  293.    
  294.    
  295. };
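/*
  A rough sketch of how the public interface above seems intended to be used
  from an Arduino sketch (assumed usage; the 2-4-1 topology and the values are
  only illustrative, and W/B are hypothetical sketch-side arrays):

      #include "NeuralNetwork.h"

      const unsigned int topology[] = {2, 4, 1};
      NeuralNetwork NN(topology, 3);              // #0: random weights, default learning rates

      // or, with default/pretrained weights and biases (sizes must match the topology):
      //     float W[2*4 + 4*1] = { ... };        // flat weights, layer by layer
      //     float B[2]         = { ... };        // one bias per non-input layer
      //     NeuralNetwork NN(topology, W, B, 3); // #1

      void setup() {
          Serial.begin(9600);
          const float x[] = {1.0, 0.0};
          float *y = NN.FeedForward(x);           // pointer to the last layer's outputs
          Serial.println(y[0], 6);
          NN.print();                             // dump weights and biases over Serial
      }

      void loop() {}
*/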
  296. float NeuralNetwork::LearningRateOfWeights = 0.33;
  297. float NeuralNetwork::LearningRateOfBiases  = 0.066;
  298. float NeuralNetwork::MeanSqrdError = 0; //#3
  299. #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  300.     float *NeuralNetwork::weights = nullptr;
  301.     int NeuralNetwork::i_j = 0;
  302. #endif
  303. #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  304.     byte *NeuralNetwork::ActFunctionPerLayer;
  305.     unsigned int NeuralNetwork::AtlayerIndex = 0;
  306. #endif  
  307.    
  308. float NeuralNetwork::sumSquaredError = 0; //#3
  309.  
  310. //=======================================================================================================================================================================
  311. //NeuralNetwork.cpp
  312. //=======================================================================================================================================================================
  313. #pragma region NeuralNetwork.cpp
  314.  
  315.     void NeuralNetwork::Layer::test(){
  316.         //Serial.println(ActFunctionPerLayer[AtlayerIndex]);
  317.        
  318.        /*
  319.         float *a = new float[5]{};
  320.         a[1] = ((this)->*(activation_Function_ptrs)[AtlayerIndex])((*bias) + 2);
  321.         Serial.println(a[1]);
  322.         */
  323.  
  324.        Serial.println(((this)->*(activation_Function_ptrs)[0])((*bias) + 2));
  325.        Serial.println(((this)->*(activation_Function_ptrs)[AtlayerIndex])((*bias) + 2));
  326.     }
  327.  
  328.     NeuralNetwork::~NeuralNetwork()
  329.     {
  330.  
  331.         for (int i = 0; i < numberOflayers; i++)
  332.         {
  333.             #if !defined(REDUCE_RAM_WEIGHTS_COMMON)
  334.                 for (int j = 0; j < layers[i]._numberOfOutputs; j++) // because of this I won't make _numberOfOutputs/_numberOfInputs private... or maybe I will, or change them to const*; to be decided.
  335.                 {
  336.                     delete[] layers[i].weights[j];
  337.                     layers[i].weights[j] = NULL;    
  338.                 }
  339.             #endif
  340.  
  341.             delete layers[i].bias;
  342.             layers[i].bias = NULL;
  343.  
  344.             #if !defined(REDUCE_RAM_DELETE_OUTPUTS)
  345.                 delete[] layers[i].outputs;
  346.                 layers[i].outputs = NULL;
  347.             #endif
  348.  
  349.             /*
  350.             #if defined(REDUCE_RAM_WEIGHTS_LVL1)
  351.                 delete[] layers[i].weights;
  352.                 layers[i].weights = NULL;
  353.             #endif
  354.             */
  355.         }
  356.  
  357.         #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  358.             delete[] weights; // allocated with new[], so delete[] rather than delete
  359.             weights = NULL;
  360.         #endif
  361.  
  362.         delete[] layers;
  363.         layers = NULL; // 18/5/2019
  364.     }
  365.  
  366.     NeuralNetwork::NeuralNetwork() {}
  367.  
  368.     NeuralNetwork::NeuralNetwork(const unsigned int *layer_, float *default_Weights, float *default_Bias, const unsigned int &NumberOflayers, byte *ActFunctionPerLayer )
  369.     {
  370.         numberOflayers = NumberOflayers - 1;
  371.  
  372.         layers = new Layer[numberOflayers]; // there has to be a faster way, e.g. by pre-allocating memory...
  373.  
  374.         #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  375.             this->ActFunctionPerLayer = ActFunctionPerLayer;
  376.         #endif  
  377.         #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  378.             weights = default_Weights;
  379.         #else
  380.             unsigned int weightsFromPoint = 0;
  381.         #endif
  382.  
  383.         for (int i = 0; i < numberOflayers; i++)
  384.         {
  385.             #if defined(REDUCE_RAM_WEIGHTS_LVL2) // #1.1
  386.                 layers[i] = Layer(layer_[i], layer_[i + 1], &default_Bias[i]);
  387.             #else
  388.                 layers[i] = Layer(layer_[i], layer_[i + 1], &default_Weights[weightsFromPoint], &default_Bias[i]);
  389.                 weightsFromPoint += layer_[i] * layer_[i + 1];
  390.             #endif
  391.            
  392.         }
  393.     }
  394.  
  395.     NeuralNetwork::NeuralNetwork(const unsigned int *layer_, const unsigned int &NumberOflayers, const float &LRw, const float &LRb,  byte *ActFunctionPerLayer )
  396.     {
  397.         LearningRateOfWeights = LRw; // Initializing the Learning Rate of Weights
  398.         LearningRateOfBiases = LRb;  // Initializing the Learning Rate of Biases
  399.  
  400.         numberOflayers = NumberOflayers - 1;
  401.  
  402.         layers = new Layer[numberOflayers];
  403.  
  404.         #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  405.             this->ActFunctionPerLayer = ActFunctionPerLayer;
  406.         #endif
  407.  
  408.         #if defined(REDUCE_RAM_WEIGHTS_LVL2) // reduce the RAM footprint
  409.             for (int i = 0; i < numberOflayers; i++)
  410.                 i_j += layer_[i] * layer_[i + 1];
  411.            
  412.             weights = new float[i_j];
  413.             i_j=0;
  414.         #endif
  415.  
  416.         for (int i = 0; i < numberOflayers; i++)
  417.         {
  418.             layers[i] =  Layer(layer_[i], layer_[i + 1]);
  419.         }
  420.  
  421.     }
  422.  
  423.     // Maybe add one more constructor so the memory of the feedforward outputs can be released when backprop isn't wanted?
  424.  
  425.     NeuralNetwork::NeuralNetwork(const unsigned int *layer_, const unsigned int &NumberOflayers,  byte *ActFunctionPerLayer )
  426.     {
  427.  
  428.         numberOflayers = NumberOflayers - 1;
  429.  
  430.         layers = new Layer[numberOflayers];
  431.  
  432.         #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  433.             this->ActFunctionPerLayer = ActFunctionPerLayer;
  434.         #endif
  435.  
  436.         #if defined(REDUCE_RAM_WEIGHTS_LVL2) // reduce the RAM footprint
  437.             for (int i = 0; i < numberOflayers; i++)
  438.                 i_j += layer_[i] * layer_[i + 1];
  439.            
  440.             weights = new float[i_j];
  441.             i_j=0;
  442.         #endif
  443.  
  444.         for (int i = 0; i < numberOflayers; i++)
  445.         {
  446.             layers[i] =  Layer(layer_[i], layer_[i + 1]);
  447.         }
  448.  
  449.     }
  450.  
  451.     void NeuralNetwork::reset_Individual_Input_Counter()
  452.     {
  453.         Individual_Input = 0;
  454.     }
  455.  
  456.     float *NeuralNetwork::FeedForward_Individual(const float &input)
  457.     {
  458.  
  459.         #if defined(USE_PROGMEM)
  460.             layers[0].FdF_Individual_PROGMEM(input, Individual_Input);
  461.         #else
  462.             layers[0].FeedForward_Individual(input, Individual_Input);
  463.         #endif
  464.         Individual_Input++;
  465.  
  466.         if (Individual_Input == layers[0]._numberOfInputs)
  467.         {
  468.             Individual_Input=0;
  469.        
  470.             #if defined(REDUCE_RAM_DELETE_OUTPUTS)
  471.                 if (FIRST_TIME_FDFp == true) // not the first call anymore, so delete the stale outputs from last time
  472.                 {
  473.                     delete[] layers[numberOflayers - 1].outputs;
  474.                     layers[numberOflayers - 1].outputs = NULL;
  475.                 }else{
  476.                     FIRST_TIME_FDFp = true;
  477.                 }
  478.             #endif
  479.  
  480.             for (int i = 1; i < numberOflayers; i++)
  481.             {
  482.                 #if defined(USE_PROGMEM)
  483.                     layers[i].FdF_PROGMEM(layers[i - 1].outputs);
  484.                 #else
  485.                     layers[i].FeedForward(layers[i - 1].outputs);
  486.                 #endif
  487.                 #if defined(REDUCE_RAM_DELETE_OUTPUTS)
  488.                     delete[] layers[i - 1].outputs;
  489.                     layers[i - 1].outputs = NULL;
  490.                 #endif
  491.             }
  492.  
  493.             return layers[numberOflayers - 1].outputs;
  494.         }
  495.  
  496.         return nullptr;
  497.     }
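    /*
      A rough sketch of feeding the first layer one input value at a time
      (assumed usage; readSensor() is a hypothetical sketch-side function).
      The call returns nullptr until the last of the first layer's inputs has
      been fed in, and only then the pointer to the final outputs:

          const unsigned int topology[] = {3, 5, 1};
          NeuralNetwork NN(topology, 3);

          void loop() {
              float *out = NULL;
              for (int k = 0; k < 3; k++)
                  out = NN.FeedForward_Individual(readSensor(k));

              if (out != NULL)
                  Serial.println(out[0], 6);
          }
    */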
  498.  
  499.     float *NeuralNetwork::FeedForward(const float *inputs)
  500.     {
  501.         _inputs = inputs;
  502.        
  503.         #if defined(REDUCE_RAM_DELETE_OUTPUTS)
  504.             if (FIRST_TIME_FDFp == true) // not the first call anymore, so delete the stale outputs from last time
  505.             {
  506.                 delete[] layers[numberOflayers - 1].outputs;
  507.                 layers[numberOflayers - 1].outputs = NULL;
  508.             }else{
  509.                 FIRST_TIME_FDFp = true;
  510.             }
  511.         #endif
  512.  
  513.         #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  514.             AtlayerIndex = 0;
  515.         #endif  
  516.         // resets starting point (could have had a function returning it instead of initializing it in the constructor too?)
  517.         #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  518.             i_j=0;
  519.         #endif
  520.        
  521.         #if defined(USE_PROGMEM)
  522.             layers[0].FdF_PROGMEM(_inputs);
  523.         #else
  524.             layers[0].FeedForward(_inputs);
  525.         #endif
  526.  
  527.         for (int i = 1; i < numberOflayers; i++)
  528.         {
  529.             #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  530.                 AtlayerIndex = i;
  531.             #endif  
  532.             #if defined(USE_PROGMEM)
  533.                 layers[i].FdF_PROGMEM(layers[i - 1].outputs);
  534.             #else
  535.                 layers[i].FeedForward(layers[i - 1].outputs);
  536.             #endif
  537.             #if defined(REDUCE_RAM_DELETE_OUTPUTS)
  538.                 delete[] layers[i - 1].outputs;
  539.                 layers[i - 1].outputs = NULL;
  540.             #endif
  541.         }
  542.  
  543.         return layers[numberOflayers - 1].outputs;
  544.     }
  545.  
  546.  
  547.  
  548.     void NeuralNetwork::BackProp(const float *expected)
  549.     {
  550.         /* I don't see any reason to keep this if BackProp is never called more than once immediately after a FeedForward [...] but just in case, left here commented out:
  551.         #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  552.             AtlayerIndex = numberOflayers - 1;
  553.         #endif  
  554.         */
  555.  
  556.         layers[numberOflayers - 1].BackPropOutput(expected, layers[numberOflayers - 2].outputs); // there was an issue here because, during backprop, the weights are traversed in reverse order [Fixed]
  557.  
  558.         for (int i = numberOflayers - 2; i > 0; i--)
  559.         {
  560.             #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  561.                 AtlayerIndex = i;
  562.             #endif
  563.  
  564.             layers[i].BackPropHidden(&layers[i + 1], layers[i - 1].outputs);
  565.             delete[] layers[i + 1].preLgamma;
  566.             layers[i + 1].preLgamma = NULL; // 18/5/2019
  567.         }
  568.  
  569.         layers[0].BackPropHidden(&layers[1], _inputs);
  570.  
  571.         delete[] layers[1].preLgamma;
  572.         delete[] layers[0].preLgamma;
  573.  
  574.         layers[0].preLgamma = NULL;
  575.         layers[1].preLgamma = NULL;
  576.     }
  577.  
  578.      //#3
  579.      
  580.     float NeuralNetwork::GetMeanSqrdError(unsigned int inputsPerEpoch)
  581.     {
  582.         MeanSqrdError = (sumSquaredError/ (layers[numberOflayers - 1]._numberOfOutputs * inputsPerEpoch));
  583.         sumSquaredError = 0;
  584.         return MeanSqrdError;
  585.     }
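    /*
      A rough training-loop sketch (assumed usage pieced together from the
      declarations above; the XOR data set and epoch count are only illustrative):

          const unsigned int topology[] = {2, 4, 1};
          NeuralNetwork NN(topology, 3);

          const float inputs[4][2]   = { {0,0}, {0,1}, {1,0}, {1,1} };
          const float expected[4][1] = { {0},   {1},   {1},   {0}   };

          void setup() {
              Serial.begin(9600);
              for (int epoch = 0; epoch < 3000; epoch++) {
                  for (int p = 0; p < 4; p++) {
                      NN.FeedForward(inputs[p]);   // forward pass
                      NN.BackProp(expected[p]);    // also accumulates sumSquaredError (#3)
                  }
                  // mean squared error over the 4 patterns of this epoch
                  Serial.println(NN.GetMeanSqrdError(4), 6);
              }
          }

          void loop() {}
    */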
  586.    
  587.  
  588.     // If the microcontroller isn't one of the .._No_Common_Serial_Support series, the code below is compiled.
  589.     #if !defined(As__No_Common_Serial_Support) // then Compile:
  590.     void NeuralNetwork::print()
  591.     {
  592.         #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  593.             i_j=0;
  594.         #endif
  595.  
  596.         Serial.println();
  597.         Serial.println("----------------------");
  598.  
  599.         for (int i = 0; i < numberOflayers; i++)
  600.         {
  601.             #if defined(USE_PROGMEM)
  602.                 layers[i].print_PROGMEM();
  603.             #else
  604.                 layers[i].print();
  605.             #endif
  606.         }
  607.     }
  608.     #endif
  609. #pragma endregion
  610.  
  611.  
  612.  
  613. //=======================================================================================================================================================================
  614. // Layer.cpp
  615. //=======================================================================================================================================================================
  616.  
  617. #pragma region Layer.cpp
  618.  
  619.     NeuralNetwork::Layer::Layer() {}
  620.  
  621.  
  622.     #if !defined(REDUCE_RAM_WEIGHTS_LVL2) // #1.1
  623.         NeuralNetwork::Layer::Layer(const unsigned int &NumberOfInputs, const unsigned int &NumberOfOutputs, float *default_Weights, float *default_Bias)
  624.         {
  625.             _numberOfInputs = NumberOfInputs;   //  (this) layer's  Number of Inputs .
  626.             _numberOfOutputs = NumberOfOutputs; //           ##1    Number of Outputs.
  627.  
  628.             #if !defined(REDUCE_RAM_DELETE_OUTPUTS)
  629.                 outputs = new float[_numberOfOutputs]; //    ##1    New Array of Outputs.
  630.             #endif
  631.            
  632.             bias = default_Bias; //                          ##1    Bias as Default Bias.
  633.             weights = new float *[_numberOfOutputs]; //      ##1    New Array of Pointers to (float) weights.
  634.  
  635.             for (int i = 0; i < _numberOfOutputs; i++)              // [matrix] (_numberOfOutputs * _numberOfInputs)
  636.                 weights[i] = &default_Weights[i * _numberOfInputs]; // Passing Default weights to ##1 weights by reference.
  637.            
  638.         }
  639.  
  640.     #else
  641.         NeuralNetwork::Layer::Layer(const unsigned int &NumberOfInputs, const unsigned int &NumberOfOutputs, float *default_Bias)
  642.         {
  643.             _numberOfInputs = NumberOfInputs;   //  (this) layer's  Number of Inputs .
  644.             _numberOfOutputs = NumberOfOutputs; //           ##1    Number of Outputs.
  645.  
  646.             #if !defined(REDUCE_RAM_DELETE_OUTPUTS)
  647.                 outputs = new float[_numberOfOutputs]; //    ##1    New Array of Outputs.
  648.             #endif
  649.            
  650.             bias = default_Bias; //                          ##1    Bias as Default Bias.
  651.         }
  652.  
  653.     #endif
  654.  
  655.     //- [ numberOfInputs in into this layer , NumberOfOutputs of this layer ]
  656.     NeuralNetwork::Layer::Layer(const unsigned int &NumberOfInputs, const unsigned int &NumberOfOutputs)
  657.     {
  658.  
  659.         _numberOfInputs = NumberOfInputs;                             // ##1       Number of Inputs .
  660.         _numberOfOutputs = NumberOfOutputs;                           // ##1       Number of Outputs.
  661.  
  662.         outputs = new float[_numberOfOutputs];                        // ##1    New Array of Outputs.
  663.         #if !defined(REDUCE_RAM_WEIGHTS_COMMON)      
  664.             weights = new float *[_numberOfOutputs];                  // ##1    New Array of Pointers to (float) weights.
  665.         #endif
  666.         bias = new float;                                             // ##1    New          Bias   .
  667.         *bias = 1.0;
  668.  
  669.         float _random;
  670.  
  671.         for (int i = 0; i < _numberOfOutputs; i++)
  672.         {
  673.             #if !defined(REDUCE_RAM_WEIGHTS_COMMON)
  674.                 weights[i] = new float[_numberOfInputs];
  675.             #endif
  676.            
  677.             for (int j = 0; j < _numberOfInputs; j++)
  678.             {
  679.                 _random = (random(-90000, 90000)); // Pseudo-Random Number between -90000 and 90000
  680.                 #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  681.                     weights[i_j] = _random / 100000;
  682.                     i_j++;
  683.                 #else
  684.                     weights[i][j] = _random / 100000;  // Divided by 100000 = a Number between -0.90000 and 0.90000
  685.                 #endif
  686.             }
  687.         }
  688.  
  689.     }
  690.  
  691.     void NeuralNetwork::Layer::FdF_Individual_PROGMEM(const float &input, const int &j)
  692.     {
  693.         #if defined(REDUCE_RAM_DELETE_OUTPUTS)
  694.             if (j == 0) // if this is the first input, create this layer's output array
  695.                 outputs = new float[_numberOfOutputs]; // speed? could also use {} or memset here; it matters
  696.         #endif
  697.         //outputs[i] = 0; the zeroing is done inside the for loop below instead
  698.  
  699.         #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  700.             i_j = 0; // one of the only two places where i_j represents just the i
  701.         #endif
  702.  
  703.         //feed forwards
  704.         int i;
  705.        
  706.         for (i = 0; i < _numberOfOutputs; i++)
  707.         {
  708.             if (j == 0)
  709.                 outputs[i] = 0; // ? speed ? safe one..
  710.  
  711.             #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  712.                 outputs[i] += input * pgm_read_float(&weights[i_j+j]);
  713.             #else
  714.                 outputs[i] += input * pgm_read_float(&weights[i][j]); // if double pgm_read_dword
  715.             #endif
  716.  
  717.             #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  718.                 i_j += _numberOfInputs;
  719.             #endif
  720.         }
  721.  
  722.         // once every individual input has been multiplied by its weights and summed into the outputs, pass the outputs through the activation function
  723.         if (j == _numberOfInputs -1)
  724.             for (i = 0; i < _numberOfOutputs; i++)
  725.                 outputs[i] = ACTIVATE_WITH(ACTIVEATION_FUNCTION, outputs[i] + pgm_read_float(bias)); // if double pgm_read_dword
  726.  
  727.     }
  728.  
  729.     void NeuralNetwork::Layer::FeedForward_Individual(const float &input, const int &j)
  730.     {
  731.         #if defined(REDUCE_RAM_DELETE_OUTPUTS)
  732.             if (j == 0) // if this is the first input, create this layer's output array
  733.                 outputs = new float[_numberOfOutputs];
  734.         #endif
  735.         //outputs[i] = 0; the zeroing is done inside the for loop below instead
  736.  
  737.         #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  738.             i_j = 0; // one of the only two places where i_j represents just the i
  739.         #endif
  740.  
  741.         //feed forwards
  742.         int i;
  743.         for (i = 0; i < _numberOfOutputs; i++)
  744.         {
  745.             if (j == 0)
  746.                 outputs[i] = 0;
  747.  
  748.             #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  749.                 outputs[i] += input * weights[i_j+j];
  750.             #else
  751.                 outputs[i] += input * weights[i][j];
  752.             #endif
  753.  
  754.             #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  755.                 i_j += _numberOfInputs;
  756.             #endif
  757.         }
  758.  
  759.         // once every individual input has been multiplied by its weights and summed into the outputs, pass the outputs through the activation function
  760.         if (j == _numberOfInputs -1)
  761.             for (i = 0; i < _numberOfOutputs; i++)
  762.                 outputs[i] = ACTIVATE_WITH(ACTIVEATION_FUNCTION, outputs[i] + (*bias));
  763.  
  764.     }
  765.  
  766.     void NeuralNetwork::Layer::FdF_PROGMEM(const float *inputs) //*
  767.     {
  768.         #if defined(REDUCE_RAM_DELETE_OUTPUTS)
  769.             outputs = new float[_numberOfOutputs];
  770.         #endif
  771.        
  772.         //feed forwards
  773.         for (int i = 0; i < _numberOfOutputs; i++)
  774.         {
  775.             outputs[i] = 0; // #2
  776.             for (int j = 0; j < _numberOfInputs; j++)
  777.             {
  778.                 #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  779.                     outputs[i] += inputs[j] * pgm_read_float(&weights[i_j]);
  780.                     i_j++;
  781.                 #else
  782.                     outputs[i] += inputs[j] * pgm_read_float(&weights[i][j]); // if double pgm_read_dword
  783.                 #endif
  784.             }
  785.             outputs[i] = ACTIVATE_WITH(ACTIVEATION_FUNCTION, outputs[i] + pgm_read_float(bias)); // if double pgm_read_dword
  786.         }
  787.  
  788.     }
  789.  
  790.     void NeuralNetwork::Layer::FeedForward(const float *inputs) //*
  791.     {
  792.         #if defined(REDUCE_RAM_DELETE_OUTPUTS)
  793.             outputs = new float[_numberOfOutputs];
  794.         #endif
  795.    
  796.         //feed forwards
  797.         for (int i = 0; i < _numberOfOutputs; i++)
  798.         {
  799.             outputs[i] = 0;
  800.             for (int j = 0; j < _numberOfInputs; j++)
  801.             {
  802.                 #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  803.                     outputs[i] += inputs[j] * weights[i_j];
  804.                     i_j++;
  805.                 #else
  806.                     outputs[i] += inputs[j] * weights[i][j]; // (neuron[i]'s 1D array/vector of inputs) * (neuron[i]'s 2D array/matrix weights) = neuron[i]'s output
  807.                 #endif
  808.                
  809.             }
  810.             #if defined(ACTIVATION_FUNCTION_PER_LAYER)
  811.                 //ActFunctionPerLayer[AtlayerIndex]
  812.                 //int a = AtlayerIndex;
  813.                 //outputs[i] = ((this)->*(activation_Function_ptrs)[ActFunctionPerLayer[AtlayerIndex]])(outputs[i] + (*bias));
  814.                // outputs[i] = ((this)->*(activation_Function_ptrs)[33])(outputs[i] + (*bias));//(this->*activation_Function_ptrs[ActFunctionPerLayer[AtlayerIndex]])();
  815.             #else
  816.                 outputs[i] = ACTIVATE_WITH(ACTIVEATION_FUNCTION, outputs[i] + (*bias)); //  (neuron[i]'s output) = Sigmoid_Activation_Function_Value_Of((neuron[i]'s output) + (bias of current layer))
  817.             #endif
  818.         }
  819.  
  820.         // return outputs;
  821.     }
  822.     /*
  823.     So I will take the sigmoid out of the _numberOfOutputs loop -> Sigmoid(ACTIVEATION_FUNCTION, outputs[i] + (*bias))
  824.     and put it into the _numberOfInputs loop -> inputs[j] (to save RAM; otherwise a BeforeActivationOutputs 1D array [i.e. the values before activation] with a property),
  825.     so that when I do backprop I can take the derivatives of other activation functions
  826.     that require the input before the activation... ahh, issues after all; it was too good to be true..
  827.  
  828.     https://stackoverflow.com/questions/1253934/c-pre-processor-defining-for-generated-function-names
  829.     */
  830.  
  831.  
  832.     float NeuralNetwork::Layer::Sigmoid  (const float &x) { return 1 / (1 + exp(-x))               ;}
  833.     float NeuralNetwork::Layer::Tanh     (const float &x) { return (exp(2*x) - 1) / (exp(2*x) + 1) ;}
  834.     float NeuralNetwork::Layer::ReLU     (const float &x) { return (x > 0) ? x : 0                 ;}
  835.     float NeuralNetwork::Layer::ELU      (const float &x) { return (x > 0) ? x : (exp(x) - 1)      ;}
  836.  
  837.     //  v USE_PROGMEM not sure if i should do this ...
  838.     #if !defined(NO_BACKPROP)
  839.  
  840.         float NeuralNetwork::Layer::SigmoidDer  (const float &fx) { return fx - fx * fx          ;}
  841.         float NeuralNetwork::Layer::TanhDer     (const float &fx) { return 1 - fx * fx           ;}
  842.         float NeuralNetwork::Layer::ReLUDer     (const float &fx) { return (fx > 0) ? 1 : 0      ;}
  843.         float NeuralNetwork::Layer::ELUDer      (const float &fx) { return (fx < 0) ? fx + 1 : 1 ;}
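        /*
          Note (my reading of the code, not a formal derivation): the derivatives
          above are written in terms of fx = f(x), i.e. the value *after* the
          activation, since that is what each layer keeps around:

              Sigmoid : f'(x) = f(x) * (1 - f(x))   = fx - fx*fx
              Tanh    : f'(x) = 1 - f(x)^2          = 1 - fx*fx
              ReLU    : f'(x) = 1 if x > 0 else 0   (and fx > 0 exactly when x > 0)
              ELU(α=1): f'(x) = 1 if x > 0 else e^x = fx + 1 for x < 0
        */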
  844.  
  845.  
  846.         void NeuralNetwork::Layer::BackPropOutput(const float *_expected_, const float *inputs)
  847.         {
  848.  
  849.             preLgamma = new float[_numberOfInputs]{}; // create the previous layer's gamma array, value-initialized {} to 0
  850.            
  851.  
  852.             float bias_Delta = 1.0;
  853.             float gamma;
  854.  
  855.             #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  856.                 for (int i = _numberOfOutputs -1; i >= 0; i--) // reverse order, so i_j can walk the flat weights array backwards; hopefully the compiler optimizes this well
  857.                 {
  858.                     //    γ  = (Error) * Derivative_of_Sigmoid_Activation_Function
  859.                     gamma = (outputs[i] - _expected_[i]);
  860.                     sumSquaredError += gamma * gamma;
  861.                     gamma = gamma * DERIVATIVE_OF(ACTIVEATION_FUNCTION, outputs[i]);
  862.  
  863.                     bias_Delta *= gamma;
  864.  
  865.                     for (int j = _numberOfInputs -1; j >= 0; j--)
  866.                     {
  867.                         i_j--;
  868.                         preLgamma[j] += gamma * weights[i_j];
  869.                         weights[i_j] -= (gamma * inputs[j]) * LearningRateOfWeights;  
  870.                     }
  871.                 }
  872.  
  873.             #else
  874.                 for (int i = 0; i < _numberOfOutputs; i++)
  875.                 {
  876.                     //    γ  = (Error) * Derivative_of_Sigmoid_Activation_Function
  877.                     //gamma = (outputs[i] - _expected_[i]) * DERIVATIVE_OF(ACTIVEATION_FUNCTION, outputs[i]); // outputs[i] is f(x) not x in this case, because i wanted to delete the array of inputs before activation
  878.  
  879.                      //#3
  880.                     gamma = (outputs[i] - _expected_[i]);
  881.                     sumSquaredError += gamma * gamma; // I want to believe that it is being optimised when not used
  882.                     gamma = gamma * DERIVATIVE_OF(ACTIVEATION_FUNCTION, outputs[i]);
  883.                    
  884.                     bias_Delta *= gamma;
  885.  
  886.                     for (int j = 0; j < _numberOfInputs; j++)
  887.                     {
  888.                         preLgamma[j] += gamma * weights[i][j];
  889.                         weights[i][j] -= (gamma * inputs[j]) * LearningRateOfWeights;
  890.                     }
  891.                 }
  892.             #endif
  893.            
  894.             *bias -= bias_Delta * LearningRateOfBiases;
  895.         }
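        /*
          The update rule implemented above, in equation form (my summary of what
          the code does, for the output layer):

              gamma_i      = (output_i - expected_i) * f'(output_i)
              preLgamma_j += gamma_i * w_ij                      // handed to the layer behind
              w_ij        -= LearningRateOfWeights * gamma_i * input_j
              bias        -= LearningRateOfBiases  * prod_i(gamma_i)

          (bias_Delta *= gamma makes the bias delta a *product* of the gammas; a sum
          would be the more common choice, but the above documents the code as-is.)
        */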
  896.  
  897.         void NeuralNetwork::Layer::BackPropHidden(const Layer *frontLayer, const float *inputs)
  898.         {
  899.             preLgamma = new float[_numberOfInputs]{};
  900.  
  901.             float bias_Delta = 1.0;
  902.             float gamma;
  903.  
  904.             #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  905.                 for (int i = _numberOfOutputs -1; i >= 0; i--)
  906.                 {
  907.                     gamma = frontLayer->preLgamma[i] * DERIVATIVE_OF(ACTIVEATION_FUNCTION, outputs[i]); // if I remember correctly, frontLayer->preLgamma[i] acts as the current layer's gamma here
  908.                     bias_Delta *= gamma;
  909.  
  910.                     for (int j = _numberOfInputs -1; j >= 0; j--)
  911.                     {
  912.                         i_j--;
  913.                         preLgamma[j] += gamma * weights[i_j];
  914.                         weights[i_j] -= (gamma * inputs[j]) * LearningRateOfWeights;
  915.                     }
  916.  
  917.                 }
  918.  
  919.             #else
  920.                 for (int i = 0; i < _numberOfOutputs; i++)
  921.                 {
  922.                     gamma = frontLayer->preLgamma[i] * DERIVATIVE_OF(ACTIVEATION_FUNCTION, outputs[i]);
  923.                     bias_Delta *= gamma;
  924.  
  925.                     for (int j = 0; j < _numberOfInputs; j++)
  926.                     {
  927.                         preLgamma[j] += gamma * weights[i][j];
  928.                         weights[i][j] -= (gamma * inputs[j]) * LearningRateOfWeights;
  929.                     }
  930.  
  931.                 }
  932.             #endif
  933.  
  934.             *bias -= bias_Delta * LearningRateOfBiases;
  935.            
  936.         }
  937.  
  938.  
  939.  
  940.     #endif
  941.  
  942.  
  943.     // If the microcontroller isn't one of the As__No_Common_Serial_Support series, the code below is compiled.
  944.     #if !defined(As__No_Common_Serial_Support) // then Compile:
  945.     void NeuralNetwork::Layer::print()
  946.     {
  947.  
  948.  
  949.         Serial.print(_numberOfInputs);
  950.         Serial.print(" ");
  951.         Serial.print(_numberOfOutputs);
  952.         Serial.print("| bias:");
  953.         Serial.print(*bias);
  954.         Serial.println();
  955.  
  956.         for (int i = 0; i < _numberOfOutputs; i++)
  957.         {
  958.             Serial.print(i + 1);
  959.             Serial.print(" ");
  960.             for (int j = 0; j < _numberOfInputs; j++)
  961.             {
  962.                 Serial.print(" W:");
  963.                 #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  964.                     if (weights[i_j] > 0) Serial.print(" "); // didn't even bother optimizing here
  965.                     Serial.print(weights[i_j], 7);
  966.                     i_j++;
  967.                 #else
  968.                     if (weights[i][j] > 0) Serial.print(" ");
  969.                     Serial.print(weights[i][j], 7);
  970.                 #endif
  971.                 Serial.print(" ");
  972.             }
  973.             Serial.println("");
  974.         }
  975.         Serial.println("----------------------");
  976.  
  977.     }
  978.  
  979.     void NeuralNetwork::Layer::print_PROGMEM()
  980.     {
  981.  
  982.         Serial.print(_numberOfInputs);
  983.         Serial.print(" ");
  984.         Serial.print(_numberOfOutputs);
  985.         Serial.print("| bias:");
  986.         Serial.print(pgm_read_float(bias));
  987.         Serial.println();
  988.  
  989.         for (int i = 0; i < _numberOfOutputs; i++)
  990.         {
  991.             Serial.print(i + 1);
  992.             Serial.print(" ");
  993.             for (int j = 0; j < _numberOfInputs; j++)
  994.             {
  995.                 //weights[i][j] = (float)j;
  996.                 Serial.print(" W:");
  997.                 #if defined(REDUCE_RAM_WEIGHTS_LVL2)
  998.                     if (pgm_read_float(&weights[i_j]) > 0) Serial.print(" "); // padding values greater than 10 (or similar) would be nice too
  999.                     Serial.print(pgm_read_float(&weights[i_j]), 6);
  1000.                     i_j++;
  1001.                 #else
  1002.                     if (pgm_read_float(&weights[i][j]) > 0 ) Serial.print(" ");
  1003.                     Serial.print(pgm_read_float(&weights[i][j]), 6);
  1004.                 #endif
  1005.                 Serial.print(" ");
  1006.             }
  1007.             Serial.println("");
  1008.         }
  1009.         Serial.println("----------------------");
  1010.     }
  1011.  
  1012.     #endif
  1013.  
  1014. #pragma endregion
  1015.  
  1016. #endif
  1017.  
  1018.  
  1019. /*
  1020.  
  1021. #2 https://stackoverflow.com/questions/22318677/is-it-faster-to-have-the-compiler-initialize-an-array-or-to-manually-iterate-over
  1022.     I am slightly confused. Anyways...
  1023.  
  1024. */