Advertisement
Guest User

Onboarding NN

a guest
Jun 27th, 2022
14
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C++ 4.48 KB | None | 0 0
  1. #include <iostream>
  2. #include <vector>
  3. #include <cstdlib>
  4. #include <cassert>
  5. #include <cmath>
  6.  
  7. using namespace std;
  8.  
  9. class Neuron;
  10.  
  11. typedef vector<Neuron> Layer;
  12.  
// One weighted link from a neuron to a single neuron in the next layer.
struct Connection
{
    double weight; // multiplier applied to the source neuron's output
};
  17.  
// A single network node. Each neuron stores its own output value plus the
// weights of its OUTGOING connections (one per neuron in the next layer);
// a neuron therefore reads its incoming weights from the previous layer.
class Neuron
{
public:
    Neuron(unsigned numOutputs, unsigned myIndex);
    // Force the output value (used for input neurons and the bias neuron).
    void setOutputVal(double val) { m_outputVal = val; }
    double getOutputVal(void) const { return m_outputVal; }
    // Recompute this neuron's output from the previous layer's outputs.
    void feedForward(const Layer &prevLayer);
    // Copy weights[n] into prevLayer[n]'s connection that feeds this neuron.
    // Assumes weights.size() >= prevLayer.size() — caller must guarantee it.
    void loadInputWeights(Layer &prevLayer, vector<double> &weights);

private:
    static double transferFunction(double x);
    double m_outputVal;                  // last computed activation
    vector<Connection> m_outputWeights;  // weight per next-layer neuron
    unsigned m_myIndex;                  // this neuron's index within its layer
};
  33.  
  34. Neuron::Neuron(unsigned numOutputs, unsigned myIndex)
  35. {
  36.  
  37.     for (unsigned c = 0; c < numOutputs; c++)
  38.     {
  39.         m_outputWeights.push_back(Connection());
  40.     }
  41.  
  42.     m_myIndex = myIndex;
  43. }
  44.  
  45. void Neuron::feedForward(const Layer &prevLayer)
  46. {
  47.     double sum = 0.0;
  48.  
  49.     for (unsigned n = 0; n < prevLayer.size(); n++)
  50.     {
  51.         sum += prevLayer[n].getOutputVal() * prevLayer[n].m_outputWeights[m_myIndex].weight;
  52.     }
  53.  
  54.     m_outputVal = Neuron::transferFunction(sum);
  55. }
  56.  
  57. double Neuron::transferFunction(double x)
  58. {
  59.     return tanh(x);
  60. }
  61.  
  62. void Neuron::loadInputWeights(Layer &prevLayer, vector<double> &weights)
  63. {
  64.     for (unsigned n = 0; n < prevLayer.size(); n++)
  65.     {
  66.         Neuron &neuron = prevLayer[n];
  67.  
  68.         neuron.m_outputWeights[m_myIndex].weight = weights[n];
  69.     }
  70. }
  71.  
// A fully-connected feed-forward network. Layers (including one extra bias
// neuron per layer) are built from a topology vector; weights are loaded
// externally rather than trained here.
class Net
{
public:
    // topology[i] = number of (non-bias) neurons in layer i.
    Net(const vector<unsigned> &topology);
    // Run one forward pass; inputVals.size() must equal topology[0].
    void feedForward(const vector<double> &inputVals);
    // Copy the output layer's activations (bias excluded) into resultVals.
    void getResults(vector<double> &resultVals) const;
    // weights[l][n] = incoming weights of neuron n in layer l+1.
    void fillWeights(vector<vector<vector<double>>> &weights);

private:
    vector<Layer> m_layers; // m_layers[layer][neuron]
};
  83.  
  84. Net::Net(const vector<unsigned> &topology)
  85. {
  86.     unsigned numLayers = topology.size();
  87.  
  88.     for (unsigned layerNum = 0; layerNum < numLayers; layerNum++)
  89.     {
  90.         m_layers.push_back(Layer());
  91.  
  92.         unsigned numOutputs = layerNum == topology.size() - 1 ? 0 : topology[layerNum + 1];
  93.  
  94.         for (unsigned neuronNum = 0; neuronNum <= topology[layerNum]; neuronNum++)
  95.         {
  96.             m_layers.back().push_back(Neuron(numOutputs, neuronNum));
  97.         }
  98.  
  99.         m_layers.back().back().setOutputVal(1.0);
  100.     }
  101. }
  102.  
  103. void Net::feedForward(const vector<double> &inputVals)
  104. {
  105.     assert(inputVals.size() == m_layers[0].size() - 1);
  106.  
  107.     for (unsigned i = 0; i < inputVals.size(); i++)
  108.     {
  109.         m_layers[0][i].setOutputVal(inputVals[i]);
  110.     }
  111.  
  112.     for (unsigned layerNum = 1; layerNum < m_layers.size(); layerNum++)
  113.     {
  114.         Layer &prevLayer = m_layers[layerNum - 1];
  115.  
  116.         for (unsigned n = 0; n < m_layers[layerNum].size() - 1; n++)
  117.         {
  118.             m_layers[layerNum][n].feedForward(prevLayer);
  119.         }
  120.     }
  121. }
  122.  
  123. void Net::fillWeights(vector<vector<vector<double>>> &weights)
  124. {
  125.     for (unsigned layerNum = 1; layerNum < m_layers.size(); layerNum++)
  126.     {
  127.         Layer &layer = m_layers[layerNum];
  128.         Layer &prevLayer = m_layers[layerNum - 1];
  129.  
  130.         for (unsigned n = 0; n < layer.size() - 1; n++)
  131.         {
  132.             layer[n].loadInputWeights(prevLayer, weights[layerNum - 1][n]);
  133.         }
  134.     }
  135. }
  136.  
  137. void Net::getResults(vector<double> &resultVals) const
  138. {
  139.     resultVals.clear();
  140.  
  141.     for (unsigned n = 0; n < m_layers.back().size() - 1; n++)
  142.     {
  143.         resultVals.push_back(m_layers.back()[n].getOutputVal());
  144.     }
  145. }
  146.  
  147. int main()
  148. {
  149.     vector<unsigned> topology = {2, 2, 1};
  150.     Net myNet(topology);
  151.  
  152.     vector<vector<vector<double>>> weights = {{{-0.688558, 0.756463, -0.000669394}, {2.35899, -2.38341, -0.692542}}, {{0.0261347, 1.43365, 1.28669}}};
  153.     myNet.fillWeights(weights);
  154.  
  155.     while (true)
  156.     {
  157.         double a;
  158.         double b;
  159.         string nameA;
  160.         string nameB;
  161.  
  162.         cin >> nameA;
  163.         cin.ignore();
  164.         cin >> a;
  165.         cin.ignore();
  166.         cin >> nameB;
  167.         cin.ignore();
  168.         cin >> b;
  169.         cin.ignore();
  170.  
  171.         a /= 100.0;
  172.         b /= 100.0;
  173.  
  174.         vector<double> inputVals;
  175.         inputVals.push_back(a);
  176.         inputVals.push_back(b);
  177.         myNet.feedForward(inputVals);
  178.  
  179.         vector<double> resultVals;
  180.         myNet.getResults(resultVals);
  181.         int answerId = round(abs(resultVals.back()));
  182.  
  183.         if (answerId == 0)
  184.         {
  185.             cout << nameA << endl;
  186.         }
  187.         else
  188.         {
  189.             cout << nameB << endl;
  190.         }
  191.     }
  192. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement