Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #include <BasicLinearAlgebra.h>
- #include <math.h>
- using namespace BLA;
// Logistic sigmoid activation: maps any real input into (0, 1).
double sigmoid(double a)
{
  return 1.0 / (1.0 + exp(-a));
}
/*
// NOTE(review): dead code kept for reference. As written it cannot compile —
// it reads `in`, `weight`, `out`, and `bias`, none of which are parameters or
// visible globals; they must be supplied before re-enabling this helper.
// Fixed here: the original conditions used assignment (`act = "tanh"`)
// instead of comparison (`act == "tanh"`), which would have made the first
// branch always taken.
void connectLayers(const Matrix &B, String act)
{
  Multiply(in, weight, out);
  Add(out, bias, out);
  for (int i = 0; i < out.GetColCount(); i++)
  {
    if (act == "tanh")
    {
      out(0, i) = tanh(out(0, i));
    }
    else if (act == "sigmoid")
    {
      out(0, i) = sigmoid(out(0, i));
    }
  }
}*/
- void setup() {
- Serial.begin(9600);
- float TempVal[40], SalVal[40];
- unsigned long timeStamp[40] = {0};
- unsigned long offset = micros();
- for (int i = 0; i < 40; i++)
- {
- timeStamp[i] = (micros() - offset);
- TempVal[i] = analogRead(A2);
- SalVal[i] = analogRead(A6);
- }
- //Assuming deep, fully connected layers
- const int iS = 120; //# of neurons in input layer
- const int hS1 = 1; //# of neurons in hidden layer 1
- const int hS2 = 1; //# of neurons in hidden layer 2
- const int hS3 = 1; //# of neurons in hidden layer 3
- const int oS = 1; //# of neurons in output layer
- BLA::Matrix <1, iS> input;
- BLA::Matrix <1, iS> resizeMin = {1.520E+02, 6.140E+02, 4.000E+00, 1.520E+02, 6.140E+02, 2.320E+02,
- 1.530E+02, 6.120E+02, 4.640E+02, 1.520E+02, 6.140E+02, 6.960E+02,
- 1.520E+02, 6.140E+02, 9.280E+02, 1.520E+02, 6.130E+02, 1.160E+03,
- 1.520E+02, 6.130E+02, 1.392E+03, 1.520E+02, 6.130E+02, 1.624E+03,
- 1.520E+02, 6.130E+02, 1.856E+03, 1.520E+02, 6.140E+02, 2.088E+03,
- 1.530E+02, 6.120E+02, 2.320E+03, 1.520E+02, 6.100E+02, 2.552E+03,
- 1.520E+02, 6.140E+02, 2.784E+03, 1.510E+02, 6.130E+02, 3.016E+03,
- 1.520E+02, 6.130E+02, 3.248E+03, 1.510E+02, 6.140E+02, 3.480E+03,
- 1.520E+02, 6.140E+02, 3.712E+03, 1.520E+02, 6.130E+02, 3.944E+03,
- 1.520E+02, 6.110E+02, 4.176E+03, 1.510E+02, 6.140E+02, 4.408E+03,
- 1.520E+02, 6.120E+02, 4.640E+03, 1.520E+02, 6.140E+02, 4.872E+03,
- 1.510E+02, 6.130E+02, 5.104E+03, 1.520E+02, 6.140E+02, 5.340E+03,
- 1.510E+02, 6.130E+02, 5.572E+03, 1.520E+02, 6.140E+02, 5.804E+03,
- 1.530E+02, 6.140E+02, 6.036E+03, 1.520E+02, 6.130E+02, 6.268E+03,
- 1.520E+02, 6.130E+02, 6.500E+03, 1.520E+02, 6.120E+02, 6.732E+03,
- 1.520E+02, 6.140E+02, 6.964E+03, 1.510E+02, 6.140E+02, 7.196E+03,
- 1.520E+02, 6.140E+02, 7.428E+03, 1.520E+02, 6.120E+02, 7.660E+03,
- 1.510E+02, 6.140E+02, 7.892E+03, 1.530E+02, 6.130E+02, 8.124E+03,
- 1.520E+02, 6.140E+02, 8.356E+03, 1.520E+02, 6.120E+02, 8.588E+03,
- 1.520E+02, 6.110E+02, 8.820E+03, 1.520E+02, 6.130E+02, 9.052E+03};
- BLA::Matrix <1, iS> resizeMax = {1.550E+02, 6.330E+02, 8.000E+00, 1.550E+02, 6.340E+02, 3.320E+02,
- 1.550E+02, 6.320E+02, 5.640E+02, 1.550E+02, 6.330E+02, 7.960E+02,
- 1.540E+02, 6.320E+02, 1.028E+03, 1.550E+02, 6.320E+02, 1.260E+03,
- 1.550E+02, 6.320E+02, 1.492E+03, 1.550E+02, 6.340E+02, 1.724E+03,
- 1.550E+02, 6.310E+02, 1.956E+03, 1.550E+02, 6.320E+02, 2.188E+03,
- 1.550E+02, 6.340E+02, 2.420E+03, 1.550E+02, 6.310E+02, 2.652E+03,
- 1.550E+02, 6.330E+02, 2.884E+03, 1.540E+02, 6.320E+02, 3.116E+03,
- 1.550E+02, 6.330E+02, 3.348E+03, 1.540E+02, 6.310E+02, 3.580E+03,
- 1.540E+02, 6.320E+02, 3.812E+03, 1.550E+02, 6.310E+02, 4.044E+03,
- 1.550E+02, 6.320E+02, 4.276E+03, 1.550E+02, 6.320E+02, 4.508E+03,
- 1.550E+02, 6.310E+02, 4.740E+03, 1.550E+02, 6.330E+02, 4.972E+03,
- 1.540E+02, 6.320E+02, 5.204E+03, 1.550E+02, 6.330E+02, 5.436E+03,
- 1.540E+02, 6.310E+02, 5.668E+03, 1.550E+02, 6.340E+02, 5.900E+03,
- 1.550E+02, 6.310E+02, 6.132E+03, 1.540E+02, 6.320E+02, 6.372E+03,
- 1.540E+02, 6.310E+02, 6.604E+03, 1.550E+02, 6.320E+02, 6.836E+03,
- 1.550E+02, 6.330E+02, 7.068E+03, 1.540E+02, 6.320E+02, 7.300E+03,
- 1.550E+02, 6.330E+02, 7.532E+03, 1.540E+02, 6.320E+02, 7.764E+03,
- 1.540E+02, 6.320E+02, 7.996E+03, 1.550E+02, 6.320E+02, 8.228E+03,
- 1.550E+02, 6.340E+02, 8.460E+03, 1.550E+02, 6.320E+02, 8.692E+03,
- 1.540E+02, 6.310E+02, 8.924E+03, 1.550E+02, 6.320E+02, 9.156E+03};
- for (int i = 0; i < input.GetColCount(); i+=3)
- {
- //xdata = (xdata - xmin) / (xmax - xmin) # element wise operations
- input(0, i) = (TempVal[i]-resizeMin(i))/(resizeMax(i)-resizeMin(i));
- input(0, i+1) = (SalVal[i+1]-resizeMin(i+1))/(resizeMax(i+1)-resizeMin(i+1));
- input(0, i+2)= (timeStamp[i+2]-resizeMin(i+2))/(resizeMax(i+2)-resizeMin(i+2));
- }
- BLA::Matrix <1, hS1> hidden1;
- BLA::Matrix <1, hS2> hidden2;
- BLA::Matrix <1, hS3> hidden3;
- BLA::Matrix <1, oS> output;
- input.Fill(1);
- hidden1.Fill(1);
- hidden2.Fill(1);
- hidden3.Fill(1);
- output.Fill(1);
- //weights from input -> hidden1
- BLA::Matrix<iS, hS1> w1 = { -2.251689787954092026E-03, 2.078449279069900513E-01, -6.174370646476745605E-02, -1.294941604137420654E-01, -1.960633248090744019E-01, 2.095378339290618896E-01, -1.128478348255157471E-01, 2.074594050645828247E-01, 1.797222197055816650E-01, -1.595804244279861450E-01, 1.617034673690795898E-01, 1.519532408565282822E-02, 1.532623022794723511E-01, -1.523475646972656250E-01, -1.060240492224693298E-01, 1.646637767553329468E-01, 1.431714743375778198E-01, -7.227062433958053589E-02, -1.439640522003173828E-01, 4.284194484353065491E-02, 2.209243178367614746E-01, 7.616808172315359116E-03, -2.310660481452941895E-01, -2.087087035179138184E-01, -1.661327630281448364E-01, 1.959848850965499878E-01, -1.410448998212814331E-01, -8.355728536844253540E-02, 5.052351579070091248E-02, 2.200548350811004639E-01, 5.016517825424671173E-03, -1.194399520754814148E-01, -1.496096402406692505E-01, -9.792125225067138672E-02, 1.715912818908691406E-01, -1.294056177139282227E-01, -1.837181299924850464E-01, 1.814135462045669556E-01, -1.262866109609603882E-01, -1.174561679363250732E-01, -1.747280657291412354E-01, -6.911590695381164551E-02, -1.444210261106491089E-01, 1.909952163696289062E-01, -1.836621314287185669E-01, -2.292088866233825684E-01, 6.661105155944824219E-02, -9.071853756904602051E-02, 1.107472404837608337E-01, -1.990463733673095703E-01, 1.913081407546997070E-01, -2.117955386638641357E-01, -1.817548722028732300E-01, 1.037685573101043701E-01, -1.178410351276397705E-01, -2.112298905849456787E-01, -1.337243765592575073E-01, -1.069791018962860107E-01, -7.672908157110214233E-02, 1.246274113655090332E-01, 1.839692592620849609E-01, 1.742247343063354492E-01, -1.339842528104782104E-01, -1.048430949449539185E-01, 5.569282919168472290E-02, 1.340724080801010132E-01, -1.821028739213943481E-01, 8.409734070301055908E-02, 8.550775796175003052E-02, -1.375128775835037231E-01, -1.748649030923843384E-02, -8.476732671260833740E-02, 8.759501576423645020E-02, 5.966226384043693542E-02, 
-1.625532656908035278E-01, -2.238687723875045776E-01, -1.820734143257141113E-01, 7.845860719680786133E-02, -2.067508101463317871E-01, -2.267828583717346191E-01, 1.421210914850234985E-02, 1.710806638002395630E-01, -1.220725029706954956E-01, -1.230347454547882080E-01, -2.408410236239433289E-02, -1.400239169597625732E-01, 1.552093476057052612E-01, 1.353525519371032715E-01, -8.251619525253772736E-03, 6.090170703828334808E-03, -1.159833297133445740E-01, -5.444481223821640015E-02, -2.193169742822647095E-01, -1.008751392364501953E-01, -2.081227153539657593E-01, -3.562032803893089294E-02, 1.269473433494567871E-01, -6.943794339895248413E-02, 5.718484171666204929E-04, 1.164589449763298035E-01, 1.406312286853790283E-01, 2.029875218868255615E-01, -1.916677653789520264E-01, -1.550870686769485474E-01, 4.468404501676559448E-02, -1.010407432913780212E-01, 1.392701119184494019E-01, 2.657910995185375214E-02, -1.483597457408905029E-01, 1.252990067005157471E-01, -1.630185246467590332E-01, -5.802119150757789612E-02, -2.273037135601043701E-01, -4.355586320161819458E-02, 1.874085068702697754E-01, -1.690577715635299683E-01, -9.867803892120718956E-04, -1.497109234333038330E-01, -1.793888360261917114E-01, 1.655142754316329956E-01};
- //weights from hidden1 -> hidden2
- BLA::Matrix<hS1, hS2> w2 = {-1.386107921600341797E+00};
- //weights from hidden2 -> hidden3
- BLA::Matrix<hS2, hS3> w3 = {-6.963229179382324219E-01};
- //weights from hidden3 -> output
- BLA::Matrix<hS3, oS> w4 = {1.730884790420532227E+00};
- //biases from input -> hidden1
- BLA::Matrix<1, hS1> b1 = {-1.929938048124313354E-02};
- //biases from hidden1 -> hidden2
- BLA::Matrix<1, hS2> b2 = {5.753378942608833313E-02};
- //biases from hidden2 -> hidden3
- BLA::Matrix<1, hS3> b3 = {-3.557488322257995605E-01};
- //biases from hidden3 -> output
- BLA::Matrix<1, oS> b4 = {-3.349284529685974121E-01};
- Multiply(input, w1, hidden1);
- Add(hidden1, b1, hidden1);
- for (int i = 0; i < hidden1.GetColCount(); i++)
- {
- hidden1(0, i) = tanh(hidden1(0, i));
- }
- Multiply(hidden1, w2, hidden2);
- Add(hidden2, b2, hidden2);
- for (int i = 0; i < hidden2.GetColCount(); i++)
- {
- hidden2(0, i) = tanh(hidden2(0, i));
- }
- Multiply(hidden2, w3, hidden3);
- Add(hidden3, b3, hidden3);
- for (int i = 0; i < hidden3.GetColCount(); i++)
- {
- hidden3(0, i) = tanh(hidden3(0, i));
- }
- Multiply(hidden3, w4, output);
- Add(output, b4, output);
- for (int i = 0; i < output.GetColCount(); i++)
- {
- output(0, i) = tanh(output(0, i));
- }
- Serial.print("Prediction: ");
- Serial.println(output(0, 0));
- }
// Intentionally empty: the single inference run happens in setup(),
// so there is no repeated work to do.
void loop() {}
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement