#pragma hdrstop
#pragma argsused
#ifdef _WIN32
#include <tchar.h>
#else
typedef char _TCHAR;
#define _tmain main
#endif
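// Portability shim: on non-Windows builds, _TCHAR and _tmain are mapped to
// plain char and main, so the Borland/Embarcadero-style entry point below
// also compiles with g++ or clang++.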
#include <algorithm>
#include <stdio.h>
#include <stdlib.h> // rand, srand
#include <time.h>   // time, for seeding srand
#include <math.h>
#include <iostream>
using std::cin;
using std::cout;
using std::endl;
const long double learningrate = 0.01;
const long double a = 3;
const long double k = 1.555;
const long double pi = 2 * acosl(0.00);
const long double version = 0.001901;
const long double cyclesi = 2500; // a status printout every cyclesi/100 iterations (every 1%)
const int training_inputs = 100;
const int mt = 30;
const int mt_implemented = 10; // number of fully implemented methods
const long double TESTbooster = 20; // normally 1; higher values (typically 1..40) speed up training and are for testing only
const char *colour[mt_implemented+1] = { "Unused", "Sinoid", "Softplus", "Sigmoid", "TanH", "Arctan", "RELU", "ISRLU", "ELiSH", "Softsign", "Test 1"};
long double training_set_inputs[training_inputs][3];
long double training_set_outputs[training_inputs], new_situation[training_inputs][3];
int mc = 1;
long double xSynapticWeight[mt][4];
long double xOutput5[mt], xDOToutput5[mt], xoutput[mt];
// The per-sample B arrays are indexed 1..training_inputs below, so they need one extra slot.
long double Bxoutput[mt][training_inputs+1], BxPart[mt][training_inputs+1];
long double Bx_DerivativePart[mt][training_inputs+1], BxerrorPart[mt][training_inputs+1];
long double Bx[mt][training_inputs+1], BxadjustSynapticWeight[mt][4], BxadjustmentPart[mt][training_inputs+1];
long double oldoutput, cool, indicator, i = 1;
int m, q, qq, create;
int f, g, h, y;
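// Overview: each "method" mc is a single neuron with three weights and no
// bias, all trained on the same random table with a delta-rule update:
//   net    = w1*x1 + w2*x2 + w3*x3
//   out    = f(net)                        (amethod with c==1)
//   error  = f(target) - out
//   w_j   += learningrate * mean_q( f(x_j) * error * f'(net) )
// so the different activation functions can be compared on one regression task.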
long double amethod(long double fx, long double x, int b, int c) // fx: function value (or direct input), x: raw input, b: method number, c: 1 = f(x), 2 = derivative
{
    if (c==1) {
        switch(b) {
            case 0 : return 1;                                   // f(x) : not used
            case 1 : return sinl(fx);                            // f(x) : sinoid
            case 2 : return logl(1+expl(fx))/k;                  // f(x) : softplus
            case 3 : return 1 / (1 + expl(-(fx)));               // f(x) : sigmoid
            case 4 : return (expl(fx)-expl(-fx))/(expl(fx)+expl(-fx)); // f(x) : TanH (numerator parenthesised)
            case 5 : return atanl(fx);                           // f(x) : Arctan
            case 6 : return (fx>=0)?(fx):(0);                    // f(x) : RELU
            case 7 : return fx / powl( (1+a*fx*fx),0.5);         // f(x) : Inverse Square Root Linear Unit (ISRLU)
            case 8 : return (fx>=0)?(fx/(1+expl(-fx))):((expl(fx)-1)/(1+expl(-fx))); // f(x) : ELiSH, x*sigmoid(x) for x>=0
            case 9 : return fx/(1+fabsl(fx));                    // f(x) : ElliotSig / Softsign
            case 10 : return expl(fx*pi)+1;                      // f(x) : experimental
            default :
                cout << "Error !" << endl;
        }
        return 0;
    }
    if (c==2) {
        // For the derivatives, fx is the already-computed activation output
        // and x is the raw (pre-activation) input.
        switch(b) {
            case 0 : return 1;                                   // derivative : not used
            case 1 : return cosl(x);                             // derivative : sinoid (cos of the raw input)
            case 2 : return expl(x)/(k*expl(x)+k);               // derivative : softplus
            case 3 : return fx * (1 - fx);                       // derivative : sigmoid (in terms of the output fx)
            case 4 : return 4/powl((expl(x)+expl(-x)),2);        // derivative : TanH (in terms of the raw input x)
            case 5 : return 1/((x*x)+1);                         // derivative : Arctan
            case 6 : return (x>=0)?(1):(0);                      // derivative : RELU
            case 7 : return powl(1 / powl( (1+a*x*x),0.5),3);    // derivative : ISRLU
            case 8 : return (x>=0)?((expl(x)*(x+expl(x)+1))/powl(expl(x)+1,2)):(expl(x)*(2*expl(x)+expl(2*x)-1)/powl(expl(x)+1,2)); // derivative : ELiSH
            case 9 : return 1/powl(1 + fabsl(x),2);              // derivative : ElliotSig / Softsign (in terms of the raw input x)
            case 10 : return 1;                                  // still to be determined
            default :
                cout << "Error !" << endl;
        }
    }
    return 0;
}
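// Example: amethod(0.5, 0, 3, 1) evaluates the sigmoid 1/(1+e^-0.5) ~ 0.6225,
// and amethod(0.6225, 0.5, 3, 2) gives its derivative out*(1-out) ~ 0.2350.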
long double outt(long double (*array)[training_inputs][3], int b)
{
    long double calc;
    calc = powl((*array)[b][0],1.66) + 3*powl((*array)[b][1],1.77) + 15.55*(*array)[b][2];
    //calc = 3*powl((*array)[b][0],2.1) + 20.5*(*array)[b][1] + 10.77*(*array)[b][2];
    if (calc==0) {
        calc=1; printf("\ntest\n");
    }
    return calc;
}
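// outt() is the ground-truth function the neurons must learn. The zero guard
// above matters because every input and target is inverted (1/x) before training.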
int _tmain(int argc, _TCHAR* argv[])
{
    srand (time(NULL));
    new_situation[0][0]=52.00; new_situation[0][1]=32.00; new_situation[0][2]=68.00;
    cool = outt(&new_situation,0);
    printf("\n the output should be approx %.2Lf\n",cool);
    for (create = 0; create < training_inputs; create++) {
        bool redo=false;
        do
        {
            // The table is designed to hold values between 20 and 80; values much below or above that range destabilize the whole thing.
            training_set_inputs[create][0] = (rand() % 60) + 20.00L;
            training_set_inputs[create][1] = (rand() % 60) + 20.00L;
            training_set_inputs[create][2] = (rand() % 60) + 20.00L;
            training_set_outputs[create] = outt(&training_set_inputs, create);
            if (training_set_outputs[create]==0) redo=true;
        } while (redo==true);
        //printf("\ntr %Lf %Lf %Lf %Lf",training_set_inputs[create][0], training_set_inputs[create][1], training_set_inputs[create][2], training_set_outputs[create]);Sleep(10);
    }
    new_situation[0][0] = 1/new_situation[0][0];
    new_situation[0][1] = 1/new_situation[0][1];
    new_situation[0][2] = 1/new_situation[0][2];
    for (auto& rows : training_set_inputs)
    {
        for (auto& elem : rows)
        {
            elem = 1/elem;
        }
    }
    for (auto& number : training_set_outputs)
    {
        number = 1/number;
    }
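// Everything is replaced by its reciprocal: table values of 20..80 become
// roughly 0.0125..0.05, presumably to keep the nets inside the small,
// responsive region of the activation functions.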
    for (mc = 1; mc <= mt_implemented; mc++) {
        // xSynapticWeight[mc][1]=(rand() % 2000)/2000.00;
        // xSynapticWeight[mc][2]=(rand() % 2000)/2000.00;
        // xSynapticWeight[mc][3]=(rand() % 2000)/2000.00;
        xSynapticWeight[mc][1]=0.0000;
        xSynapticWeight[mc][2]=0.0000;
        xSynapticWeight[mc][3]=0.0000;
        // The run-to-run variation in the results comes from the randomly built input table, not from the weights, which start at zero.
        //printf("\nh%.6Lf %.6Lf %.6Lf", xSynapticWeight[mc][1], xSynapticWeight[mc][2], xSynapticWeight[mc][3]);
    }
    while (i++) { // endless training loop: i never returns to 0, so stop with Ctrl+C
        //for( q = 1; q <= training_inputs; q++)
        q = rand() % training_inputs + 1; // stochastic sample pick trains faster : for testing only
        {
            for (mc = 1; mc <= mt_implemented; mc++) {
                Bxoutput[mc][q] = training_set_inputs[q-1][0]*xSynapticWeight[mc][1] + training_set_inputs[q-1][1]*xSynapticWeight[mc][2] + training_set_inputs[q-1][2]*xSynapticWeight[mc][3];
                BxPart[mc][q] = amethod(Bxoutput[mc][q],0,mc,1);                                      // out = f(net)
                Bx_DerivativePart[mc][q] = TESTbooster * amethod(BxPart[mc][q],Bxoutput[mc][q],mc,2); // f'(net), boosted
                BxerrorPart[mc][q] = (amethod(training_set_outputs[q-1],0,mc,1) - BxPart[mc][q]);     // f(target) - out
                BxadjustmentPart[mc][q] = BxerrorPart[mc][q] * Bx_DerivativePart[mc][q];
            }
        }
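        // Note that the target is pushed through the same activation, so the
        // error f(1/target) - f(net) drives net toward 1/target for any
        // monotone f; the printout below therefore reports 1/net as the prediction.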
        for (q = 1; q <= 3; q++) {
            for (mc = 1; mc <= mt_implemented; mc++) {
                BxadjustSynapticWeight[mc][q]=0;
                //for( qq = 1; qq <= training_inputs; qq++) { BxadjustSynapticWeight[mc][q] += (training_set_inputs[qq-1][q-1]*BxadjustmentPart[mc][qq])/training_inputs; }
                for (qq = 1; qq <= training_inputs; qq++) { BxadjustSynapticWeight[mc][q] += (amethod(training_set_inputs[qq-1][q-1],0,mc,1)*BxadjustmentPart[mc][qq])/training_inputs; }
                xSynapticWeight[mc][q] = xSynapticWeight[mc][q] + BxadjustSynapticWeight[mc][q]*learningrate;
            }
        }
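        // Weight update: each weight moves by the sample-mean of
        // f(input_j) * error * f'(net), scaled by learningrate (TESTbooster is
        // already folded into the derivative term above).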
        if ((floor(i/cyclesi*100)) != indicator)
        {
            for (mc = 1; mc <= mt_implemented; mc++) {
                xOutput5[mc] = new_situation[0][0]*xSynapticWeight[mc][1] + new_situation[0][1]*xSynapticWeight[mc][2] + new_situation[0][2]*xSynapticWeight[mc][3];
                xoutput[mc] = 1/xOutput5[mc]; // undo the reciprocal scaling of the inputs/targets
                printf("\nmethod:%s\t (%.0Lf iterations) has output %.2Lf (target value = %.2Lf)", colour[mc], i, xoutput[mc], cool);
            }
            printf("\n");
        }
        indicator = floor(i/cyclesi*100);
    }
    cin.get();
    return 0;
}