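// Single-neuron ReLU "network" trained with a simple stochastic gradient step to
// approximate the target function outt() (here 60*x0 + 37*x1) from randomly
// generated training data. Originally a C++Builder console project; the
// _TCHAR/_tmain shim below keeps it buildable with other compilers.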
#pragma hdrstop
#pragma argsused
#ifdef _WIN32
#include <tchar.h>
#else
typedef char _TCHAR;
#define _tmain main
#endif
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>   // rand, srand
#include <time.h>     // time, for seeding the RNG
#include <math.h>
#include <iostream>
using std::cin;
using std::cout;
using std::endl;
const long double cyclesi = 1000;
const long double learningrate = 0.0000000001; // higher values can cause Inf or NaN errors
const int training_inputs = 1020;
long double training_set_inputs[training_inputs][3];
long double training_set_outputs[training_inputs], new_situation[training_inputs][3];
long double funcSynapticWeight[4];
long double funcOutput5, funcDOToutput5, funcoutput;
// per-sample buffers are indexed 1..training_inputs below, so size them +1
long double Bfuncoutput[training_inputs + 1], BfuncPart[training_inputs + 1], Bfunc_DerivativePart[training_inputs + 1], BfuncerrorPart[training_inputs + 1], BfuncadjustmentPart[training_inputs + 1], BfuncadjustSynapticWeight[4];
long double oldoutput, cool, indicator, i = 1, ErrorT;
int m, q, qq, create;
int f, g, h, y;
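// i counts training iterations, ErrorT accumulates squared error between
// progress reports, and cyclesi only scales the progress indicator.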
// target function the network is trained to approximate
long double outt(long double (*array)[training_inputs][3], int b)
{
    long double calc;
    calc = 60*(*array)[b][0] + 37*(*array)[b][1];
    // calc = 2*powl((*array)[b][0],3.0) - 5.5*pow((*array)[b][1],2) + 4.77*(*array)[b][2];
    return calc;
}
// ReLU activation
long double s(long double x)
{
    return (x >= 0) ? (x) : (0);
}
// ReLU derivative
long double d(long double x)
{
    return (x >= 0) ? (1) : (0);
}
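// The model is a single neuron: output = s(w1*x0 + w2*x1 + w3*x2), where s is
// the ReLU above and w1..w3 are funcSynapticWeight[1..3].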
int _tmain(int argc, _TCHAR* argv[])
{
    srand((unsigned)time(NULL));
    // query point whose target value the trained network should reproduce
    new_situation[0][0] = 63.00; new_situation[0][1] = 35.00; new_situation[0][2] = 62.00;
    cool = outt(&new_situation, 0);
    printf("\n the output should be approx %.2Lf\n", cool);
    // build the training set: random inputs in [20,80), labelled by outt()
    for (create = 0; create < training_inputs; create++) {
        bool redo;
        do
        {
            redo = false;
            training_set_inputs[create][0] = (rand() % 60) + 20.00L;
            training_set_inputs[create][1] = (rand() % 60) + 20.00L;
            training_set_inputs[create][2] = (rand() % 60) + 20.00L;
            training_set_outputs[create] = outt(&training_set_inputs, create);
            if (training_set_outputs[create] == 0) redo = true; // re-draw degenerate samples
        } while (redo == true);
    }
    // random initial weights in (0.01, 1.01); index 0 is unused
    funcSynapticWeight[1] = ((rand() % 1000) / 1000.00) + 0.01;
    funcSynapticWeight[2] = ((rand() % 1000) / 1000.00) + 0.01;
    funcSynapticWeight[3] = ((rand() % 1000) / 1000.00) + 0.01;
    printf("SynapticWeight[1]=%.2Lf , SynapticWeight[2]=%.2Lf, SynapticWeight[3]=%.2Lf\n\n", funcSynapticWeight[1], funcSynapticWeight[2], funcSynapticWeight[3]);
    while (i++) {
        // pick one training sample at random (a full sweep over all samples is commented out)
        //for( q = 1; q <= training_inputs; q++)
        q = rand() % training_inputs + 1;
        {
            // forward pass and error for sample q (these buffers are 1-based)
            Bfuncoutput[q] = training_set_inputs[q-1][0]*funcSynapticWeight[1] + training_set_inputs[q-1][1]*funcSynapticWeight[2] + training_set_inputs[q-1][2]*funcSynapticWeight[3];
            BfuncPart[q] = s(Bfuncoutput[q]);
            Bfunc_DerivativePart[q] = d(Bfuncoutput[q]); // ReLU derivative at the pre-activation
            BfuncerrorPart[q] = 2*(training_set_outputs[q-1] - BfuncPart[q]);
            ErrorT += powl(training_set_outputs[q-1] - BfuncPart[q], 2);
            BfuncadjustmentPart[q] = BfuncerrorPart[q] * Bfunc_DerivativePart[q];
        }
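        // For squared error E = (t - y)^2, dE/dw_j = -2*(t - y)*d(net)*x_j;
        // BfuncerrorPart holds 2*(t - y) and the update below multiplies it by
        // the input and the learning rate.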
        for (q = 1; q <= 3; q++) {
            BfuncadjustSynapticWeight[q] = 0;
            for (qq = 1; qq <= training_inputs; qq++) {
                // averaged adjustment, only used by the commented-out batch update below
                BfuncadjustSynapticWeight[q] += (s(training_set_inputs[qq-1][q-1])*BfuncadjustmentPart[qq])/(training_inputs);
                // new method: update using the most recently computed error for each sample
                funcSynapticWeight[q] = funcSynapticWeight[q] + learningrate*training_set_inputs[qq-1][q-1]*BfuncerrorPart[qq];
            }
            // old method: batch update with the averaged adjustment
            //funcSynapticWeight[q] = funcSynapticWeight[q] + 0.000000001*BfuncadjustSynapticWeight[q];
        }
        // report progress whenever the indicator ticks (every cyclesi/100 iterations)
        if ((floor(i/cyclesi*100)) != indicator)
        {
            funcOutput5 = new_situation[0][0]*funcSynapticWeight[1] + new_situation[0][1]*funcSynapticWeight[2] + new_situation[0][2]*funcSynapticWeight[3];
            funcDOToutput5 = s(funcOutput5);
            funcoutput = funcDOToutput5;
            printf("Step %.0Lf ", indicator); printf("Error : %.2Lf \n", ErrorT);
            ErrorT = 0;
            printf("\n( %.0Lf iterations) has output %.2Lf (targetvalue = %.2Lf)", i, funcoutput, cool);
        }
        indicator = floor(i/cyclesi*100);
    }
    cin.get();
    return 0;
}