learnbp
function BackPropAlgo(Input, Output)

%STEP 1 : Normalize the Input

%Checking whether the Input needs to be normalized or not
if max(abs(Input(:))) > 1
    %Need to normalize
    Norm_Input = Input / max(abs(Input(:)));
else
    Norm_Input = Input;
end

%Checking whether the Output needs to be normalized or not
if max(abs(Output(:))) > 1
    %Need to normalize
    Norm_Output = Output / max(abs(Output(:)));
else
    Norm_Output = Output;
end
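%For illustration: this is max-abs scaling, so an Input such as [2 -4; 1 3]
%has max(abs(Input(:))) = 4 and would be scaled to [0.5 -1; 0.25 0.75],
%keeping every normalized value within [-1, 1].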
%Assigning the number of neurons in the hidden layer
m = 2;

%Find the sizes of the Input and Output matrices
[l,b] = size(Input);
[n,a] = size(Output);

%Initialize the weight matrices with random weights
V = rand(l,m); % Weight matrix from Input to Hidden
W = rand(m,n); % Weight matrix from Hidden to Output

%Setting count to zero, to know the number of iterations
count = 0;

%Calling the function that trains the neural network
[errorValue, delta_V, delta_W] = trainNeuralNet(Norm_Input, Norm_Output, V, W);

%Checking whether the error value is greater than 0.05. If yes, we need to
%train the network again. The user can decide the threshold value.
while errorValue > 0.05

    %Incrementing count
    count = count + 1;

    %Store the error value into a matrix to plot the graph
    Error_Mat(count) = errorValue;

    %Change the weight matrices V and W by adding the delta values to them
    W = W + delta_W;
    V = V + delta_V;

    %Display the current iteration number
    count

    %Calling the function with another overload.
    %Now we have the delta values as well.
    [errorValue, delta_V, delta_W] = trainNeuralNet(Norm_Input, Norm_Output, V, W, delta_V, delta_W);

end

%This code will be executed when the error value is less than 0.05
if errorValue < 0.05

    %Incrementing the count variable to know the number of iterations
    count = count + 1;

    %Storing the error value into the matrix for plotting the graph
    Error_Mat(count) = errorValue;

end

%Calculating the error rate
Error_Rate = sum(Error_Mat)/count;

figure;

%Setting the iteration indices for the x-axis of the plot
y = 1:count;

%Plotting the graph of error value against iteration number
plot(y, Error_Mat);

end
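A minimal usage sketch (illustrative, not part of the original listing): the matrix shapes in this code line up most cleanly for a single training pattern supplied as a column vector together with a single target value, so the call below assumes exactly that; the numbers are arbitrary.

%Hypothetical call with one two-feature pattern and one scalar target
Input  = [0.4; -0.7];
Output = 0.1;
BackPropAlgo(Input, Output);  %trains until errorValue drops below 0.05 and plots the error curve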
Function to train the network

%Created By : Anoop.V.S & Lekshmi B G
%Created On : 18-09-2013
%Description : Function to train the network

function [errorValue, delta_V, delta_W] = trainNeuralNet(Input, Output, V, W, delta_V, delta_W)

%Function for the calculations (steps 4 - 16) that train the Neural Network

%Calculating the Output of the Input Layer.
%No computation here: the Output of the Input Layer is the same as its Input.
Output_of_InputLayer = Input;

%Calculating the Input of the Hidden Layer.
%Here we need to multiply the Output of the Input Layer with the
%synaptic weights. Those weights are in the matrix V.
Input_of_HiddenLayer = V' * Output_of_InputLayer;

%Calculate the size of the Input to the Hidden Layer
[m, n] = size(Input_of_HiddenLayer);

%Now we have to calculate the Output of the Hidden Layer.
%For that, we apply the sigmoidal (logistic) function element-wise.
Output_of_HiddenLayer = 1./(1 + exp(-Input_of_HiddenLayer));

%Calculating the Input to the Output Layer.
%Here we need to multiply the Output of the Hidden Layer with the
%synaptic weights. Those weights are in the matrix W.
Input_of_OutputLayer = W' * Output_of_HiddenLayer;

%Clear variables
clear m n;

%Calculate the size of the Input of the Output Layer
[m, n] = size(Input_of_OutputLayer);

%Now we have to calculate the Output of the Output Layer.
%For that, we apply the sigmoidal (logistic) function element-wise.
Output_of_OutputLayer = 1./(1 + exp(-Input_of_OutputLayer));

%Now we calculate the error value as the square root of the sum of the
%squared differences between the desired and the actual output
difference = Output - Output_of_OutputLayer;
square = difference.*difference;
errorValue = sqrt(sum(square(:)));

%Calculate the matrix 'd' with respect to the desired output

%Clear the variables m and n
clear m n
[n, a] = size(Output);

%Preallocate d
d = zeros(n, a);

for i = 1 : n
    for j = 1 : a
        d(i,j) = (Output(i,j) - Output_of_OutputLayer(i,j)) * Output_of_OutputLayer(i,j) * (1 - Output_of_OutputLayer(i,j));
    end
end
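%Note: the loop above is the element-wise form of the vectorized expression
%   d = (Output - Output_of_OutputLayer).*Output_of_OutputLayer.*(1 - Output_of_OutputLayer);
%i.e. the output error scaled by the derivative of the sigmoid, o.*(1 - o).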
%Now, calculate the Y matrix (Output of the Hidden Layer times the
%transpose of d, so that Y has the same size as W)
Y = Output_of_HiddenLayer * d'; %STEP 11
%Checking the number of arguments; we are using function overloading.
%On the first iteration we don't have delta_V and delta_W, so we have to
%initialize them with zeros. The sizes of delta_V and delta_W will be the
%same as those of the V and W matrices respectively (nargin - number of input arguments).
if nargin == 4
    delta_W = zeros(size(W));
    delta_V = zeros(size(V));
end

%Initializing the learning rate (etta) with 0.6 and the momentum factor (alpha) with 1
etta = 0.6;
alpha = 1;

%Calculating delta W
delta_W = alpha.*delta_W + etta.*Y; %STEP 12

%STEP 13
%Calculating the error matrix propagated back to the hidden layer
%(named err so that it does not shadow MATLAB's built-in error function)
err = W*d;

%Calculating d*
clear m n
[m, n] = size(err);

%Preallocate d_star
d_star = zeros(m, n);

for i = 1 : m
    for j = 1 : n
        d_star(i,j) = err(i,j) * Output_of_HiddenLayer(i,j) * (1 - Output_of_HiddenLayer(i,j));
    end
end
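%Note: as with d above, this loop is the element-wise form of
%   d_star = err.*Output_of_HiddenLayer.*(1 - Output_of_HiddenLayer);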
%Now find the matrix X (Input times the transpose of d_star)
X = Input * d_star';

%STEP 14
%Calculating delta V
delta_V = alpha*delta_V + etta*X;

end