import math
import random
import numpy

random.seed(0)  # seed before any weights are drawn so runs are reproducible


# INPUT DATA:

nos_entrada = 2
nos_entrada = nos_entrada + 1  # add 1 for the slot that stores theta (the bias)
nos_ocultos = 30
nos_saida = 1
nr_interacoes = 50000
taxa_aprendizado = 0.5

# SOME DECLARATIONS:
# activation arrays
ativacao_entrada = numpy.ones(nos_entrada)
ativacao_ocultos = numpy.ones(nos_ocultos)
ativacao_saida = numpy.ones(nos_saida)

# input/output weight matrices
PesoEntrada = numpy.zeros((nos_entrada, nos_ocultos))
PesoSaida = numpy.zeros((nos_ocultos, nos_saida))

# SOME FUNCTIONS:
def gerador(a, b):
    return (b - a) * random.random() + a  # random number generator in [a, b)

# derivative of math.tanh(x), written in terms of the activation y = tanh(x),
# since the callers pass in the already-activated value:
# d/dx tanh(x) = 1 - tanh(x)**2 = 1 - y**2
def derivada_tangente_h(y):
    return 1 - y ** 2
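
# A hedged sanity check (illustrative helper, not in the original paste):
# it compares derivada_tangente_h against a central finite difference of
# math.tanh; the script itself never calls it.
def checar_derivada(x, h=1e-5):
    numerica = (math.tanh(x + h) - math.tanh(x - h)) / (2 * h)
    analitica = derivada_tangente_h(math.tanh(x))
    return abs(numerica - analitica)  # should be close to zero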

def iniciar():
    treino(dados)
    analise_treinamento(dados)

def treino(entradas_saidas):
    for i in range(nr_interacoes):
        erro = 0
        for p in entradas_saidas:
            entradas = p[0]
            saidas_desejadas = p[1]
            propagacao(entradas)
            erro = erro + retropropagacao(saidas_desejadas)
        if i % 1000 == 0:
            print("Error at iteration %d: %f" % (i, erro))

def analise_treinamento(entradas_saidas):
    for p in entradas_saidas:
        array = propagacao(p[0])
        print("Input: " + str(p[0]) + " Output found: " + str(array[0]))


# FILLING THE MATRICES:
# Fill the (input/output) weight matrices with random values in the interval -1 to 1
for i in range(nos_entrada):
    for j in range(nos_ocultos):
        PesoEntrada[i][j] = gerador(-1, 1)

for j in range(nos_ocultos):
    for k in range(nos_saida):
        PesoSaida[j][k] = gerador(-1, 1)
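
# A hedged aside (not in the original paste): numpy can draw an entire weight
# matrix in one call. The throwaway variable below is purely illustrative and
# shows the equivalent idiom; the script keeps the element-wise loops above.
_exemplo_pesos = numpy.random.uniform(-1, 1, (nos_entrada, nos_ocultos))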

dados = [
    [[-0.02, 0.94], [0.999955780327]],
    [[0.07, -0.9], [0.999959010293]],
    [[-0.34, 0.3], [0.999983940189]],
    [[0.24, 0.03], [0.999994195033]],
    [[-0.23, 0.22], [0.999992290042]],
    [[-0.09, 0.49], [0.999987185034]],
    [[-0.45, 0.29], [0.999975545498]],
    [[-0.65, -0.28], [0.999953831953]],
    [[-0.65, 0.93], [0.999914508924]],
    [[-0.76, 0.58], [0.999925424355]],
    [[-0.36, 0.36], [0.999980560259]],
    [[0.8, 0.54], [0.999921425064]],
    [[-0.63, -0.21], [0.999958106664]],
    [[-0.75, 0.86], [0.999906775472]],
    [[-0.82, 0.75], [0.999904641544]],
    [[-0.16, 0.2], [0.999995440012]],
    [[0.43, -0.75], [0.999953385994]],
    [[-0.1, 0.11], [0.999998395002]],
    [[-0.2, 0.56], [0.99998032012]],
    [[0.63, -0.48], [0.999948792055]],
    [[0.41, 0.22], [0.999980770324]],
    [[0.13, 0.33], [0.999992865017]],
    [[-0.34, -0.85], [0.999952315769]],
    [[0.4, -0.97], [0.999936956378]],
    [[-0.77, 0.84], [0.999905435814]],
    [[0.02, 0.81], [0.999967155181]],
    [[0.71, 0.6], [0.999931593502]],
    [[-1.0, 0.56], [0.999884331608]],
    [[0.26, -0.15], [0.999992115054]],
    [[-0.38, 0.86], [0.99994858097]],
    [[-0.17, 0.8], [0.999965110271]],
    [[-0.84, -0.52], [0.999915925963]],
    [[0.45, -0.44], [0.999970070622]],
    [[-0.39, -0.64], [0.999964310613]],
    [[0.39, 0.14], [0.999983810246]],
    [[-0.77, -0.8], [0.999908715583]],
    [[-0.19, 0.3], [0.999991890033]],
    [[0.25, -0.73], [0.999967105324]],
    [[-0.23, 0.41], [0.999986305084]],
    [[-0.26, 0.8], [0.999961240433]],
    [[-0.69, 0.4], [0.999944392658]],
    [[-0.15, 0.38], [0.99999053003]],
    [[-0.48, 0.54], [0.999962380902]],
    [[0.4, 0.5], [0.999971500482]],
    [[-0.27, 0.13], [0.999991865059]],
    [[-0.77, 0.52], [0.999927194347]],
    [[-0.02, -0.19], [0.999998155001]],
    [[0.47, -0.39], [0.999970305666]],
    [[-0.26, -0.53], [0.999979195174]],
    [[-0.52, -0.53], [0.999958916144]],
    [[-0.92, 0.56], [0.999899688531]],
    [[0.68, -0.34], [0.999947982411]],
    [[0.21, -0.83], [0.999961145369]],
    [[-0.78, 0.73], [0.999912520441]],
    [[0.93, -0.67], [0.999891074505]],
    [[-0.62, -0.91], [0.999920158355]],
    [[-0.8, 0.79], [0.999904801255]],
    [[0.38, 0.74], [0.999958180729]],
    [[0.0, 0.8], [0.999968000171]],
    [[0.34, -0.3], [0.999983940189]],
    [[0.33, -0.4], [0.999981110216]],
    [[-0.45, 0.73], [0.999953106068]],
    [[0.5, 0.07], [0.999974755631]],
    [[0.48, -0.3], [0.999972460638]],
    [[0.15, 0.26], [0.999994370015]],
    [[0.69, 0.64], [0.999931913312]],
    [[0.79, -0.09], [0.99993718892]],
    [[-0.79, -0.17], [0.999936148985]],
    [[0.56, -0.71], [0.99994343688]],
    [[0.24, 0.5], [0.999981740131]],
    [[0.61, -0.15], [0.999961666427]],
    [[-0.52, -0.38], [0.999965740935]],
    [[-0.96, 0.87], [0.999870007219]],
    [[-0.31, -0.71], [0.99996518544]],
    [[0.8, -0.44], [0.999926324731]],
    [[-0.05, -0.57], [0.999983505048]],
    [[-0.15, 0.09], [0.999997345006]],
    [[-0.85, -0.75], [0.999899632383]],
    [[-0.63, 0.78], [0.999929892937]],
    [[-0.44, -0.89], [0.999941036403]],
    [[0.46, 0.62], [0.999959620916]],
    [[0.36, 0.54], [0.999972460392]],
    [[0.74, -0.82], [0.999911625028]],
    [[-0.94, -0.69], [0.999887845004]],
    [[0.62, -0.52], [0.999948042028]],
    [[0.55, 0.47], [0.999958706269]],
    [[-0.7, 0.0], [0.999951002401]],
    [[-0.77, -0.06], [0.999940533526]],
    [[-0.71, -0.91], [0.999908189914]],
    [[0.55, -0.95], [0.999924627619]],
    [[-0.51, -0.53], [0.999959946075]],
    [[0.83, -0.69], [0.99990731148]],
    [[0.22, -0.47], [0.999984115097]],
    [[0.86, -0.85], [0.999889923359]],
    [[0.73, -0.95], [0.999901590584]],
    [[0.39, 0.08], [0.999984470236]],
    [[0.58, -0.75], [0.99993823721]],
    [[-0.34, -0.83], [0.99995399573]],
    [[-0.44, -0.82], [0.999947021214]],
    [[0.65, -0.23], [0.999955106898]]
]


def propagacao(entradas):
    # set the activations of the input-layer neurons
    # (the last slot stays at 1: the bias)
    for i in range(nos_entrada - 1):
        ativacao_entrada[i] = entradas[i]

    # compute the activations of the hidden-layer neurons; the activation
    # function used is the hyperbolic tangent
    for j in range(nos_ocultos):
        soma = 0
        for i in range(nos_entrada):
            soma = soma + ativacao_entrada[i] * PesoEntrada[i][j]
        ativacao_ocultos[j] = math.tanh(soma)

    # compute the activations of the output-layer neurons
    for j in range(nos_saida):
        soma = 0
        for i in range(nos_ocultos):
            soma = soma + ativacao_ocultos[i] * PesoSaida[i][j]
        ativacao_saida[j] = math.tanh(soma)

    return ativacao_saida

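# A hedged alternative sketch (not in the original paste): the same forward
# pass written with numpy dot products. The name propagacao_vetorizada is an
# illustrative assumption; the script itself only calls propagacao above.
def propagacao_vetorizada(entradas):
    a_entrada = numpy.ones(nos_entrada)
    a_entrada[:nos_entrada - 1] = entradas  # last slot stays at 1 (the bias)
    a_ocultos = numpy.tanh(a_entrada.dot(PesoEntrada))  # hidden activations
    return numpy.tanh(a_ocultos.dot(PesoSaida))  # output activations
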
def retropropagacao(saidas_desejadas):
    # compute the local gradients of the output-layer neurons
    output_deltas = numpy.zeros(nos_saida)
    erro = 0
    for i in range(nos_saida):
        erro = saidas_desejadas[i] - ativacao_saida[i]
        output_deltas[i] = derivada_tangente_h(ativacao_saida[i]) * erro

    # compute the local gradients of the hidden-layer neurons
    hidden_deltas = numpy.zeros(nos_ocultos)
    for i in range(nos_ocultos):
        erro = 0
        for j in range(nos_saida):
            erro = erro + output_deltas[j] * PesoSaida[i][j]
        hidden_deltas[i] = derivada_tangente_h(ativacao_ocultos[i]) * erro

    # update the hidden-to-output weights
    for i in range(nos_ocultos):
        for j in range(nos_saida):
            change = output_deltas[j] * ativacao_ocultos[i]
            PesoSaida[i][j] = PesoSaida[i][j] + (taxa_aprendizado * change)

    # update the input-to-hidden weights
    for i in range(nos_entrada):
        for j in range(nos_ocultos):
            change = hidden_deltas[j] * ativacao_entrada[i]
            PesoEntrada[i][j] = PesoEntrada[i][j] + (taxa_aprendizado * change)

    # compute the error
    erro = 0
    for i in range(len(saidas_desejadas)):
        erro = erro + 0.5 * (saidas_desejadas[i] - ativacao_saida[i]) ** 2
    return erro

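# A hedged alternative sketch (not in the original paste): the same gradients
# and weight updates written with numpy outer products. The name
# retropropagacao_vetorizada is an illustrative assumption; the script itself
# only calls the loop version above. Like the original, it reads the
# module-level activation arrays filled by the most recent forward pass.
def retropropagacao_vetorizada(saidas_desejadas):
    alvo = numpy.asarray(saidas_desejadas)
    delta_saida = derivada_tangente_h(ativacao_saida) * (alvo - ativacao_saida)
    delta_ocultos = derivada_tangente_h(ativacao_ocultos) * PesoSaida.dot(delta_saida)
    PesoSaida += taxa_aprendizado * numpy.outer(ativacao_ocultos, delta_saida)
    PesoEntrada += taxa_aprendizado * numpy.outer(ativacao_entrada, delta_ocultos)
    return 0.5 * numpy.sum((alvo - ativacao_saida) ** 2)
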
# Start of execution
iniciar()
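
# Illustrative follow-up (an assumption, not in the original paste): once
# training has run, the network can be queried on an unseen input pair.
print("Unseen input [0.1, -0.2] ->", propagacao([0.1, -0.2])[0])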