import math
import random
import numpy

entradas_saidas = [
    [[-0.02, 0.94], [0.999955780327]],
    [[0.07, -0.9], [0.999959010293]],
    [[-0.34, 0.3], [0.999983940189]],
    [[0.24, 0.03], [0.999994195033]],
    [[-0.23, 0.22], [0.999992290042]],
    [[-0.09, 0.49], [0.999987185034]],
    [[-0.45, 0.29], [0.999975545498]],
    [[-0.65, -0.28], [0.999953831953]],
    [[-0.65, 0.93], [0.999914508924]],
    [[-0.76, 0.58], [0.999925424355]],
    [[-0.36, 0.36], [0.999980560259]],
    [[0.8, 0.54], [0.999921425064]],
    [[-0.63, -0.21], [0.999958106664]],
    [[-0.75, 0.86], [0.999906775472]],
    [[-0.82, 0.75], [0.999904641544]],
    [[-0.16, 0.2], [0.999995440012]],
    [[0.43, -0.75], [0.999953385994]],
    [[-0.1, 0.11], [0.999998395002]],
    [[-0.2, 0.56], [0.99998032012]],
    [[0.63, -0.48], [0.999948792055]],
    [[0.41, 0.22], [0.999980770324]],
    [[0.13, 0.33], [0.999992865017]],
    [[-0.34, -0.85], [0.999952315769]],
    [[0.4, -0.97], [0.999936956378]],
    [[-0.77, 0.84], [0.999905435814]],
    [[0.02, 0.81], [0.999967155181]],
    [[0.71, 0.6], [0.999931593502]],
    [[-1.0, 0.56], [0.999884331608]],
    [[0.26, -0.15], [0.999992115054]],
    [[-0.38, 0.86], [0.99994858097]],
    [[-0.17, 0.8], [0.999965110271]],
    [[-0.84, -0.52], [0.999915925963]],
    [[0.45, -0.44], [0.999970070622]],
    [[-0.39, -0.64], [0.999964310613]],
    [[0.39, 0.14], [0.999983810246]],
    [[-0.77, -0.8], [0.999908715583]],
    [[-0.19, 0.3], [0.999991890033]],
    [[0.25, -0.73], [0.999967105324]],
    [[-0.23, 0.41], [0.999986305084]],
    [[-0.26, 0.8], [0.999961240433]],
    [[-0.69, 0.4], [0.999944392658]],
    [[-0.15, 0.38], [0.99999053003]],
    [[-0.48, 0.54], [0.999962380902]],
    [[0.4, 0.5], [0.999971500482]],
    [[-0.27, 0.13], [0.999991865059]],
    [[-0.77, 0.52], [0.999927194347]],
    [[-0.02, -0.19], [0.999998155001]],
    [[0.47, -0.39], [0.999970305666]],
    [[-0.26, -0.53], [0.999979195174]],
    [[-0.52, -0.53], [0.999958916144]],
    [[-0.92, 0.56], [0.999899688531]],
    [[0.68, -0.34], [0.999947982411]],
    [[0.21, -0.83], [0.999961145369]],
    [[-0.78, 0.73], [0.999912520441]],
    [[0.93, -0.67], [0.999891074505]],
    [[-0.62, -0.91], [0.999920158355]],
    [[-0.8, 0.79], [0.999904801255]],
    [[0.38, 0.74], [0.999958180729]],
    [[0.0, 0.8], [0.999968000171]],
    [[0.34, -0.3], [0.999983940189]],
    [[0.33, -0.4], [0.999981110216]],
    [[-0.45, 0.73], [0.999953106068]],
    [[0.5, 0.07], [0.999974755631]],
    [[0.48, -0.3], [0.999972460638]],
    [[0.15, 0.26], [0.999994370015]],
    [[0.69, 0.64], [0.999931913312]],
    [[0.79, -0.09], [0.99993718892]],
    [[-0.79, -0.17], [0.999936148985]],
    [[0.56, -0.71], [0.99994343688]],
    [[0.24, 0.5], [0.999981740131]],
    [[0.61, -0.15], [0.999961666427]],
    [[-0.52, -0.38], [0.999965740935]],
    [[-0.96, 0.87], [0.999870007219]],
    [[-0.31, -0.71], [0.99996518544]],
    [[0.8, -0.44], [0.999926324731]],
    [[-0.05, -0.57], [0.999983505048]],
    [[-0.15, 0.09], [0.999997345006]],
    [[-0.85, -0.75], [0.999899632383]],
    [[-0.63, 0.78], [0.999929892937]],
    [[-0.44, -0.89], [0.999941036403]],
    [[0.46, 0.62], [0.999959620916]],
    [[0.36, 0.54], [0.999972460392]],
    [[0.74, -0.82], [0.999911625028]],
    [[-0.94, -0.69], [0.999887845004]],
    [[0.62, -0.52], [0.999948042028]],
    [[0.55, 0.47], [0.999958706269]],
    [[-0.7, 0.0], [0.999951002401]],
    [[-0.77, -0.06], [0.999940533526]],
    [[-0.71, -0.91], [0.999908189914]],
    [[0.55, -0.95], [0.999924627619]],
    [[-0.51, -0.53], [0.999959946075]],
    [[0.83, -0.69], [0.99990731148]],
    [[0.22, -0.47], [0.999984115097]],
    [[0.86, -0.85], [0.999889923359]],
    [[0.73, -0.95], [0.999901590584]],
    [[0.39, 0.08], [0.999984470236]],
    [[0.58, -0.75], [0.99993823721]],
    [[-0.34, -0.83], [0.99995399573]],
    [[-0.44, -0.82], [0.999947021214]],
    [[0.65, -0.23], [0.999955106898]]
]

random.seed(0)

# prints a separator line made of dashes
def criar_linha():
    print("-" * 80)

# generates a random number obeying the rule: a <= rand < b
def rand(a, b):
    return (b - a) * random.random() + a

# our sigmoid-like activation function - produces an S-shaped curve
# hyperbolic tangent function
def funcao_ativacao_tang_hip(x):
    return math.tanh(x)

# derivative of the hyperbolic tangent, expressed in terms of the
# activation value y = tanh(x): d/dx tanh(x) = 1 - y**2
def derivada_funcao_ativacao(y):
    return 1 - y ** 2

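# Illustrative sanity check (a minimal added sketch; x0 = 0.5 is an arbitrary
# sample point): the closed-form derivative above should agree with a central
# finite-difference estimate of d/dx tanh(x).
_x0 = 0.5
_estimativa = (math.tanh(_x0 + 1e-6) - math.tanh(_x0 - 1e-6)) / 2e-6
assert abs(derivada_funcao_ativacao(math.tanh(_x0)) - _estimativa) < 1e-6
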
# input layer
nos_entrada = 2
nos_entrada = nos_entrada + 1  # +1 because of the bias node
# hidden layer
nos_ocultos = 30
# output layer
nos_saida = 1
# maximum number of training iterations
max_interacoes = 1000
# learning rate
taxa_aprendizado = 0.5

# activations for nodes
# create one vector per layer, filled with ones, with one entry per node
ativacao_entrada = numpy.ones(nos_entrada)
ativacao_ocultos = numpy.ones(nos_ocultos)
ativacao_saida = numpy.ones(nos_saida)

# holds the results of the output activations
resultados_ativacao_saida = numpy.ones(nos_saida)

# create the weight matrices, filled with zeros
wi = numpy.zeros((nos_entrada, nos_ocultos))
wo = numpy.zeros((nos_ocultos, nos_saida))

# fill in the weight values
# weight matrix from the input layer to the hidden layer
for i in range(nos_entrada):
    for j in range(nos_ocultos):
        wi[i][j] = rand(-1, 1)

# weight matrix from the hidden layer to the output layer
for j in range(nos_ocultos):
    for k in range(nos_saida):
        wo[j][k] = rand(-1, 1)

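# The same initialization could be written in vectorized form, for example
# (an alternative sketch, left as a comment so random.seed(0) keeps its effect):
#   wi = numpy.random.uniform(-1, 1, (nos_entrada, nos_ocultos))
#   wo = numpy.random.uniform(-1, 1, (nos_ocultos, nos_saida))
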
def fase_forward(entradas):
    # input activations: -1 because of the bias node, which stays fixed at 1
    for i in range(nos_entrada - 1):
        ativacao_entrada[i] = entradas[i]

    # compute the activations of the hidden-layer neurons
    for j in range(nos_ocultos):
        soma = 0
        for i in range(nos_entrada):
            soma = soma + ativacao_entrada[i] * wi[i][j]
        ativacao_ocultos[j] = funcao_ativacao_tang_hip(soma)

    # compute the activations of the output-layer neurons
    # Note that the outputs of the hidden-layer neurons play the role of inputs
    # to the output-layer neurons.
    for j in range(nos_saida):
        soma = 0
        for i in range(nos_ocultos):
            soma = soma + ativacao_ocultos[i] * wo[i][j]
        ativacao_saida[j] = funcao_ativacao_tang_hip(soma)

    return ativacao_saida

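# Equivalently, the two loops above amount to matrix-vector products followed by
# the elementwise tanh (an illustrative restatement, not used by the code):
#   ativacao_ocultos = numpy.tanh(wi.T.dot(ativacao_entrada))
#   ativacao_saida   = numpy.tanh(wo.T.dot(ativacao_ocultos))
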
def fase_backward(saidas_desejadas):
    # compute the local gradients (deltas) of the output-layer neurons
    output_deltas = numpy.zeros(nos_saida)
    erro = 0
    for i in range(nos_saida):
        erro = saidas_desejadas[i] - ativacao_saida[i]
        output_deltas[i] = derivada_funcao_ativacao(ativacao_saida[i]) * erro

    # compute the local gradients (deltas) of the hidden-layer neurons
    hidden_deltas = numpy.zeros(nos_ocultos)
    for i in range(nos_ocultos):
        erro = 0
        for j in range(nos_saida):
            erro = erro + output_deltas[j] * wo[i][j]
        hidden_deltas[i] = derivada_funcao_ativacao(ativacao_ocultos[i]) * erro

    # from the last layer back towards the input layer,
    # the nodes of each layer adjust their weights so as to reduce their error
    for i in range(nos_ocultos):
        for j in range(nos_saida):
            change = output_deltas[j] * ativacao_ocultos[i]
            wo[i][j] = wo[i][j] + (taxa_aprendizado * change)

    # update the weights of the first layer
    for i in range(nos_entrada):
        for j in range(nos_ocultos):
            change = hidden_deltas[j] * ativacao_entrada[i]
            wi[i][j] = wi[i][j] + (taxa_aprendizado * change)

    # compute the squared error: 0.5 * sum((desired - actual) ** 2)
    erro = 0
    for i in range(len(saidas_desejadas)):
        erro = erro + 0.5 * (saidas_desejadas[i] - ativacao_saida[i]) ** 2
    return erro

def test(entradas_saidas):
    for p in entradas_saidas:
        array = fase_forward(p[0])
        print("Inputs: " + str(p[0]) + ' - Output found by the forward pass: ' + str(array[0]))

def treinar(entradas_saidas):
    for i in range(max_interacoes):
        erro = 0
        for p in entradas_saidas:
            entradas = p[0]
            saidas_desejadas = p[1]
            fase_forward(entradas)
            erro = erro + fase_backward(saidas_desejadas)
        if i % 100 == 0:
            print("Error = %2.3f" % erro)


def iniciar():
    # train the neural network: two inputs (plus bias), nos_ocultos hidden nodes and one output node
    criar_linha()
    treinar(entradas_saidas)
    # test
    criar_linha()
    test(entradas_saidas)

iniciar()
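
# Illustrative use of the trained network (a minimal added sketch; 'exemplo' is
# just the first training point, chosen arbitrarily for demonstration):
exemplo = entradas_saidas[0][0]
criar_linha()
print("Example input:", exemplo, "-> network output:", fase_forward(exemplo)[0])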