Advertisement
cimona

Validacija

Sep 13th, 2018
359
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 8.74 KB | None | 0 0
  1. from math import exp
  2. from random import seed
  3. from random import random
  4.  
  5. # Initialize a network
  6. # Put fixed weigths to 0.5 on code.finki.ukim.mk if there is a problem with random()
  7. def initialize_network(n_inputs, n_hidden, n_outputs):
  8. network = list()
  9. hidden_layer = [{'weights':[random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]
  10. network.append(hidden_layer)
  11. output_layer = [{'weights':[random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]
  12. network.append(output_layer)
  13. return network
  14.  
  15.  
  16. # Calculate neuron activation for an input
  17. def activate(weights, inputs):
  18. activation = weights[-1]
  19. for i in range(len(weights)-1):
  20. activation += weights[i] * inputs[i]
  21. return activation
  22.  
  23. # Transfer neuron activation
  24. def transfer(activation):
  25. return 1.0 / (1.0 + exp(-activation))
  26.  
  27. # Forward propagate input to a network output
  28. def forward_propagate(network, row):
  29. inputs = row
  30. for layer in network:
  31. new_inputs = []
  32. for neuron in layer:
  33. activation = activate(neuron['weights'], inputs)
  34. neuron['output'] = transfer(activation)
  35. new_inputs.append(neuron['output'])
  36. inputs = new_inputs
  37. return inputs
  38.  
  39. # Calculate the derivative of an neuron output
  40. def transfer_derivative(output):
  41. return output * (1.0 - output)
  42.  
  43. # Backpropagate error and store in neurons
  44. def backward_propagate_error(network, expected):
  45. for i in reversed(range(len(network))):
  46. layer = network[i]
  47. errors = list()
  48. if i != len(network)-1:
  49. for j in range(len(layer)):
  50. error = 0.0
  51. for neuron in network[i + 1]:
  52. error += (neuron['weights'][j] * neuron['delta'])
  53. errors.append(error)
  54. else:
  55. for j in range(len(layer)):
  56. neuron = layer[j]
  57. errors.append(expected[j] - neuron['output'])
  58. for j in range(len(layer)):
  59. neuron = layer[j]
  60. neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])
  61.  
  62. # Update network weights with error
  63. def update_weights(network, row, l_rate):
  64. for i in range(len(network)):
  65. inputs = row[:-1]
  66. if i != 0:
  67. inputs = [neuron['output'] for neuron in network[i - 1]]
  68. for neuron in network[i]:
  69. for j in range(len(inputs)):
  70. neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
  71. neuron['weights'][-1] += l_rate * neuron['delta']
  72.  
  73. # Train a network for a fixed number of epochs
  74. def train_network(network, train, l_rate, n_epoch, n_outputs):
  75. for epoch in range(n_epoch):
  76. sum_error = 0
  77. for row in train:
  78. outputs = forward_propagate(network, row)
  79. expected = [0 for i in range(n_outputs)]
  80. expected[row[-1]] = 1
  81. sum_error += sum([(expected[i]-outputs[i])**2 for i in range(len(expected))])
  82. backward_propagate_error(network, expected)
  83. update_weights(network, row, l_rate)
  84.  
  85.  
  86. # Test training backprop algorithm
  87.  
  88.  
  89.  
  90.  
  91.  
  92.  
# Iris-style dataset: four numeric features per row, with the class
# label (0, 1 or 2) in the final position. The last 10 rows are held
# out as a validation set by getDataset().
dataset = [
    [6.3, 2.9, 5.6, 1.8, 0],
    [6.5, 3.0, 5.8, 2.2, 0],
    [7.6, 3.0, 6.6, 2.1, 0],
    [4.9, 2.5, 4.5, 1.7, 0],
    [7.3, 2.9, 6.3, 1.8, 0],
    [6.7, 2.5, 5.8, 1.8, 0],
    [7.2, 3.6, 6.1, 2.5, 0],
    [6.5, 3.2, 5.1, 2.0, 0],
    [6.4, 2.7, 5.3, 1.9, 0],
    [6.8, 3.0, 5.5, 2.1, 0],
    [5.7, 2.5, 5.0, 2.0, 0],
    [5.8, 2.8, 5.1, 2.4, 0],
    [6.4, 3.2, 5.3, 2.3, 0],
    [6.5, 3.0, 5.5, 1.8, 0],
    [7.7, 3.8, 6.7, 2.2, 0],
    [7.7, 2.6, 6.9, 2.3, 0],
    [6.0, 2.2, 5.0, 1.5, 0],
    [6.9, 3.2, 5.7, 2.3, 0],
    [5.6, 2.8, 4.9, 2.0, 0],
    [7.7, 2.8, 6.7, 2.0, 0],
    [6.3, 2.7, 4.9, 1.8, 0],
    [6.7, 3.3, 5.7, 2.1, 0],
    [7.2, 3.2, 6.0, 1.8, 0],
    [6.2, 2.8, 4.8, 1.8, 0],
    [6.1, 3.0, 4.9, 1.8, 0],
    [6.4, 2.8, 5.6, 2.1, 0],
    [7.2, 3.0, 5.8, 1.6, 0],
    [7.4, 2.8, 6.1, 1.9, 0],
    [7.9, 3.8, 6.4, 2.0, 0],
    [6.4, 2.8, 5.6, 2.2, 0],
    [6.3, 2.8, 5.1, 1.5, 0],
    [6.1, 2.6, 5.6, 1.4, 0],
    [7.7, 3.0, 6.1, 2.3, 0],
    [6.3, 3.4, 5.6, 2.4, 0],
    [5.1, 3.5, 1.4, 0.2, 1],
    [4.9, 3.0, 1.4, 0.2, 1],
    [4.7, 3.2, 1.3, 0.2, 1],
    [4.6, 3.1, 1.5, 0.2, 1],
    [5.0, 3.6, 1.4, 0.2, 1],
    [5.4, 3.9, 1.7, 0.4, 1],
    [4.6, 3.4, 1.4, 0.3, 1],
    [5.0, 3.4, 1.5, 0.2, 1],
    [4.4, 2.9, 1.4, 0.2, 1],
    [4.9, 3.1, 1.5, 0.1, 1],
    [5.4, 3.7, 1.5, 0.2, 1],
    [4.8, 3.4, 1.6, 0.2, 1],
    [4.8, 3.0, 1.4, 0.1, 1],
    [4.3, 3.0, 1.1, 0.1, 1],
    [5.8, 4.0, 1.2, 0.2, 1],
    [5.7, 4.4, 1.5, 0.4, 1],
    [5.4, 3.9, 1.3, 0.4, 1],
    [5.1, 3.5, 1.4, 0.3, 1],
    [5.7, 3.8, 1.7, 0.3, 1],
    [5.1, 3.8, 1.5, 0.3, 1],
    [5.4, 3.4, 1.7, 0.2, 1],
    [5.1, 3.7, 1.5, 0.4, 1],
    [4.6, 3.6, 1.0, 0.2, 1],
    [5.1, 3.3, 1.7, 0.5, 1],
    [4.8, 3.4, 1.9, 0.2, 1],
    [5.0, 3.0, 1.6, 0.2, 1],
    [5.0, 3.4, 1.6, 0.4, 1],
    [5.2, 3.5, 1.5, 0.2, 1],
    [5.2, 3.4, 1.4, 0.2, 1],
    [5.5, 2.3, 4.0, 1.3, 2],
    [6.5, 2.8, 4.6, 1.5, 2],
    [5.7, 2.8, 4.5, 1.3, 2],
    [6.3, 3.3, 4.7, 1.6, 2],
    [4.9, 2.4, 3.3, 1.0, 2],
    [6.6, 2.9, 4.6, 1.3, 2],
    [5.2, 2.7, 3.9, 1.4, 2],
    [5.0, 2.0, 3.5, 1.0, 2],
    [5.9, 3.0, 4.2, 1.5, 2],
    [6.0, 2.2, 4.0, 1.0, 2],
    [6.1, 2.9, 4.7, 1.4, 2],
    [5.6, 2.9, 3.6, 1.3, 2],
    [6.7, 3.1, 4.4, 1.4, 2],
    [5.6, 3.0, 4.5, 1.5, 2],
    [5.8, 2.7, 4.1, 1.0, 2],
    [6.2, 2.2, 4.5, 1.5, 2],
    [5.6, 2.5, 3.9, 1.1, 2],
    [5.9, 3.2, 4.8, 1.8, 2],
    [6.1, 2.8, 4.0, 1.3, 2],
    [6.3, 2.5, 4.9, 1.5, 2],
    [6.1, 2.8, 4.7, 1.2, 2],
    [6.4, 2.9, 4.3, 1.3, 2],
    [6.6, 3.0, 4.4, 1.4, 2],
    [6.8, 2.8, 4.8, 1.4, 2],
    [6.7, 3.0, 5.0, 1.7, 2],
    [6.0, 2.9, 4.5, 1.5, 2],
    [5.7, 2.6, 3.5, 1.0, 2],
    [5.5, 2.4, 3.8, 1.1, 2],
    [5.4, 3.0, 4.5, 1.5, 2],
    [6.0, 3.4, 4.5, 1.6, 2],
    [6.7, 3.1, 4.7, 1.5, 2],
    [6.3, 2.3, 4.4, 1.3, 2],
    [5.6, 3.0, 4.1, 1.3, 2],
    [5.5, 2.5, 4.0, 1.3, 2],
    [5.5, 2.6, 4.4, 1.2, 2],
    [6.1, 3.0, 4.6, 1.4, 2],
    [5.8, 2.6, 4.0, 1.2, 2],
    [5.0, 2.3, 3.3, 1.0, 2],
    [5.6, 2.7, 4.2, 1.3, 2],
    [5.7, 3.0, 4.2, 1.2, 2],
    [5.7, 2.9, 4.2, 1.3, 2],
    [6.2, 2.9, 4.3, 1.3, 2],
    [5.1, 2.5, 3.0, 1.1, 2],
    [5.7, 2.8, 4.1, 1.3, 2],
    [6.4, 3.1, 5.5, 1.8, 0],
    [6.0, 3.0, 4.8, 1.8, 0],
    [6.9, 3.1, 5.4, 2.1, 0],
    [6.8, 3.2, 5.9, 2.3, 0],
    [6.7, 3.3, 5.7, 2.5, 0],
    [6.7, 3.0, 5.2, 2.3, 0],
    [6.3, 2.5, 5.0, 1.9, 0],
    [6.5, 3.0, 5.2, 2.0, 0],
    [6.2, 3.4, 5.4, 2.3, 0],
    [4.7, 3.2, 1.6, 0.2, 1],
    [4.8, 3.1, 1.6, 0.2, 1],
    [5.4, 3.4, 1.5, 0.4, 1],
    [5.2, 4.1, 1.5, 0.1, 1],
    [5.5, 4.2, 1.4, 0.2, 1],
    [4.9, 3.1, 1.5, 0.2, 1],
    [5.0, 3.2, 1.2, 0.2, 1],
    [5.5, 3.5, 1.3, 0.2, 1],
    [4.9, 3.6, 1.4, 0.1, 1],
    [4.4, 3.0, 1.3, 0.2, 1],
    [5.1, 3.4, 1.5, 0.2, 1],
    [5.0, 3.5, 1.3, 0.3, 1],
    [4.5, 2.3, 1.3, 0.3, 1],
    [4.4, 3.2, 1.3, 0.2, 1],
    [5.0, 3.5, 1.6, 0.6, 1],
    [5.9, 3.0, 5.1, 1.8, 0],
    [5.1, 3.8, 1.9, 0.4, 1],
    [4.8, 3.0, 1.4, 0.3, 1],
    [5.1, 3.8, 1.6, 0.2, 1],
    [5.5, 2.4, 3.7, 1.0, 2],
    [5.8, 2.7, 3.9, 1.2, 2],
    [6.0, 2.7, 5.1, 1.6, 2],
    [6.7, 3.1, 5.6, 2.4, 0],
    [6.9, 3.1, 5.1, 2.3, 0],
    [5.8, 2.7, 5.1, 1.9, 0],
]
  236.  
  237. def predict(network,row):
  238. outputs = forward_propagate(network,row)
  239. return outputs.index(max(outputs))
  240.  
  241. def getScore(network,data):
  242. score = 0
  243. for row in data:
  244. prediction = predict(network,row)
  245. if (prediction == row[-1]):
  246. score += 1
  247. return score
  248.  
  249. def getDataset(dataset):
  250. list = []
  251. validation = []
  252. for i in range(0, len(dataset) - 10):
  253. list.append(dataset[i])
  254.  
  255. for i in range(len(dataset) - 10, len(dataset)):
  256. validation.append(dataset[i])
  257.  
  258. return (list,validation)
  259. if __name__ == "__main__":
  260. # ne menuvaj
  261. seed(1)
  262.  
  263. att1 = input()
  264. att2 = input()
  265. att3 = input()
  266. att4 = input()
  267. planttype = input()
  268. testCase = [att1, att2, att3, att4, planttype]
  269. data = getDataset(dataset)[0]
  270. validation = getDataset(dataset)[1]
  271.  
  272. n_inputs=len(data[0]) -1
  273. n_outputs=len(set(row[-1] for row in data))
  274.  
  275. network1=initialize_network(n_inputs,3,n_outputs)
  276. network2=initialize_network(n_inputs,3,n_outputs)
  277. network3=initialize_network(n_inputs,3,n_outputs)
  278.  
  279. train_network(network1,data,0.3,20,n_outputs)
  280. train_network(network2, data, 0.5, 20, n_outputs)
  281. train_network(network3, data, 0.7, 20, n_outputs)
  282.  
  283. score_network1 = getScore(network1,validation)
  284. score_network2 = getScore(network2,validation)
  285. score_network3 = getScore(network3,validation)
  286.  
  287. MAX = max(score_network1,score_network2,score_network3)
  288.  
  289. if (MAX == score_network1):
  290.  
  291. prediction = predict(network1, testCase)
  292. print(prediction)
  293. elif (MAX == score_network2):
  294.  
  295. prediction = predict(network2, testCase)
  296. print(prediction)
  297. else :
  298.  
  299. prediction = predict(network3, testCase)
  300. print(prediction)
  301. # vasiot kod ovde
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement