# Function gradPerc: learn a single-layer perceptron
# Input
#   x  - features (input), one column per example
#   y  - labels (output)
#   g  - learning rate
#   it - maximum number of iterations
# Output
#   w  - perceptron weights
gradPerc <- function(x, y, g, it) {

  # transform the data into the appropriate format: prepend a constant row of 1s
  data <- rbind(1, x)

  # random initialisation of w = (theta, w1, ..., wm) with a uniform distribution
  ###### CODE HERE ######## -> Question 1
  w <- runif(nrow(data))

  # initialisation of the vector of classification errors
  err <- numeric(it + 1)
  err[1] <- error(w, data, y)
  i <- 1

  # run until all examples are classified correctly or the maximum number of
  # iterations is reached
  while (err[i] > 0 && i <= it) {

    i <- i + 1

    # update the perceptron weights with the gradient (batch) method and learning rate g
    w <- w + g * gradient(w, data, y)

    # normalise the weights
    if (w[1] != 0) {
      w <- w / abs(w[1])
    }

    # compute the error in the current iteration
    err[i] <- error(w, data, y)
  }

  # plot the error over the number of iterations
  plot(0:(i - 1), err[1:i], main = "Errors", xlab = "iteration", ylab = "error",
       type = "l")

  return(w)
}

# Function error: compute the number of classification errors
# Input
#   w - perceptron weights
#   x - features (input), including the constant row of 1s
#   y - labels (output)
# Output
#   number of classification errors
error <- function(w, x, y) {
  # identify all incorrectly classified datapoints, i.e. those with y * (w'x) < 0
  ###### CODE HERE ######## -> Question 2
  d <- y * (t(w) %*% x)
  ind <- d < 0

  # return the number of classification errors
  ###### CODE HERE ######## -> Question 2
  return(sum(ind))
}
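
# Worked example (assumed, not part of the original paste): with weights
# w = (0, 1, 1), the column (1, 1) labelled -1 gives y * (w'x) = -2 < 0 and is
# counted as an error, while the column (-1, -1) labelled -1 is classified correctly.
error(c(0, 1, 1), rbind(1, c(1, -1), c(1, -1)), c(-1, -1))  # 1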

# Function gradient: compute the (batch) perceptron gradient
# Input
#   w - perceptron weights
#   x - features (input), including the constant row of 1s
#   y - labels (output)
# Output
#   gradient, i.e. the sum of y_i * x_i over all misclassified datapoints
gradient <- function(w, x, y) {
  # identify all incorrectly classified datapoints
  ###### CODE HERE ######## -> Question 3
  d <- y * (t(w) %*% x)
  ind <- d < 0

  # return the gradient
  if (sum(ind) == 1) {
    # a single misclassified point: x[, ind] drops to a vector, so no diag() is needed
    ###### CODE HERE ######## -> Question 3
    return(y[ind] * x[, ind])
  } else {
    # several misclassified points: weight each column by its label and sum them up
    ###### CODE HERE ######## -> Question 3
    return(rowSums(x[, ind] %*% diag(y[ind])))
  }
}
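
# Worked example (assumed, not part of the original paste): with w = (0, 1, 1),
# only the column (1, 1) labelled -1 is misclassified, so the batch gradient is
# y * x = -1 * c(1, 1, 1).
gradient(c(0, 1, 1), rbind(1, c(1, -1), c(1, -1)), c(-1, -1))  # -1 -1 -1

# Quick sanity check of gradPerc (assumed example): a tiny, linearly separable
# set with one column per example and +1/-1 labels; it should reach zero error
# within a few iterations.
toy_x <- cbind(c(1, 1), c(2, 2), c(-1, -1), c(-2, -2))
toy_y <- c(1, 1, -1, -1)
toy_w <- gradPerc(toy_x, toy_y, g = 0.1, it = 50)
error(toy_w, rbind(1, toy_x), toy_y)  # expected to be 0 for this separable set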

# read the data from files
data1 <- read.table("data1.txt", header = TRUE, sep = "\t")
data2 <- read.table("data2.txt", header = TRUE, sep = "\t")
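
# Assumed fallback (not in the original paste): the files are expected to hold
# two numeric feature columns plus a +1/-1 label in column 3 (inferred from the
# calls below). If they are unavailable, a synthetic linearly separable
# stand-in can be built and substituted for data1, e.g.:
# synth <- data.frame(x1 = runif(100), x2 = runif(100))
# synth$y <- ifelse(synth$x1 + synth$x2 > 1, 1, -1)
# data1 <- synth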

# Function learn_p_plot_data: learn a single-layer perceptron and plot the data
# Input
#   x  - features (input), one row per example
#   y  - labels (output)
#   g  - learning rate
#   it - maximum number of iterations
# Output
#   w  - perceptron weights
learn_p_plot_data <- function(x, y, g = 0.01, it = 100) {
  # learn the perceptron (gradPerc expects one column per example, hence t(x))
  w <- gradPerc(t(x), y, g, it)

  # The following code adds a line to the scatterplot of the form y = a + b * x.
  # Calculate a and b such that the line represents the separation threshold
  # defined by the learned weights of the perceptron:
  # w[1] + w[2] * x1 + w[3] * x2 = 0  =>  x2 = -w[1]/w[3] - (w[2]/w[3]) * x1

  ###### CODE HERE ######## -> Question 4
  a <- -w[1] / w[3] # line intercept
  b <- -w[2] / w[3] # line slope

  # plot the data and the separating line
  plot(x, col = ifelse(y == 1, "blue", "red"), main = "Perceptron")
  abline(a, b)

  return(w)
}
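
# Example usage with the toy data above (assumed, not part of the original
# paste); learn_p_plot_data expects one row per example, so the toy matrix is
# transposed before the call.
learn_p_plot_data(t(toy_x), toy_y, g = 0.1, it = 50)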

# Code for Question 5:

set.seed(12)
q5_1 <- learn_p_plot_data(data1[, 1:2], data1[, 3])
q5_1
q5_2 <- learn_p_plot_data(data2[, 1:2], data2[, 3])
q5_2

# Code for Question 6:

set.seed(154)
q6_1 <- learn_p_plot_data(data1[, 1:2], data1[, 3], g = 0.01, it = 100)
q6_2 <- learn_p_plot_data(data1[, 1:2], data1[, 3], g = 0.10, it = 100)
q6_3 <- learn_p_plot_data(data1[, 1:2], data1[, 3], g = 1.00, it = 100)

q6_4 <- learn_p_plot_data(data2[, 1:2], data2[, 3], g = 0.01, it = 100)
q6_5 <- learn_p_plot_data(data2[, 1:2], data2[, 3], g = 0.10, it = 100)
q6_6 <- learn_p_plot_data(data2[, 1:2], data2[, 3], g = 1.00, it = 100)