Advertisement
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
library(MASS)   # lda(), qda()
library(klaR)   # loaded for companion analyses; not used below
library(e1071)  # naiveBayes()

# Column names for the UCI Wine dataset (first column is the class label).
# Named `col_names` rather than `names` to avoid shadowing base::names().
col_names <- c("clas", "alc", "malic alc", "ash", "alcality of ash",
               "magnesium", "total phenols", "flavanoids",
               "nonflavanoids phenols", "proanthocyanians",
               "color intens", "hue", "od280", "proline")

# read.csv() accepts a URL string directly; the url() wrapper is redundant.
data <- read.csv(
  "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data",
  col.names = col_names
)

# Fit three classifiers predicting the class label from all other columns.
class.lda <- lda(clas ~ ., data)
class.qda <- qda(clas ~ ., data)
class.nb  <- naiveBayes(clas ~ ., data)
# Confusion-matrix summary for a classifier.
#
# org.class:  vector/factor of true class labels
# pred.class: vector/factor of predicted class labels
#
# Returns a named numeric vector with:
#   ACC - overall accuracy (fraction of correct predictions)
#   TP, TN - counts, treating the second class level ("2") as
#            "positive" and the first ("1") as "negative"
#   TPR - true-positive rate (sensitivity), FPR - false-positive rate
CM.large <- function(org.class, pred.class) {
  CM <- table(org.class, pred.class)
  # Classifier accuracy: correct predictions on the diagonal
  ACC <- sum(diag(CM)) / sum(CM)
  # True positive and true negative values;
  # we assume class "2" is the "positive" class
  TP <- CM[2, 2]
  TN <- CM[1, 1]
  sums <- rowSums(CM)
  # unname() stops the dimnames of `sums` leaking into the result as
  # mangled element names like "TPR.2"/"FPR.1". (The original's
  # `row.names = NULL` inside c() was a no-op: c() silently drops NULLs.)
  TPR <- unname(TP / sums[2])
  FPR <- unname(1 - TN / sums[1])
  c(ACC = round(ACC, 4), TP = TP, TN = TN,
    TPR = round(TPR, 4), FPR = round(FPR, 4))
}
# Resubstitution predictions (evaluated on the training data itself).
data.lda.old <- predict(class.lda, data)
data.qda.old <- predict(class.qda, data)
data.nb.old  <- predict(class.nb, data)

# predict() for lda/qda returns a list; its component is `class`, not
# `clas` -- the original `$clas` only worked via `$` partial matching,
# which is fragile (and disabled for tibbles). Spell it out.
res.old <- CM.large(data$clas, data.lda.old$class)
res.old <- rbind(res.old, CM.large(data$clas, data.qda.old$class))
# predict.naiveBayes() returns the class factor directly.
res.old <- rbind(res.old, CM.large(data$clas, data.nb.old))
rownames(res.old) <- c("LDA", "QDA", "NB")
res.old
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement