# get available mirrored ratings
# URL = "http://www.hi-izuru.org/mirror/files/Ratings.htm" -> save as raterid.csv
mirrordata <- read.csv("raterid.csv", header = TRUE)
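# (Optional) a minimal sketch of pulling the ratings table straight from the mirror page
# instead of saving it by hand as raterid.csv. Not part of the original workflow; it assumes
# the 'XML' package is installed and that the ratings are the first HTML table on the page.
# library(XML)
# mirrordata <- readHTMLTable("http://www.hi-izuru.org/mirror/files/Ratings.htm",
#                             header = TRUE, which = 1)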
# function to determine percent agreement between observers (modified from package 'irr')
agreea <- function(ratings, tolerance = 0) {
  ratings <- as.matrix(na.omit(ratings))  # drop abstracts with a missing rating
  ns <- nrow(ratings)                     # number of subjects (abstracts)
  nr <- ncol(ratings)                     # number of raters
  if (is.numeric(ratings)) {
    # numeric ratings: a row agrees if its ratings differ by no more than 'tolerance'
    rangetab <- apply(ratings, 1, max) - apply(ratings, 1, min)
    coeff <- 100 * sum(rangetab <= tolerance) / ns
  } else {
    # categorical ratings: a row agrees only if all its ratings are identical
    rangetab <- as.numeric(sapply(apply(ratings, 1, table), length))
    coeff <- 100 * (sum(rangetab == 1) / ns)
    tolerance <- 0
  }
  return(coeff)
}
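# Quick self-check of agreea() on a made-up pair of ratings (not from the mirror data):
# rows are abstracts, columns are two raters; 3 of the 4 rows match exactly, so expect 75.
toy <- cbind(rater_a = c(1, 2, 3, 4), rater_b = c(1, 2, 3, 5))
agreea(toy)                  # 75
agreea(toy, tolerance = 1)   # 100, since the mismatching pair differs by only 1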
# Rank raters by no. of abstracts rated
rater1 <- as.character(mirrordata$Rater1)
rater2 <- as.character(mirrordata$Rater2)
rater3 <- as.character(mirrordata$Rater3)
tiebreakrater <- as.character(mirrordata$TiebreakRater)
rters <- data.frame(cbind(rater1, rater2, rater3, tiebreakrater))
ratertb <- table(rters[, 1])  # use de-novo ratings; all others have uncharacterized biases
ratertb <- ratertb[order(ratertb, decreasing = TRUE)]  # most to fewest abstracts rated
nratertb <- names(ratertb)                             # rater IDs in that order
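# Not in the original paste: a quick look at the 12 most prolific first raters,
# i.e. the raters the loop below builds the agreement matrix for.
head(ratertb, 12)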
# Agreement matrix between top 12 raters
for (i in 1:12) {
  rr1 <- mirrordata[mirrordata$Rater1 == nratertb[i], ]  # abstracts first rated by rater i
  rtr1 <- with(rr1, table(Rater2))
  rtr1 <- rtr1[order(rtr1, decreasing = TRUE)]
  dcr <- names(rtr1)  # second raters, in decreasing order of overlap
  # percent agreement between rater i and second rater n on their shared abstracts
  agk <- function(n) {
    extract <- rr1[rr1$Rater2 == n, ]
    agreea(with(extract, cbind(OrigEndorse1, OrigEndorse2)))
  }
  capture.output(paste("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"), file = "agree-matrix.txt", append = TRUE)
  capture.output(cat("Rater1", nratertb[i], "vs "), file = "agree-matrix.txt", append = TRUE)
  capture.output(rtr1, file = "agree-matrix.txt", append = TRUE)
  capture.output(sapply(dcr, agk), file = "agree-matrix.txt", append = TRUE)
}
# What you get: percent match between volunteers (raters) on the abstracts they both rated.
# Between each "XXXX..." separator is a listing of second raters, the number of overlapping
# abstracts, and the percent match for each pair. Results for the top twelve volunteers are listed.
# The code can be adapted to calculate kappa, Krippendorff's alpha, Gwet's AC1 or other
# measures of reliability (a kappa sketch follows below).
# See e.g. a graph generated quickly with Excel here: https://twitter.com/shubclimate/status/498582858160541698
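# A minimal sketch of the kappa adaptation mentioned above, not part of the original paste:
# to report Cohen's kappa instead of percent agreement, a kappa analogue of agk() could be
# substituted inside the loop. Assumes the 'irr' package is installed and that the two-rater
# kappa2() is an acceptable choice of kappa.
library(irr)
kapk <- function(n) {
  extract <- rr1[rr1$Rater2 == n, ]  # same subset of shared abstracts as agk()
  kappa2(with(extract, cbind(OrigEndorse1, OrigEndorse2)))$value
}
# inside the loop: capture.output(sapply(dcr, kapk), file = "agree-matrix.txt", append = TRUE)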