shubclimate

Agreement between raters - Cook et al

Aug 10th, 2014
Views: 268
Comments: 0
Expires: Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
R 2.43 KB | None | 0 0
  1. #get available mirrored ratings
  2. #URL="http://www.hi-izuru.org/mirror/files/Ratings.htm" -> save as raterid.csv
  3. mirrordata<-read.csv("raterid.csv", header=TRUE)
  4.  
  5. # function to determine percent agreement between two observers (modified from 'irr')
  6. agreea<-function (ratings, tolerance = 0)
  7. {
  8.   ratings <- as.matrix(na.omit(ratings))
  9.   ns <- nrow(ratings)
  10.   nr <- ncol(ratings)
  11.   if (is.numeric(ratings)) {
  12.     rangetab <- apply(ratings, 1, max) - apply(ratings, 1,
  13.                                                min)
  14.     coeff <- 100 * sum(rangetab <= tolerance)/ns
  15.   }
  16.   else {
  17.     rangetab <- as.numeric(sapply(apply(ratings, 1, table),
  18.                                   length))
  19.     coeff <- 100 * (sum(rangetab == 1)/ns)
  20.     tolerance <- 0
  21.   }
  22.   return(coeff)
  23. }
  24. # function to determine percent agreement between two observers (modified from package 'irr')
  25.  
  26. # Rank raters by no. of abstracts rated
  27. rater1<-as.character(mirrordata$Rater1);rater2<-as.character(mirrordata$Rater2);rater3<-as.character(mirrordata$Rater3)
  28. tiebreakrater<-as.character(mirrordata$TiebreakRater)
  29. rters<-cbind(rater1, rater2, rater3, tiebreakrater);
  30. rters<-data.frame(rters)
  31.  
  32. ratertb<-table(rters[,1]) #use de-novo ratings. All others have uncharacterized biases.
  33. ratertb<-ratertb[order(ratertb, decreasing=TRUE)]
  34. nratertb<-names(ratertb)
  35.  
  36. # Agreement matrix between top 12 raters
  37. for (i in 1:12) {
  38. rr1<-mirrordata[mirrordata$Rater1==nratertb[i],]
  39. rtr1<-with(rr1, table(Rater2));rtr1<-rtr1[order(rtr1, decreasing=TRUE)]
  40. dcr<-names(rtr1) #decreasing order of raters
  41. agk<-function(n){ extract<-rr1[(rr1$Rater2==n),]; agreea(with(extract, cbind(OrigEndorse1, OrigEndorse2)))}
  42. capture.output(paste("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"), file="agree-matrix.txt", append=TRUE)
  43. capture.output(cat("Rater1", nratertb[i], "vs "), file="agree-matrix.txt", append=TRUE)
  44. capture.output(rtr1, file="agree-matrix.txt", append=TRUE)
  45. capture.output(sapply(dcr, agk), file="agree-matrix.txt", append=TRUE)
  46.  
  47. # What you get: Percent match between volunteers (rater) on abstracts they rated. Between each "XXXX..." is a listing of raters, the number of overlapping abstracts, and the percent match for each pair. Results for top twelve volunteers are listed.
  48.  
  49. #Code can be adapted to calculate kappa, Krippendorf's alpha, Gwet's AC1 or other measures of reliability.
  50.  
  51. # See eg graph generated quickly with Excel here: https://twitter.com/shubclimate/status/498582858160541698
Advertisement
Add Comment
Please, Sign In to add comment