package markov;

/**
 * Class with the main method for presenting the results of the lab. It reads in the MFCC
 * sequence data and the corresponding state labels for each class, trains a hidden Markov
 * model per class, classifies the examples, and finally builds and evaluates a combined
 * yes/no model.
 */

public class Answer {

    public static void main(String[] args)
    {
        String mfccDataDirectory = "data/yesno_uncut/mfcc/";
        String labelDirectory = "data/yesno_uncut/labels/";

        // Read in the MFCC data and state labels for each class
        DataWithLabels dataClass1 = new DataWithLabels(mfccDataDirectory + "yes", labelDirectory + "yes");
        DataWithLabels dataClass2 = new DataWithLabels(mfccDataDirectory + "no", labelDirectory + "no");

        // Task 1
        // 3 = number of states (silence, yes, silence); a further implicit stop state has index 3
        System.out.println("------");
        System.out.println("Task 1");
        System.out.println("------");
        System.out.println("Yes:");
        HiddenMarkovModel markovYes = new HiddenMarkovModel(3, dataClass1.getMfcc(), dataClass1.getLabels());
        // Iterate through each pair of states (the <= bound includes the stop state),
        // printing the probability of each transition
        for (int i = 0; i <= markovYes.getNoStates(); i++)
            for (int j = 0; j <= markovYes.getNoStates(); j++)
                System.out.println(markovYes.getTransitionProbability(i, j));
        System.out.println("--- End of Yes ---");

        // 3 = number of states (silence, no, silence); again index 3 is the stop state
        System.out.println("No:");
        HiddenMarkovModel markovNo = new HiddenMarkovModel(3, dataClass2.getMfcc(), dataClass2.getLabels());
        // Iterate through each pair of states, printing the probability of each transition
        for (int i = 0; i <= markovNo.getNoStates(); i++)
            for (int j = 0; j <= markovNo.getNoStates(); j++)
                System.out.println(markovNo.getTransitionProbability(i, j));
        System.out.println("--- End of No ---");

        // Task 2
        // Build a classifier from the two models; the third argument, 82/165, is taken as the prior probability of the 'yes' class
        Classifier classifier = new Classifier(markovYes, markovNo, 82 / 165.0);
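        // The Classifier class is not shown in this paste, but given the 0.5 threshold used
        // below, classify(x) presumably returns the posterior probability of 'yes' via Bayes'
        // rule (a hedged reading of the code, not confirmed by the source):
        //
        //   P(yes | x) = P(x | yes) P(yes) / ( P(x | yes) P(yes) + P(x | no) P(no) )
        //
        // where P(x | yes) and P(x | no) would be the likelihoods under markovYes and markovNo,
        // and P(yes) = 82/165 is the prior passed to the constructor.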

        int numberOfErrors = 0;

        // A 'yes' example whose classifier output is below 0.5 would be labelled 'no', i.e. an error
        for (int index = 0; index < dataClass1.getNumberExamples(); index++)
        {
            double answer = classifier.classify(dataClass1.getMfcc(index));
            if (answer < 0.5)
                numberOfErrors++;
        } // for

        // A 'no' example whose classifier output is above 0.5 would be labelled 'yes', i.e. an error
        for (int index = 0; index < dataClass2.getNumberExamples(); index++)
        {
            double answer = classifier.classify(dataClass2.getMfcc(index));
            if (answer > 0.5)
                numberOfErrors++;
        } // for

        // Total percentage of errors = numberOfErrors / totalExamples * 100
        double percentageErrors = numberOfErrors / 165.0 * 100.0;
        System.out.println("------");
        System.out.println("Task 2");
        System.out.println("------");
        System.out.println("Total error percentage is " + percentageErrors + "%");

        // Task 3
        // Create a new Markov model by combining our current two.
        // This time we have 4 states - silence, yes, no, silence (and then stop)
        HiddenMarkovModel markovYesNo = new HiddenMarkovModel(4);

        // Need to represent silence to yes (states 0 -> 1 in our old model, and the same in the new)
        markovYesNo.setTransitionProbability(markovYes.getTransitionProbability(0, 1), 0, 1);

        // Need to represent silence to no (states 0 -> 1 in our old model, 0 -> 2 in the new)
        markovYesNo.setTransitionProbability(markovNo.getTransitionProbability(0, 1), 0, 2);

        // Need to represent silence to silence [initial] (states 0 -> 0 in both old models, 0 -> 0 in the new, so we take the average)
        markovYesNo.setTransitionProbability((markovYes.getTransitionProbability(0, 0)
                + markovNo.getTransitionProbability(0, 0)) / 2, 0, 0);

        // Need to represent yes to yes (states 1 -> 1 in our old model, 1 -> 1 in the new)
        markovYesNo.setTransitionProbability(markovYes.getTransitionProbability(1, 1), 1, 1);

        // Need to represent no to no (states 1 -> 1 in our old model, 2 -> 2 in the new)
        markovYesNo.setTransitionProbability(markovNo.getTransitionProbability(1, 1), 2, 2);

        // Need to represent yes to silence (states 1 -> 2 in our old model, 1 -> 3 in the new)
        markovYesNo.setTransitionProbability(markovYes.getTransitionProbability(1, 2), 1, 3);

        // Need to represent no to silence (states 1 -> 2 in our old model, 2 -> 3 in the new)
        markovYesNo.setTransitionProbability(markovNo.getTransitionProbability(1, 2), 2, 3);

        // Need to represent silence to silence [last] (states 2 -> 2 in both old models, 3 -> 3 in the new, so we take the average)
        markovYesNo.setTransitionProbability((markovYes.getTransitionProbability(2, 2)
                + markovNo.getTransitionProbability(2, 2)) / 2, 3, 3);

        // Need to represent stop to start (states 3 -> 0 in both old models, 4 -> 0 in the new, so we take the average)
        markovYesNo.setTransitionProbability((markovYes.getTransitionProbability(3, 0)
                + markovNo.getTransitionProbability(3, 0)) / 2, 4, 0);

        // Need to represent silence [initial] to stop (states 0 -> 3 in both old models, 0 -> 4 in the new, so we take the average)
        markovYesNo.setTransitionProbability((markovYes.getTransitionProbability(0, 3)
                + markovNo.getTransitionProbability(0, 3)) / 2, 0, 4);

        // Need to represent yes to stop (states 1 -> 3 in our old model, 1 -> 4 in the new)
        markovYesNo.setTransitionProbability(markovYes.getTransitionProbability(1, 3), 1, 4);

        // Need to represent no to stop (states 1 -> 3 in our old model, 2 -> 4 in the new)
        markovYesNo.setTransitionProbability(markovNo.getTransitionProbability(1, 3), 2, 4);

        // Need to represent silence to stop [last] (states 2 -> 3 in both old models, 3 -> 4 in the new, so we take the average)
        markovYesNo.setTransitionProbability((markovYes.getTransitionProbability(2, 3)
                + markovNo.getTransitionProbability(2, 3)) / 2, 3, 4);

        // Print the transition probability for each pair of states
        // (the result of combining both the yes and no models)
        System.out.println("------");
        System.out.println("Task 3");
        System.out.println("------");
        for (int i = 0; i < 5; i++)
        {
            for (int j = 0; j < 5; j++)
                System.out.println(markovYesNo.getTransitionProbability(i, j));
        }

        // Combine the emission densities, one per MFCC coefficient (the loop bound of 13
        // matches the number of MFCC coefficients used here)
        for (int i = 0; i < 13; i++)
        {
            // The two "silence" densities need to be combined
            markovYesNo.setEmissionDensity(markovYes.getEmissionDensity(i, 0).combine(markovNo.getEmissionDensity(i, 0)), i, 0);

            // Separate "yes" and "no" states keep their own densities
            markovYesNo.setEmissionDensity(markovYes.getEmissionDensity(i, 1), i, 1);
            markovYesNo.setEmissionDensity(markovNo.getEmissionDensity(i, 1), i, 2);

            // And finally another combined "silence" at the end
            markovYesNo.setEmissionDensity(markovYes.getEmissionDensity(i, 2).combine(markovNo.getEmissionDensity(i, 2)), i, 3);
        }
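        // Note: the class of object returned by getEmissionDensity() is not shown in this paste,
        // so what combine() does is an assumption; presumably it merges the two per-class silence
        // densities into a single density (for example an equal-weight mixture).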
        // Task 4

        // Finding path errors for the yes and no data respectively,
        // using the Viterbi algorithm to find the most likely state path
        double yesNoError = 0;
        int[] yesViterbi;
        int[] noViterbi;

        // Going through our 'yes' examples
        for (int i = 0; i < dataClass1.getNumberExamples(); i++)
        {
            yesViterbi = markovYesNo.viterbi(dataClass1.getMfcc(i));
            for (int j = 0; j < yesViterbi.length; j++)
            {
                // State 2 is our 'no' state, so visiting it on a 'yes' example is an error
                if (yesViterbi[j] == 2)
                {
                    // Count at most one error per example, e.g. a path of 2,2,2,2 is 1 error, not 4
                    yesNoError++;
                    break;
                }
            }
        }

        // Going through our 'no' examples
        for (int i = 0; i < dataClass2.getNumberExamples(); i++)
        {
            noViterbi = markovYesNo.viterbi(dataClass2.getMfcc(i));
            for (int j = 0; j < noViterbi.length; j++)
            {
                // State 1 is our 'yes' state, so visiting it on a 'no' example is an error
                if (noViterbi[j] == 1)
                {
                    // Count at most one error per example, e.g. a path of 1,1,1,1 is 1 error, not 4
                    yesNoError++;
                    break;
                }
            }
        }

        // Calculate the error percentage over all examples
        double yesNoViterbiTotalError = (yesNoError * 100)
                / (dataClass1.getNumberExamples() + dataClass2.getNumberExamples());

        System.out.println("------");
        System.out.println("Task 4");
        System.out.println("------");
        // Combined classifier error percentage
        System.out.println("Total error percentage is " + yesNoViterbiTotalError + "%");
    } // main
}
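
None of the supporting classes used above (DataWithLabels, HiddenMarkovModel, Classifier, or the emission density type) are included in this paste. The stubs below are a minimal sketch of the interfaces Answer appears to rely on, inferred only from how they are called; the parameter and return types (for example double[][] for one MFCC sequence, double[][][] for the full data set, and an EmissionDensity class with a combine method) are assumptions for illustration, not the lab's actual definitions.

// Hypothetical stub file, e.g. markov/Stubs.java; every signature here is inferred from usage.
package markov;

class DataWithLabels {
    DataWithLabels(String mfccDir, String labelDir) { /* load data (not shown) */ }
    double[][][] getMfcc() { return null; }        // all examples: [example][frame][coefficient]
    double[][] getMfcc(int index) { return null; } // one example:  [frame][coefficient]
    int[][] getLabels() { return null; }           // state label per frame, per example
    int getNumberExamples() { return 0; }
}

class EmissionDensity {
    // Merge this density with another, e.g. for the shared silence states (name is hypothetical)
    EmissionDensity combine(EmissionDensity other) { return this; }
}

class HiddenMarkovModel {
    HiddenMarkovModel(int noStates) { }
    HiddenMarkovModel(int noStates, double[][][] mfcc, int[][] labels) { }
    int getNoStates() { return 0; }
    double getTransitionProbability(int from, int to) { return 0.0; }
    void setTransitionProbability(double p, int from, int to) { }
    EmissionDensity getEmissionDensity(int coefficient, int state) { return null; }
    void setEmissionDensity(EmissionDensity d, int coefficient, int state) { }
    int[] viterbi(double[][] mfcc) { return new int[0]; } // most likely state sequence
}

class Classifier {
    Classifier(HiddenMarkovModel yes, HiddenMarkovModel no, double priorYes) { }
    double classify(double[][] mfcc) { return 0.5; } // e.g. posterior probability of 'yes'
}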