Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Evaluate each trained classifier on the held-out testing set and report
# its F1 score.
#
# Expects (defined earlier in the file — not visible in this chunk):
#   testing_set      : list of (featureset, label) tuples
#   classifiers_dict : mapping of classifier name -> list whose first element
#                      is a trained classifier exposing .classify(featureset)
from sklearn.metrics import f1_score, accuracy_score

# Gold labels: the second element of each (featureset, label) tuple.
ground_truth = [r[1] for r in testing_set]

predictions = {}
f1_scores = {}
for clf, listy in classifiers_dict.items():
    # Predict a label for every review; the first element of each
    # testing_set tuple is the featureset fed to .classify().
    predictions[clf] = [listy[0].classify(r[0]) for r in testing_set]
    # NOTE(review): f1_score defaults to binary averaging with pos_label=1;
    # if the labels are strings (e.g. 'pos'/'neg'), pass pos_label= or
    # average= explicitly — TODO confirm label type against the caller.
    f1_scores[clf] = f1_score(ground_truth, predictions[clf])
    print(f'f1_score {clf}: {f1_scores[clf]}')
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement