Advertisement
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
import nltk

# Shared NLP helpers, built once at module import time. Constructing a
# WordNetLemmatizer is expensive (it loads WordNet), so it must not be
# re-created per call.
lemma = nltk.stem.WordNetLemmatizer()
w_tokenize = nltk.tokenize.WhitespaceTokenizer()


def converted_text(text):
    """Split *text* on whitespace and lemmatize each token as a noun.

    Returns a list of lemmatized token strings.
    """
    # Fix: reuse the module-level `lemma` — the original instantiated a
    # fresh WordNetLemmatizer inside the comprehension on every call,
    # leaving the module-level instance unused.
    return [lemma.lemmatize(w, pos='n') for w in w_tokenize.tokenize(text)]


# NOTE(review): `data` is assumed to be a pandas DataFrame with a string
# 'purpose' column, defined elsewhere — confirm before running standalone.
data['purpose_lemmatized'] = data['purpose'].apply(converted_text)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement