Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# --- Imports ----------------------------------------------------------------
import json
import os

import tweepy
from watson_developer_cloud import NaturalLanguageUnderstandingV1
import watson_developer_cloud.natural_language_understanding.features.v1 as Features

# --- Twitter OAuth credentials ----------------------------------------------
# SECURITY NOTE(review): these secrets were committed to source control and
# should be revoked/rotated.  Environment variables now take precedence; the
# original literals are kept only as a fallback so existing behavior is
# unchanged for anyone running the script as-is.
consumer_key = os.environ.get('TWITTER_CONSUMER_KEY', 'OBOtvJVP5CeRjleSvEntfQ6xe')
consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET', '94o8G3PLKsqDsMaM6aUi3CRHGNvTVM07U1634PfqPMlm3Ncmnj')
access_token = os.environ.get('TWITTER_ACCESS_TOKEN', '926950006640300032-jpW6aUO2vWrHop7YbMJWZNnkX19gS3O')
access_token_secret = os.environ.get('TWITTER_ACCESS_TOKEN_SECRET', 'wQgGUK20TGWeb26Z2waXaw28fucWxrvCIUZlrtjd7RR4Z')

# OAuth handshake: build the authenticated Twitter API client used by the
# rest of the script.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Sample call, kept for reference -- posts a status update:
#   api.update_status('Hello Python Central!')

# Yahoo WOEIDs (Where On Earth IDs) accepted by Twitter's trends endpoints.
newYorkWOEID = 2459115
stateCollegeWOEID = 12764454

# Every location for which Twitter currently reports trends.
# NOTE(review): this is a live network call executed at module level.
availablePlaces = api.trends_available()

# Parallel lists of US city names and their WOEIDs.  They stay empty here:
# the full-US enumeration that once filled them was removed because fetching
# trends for every US location trips Twitter's "Rate limit exceeded" error.
# The names are retained in case other code (outside this view) reads them.
USavailablePlacesWOEID = []
USavailablePlacesCity = []

# Top 11 US cities by population.  Restricting the trend fetch to these
# keeps us under the rate limit, but may bias results toward large metros.
topCities = ["New York", "Los Angeles", "Chicago", "Houston", "Philadelphia",
             "Phoenix", "San Antonio", "San Diego", "Dallas", "San Jose",
             "Boston"]
# Collect the WOEIDs of the tracked cities.
# NOTE(review): the original loop started at index 1 and silently skipped
# availablePlaces[0]; the author's own reference code iterated from 0, so
# scanning the whole list fixes that apparent off-by-one.  Non-city entries
# (presumably e.g. "Worldwide" -- confirm against the API) are filtered out
# by the name check regardless.
topCitySet = set(topCities)  # set gives O(1) membership tests inside the scan
smallWOEID = [place["woeid"]
              for place in availablePlaces
              if place["name"] in topCitySet]

# Fetch the trending-topic names for each tracked city (one API call per
# WOEID; trends_place returns a one-element list whose "trends" key holds
# the topics).
compiledTrendList = []
for woeid in smallWOEID:
    trendList = api.trends_place(woeid)
    compiledTrendList.extend(trend["name"] for trend in trendList[0]["trends"])

# Not strictly necessary -- printed only to eyeball that the data looks right.
for trendName in compiledTrendList:
    print(trendName)

# Watson expects a single text document, so join the trend names into one
# space-separated string.
trendString = " ".join(str(x) for x in compiledTrendList)
print(trendString)
import os  # local import so this block stands on its own

# IBM Watson NLU client.
# SECURITY NOTE(review): the hard-coded credentials below were committed to
# source control and should be revoked.  Environment variables now take
# precedence; the original literals remain as a fallback so existing
# behavior is unchanged.
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2017-02-27',
    username=os.environ.get('WATSON_NLU_USERNAME', '2235dc52-736c-49b1-b52a-848550af0dca'),
    password=os.environ.get('WATSON_NLU_PASSWORD', 'kZ7LruQOOHUI'))

# Extract entities and keywords from the concatenated trend names and
# pretty-print the JSON response.
response = natural_language_understanding.analyze(
    text=trendString,
    features=[Features.Entities(), Features.Keywords()])
print(json.dumps(response, indent=2))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement