Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import json
import tweepy
import csv  # kept: file-level import, CSV export may be intended elsewhere

# Load the Twitter API credentials from a JSON file kept out of the
# source tree. Expected keys: CONSUMER_KEY, CONSUMER_SECRET,
# ACCESS_KEY, ACCESS_SECRET.
# (Removed a duplicate `import json` and an unused `twitter_cred` dict.)
with open('twitter_creds.json') as cred_data:
    info = json.load(cred_data)
    consumer_key = info['CONSUMER_KEY']
    consumer_secret = info['CONSUMER_SECRET']
    access_key = info['ACCESS_KEY']
    access_secret = info['ACCESS_SECRET']
def get_all_tweets(screen_name):
    """Download recent tweets of *screen_name* and publish them via globals.

    Twitter allows access to only ~3240 tweets through user_timeline
    paging; this fetches up to 11 pages of 10 tweets each.

    Side effects: sets the module globals id__ (tweet id strings),
    date__ (creation datetimes) and text__ (tweet texts).
    """
    # Authorization and initialization.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    all_the_tweets = []

    # First page: the 10 most recent tweets.
    new_tweets = api.user_timeline(screen_name=screen_name, count=10)
    all_the_tweets.extend(new_tweets)

    global id__, date__, text__
    id__ = []
    date__ = []
    text__ = []

    # An account with no tweets: nothing to page through. (The original
    # crashed here with IndexError on all_the_tweets[-1].)
    if not all_the_tweets:
        return

    # Page backwards. max_id = (oldest seen id - 1) excludes tweets we
    # already have. The original passed max_id=1, which returns nothing
    # and left oldest_tweet computed but unused.
    oldest_tweet = all_the_tweets[-1].id - 1
    for _ in range(10):
        new_tweets = api.user_timeline(screen_name=screen_name,
                                       count=10, max_id=oldest_tweet)
        if not new_tweets:
            break  # timeline exhausted
        all_the_tweets.extend(new_tweets)
        oldest_tweet = all_the_tweets[-1].id - 1

    for tweet in all_the_tweets:
        id__.append(tweet.id_str)
        date__.append(tweet.created_at)
        text__.append(tweet.text)
if __name__ == '__main__':
    # Ask for the handle once, download the tweets, then rebind the
    # module-level name to the handle *string*: Cleanuptweets() reads
    # `get_all_tweets` via `global` and embeds it in HTML as the author
    # heading. With the function object still bound there, the original
    # raised TypeError on string concatenation — the commented-out
    # `get_all_tweets = "battlefield"` line shows the intended rebinding.
    screen_name = input("Enter the twitter handle of the person whose tweets you want to download:- ")
    get_all_tweets(screen_name)
    get_all_tweets = screen_name
def Cleanuptweets():
    """Split the trailing link out of each downloaded tweet and render HTML.

    Reads the module globals id__, date__ and text__ (filled by
    get_all_tweets) and publishes link__. Prints each cleaned record,
    appends one HTML fragment per tweet to lots_of_tweets.html, and
    (re)writes the static Tweets.html page shell.
    """
    global get_all_tweets
    global id__, date__, text__, link__

    # `get_all_tweets` normally holds the queried handle (rebound to a
    # string by the __main__ block); fall back to the function's name so
    # the string concatenation below can never raise TypeError.
    creator = (get_all_tweets if isinstance(get_all_tweets, str)
               else getattr(get_all_tweets, '__name__', str(get_all_tweets)))

    link__ = []
    for n, raw in enumerate(text__):
        words = raw.split()
        # The link is taken to be the last whitespace-separated token;
        # an empty tweet yields an empty link instead of an IndexError.
        link = words[-1] if words else ''
        link__.append(link)
        # Cut off exactly that trailing link. (The original used
        # str.strip(link), which strips a *character set* from both
        # ends and mangled the tweet text.)
        if link and raw.endswith(link):
            text__[n] = raw[:-len(link)].rstrip()
        print("ID :", id__[n], "\n")
        print("Date:", date__[n], "\n")
        print("Text:", text__[n], "\n")
        print("Link:", link__[n], "\n")

    # One fragment per tweet, appended so repeated runs accumulate.
    # Open the file once (the original reopened it every iteration and
    # clobbered the handle with `f = f.write(...)`).
    with open("lots_of_tweets.html", "a+") as frag:
        for i in range(len(text__)):
            date___ = str(date__[i])
            text___ = str(text__[i])
            link___ = str(link__[i])
            frag.write("""
<div class="tweets">
<div class="tweet">
<div class="creator">
<h1>""" + creator + """</h1>
</div>
<div class="date">
<p>""" + date___ + """</p>
</div>
<div class="text">
<p>""" + text___ + """</p>
</div>
<div class="link">
<a href='""" + link___ + """'></a>
</div>
</div>
</div>
""")

    # Static page shell (the fragments live in lots_of_tweets.html).
    # Fixed the original's broken meta tags: name="viewpoint" /
    # content="width-divice-width" -> the standard viewport declaration.
    email = """
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<meta name="description" content="All the tweets today.">
<title>Tweets</title>
<link rel="stylesheet" href="style.css">
</head>
<body>
</body>
</html>
"""
    with open("Tweets.html", "w") as page:
        page.write(email)


if __name__ == '__main__':
    # Guarded so importing this module does not run the pipeline (the
    # globals only exist after the __main__ download step anyway).
    Cleanuptweets()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement