Grifter

RedditRipper

Mar 1st, 2014
# Import the modules
import os, requests, urllib.request, sys

# Header
print("RedditRipper version 2 by Grifter")
print("~~~~~~~~~~~~~~~~~~~~~~~")
print(" ")

# UserAuth
username = input('Reddit username: ')
password = input('Password: ')
try:
    print("Attempting authentication....")
    user_pass_dict = {'user': username,
                      'passwd': password,
                      'api_type': 'json'}
    s = requests.Session()
    s.headers.update({'User-Agent': 'Image link acquirer : ' + username})
    r = s.post('http://www.reddit.com/api/login', data=user_pass_dict)
    print("Authentication successful")
except Exception:
    print('Authentication failed')
    input()
    sys.exit()

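# Note: reddit's legacy /api/login reports bad credentials inside the JSON body
# of the response (the 'errors' list under the 'json' key when api_type='json')
# rather than raising, so the except above mainly catches request failures such
# as network errors.
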
# Global variables
subreddit = input('Which subreddit? reddit.com/r/')
count = 0
ids = []


# Check local save directory existence
if not os.path.exists(subreddit):
    os.makedirs(subreddit)

# Request-download iterations
while True:

    # Request the next JSON page, using the last seen post id as the 'after' cursor
    if count != 0:
        idnext = str(ids[count - 1])
        r = s.get('http://www.reddit.com/r/%s/.json?limit=100&after=t3_%s' % (subreddit, idnext))
    else:
        r = s.get('http://www.reddit.com/r/%s/.json?limit=100' % subreddit)

    # Convert JSON packet to Python data
    data = r.json()

    # Stop once the subreddit has no more posts to page through
    if not data['data']['children']:
        break

    # Iterate downloads
    for child in data['data']['children']:
        count += 1

        # Record id for json request reference & filenames
        ids.append(child['data']['id'])

        # Determine imgur domain
        if child['data']['domain'] == 'imgur.com' or child['data']['domain'] == 'i.imgur.com':

            # Differentiate filetypes
            if child['data']['url'][-3:] == 'gif':
                try:
                    urllib.request.urlretrieve(child['data']['url'], os.path.join(subreddit, child['data']['id'] + '.gif'))
                    print(count)
                except Exception:
                    print(str(count) + " error")
            else:
                try:
                    # Appending .png to an imgur page link redirects to the raw image
                    urllib.request.urlretrieve(child['data']['url'] + '.png', os.path.join(subreddit, child['data']['id'] + '.png'))
                    print(count)
                except Exception:
                    print(str(count) + " error")
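
For reference, the listing JSON that the script pages through also carries its own pagination cursor in data['data']['after'], which can stand in for the manually tracked post ids. A minimal sketch of paging with that field, assuming the same public reddit.com/.json endpoint; iter_posts and the page count are illustrative names, not part of the script above:

# Minimal sketch: page a subreddit listing using reddit's own 'after' cursor.
import requests

def iter_posts(subreddit, pages=3):
    session = requests.Session()
    session.headers.update({'User-Agent': 'Image link acquirer : demo'})
    after = None
    for _ in range(pages):
        params = {'limit': 100}
        if after:
            params['after'] = after
        r = session.get('http://www.reddit.com/r/%s/.json' % subreddit, params=params)
        data = r.json()
        for child in data['data']['children']:
            yield child['data']['url']
        after = data['data']['after']   # None when there is nothing left to page
        if not after:
            break

# Example: print the links from the first few pages of /r/pics
for url in iter_posts('pics'):
    print(url)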