scrape_me
skip420 · Jan 13th, 2021 · Python · 6.44 KB
#!/usr/bin/env python3
# Usage: python3 scrape_me.py -s "Fema Camps" --limit 50 -o /home/skip420/Desktop/Image_scraper/image

import os, urllib.request, re, threading, posixpath, urllib.parse, argparse, socket, time, hashlib, pickle, signal
import imghdr  # deprecated since Python 3.11, removed in 3.13; fine on older interpreters

# config
output_dir = './bing'  # default output dir
adult_filter = True  # do not disable the adult filter by default
socket.setdefaulttimeout(2)

tried_urls = []
image_md5s = {}
in_progress = 0
urlopenheader = {'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'}

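# download() runs in a worker thread: it fetches one image URL, validates that
# the payload really is an image, de-duplicates by MD5 against both the
# in-memory index and any same-named file already on disk, then writes it out.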
def download(pool_sema: threading.Semaphore, url: str, output_dir: str):
    global in_progress

    if url in tried_urls:
        return
    pool_sema.acquire()
    in_progress += 1
    path = urllib.parse.urlsplit(url).path
    filename = posixpath.basename(path).split('?')[0]  # strip GET parameters from the filename
    name, ext = os.path.splitext(filename)
    name = name[:36].strip()
    filename = name + ext

    try:
        request = urllib.request.Request(url, None, urlopenheader)
        image = urllib.request.urlopen(request).read()
        if not imghdr.what(None, image):
            print('Invalid image, not saving ' + filename)
            return

        md5_key = hashlib.md5(image).hexdigest()
        if md5_key in image_md5s:
            print('Image is a duplicate of ' + image_md5s[md5_key] + ', not saving ' + filename)
            return

        i = 0
        while os.path.exists(os.path.join(output_dir, filename)):
            with open(os.path.join(output_dir, filename), 'rb') as existing:
                if hashlib.md5(existing.read()).hexdigest() == md5_key:
                    print('Already downloaded ' + filename + ', not saving')
                    return
            i += 1
            filename = "%s-%d%s" % (name, i, ext)

        image_md5s[md5_key] = filename

        with open(os.path.join(output_dir, filename), 'wb') as imagefile:
            imagefile.write(image)
        print("OK: " + filename)
        tried_urls.append(url)
    except Exception:
        print("FAIL: " + filename)
    finally:
        pool_sema.release()
        in_progress -= 1

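# fetch_images_from_keyword() pages through Bing's async image-results endpoint,
# extracts the full-size image URLs from the embedded JSON ("murl" fields), and
# hands each one to a download() thread, throttled by in_progress and the
# bounded semaphore.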
def fetch_images_from_keyword(pool_sema: threading.Semaphore, keyword: str, output_dir: str, filters: str, limit: int):
    current = 0
    last = ''
    while True:
        time.sleep(0.1)

        if in_progress > 10:
            continue

        request_url = ('https://www.bing.com/images/async?q=' + urllib.parse.quote_plus(keyword)
                       + '&first=' + str(current) + '&count=35&adlt=' + adlt
                       + '&qft=' + ('' if filters is None else filters))
        request = urllib.request.Request(request_url, None, headers=urlopenheader)
        response = urllib.request.urlopen(request)
        html = response.read().decode('utf8')
        links = re.findall('murl":"(.*?)"', html)
        try:
            if links[-1] == last:  # no new results since the previous page: stop
                return
            for index, link in enumerate(links):
                if limit is not None and current + index >= limit:
                    return
                t = threading.Thread(target=download, args=(pool_sema, link, output_dir))
                t.start()
                current += 1
            last = links[-1]
        except IndexError:
            print('No search results for "{0}"'.format(keyword))
            return

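# backup_history() persists the dedup state so an interrupted run can resume.
# It doubles as the SIGINT handler: when invoked with signal arguments it dumps
# the history and exits.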
def backup_history(*args):
    download_history = open(os.path.join(output_dir, 'download_history.pickle'), 'wb')
    copied_tried_urls = list(tried_urls)  # pickle copies: the live containers must not change size mid-dump
    pickle.dump(copied_tried_urls, download_history)
    copied_image_md5s = dict(image_md5s)
    pickle.dump(copied_image_md5s, download_history)
    download_history.close()
    print('history_dumped')
    if args:
        exit(0)

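# Entry point: parse arguments, restore any previous download history, install
# the SIGINT handler, then run one search (or one search per line of the
# --search-file, each into its own subdirectory).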
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Bing image bulk downloader')
    parser.add_argument('-s', '--search-string', help='Keyword to search', required=False)
    parser.add_argument('-f', '--search-file', help='Path to a file containing search strings, one per line', required=False)
    parser.add_argument('-o', '--output', help='Output directory', required=False)
    parser.add_argument('--adult-filter-on', help='Enable adult filter', action='store_true', required=False)
    parser.add_argument('--adult-filter-off', help='Disable adult filter', action='store_true', required=False)
    parser.add_argument('--filters', help='Any query-based filters you want to append when searching for images, e.g. +filterui:license-L1', required=False)
    parser.add_argument('--limit', help='Do not search for more than the specified number of images.', required=False, type=int)
    parser.add_argument('--threads', help='Number of threads', type=int, default=20)
    args = parser.parse_args()
    if (not args.search_string) and (not args.search_file):
        parser.error('Provide either a search string or a path to a file containing search strings')
    if args.output:
        output_dir = args.output
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_dir_origin = output_dir
    signal.signal(signal.SIGINT, backup_history)
    try:
        download_history = open(os.path.join(output_dir, 'download_history.pickle'), 'rb')
        tried_urls = pickle.load(download_history)
        image_md5s = pickle.load(download_history)
        download_history.close()
    except (OSError, IOError):
        tried_urls = []
    if adult_filter:
        adlt = ''
    else:
        adlt = 'off'
    if args.adult_filter_off:
        adlt = 'off'
    elif args.adult_filter_on:
        adlt = ''
    pool_sema = threading.BoundedSemaphore(args.threads)
    if args.search_string:
        fetch_images_from_keyword(pool_sema, args.search_string, output_dir, args.filters, args.limit)
    elif args.search_file:
        try:
            inputFile = open(args.search_file)
        except (OSError, IOError):
            print("Couldn't open file {}".format(args.search_file))
            exit(1)
        for keyword in inputFile.readlines():
            keyword = keyword.strip()  # drop the trailing newline so it is not encoded into the query
            if not keyword:
                continue  # skip blank lines
            output_sub_dir = os.path.join(output_dir_origin, keyword.replace(' ', '_'))
            if not os.path.exists(output_sub_dir):
                os.makedirs(output_sub_dir)
            fetch_images_from_keyword(pool_sema, keyword, output_sub_dir, args.filters, args.limit)
            backup_history()
            time.sleep(10)
        inputFile.close()
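# Example invocations (paths and keywords are illustrative):
#   python3 scrape_me.py -s "sunsets" --limit 50 -o ./images
#   python3 scrape_me.py -f keywords.txt --threads 10 --filters +filterui:license-L1
# Ctrl+C triggers backup_history() via SIGINT, so a cancelled run can be resumed
# later against the same output directory.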