skip420

scrape_me

Sep 14th, 2022 (edited)
#!/usr/bin/env python3
# Bing image bulk downloader: finds all images from a simple keyword search given on the
# terminal and autosaves them to the output directory (a sub-folder per keyword is
# created when a search file is used).
#
# Example:
#   python3 scrape_me.py -s bitcoin -o find
#
# ========================================================================================================================
#
# Sample run:
# skip420@skip420:~/Desktop/products$ python3 scrape_me.py -s bitcoin -o find.txt
# OK: bitcoin-1.jpg
# FAIL: bitcoin-wert-2000x1333.jpg
# OK: 824915.jpg
# OK: bitcoin-coins-1570x1047.jpg
# Invalid image, not saving bitcoin-price-technical-analysis-do-.jpg
# OK: Bitcoin_collapse.jpg
# OK: bitcoin-news.png
# OK: il_1588xN.2959676796_jlte.jpg
# OK: maxresdefault.jpg
# OK: shutterstock_1708749826-scaled.jpg
# OK: bitcoin_PNG25.png
#
# python3 scrape_me.py -s "Baby Formula and price" --limit 50 -o /home/skip420/Desktop/products
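#
# The script also accepts a file of search strings via -f/--search-file (one keyword per
# line); each keyword then gets its own sub-directory under the output directory.
# Illustrative example, "keywords.txt" is an assumed filename, not part of the original paste:
# python3 scrape_me.py -f keywords.txt --limit 25 -o /home/skip420/Desktop/products
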
import os, urllib.request, re, threading, posixpath, urllib.parse, argparse, socket, time, hashlib, pickle, signal, imghdr
# Note: imghdr is deprecated since Python 3.11 (removed in 3.13); it is used here only
# to sanity-check that a downloaded payload really is an image.

# config
output_dir = './google'  # default output dir
adult_filter = True  # do not disable the adult filter by default
socket.setdefaulttimeout(2)

tried_urls = []   # URLs already downloaded successfully
image_md5s = {}   # md5 hex digest -> saved filename, used to skip duplicate images
in_progress = 0   # number of downloads currently running
urlopenheader = {'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'}

def download(pool_sema: threading.Semaphore, url: str, output_dir: str):
    global in_progress

    if url in tried_urls:
        return
    pool_sema.acquire()
    in_progress += 1
    path = urllib.parse.urlsplit(url).path
    filename = posixpath.basename(path).split('?')[0]  # strip GET parameters from filename
    name, ext = os.path.splitext(filename)
    name = name[:36].strip()
    filename = name + ext

    try:
        request = urllib.request.Request(url, None, urlopenheader)
        image = urllib.request.urlopen(request).read()
        if not imghdr.what(None, image):
            print('Invalid image, not saving ' + filename)
            return

        md5_key = hashlib.md5(image).hexdigest()
        if md5_key in image_md5s:
            print('Image is a duplicate of ' + image_md5s[md5_key] + ', not saving ' + filename)
            return

        # Avoid overwriting a different file that happens to share the same name.
        i = 0
        while os.path.exists(os.path.join(output_dir, filename)):
            with open(os.path.join(output_dir, filename), 'rb') as existing:
                if hashlib.md5(existing.read()).hexdigest() == md5_key:
                    print('Already downloaded ' + filename + ', not saving')
                    return
            i += 1
            filename = "%s-%d%s" % (name, i, ext)

        image_md5s[md5_key] = filename

        with open(os.path.join(output_dir, filename), 'wb') as imagefile:
            imagefile.write(image)
        print("OK: " + filename)
        tried_urls.append(url)
    except Exception:
        print("FAIL: " + filename)
    finally:
        pool_sema.release()
        in_progress -= 1

def fetch_images_from_keyword(pool_sema: threading.Semaphore, keyword: str, output_dir: str, filters: str, limit: int):
    current = 0
    last = ''
    while True:
        time.sleep(0.1)

        # Throttle: never keep more than 10 downloads in flight at once.
        if in_progress > 10:
            continue

        # Bing's async image endpoint: 'first' is the result offset, 'count' the page size,
        # 'adlt' toggles the adult filter, and 'qft' carries any extra query filters.
        request_url = ('https://www.bing.com/images/async?q=' + urllib.parse.quote_plus(keyword)
                       + '&first=' + str(current) + '&count=35&adlt=' + adlt
                       + '&qft=' + ('' if filters is None else filters))
        request = urllib.request.Request(request_url, None, headers=urlopenheader)
        response = urllib.request.urlopen(request)
        html = response.read().decode('utf8')
        links = re.findall('murl":"(.*?)"', html)  # 'murl' holds the full-size image URL
        try:
            if links[-1] == last:
                return
            for index, link in enumerate(links):
                if limit is not None and current + index >= limit:
                    return
                t = threading.Thread(target=download, args=(pool_sema, link, output_dir))
                t.start()
                current += 1
            last = links[-1]
        except IndexError:
            print('No search results for "{0}"'.format(keyword))
            return

def backup_history(*args):
    # Also registered as the SIGINT handler, so Ctrl+C saves progress before exiting.
    with open(os.path.join(output_dir, 'download_history.pickle'), 'wb') as download_history:
        pickle.dump(tried_urls, download_history)
        # Dump a copy: the live dict must not change size while pickle iterates over it.
        copied_image_md5s = dict(image_md5s)
        pickle.dump(copied_image_md5s, download_history)
    print('history_dumped')
    if args:  # args are only present when called as a signal handler
        exit(0)

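# Illustrative helper (not part of the original script): backup_history() writes two
# pickled objects in sequence, so they can be read back the same way for inspection.
def load_history(history_path: str):
    """Return (tried_urls, image_md5s) from a download_history.pickle file."""
    with open(history_path, 'rb') as history_file:
        urls = pickle.load(history_file)   # list of URLs already downloaded
        md5s = pickle.load(history_file)   # dict of md5 hex digest -> saved filename
    return urls, md5s
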
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Bing image bulk downloader')
    parser.add_argument('-s', '--search-string', help='Keyword to search', required=False)
    parser.add_argument('-f', '--search-file', help='Path to a file containing search strings, one per line', required=False)
    parser.add_argument('-o', '--output', help='Output directory', required=False)
    parser.add_argument('--adult-filter-on', help='Enable adult filter', action='store_true', required=False)
    parser.add_argument('--adult-filter-off', help='Disable adult filter', action='store_true', required=False)
    parser.add_argument('--filters', help='Any query-based filters you want to append when searching for images, e.g. +filterui:license-L1', required=False)
    parser.add_argument('--limit', help='Make sure not to search for more than the specified number of images.', required=False, type=int)
    parser.add_argument('--threads', help='Number of threads', type=int, default=20)
    args = parser.parse_args()
    if (not args.search_string) and (not args.search_file):
        parser.error('Provide either a search string or a path to a file containing search strings')
    if args.output:
        output_dir = args.output
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_dir_origin = output_dir
    signal.signal(signal.SIGINT, backup_history)

    # Resume from a previous run if a download history exists in the output directory.
    try:
        with open(os.path.join(output_dir, 'download_history.pickle'), 'rb') as download_history:
            tried_urls = pickle.load(download_history)
            image_md5s = pickle.load(download_history)
    except (OSError, IOError):
        tried_urls = []

    # adlt='' keeps Bing's default (filtered) results; adlt='off' disables the adult filter.
    if adult_filter:
        adlt = ''
    else:
        adlt = 'off'
    if args.adult_filter_off:
        adlt = 'off'
    elif args.adult_filter_on:
        adlt = ''

    pool_sema = threading.BoundedSemaphore(args.threads)
    if args.search_string:
        fetch_images_from_keyword(pool_sema, args.search_string, output_dir, args.filters, args.limit)
    elif args.search_file:
        try:
            inputFile = open(args.search_file)
        except (OSError, IOError):
            print("Couldn't open file {}".format(args.search_file))
            exit(1)
        # One sub-directory per keyword, named after the keyword with spaces replaced by underscores.
        for keyword in inputFile.readlines():
            output_sub_dir = os.path.join(output_dir_origin, keyword.strip().replace(' ', '_'))
            if not os.path.exists(output_sub_dir):
                os.makedirs(output_sub_dir)
            fetch_images_from_keyword(pool_sema, keyword, output_sub_dir, args.filters, args.limit)
            backup_history()
            time.sleep(10)
        inputFile.close()

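# Illustrative invocation using --filters (the filter value is the one given in the
# --filters help text above, passed through to Bing's 'qft' query parameter):
# python3 scrape_me.py -s bitcoin --filters +filterui:license-L1 --limit 20 -o ./google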