'''
ClaimItch/0.5

requirements:
- python (tested on 3.8)
- requests
- beautiful soup
- lxml
- selenium
- firefox
- geckodriver

files and variables:
- SOURCES variable:   includes itch sales/collections or reddit threads you want to check; pass --recheck to recheck them
- history file:       includes the results of the current run so they can be used in future runs
                      see the HISTORY_KEYS variable
- log file

todo - functionality:
- better interface for SOURCES
- when discovering a game connected to a sale, check out the sale
- download non-claimable games?
- login?
- follow discovered reddit threads?

todo - coding:
- debug mode that enables breakpoints
- log exceptions and urls on error
- use classes?
- edge case: a non-writable config location - would do the work but lose the history
- intersection between SOURCES and discovered collections in has_more?
- confirm that it finds deeper reddit comments
- proper log
- proper config
- claim() return values
- "selenium.common.exceptions.ElementNotInteractableException: Message: Element <a class="button buy_btn" href=".."> could not be scrolled into view"
- selenium's performance?
- less strict parsing / navigation (use .lower) / fuller regex (to work with match and search)
- pylint
- a claimable game was recorded as dl_only, was it changed? https://melessthanthree.itch.io/lucah
'''

import os
import sys
import re
import json
import html
import argparse
import requests
from time import sleep, time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException


# add any itch sale/collection or reddit thread to this set
SOURCES = {
    'https://itch.io/c/757294/games-to-help-you-stay-inside',
    'https://itch.io/c/759545/self-isolation-on-a-budget',
    'https://old.reddit.com/r/FreeGameFindings/comments/fka4be/itchio_mega_thread/',
    'https://old.reddit.com/r/GameDeals/comments/fkq5c3/itchio_a_collecting_compiling_almost_every_single'
}


PATTERNS = {
    'itch_collection': r'.+itch\.io/c/.+',
    'itch_sale': r'.+itch\.io/s/.+',
    'itch_group': r'.+itch\.io/[sc]/\d+/.+',  # sale or collection
    'reddit_thread': r'.+(?P<thread>reddit\.com/r/.+/comments/.+)/.+',
    'itch_game': r'(http://|https://)?(?P<game>.+\.itch\.io/[^/?]+)'
}

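# Illustrative sketch of how these patterns are used below (the example URLs
# come from SOURCES and the todo list above):
# >>> re.match(PATTERNS['itch_group'], 'https://itch.io/c/757294/games-to-help-you-stay-inside') is not None
# True
# >>> re.search(PATTERNS['itch_game'], 'https://melessthanthree.itch.io/lucah')['game']
# 'melessthanthree.itch.io/lucah'
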
USER_AGENT = 'ClaimItch/0.5'


HISTORY_KEYS = [
    'urls',           # discovered game urls
    'claimed',        # claimed games
    'has_more',       # a sale, collection, or game that is connected to more sales
    'checked_groups', # a sale/collection that was checked for games, pass --recheck-groups to recheck it
    'dl_only',        # game is not claimable
    'web',            # game is not claimable or downloadable, web game
    'downloaded',     # games that were downloaded (edit this manually)
    'buy',            # game is not free
    'removed',        # game does not exist
    'error',          # games that broke the script
]

PROCESSED_GAMES = ('claimed', 'dl_only', 'downloaded', 'buy', 'removed', 'web')

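# The history file is plain JSON: one list per key in HISTORY_KEYS (sets in
# memory, lists on disk; see load_history/save_history). A minimal hand-written
# example, with made-up URLs:
#
# {
#   "urls": ["https://someone.itch.io/some-game"],
#   "claimed": ["https://someone.itch.io/some-game"],
#   "has_more": [],
#   "checked_groups": ["https://itch.io/c/757294/games-to-help-you-stay-inside"],
#   "dl_only": [], "web": [], "downloaded": [], "buy": [], "removed": [], "error": []
# }
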
class ParsingError(Exception):
    def __init__(self, url, *args, **kwargs):
        # breakpoint()
        self.url = url
        super().__init__(url, *args, **kwargs)


def extract_from_itch_group(group_page):
    '''
    INPUT  html of a sale or collection page
    OUTPUT urls of all games, urls of games that are marked as connected to more sales
    '''
    soup = BeautifulSoup(group_page, 'lxml')
    urls, more = set(), set()
    games = soup.find_all('div', class_='game_cell')
    for game in games:
        url = game.find('a').get('href')
        urls.add(url)
        if game.find('div', class_='blurb_outer') is not None:
            more.add(url)
    return urls, more

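# A minimal sketch of the markup this parser expects (a simplified, hypothetical
# fragment; real itch.io pages carry much more inside each game_cell div):
# >>> page = '<div class="game_cell"><a href="https://someone.itch.io/some-game"></a></div>'
# >>> extract_from_itch_group(page)
# ({'https://someone.itch.io/some-game'}, set())
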
def get_from_itch_group(group_url, sleep_time=15, max_page=None, sale=False):
    '''
    INPUT  itch.io collection or sale url
    OUTPUT see extract_from_itch_group
    '''
    if sale:
        max_page = 1  # sales don't seem to have pages
    page = 1
    urls = set()
    has_more = set()
    while max_page is None or page <= max_page:
        print(f' getting page {page}')
        params = {'page': page} if not sale else None
        res = requests.get(group_url, params=params)
        if res.status_code == 404:
            break
        elif res.status_code != 200:
            # breakpoint()
            res.raise_for_status()
        page += 1
        new_urls, new_more = extract_from_itch_group(res.text)
        urls.update(new_urls)
        has_more.update(new_more)
        print(f' sleeping for {sleep_time}s')
        sleep(sleep_time)
    print(f' got {len(urls)} games')
    return urls, has_more

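# Usage sketch (collection URL taken from SOURCES above; max_page caps the
# pagination loop, e.g. for a quick test run):
# >>> urls, has_more = get_from_itch_group('https://itch.io/c/757294/games-to-help-you-stay-inside', max_page=1)
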
def get_from_reddit_thread(url, sleep_time=15):
    '''
    INPUT  reddit thread url
    OUTPUT itch.io game urls, itch.io groups (sales, collections)
    '''
    global USER_AGENT, PATTERNS

    # https://www.reddit.com/dev/api#GET_comments_{article}
    json_url = f"https://{re.match(PATTERNS['reddit_thread'], url)['thread']}.json?threaded=false"
    urls = set()
    has_more = set()
    res = requests.get(json_url, headers={'User-Agent': USER_AGENT})
    if res.status_code != 200:
        res.raise_for_status()
    data = res.json()
    for listing in data:
        if listing['kind'].lower() != 'listing':
            raise ParsingError(json_url)
        children = listing['data']['children']
        for child in children:
            text = None
            if child['kind'] == 't3':
                text = child['data']['selftext_html']
            elif child['kind'] == 't1':
                text = child['data']['body_html']
            else:
                raise ParsingError(json_url)
            if text is not None and len(text) > 0:
                soup = BeautifulSoup(html.unescape(text), 'lxml')
                # skip anchors that have no href
                new_urls = set(a.get('href') for a in soup.find_all('a') if a.get('href'))
                urls.update(url for url in new_urls if re.match(PATTERNS['itch_game'], url))
                has_more.update(url for url in new_urls if re.match(PATTERNS['itch_group'], url))
    print(f' got {len(urls)} games | {len(has_more)} collections/sales')
    print(f' sleeping for {sleep_time}s')
    sleep(sleep_time)
    return urls, has_more

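# Shape of the reddit response this walks (abridged; see the API link above):
# the comments endpoint returns two Listings, the first holding the submission
# ('t3', selftext_html), the second holding the comments ('t1', body_html):
#
# [
#   {"kind": "Listing", "data": {"children": [{"kind": "t3", "data": {"selftext_html": "..."}}]}},
#   {"kind": "Listing", "data": {"children": [{"kind": "t1", "data": {"body_html": "..."}}]}}
# ]
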
def get_urls(url, sleep_time=15, max_page=None):
    global PATTERNS

    print(f'getting games from {url}')
    if re.match(PATTERNS['itch_collection'], url):
        return get_from_itch_group(url, sleep_time, max_page)
    elif re.match(PATTERNS['itch_sale'], url):
        return get_from_itch_group(url, sleep_time, sale=True)
    elif re.match(PATTERNS['reddit_thread'], url):
        return get_from_reddit_thread(url, sleep_time)
    else:
        # breakpoint()
        raise NotImplementedError(f'{url} is not supported')

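# Dispatch sketch: a /c/ url goes through the collection branch (paginated), a
# /s/ url is fetched as a single-page sale, and a reddit thread is read through
# its JSON API:
# >>> urls, has_more = get_urls('https://itch.io/c/759545/self-isolation-on-a-budget', max_page=1)
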
def claim(url, driver):
    '''
    INPUTS
      url     game url
      driver  a webdriver for a browser that is logged in to itch.io
    OUTPUT
      status
        'claimed'           success
        'dl_only'           cannot be claimed
        'web'               cannot be claimed or downloaded, web game
        'buy'               not free
        'claimed has_more'  success, and indicates that the game is connected to another sale
        'removed'           game does not exist
    '''
    global PATTERNS

    url = f"https://{re.search(PATTERNS['itch_game'], url)['game']}"
    print(f'handling {url}')

    driver.get(url)
    original_window = driver.current_window_handle
    assert len(driver.window_handles) == 1

    # removed game
    try:
        driver.find_element_by_css_selector('div.not_found_game_page')
        return 'removed'
    except NoSuchElementException:
        pass

    # already owned
    try:
        if 'You own this' in driver.find_element_by_css_selector('div.purchase_banner_inner h2').get_attribute('textContent'):
            print(f' already claimed: {url}')
            return 'claimed'
    except NoSuchElementException:
        pass

    # check if claimable, download only, or a web game
    try:
        buy = driver.find_element_by_css_selector('div.buy_row a.buy_btn')
    except NoSuchElementException:
        try:
            buy = driver.find_element_by_css_selector('section.game_download a.buy_btn')
        except NoSuchElementException:
            try:
                driver.find_element_by_css_selector('div.uploads')
                print(f' download only: {url}')
                return 'dl_only'
            except NoSuchElementException:
                try:
                    driver.find_element_by_css_selector('div.html_embed_widget')
                    print(f' web game: {url}')
                    return 'web'
                except NoSuchElementException as nse_e:
                    raise ParsingError(url) from nse_e

    if 'Download Now' in buy.get_attribute('textContent'):
        print(f' download only: {url}')
        return 'dl_only'
    elif 'buy now' in buy.get_attribute('textContent').lower():
        print(f' buy: {url}')
        return 'buy'
    # claim
    elif 'Download or claim' in buy.get_attribute('textContent'):
        # buy.location_once_scrolled_into_view
        # buy.click()
        driver.get(f'{url}/purchase')

        try:
            no_thanks = driver.find_element_by_css_selector('a.direct_download_btn')
        except NoSuchElementException as nse_e:
            raise ParsingError(url) from nse_e

        if 'No thanks, just take me to the downloads' in no_thanks.get_attribute('textContent'):
            no_thanks.click()

            # in case the download page opens in a new window
            original_window = switch_to_new_window(driver, original_window)

            try:
                claim_btn = driver.find_element_by_css_selector('div.claim_to_download_box form button')
            except NoSuchElementException as nse_e:
                raise ParsingError(url) from nse_e

            if 'claim' in claim_btn.get_attribute('textContent').lower():
                claim_btn.click()

                try:
                    message = driver.find_element_by_css_selector('div.game_download_page div.inner_column p')
                except NoSuchElementException as nse_e:
                    raise ParsingError(url) from nse_e

                if 'for the promotion' in message.get_attribute('textContent'):
                    print(f' just claimed | part of a sale: {url}')
                    return 'claimed has_more'
                elif 'You claimed this game' in message.get_attribute('textContent'):
                    print(f' just claimed: {url}')
                    return 'claimed'
                else:
                    raise ParsingError(url)
            else:
                raise ParsingError(url)
        else:
            raise ParsingError(url)
    else:
        raise ParsingError(url)

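# Note on the composite status: 'claimed has_more' is consumed with substring
# checks (see the claiming loop in main), so one return value can feed several
# history buckets at once:
# >>> result = 'claimed has_more'
# >>> 'claimed' in result, 'has_more' in result
# (True, True)
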
def create_driver(enable_images=False):
    options = webdriver.firefox.options.Options()
    if not enable_images:
        options.set_preference('permissions.default.image', 2)
    if os.path.exists('geckodriver.exe'):
        driver = webdriver.Firefox(options=options, executable_path='geckodriver.exe')
    else:
        # geckodriver should be in PATH
        driver = webdriver.Firefox(options=options)
    driver.implicitly_wait(10)
    return driver

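# Setting the Firefox preference 'permissions.default.image' to 2 blocks image
# loading, which speeds up page loads; --enable-images turns them back on.
# Headless mode is deliberately not used: main() needs a visible window for the
# manual itch.io login.
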
def switch_to_new_window(driver, original_window):
    '''If a new window was opened, close the current one and switch to the new window'''
    sleep(1)
    if len(driver.window_handles) > 1:
        new_handle = None
        for window_handle in driver.window_handles:
            if window_handle != original_window:
                new_handle = window_handle
                break
        driver.close()
        driver.switch_to.window(new_handle)
        return new_handle
    return original_window

def log(name, data):
    with open(name, 'a') as f:
        for k, v in data.items():
            f.write(k + ': ' + str(v) + '\n')

def load_history(name):
    global HISTORY_KEYS

    try:
        f = open(name, 'r')
        with f:
            data = json.load(f)
        print(f'loaded history from file {name}')
    except FileNotFoundError:
        data = dict()
        print(f'new history file will be created: {name}')
    history = {k: set(data.get(k, [])) for k in HISTORY_KEYS}
    return history


def save_history(name, data):
    print(f'writing history to file {name}')
    with open(name, 'w') as f:
        json.dump({k: list(v) for k, v in data.items()}, f, indent=2)

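# Round-trip sketch: sets in memory, lists on disk; any key missing from the
# file simply loads as an empty set ('example.history.json' is a made-up name):
# >>> save_history('example.history.json', {'urls': {'https://someone.itch.io/some-game'}})
# >>> load_history('example.history.json')['urls']
# {'https://someone.itch.io/some-game'}
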
def print_summary(history_file, history):
    global SOURCES, PATTERNS, PROCESSED_GAMES

    print('\nSUMMARY')

    if not os.path.exists(history_file):
        print(f'No history is stored in {history_file}')
        return

    print(f'History stored in {history_file}')
    print()

    print(f'Using {len(SOURCES)} main sources (use --recheck to recheck them)')
    print(f"Discovered {len(history['urls'])} games")
    print(f"Claimed {len(history['claimed'])} games")
    not_processed = history['urls'].difference(*map(history.get, PROCESSED_GAMES))
    print(f"{len(not_processed)} games should be claimed on the next run")
    print()

    itch_groups = set(filter(re.compile(PATTERNS['itch_group']).match, history['has_more']))
    itch_games = set(filter(re.compile(PATTERNS['itch_game']).match, history['has_more']))
    print(f"{len(itch_groups)} discovered collections / sales should be checked on the next run")
    print(f"{len(history['checked_groups'])} discovered collections / sales were checked (use --recheck-groups to recheck them)")
    print(f"{len(itch_games)} discovered games are connected to sales that may not have been checked")
    print(f"{len(history['removed'])} games were removed or invalid")
    print()

    print(f"Play {len(history['web'])} non-claimable and non-downloadable games online:")
    for url in history['web']:
        print(f'  {url}')
    print()

    print(f"Download {len(history['dl_only'])} non-claimable games manually:")
    for url in history['dl_only']:
        print(f'  {url}')
    print(f"{len(history['downloaded'])} games were marked as downloaded (to mark games: move them in the history file from 'dl_only' to 'downloaded')")
    print()

    print(f"Buy {len(history['buy'])} non-free games:")
    for url in history['buy']:
        print(f'  {url}')
    print()

def get_urls_and_update_history(history, sources, itch_groups):
    '''
    INPUT
      history      a dict that'll be updated as `sources` are processed
      sources      sources to get links from
      itch_groups  itch sales/collections in `sources` that should be marked as checked in `history`
    '''
    for i, source in enumerate(sources):
        print(f'{i+1}/{len(sources)}')
        new_urls, new_more = get_urls(source)
        history['urls'].update(new_urls)
        history['has_more'].update(new_more)
    history['checked_groups'].update(itch_groups)
    history['has_more'].difference_update(history['checked_groups'])

def main():
    global SOURCES, HISTORY_KEYS, PROCESSED_GAMES

    run_time = int(time())
    script_name = os.path.basename(os.path.splitext(sys.argv[0])[0])
    log_file = f'{script_name}.log.txt'
    default_history_file = f'{script_name}.history.json'
    log(log_file, {'# new run': run_time})

    arg_parser = argparse.ArgumentParser(
        description=f'Claim free itch.io games in an itch.io sale/collection or reddit thread. \
                    Writes the results (game links, claimed games, ..) to history_file. Logs to {log_file}')
    arg_parser.add_argument('history_file', nargs='?', help=f'a json file generated by a previous run of this script (default: {default_history_file})')
    arg_parser.add_argument('--show-history', action='store_true', help='show summary of history in history_file and exit')
    arg_parser.add_argument('--recheck', action='store_true', help='reload game links from SOURCES')
    arg_parser.add_argument('--recheck-groups', action='store_true', help='reload game links from discovered itch collections / sales')
    arg_parser.add_argument('--enable-images', action='store_true', help='load images in the browser while claiming games')
    args = arg_parser.parse_args()

    if args.history_file is not None:
        history_file = args.history_file
    else:
        history_file = default_history_file
    history = load_history(history_file)
    log(log_file, {'history_file': history_file})
    log(log_file, {k: len(v) for k, v in history.items()})

    if args.show_history:
        print_summary(history_file, history)
        sys.exit(0)

    # getting game links
    itch_groups = set(filter(re.compile(PATTERNS['itch_group']).match, history['has_more']))
    check_sources = not os.path.exists(history_file) or args.recheck
    check_groups = len(itch_groups) > 0 or args.recheck_groups
    if check_sources or check_groups:
        print('will reload game urls from the internet')
        # keep getting newly discovered sales/collections
        first_pass = True
        while True:
            target_sources = set()
            itch_groups = set(filter(re.compile(PATTERNS['itch_group']).match, history['has_more']))
            if first_pass:
                if check_sources:
                    target_sources.update(SOURCES)
                if args.recheck_groups:
                    itch_groups.update(history['checked_groups'])
            else:
                if len(itch_groups) == 0:
                    break
                else:
                    print('getting links from newly discovered sales/collections')
            target_sources.update(itch_groups)
            get_urls_and_update_history(history, target_sources, itch_groups)
            first_pass = False
            log(log_file, {'## got links': time(), 'sources': target_sources, 'urls': history['urls'], 'has_more': history['has_more']})
    else:
        print('using game urls saved in the history file')
        print(' pass the option --recheck and/or --recheck-groups to reload game urls from the internet')

    # claiming games
    url = None
    sleep_time = 15
    try:
        ignore = set().union(*map(history.get, PROCESSED_GAMES))
        valid = history['urls'].difference(ignore)
        if len(valid) > 0:
            with create_driver(args.enable_images) as driver:
                driver.get('https://itch.io/login')
                # manually log in
                input('A new Firefox window was opened. Log in to itch then press Enter to continue')
                for i, url in enumerate(valid):
                    print(f"{i+1}/{len(valid)} ({len(history['urls'])})")
                    if url not in ignore:
                        result = claim(url, driver)
                        if 'claimed' in result:
                            history['claimed'].add(url)
                        if 'dl_only' in result:
                            history['dl_only'].add(url)
                        if 'web' in result:
                            history['web'].add(url)
                        if 'has_more' in result:
                            history['has_more'].add(url)
                        if 'buy' in result:
                            history['buy'].add(url)
                        if 'removed' in result:
                            history['removed'].add(url)
                        print(f' sleeping for {sleep_time}s')
                        sleep(sleep_time)
    except ParsingError as pe:
        history['error'].add(pe.url)
    except Exception:
        history['error'].add(url)
        raise
    finally:
        print()
        save_history(history_file, history)
        print_summary(history_file, history)

if __name__ == '__main__':
    main()
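
# Usage sketch (flags as defined by the argparse setup in main; the history
# file name defaults to <script name>.history.json):
#   python claim_itch.py                             # first run: crawl SOURCES, then claim
#   python claim_itch.py --show-history              # print the summary and exit
#   python claim_itch.py --recheck --recheck-groups  # reload game links from the internet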