'''
ClaimItch/0.9

requirements:
- python (tested on 3.8)
- requests
- beautiful soup
- lxml
- selenium
- firefox
- geckodriver

files and variables:
- SOURCES variable:   includes itch sales/collections or reddit threads you want to check; pass --recheck to recheck them
- history file:       includes the results of the current run so they can be used in future runs;
                      see the HISTORY_KEYS variable
- log file

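example of the history file's shape (keys come from HISTORY_KEYS below;
the entries are illustrative, not real data):
  {
    "urls": ["https://someone.itch.io/some-game"],
    "claimed": ["https://someone.itch.io/some-game"],
    "has_more": [],
    ...
  }
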
todo - functionality:
- better interface for SOURCES
- separate always-free download-only games like https://leafxel.itch.io/hojiya
- when discovering a game connected to a sale, check out the sale
- games that redirect to a sale
- notification of new script version
- download non-claimable games?
- login?
- follow discovered reddit threads?

todo - coding:
- debug mode that enables breakpoints
- log exceptions and urls on error
- use classes?
- edge case: non-writable config location - the script would do the work but lose the history
- intersection between SOURCES and discovered collections in has_more?
- confirm that the keys before & after don't need to be checked in reddit's json
- proper log
- proper config
- claim() return values
- "selenium.common.exceptions.ElementNotInteractableException: Message: Element <a class="button buy_btn" href=".."> could not be scrolled into view"
- selenium's performance?
- less strict parsing / navigation (use .lower) / fuller regex (to work with match and search)
- pylint
- a claimable game was recorded as dl_only, was it changed? https://melessthanthree.itch.io/lucah
'''

import os
import sys
import re
import json
import html
import argparse
import requests
from time import sleep, time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException


# add any itch sale/collection or reddit thread to this set
SOURCES = {
    'https://itch.io/c/757294/games-to-help-you-stay-inside',
    'https://itch.io/c/759545/self-isolation-on-a-budget',
    'https://old.reddit.com/r/FreeGameFindings/comments/fka4be/itchio_mega_thread/',
    'https://old.reddit.com/r/GameDeals/comments/fkq5c3/itchio_a_collecting_compiling_almost_every_single',
    'https://itch.io/c/537762/already-claimed-will-be-on-sale-again',
    'https://old.reddit.com/r/FreeGameFindings/comments/fxhotl/itchio_mega_thread/',
    'https://old.reddit.com/r/FreeGameFindings/comments/gbcjdn/itchio_mega_thread_3/'
}


PATTERNS = {
    'itch_collection': r'.+itch\.io/c/.+',
    'itch_sale': r'.+itch\.io/s/.+',
    'itch_group': r'.+itch\.io/[sc]/\d+/.+',  # sale or collection
    'reddit_thread': r'.+(?P<thread>reddit\.com/r/.+/comments/.+)/.+',
    'itch_game': r'(http://|https://)?(?P<game>.+\.itch\.io/[^/?]+)'
}
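
# a quick sanity check of the patterns above (game url taken from the todos):
#   re.match(PATTERNS['itch_game'], 'https://leafxel.itch.io/hojiya')['game']
#   -> 'leafxel.itch.io/hojiya'
# a sale url like 'https://itch.io/s/12345/some-sale' (id made up) matches both
# 'itch_sale' and 'itch_group'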


USER_AGENT = 'ClaimItch/0.9'


HISTORY_KEYS = [
    'urls',           # discovered game urls
    'claimed',        # claimed games
    'has_more',       # a sale, collection, or game that is connected to more sales
    'checked_groups', # a sale/collection that was checked for games, pass --recheck-groups to recheck it
    'dl_only',        # game is not claimable
    'dl_only_old',    # downloadable game you want to skip for now
    'always_free',    # downloadable game that is always free
    'web',            # game is not claimable or downloadable, web game
    'downloaded',     # games that were downloaded (edit this manually)
    'buy',            # game is not free
    'removed',        # game does not exist
    'error',          # games that broke the script
    'old_error',      # games that broke the script but were fixed later
]

PROCESSED_GAMES = ('claimed', 'dl_only', 'dl_only_old', 'downloaded', 'buy', 'removed', 'web', 'always_free')

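# PROCESSED_GAMES is used to compute what is left to handle, e.g. (from print_summary and main below):
#   not_processed = history['urls'].difference(*map(history.get, PROCESSED_GAMES))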

class ParsingError(Exception):
    def __init__(self, url, *args, **kwargs):
        # breakpoint()
        self.url = url
        super().__init__(url, *args, **kwargs)


def extract_from_itch_group(group_page):
    '''
    INPUT  html sale or collection page
    OUTPUT urls of all games, urls of games whose blurb notes that they are connected to more sales
    '''
    soup = BeautifulSoup(group_page, 'lxml')
    urls, more = set(), set()
    games = soup.find_all('div', class_='game_cell')
    for game in games:
        url = game.find('a').get('href')
        urls.add(url)
        if game.find('div', class_='blurb_outer') is not None:
            more.add(url)
    return urls, more

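# a minimal sketch of the markup extract_from_itch_group expects, inferred from
# the selectors above (real itch.io pages carry more attributes and nesting):
#   <div class="game_cell">
#     <a href="https://someone.itch.io/some-game">...</a>
#     <div class="blurb_outer">...</div>  <!-- only when tied to more sales -->
#   </div>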

def get_from_itch_group(group_url, sleep_time=15, max_page=None, sale=False):
    '''
    INPUT  itch.io collection url
    OUTPUT see extract_from_itch_group
    '''
    if sale:
        max_page = 1  # sales don't seem to have pages
    page = 1
    urls = set()
    has_more = set()
    while max_page is None or page <= max_page:
        print(f' getting page {page}')
        params = {'page': page} if not sale else None
        res = requests.get(group_url, params=params)
        if res.status_code == 404:
            break
        elif res.status_code != 200:
            # breakpoint()
            res.raise_for_status()
        page += 1
        new_urls, new_more = extract_from_itch_group(res.text)
        urls.update(new_urls)
        has_more.update(new_more)
        print(f' sleeping for {sleep_time}s')
        sleep(sleep_time)
    print(f' got {len(urls)} games')
    return urls, has_more


def get_from_reddit_thread(url, sleep_time=15):
    '''
    INPUT  reddit thread url
    OUTPUT itch.io game urls, itch.io groups (sales, collections)
    '''
    global USER_AGENT, PATTERNS

    # https://www.reddit.com/dev/api#GET_comments_{article}
    # https://github.com/reddit-archive/reddit/wiki/JSON
    base_url = f"https://{re.match(PATTERNS['reddit_thread'], url)['thread']}"  # does not end with /
    urls = set()
    has_more = set()

    chains = ['']
    while len(chains) > 0:
        current_chain = chains.pop()
        print(f' getting a comment chain {current_chain}')
        json_url = base_url + current_chain + '.json?threaded=false'
        res = requests.get(json_url, headers={'User-Agent': USER_AGENT})
        if res.status_code != 200:
            res.raise_for_status()
        data = res.json()
        for listing in data:
            if listing['kind'].lower() != 'listing':
                raise ParsingError(json_url)
            children = listing['data']['children']
            for child in children:
                text = None
                if child['kind'] == 't3':
                    text = child['data']['selftext_html']
                elif child['kind'] == 't1':
                    text = child['data']['body_html']
                elif child['kind'] == 'more':
                    chains.extend(['/thread/' + chain for chain in child['data']['children']])
                else:
                    raise ParsingError(json_url)
                if text is not None and len(text) > 0:
                    soup = BeautifulSoup(html.unescape(text), 'lxml')
                    new_urls = set(a.get('href') for a in soup.find_all('a'))
                    urls.update(url for url in new_urls if re.match(PATTERNS['itch_game'], url))
                    has_more.update(url for url in new_urls if re.match(PATTERNS['itch_group'], url))
        print(f' sleeping for {sleep_time}s')
        sleep(sleep_time)
    print(f' got {len(urls)} games | {len(has_more)} collections/sales')
    return urls, has_more

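# examples of the json urls this builds (thread and comment ids here are hypothetical):
#   https://reddit.com/r/GameDeals/comments/abc123.json?threaded=false                (top level)
#   https://reddit.com/r/GameDeals/comments/abc123/thread/def456.json?threaded=false  (a 'more' chain)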

def get_urls(url, sleep_time=15, max_page=None):
    global PATTERNS

    print(f'getting games from {url}')
    if re.match(PATTERNS['itch_collection'], url):
        return get_from_itch_group(url, sleep_time, max_page)
    elif re.match(PATTERNS['itch_sale'], url):
        return get_from_itch_group(url, sleep_time, sale=True)
    elif re.match(PATTERNS['reddit_thread'], url):
        return get_from_reddit_thread(url, sleep_time)
    else:
        # breakpoint()
        raise NotImplementedError(f'{url} is not supported')

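# how a source url is dispatched (both urls are from SOURCES above):
#   get_urls('https://itch.io/c/757294/games-to-help-you-stay-inside')                            # -> get_from_itch_group
#   get_urls('https://old.reddit.com/r/FreeGameFindings/comments/fka4be/itchio_mega_thread/')    # -> get_from_reddit_thread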

def claim(url, driver):
    '''
    INPUTS
      url     game url
      driver  a webdriver for a browser that is logged in to itch.io
    OUTPUT
      status
        'claimed'           success
        'dl_only'           cannot be claimed
        'web'               cannot be claimed or downloaded, web game
        'buy'               not for sale
        'claimed has_more'  success, and indicates that the game is connected to another sale
        'removed'           game does not exist
        'always_free'       dl_only game that is always free
    '''
    global PATTERNS

    url = f"https://{re.search(PATTERNS['itch_game'], url)['game']}"
    print(f'handling {url}')

    driver.get(url)
    original_window = driver.current_window_handle
    assert len(driver.window_handles) == 1

    # removed game
    try:
        driver.find_element_by_css_selector('div.not_found_game_page')
        return 'removed'
    except NoSuchElementException:
        pass

    # already owned
    try:
        if 'You own this' in driver.find_element_by_css_selector('div.purchase_banner_inner h2').get_attribute('textContent'):
            print(f' already claimed: {url}')
            return 'claimed'
    except NoSuchElementException:
        pass

    # check if claimable, download only, or a web game
    try:
        buy = driver.find_element_by_css_selector('div.buy_row a.buy_btn')
    except NoSuchElementException:
        try:
            buy = driver.find_element_by_css_selector('section.game_download a.buy_btn')
        except NoSuchElementException:
            try:
                driver.find_element_by_css_selector('div.uploads')
                print(f' download only: {url}')
                return 'dl_only'
            except NoSuchElementException:
                try:
                    driver.find_element_by_css_selector('div.html_embed_widget')
                    print(f' web game: {url}')
                    return 'web'
                except NoSuchElementException as nse_e:
                    raise ParsingError(url) from nse_e

    if 'Download Now' in buy.get_attribute('textContent'):
        try:
            sale_rate = driver.find_element_by_css_selector('.sale_rate')
        except NoSuchElementException:
            print(f' always free: {url}')
            return 'always_free'
        else:
            if '100' in sale_rate.get_attribute('textContent'):
                print(f' download only: {url}')
                return 'dl_only'
            else:
                raise ParsingError(url)
    elif 'buy now' in buy.get_attribute('textContent').lower():
        print(f' buy: {url}')
        return 'buy'
    elif 'pre-order' in buy.get_attribute('textContent').lower():
        print(f' buy (pre-order): {url}')
        return 'buy'
    # claim
    elif 'Download or claim' in buy.get_attribute('textContent'):
        #buy.location_once_scrolled_into_view
        #buy.click()
        driver.get(f'{url}/purchase')

        try:
            no_thanks = driver.find_element_by_css_selector('a.direct_download_btn')
        except NoSuchElementException as nse_e:
            raise ParsingError(url) from nse_e

        if 'No thanks, just take me to the downloads' in no_thanks.get_attribute('textContent'):
            no_thanks.click()

            # in case the download page opens in a new window
            original_window = switch_to_new_window(driver, original_window)

            try:
                claim_btn = driver.find_element_by_css_selector('div.claim_to_download_box form button')
            except NoSuchElementException as nse_e:
                raise ParsingError(url) from nse_e

            if 'claim' in claim_btn.get_attribute('textContent').lower():
                claim_btn.click()

                try:
                    message = driver.find_element_by_css_selector('div.game_download_page div.inner_column p')
                except NoSuchElementException as nse_e:
                    raise ParsingError(url) from nse_e

                if 'for the promotion' in message.get_attribute('textContent'):
                    print(f' just claimed | part of a sale: {url}')
                    return 'claimed has_more'
                elif 'You claimed this game' in message.get_attribute('textContent'):
                    print(f' just claimed: {url}')
                    return 'claimed'
                else:
                    raise ParsingError(url)
            else:
                raise ParsingError(url)
        else:
            raise ParsingError(url)
    else:
        raise ParsingError(url)


def create_driver(enable_images=False):
    options = webdriver.firefox.options.Options()
    if not enable_images:
        options.set_preference('permissions.default.image', 2)
    if os.path.exists('geckodriver.exe'):
        driver = webdriver.Firefox(options=options, executable_path='geckodriver.exe')
    else:
        # geckodriver should be in PATH
        driver = webdriver.Firefox(options=options)
    driver.implicitly_wait(10)
    return driver

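# a minimal sketch of how create_driver is used (mirrors main() below; the game url is hypothetical):
#   with create_driver() as driver:
#       driver.get('https://itch.io/login')
#       # log in manually, then
#       result = claim('https://someone.itch.io/some-game', driver)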

def switch_to_new_window(driver, original_window):
    '''If a new window was opened, switch to it'''
    sleep(1)
    if len(driver.window_handles) > 1:
        new_handle = None
        for window_handle in driver.window_handles:
            if window_handle != original_window:
                new_handle = window_handle
                break
        driver.close()
        driver.switch_to.window(new_handle)
        return new_handle
    return original_window


def log(name, data):
    with open(name, 'a') as f:
        for k, v in data.items():
            f.write(k + ': ' + str(v) + '\n')


def load_history(name):
    global HISTORY_KEYS

    try:
        f = open(name, 'r')
        with f:
            data = json.load(f)
        print(f'loaded history from file {name}')
    except FileNotFoundError:
        data = dict()
        print(f'new history file will be created: {name}')
    history = {k: set(data.get(k, [])) for k in HISTORY_KEYS}
    return history


def save_history(name, data):
    print(f'writing history to file {name}')
    with open(name, 'w') as f:
        json.dump({k: list(v) for k, v in data.items()}, f, indent=2)


def print_summary(history_file, history):
    global SOURCES, PATTERNS, PROCESSED_GAMES

    print('\nSUMMARY')

    if not os.path.exists(history_file):
        print(f'No history is stored in {history_file}')
        return

    print(f'History stored in {history_file}')
    print()

    print(f'Using {len(SOURCES)} main sources (use --recheck to recheck them)')
    print(f"Discovered {len(history['urls'])} games")
    print(f"Claimed {len(history['claimed'])} games")
    not_processed = history['urls'].difference(*map(history.get, PROCESSED_GAMES))
    print(f"{len(not_processed)} games should be claimed on the next run")
    print()

    itch_groups = set(filter(re.compile(PATTERNS['itch_group']).match, history['has_more']))
    itch_games = set(filter(re.compile(PATTERNS['itch_game']).match, history['has_more']))
    print(f"{len(itch_groups)} discovered collections / sales should be checked on the next run")
    print(f"{len(history['checked_groups'])} discovered collections / sales were checked (use --recheck-groups to recheck them)")
    print(f"{len(itch_games)} discovered games are connected to sales that may not have been checked")
    print(f"{len(history['removed'])} games were removed or invalid")
    print()

    print(f"Play {len(history['web'])} non-claimable and non-downloadable games online:")
    for url in history['web']:
        print(f'  {url}')
    print()

    print(f"Download {len(history['dl_only'])} non-claimable games manually:")
    for url in history['dl_only']:
        print(f'  {url}')
    print(f"{len(history['always_free'])} downloadable games are always free (not listed above)")
    print(f"{len(history['downloaded'])} games were marked as downloaded (to mark games: move them in the history file from 'dl_only' to 'downloaded')")
    print(f"{len(history['dl_only_old'])} downloadable games were skipped (moved to 'dl_only_old')")
    print()

    print(f"Buy {len(history['buy'])} non-free games.")
    print()

    print(f"Errors were encountered in {len(history['error'])} games (some may already be resolved):")
    for url in history['error']:
        print(f'  {url}')
    print()


def get_urls_and_update_history(history, sources, itch_groups):
    '''
    INPUT
      history      a dict that will be updated as `sources` are processed
      sources      sources to get links from
      itch_groups  itch sales/collections in `sources` that should be marked as checked in `history`
    '''
    for i, source in enumerate(sources):
        print(f'{i+1}/{len(sources)}')
        new_urls, new_more = get_urls(source)
        history['urls'].update(new_urls)
        history['has_more'].update(new_more)
    history['checked_groups'].update(itch_groups)
    history['has_more'].difference_update(history['checked_groups'])


def main():
    global SOURCES, HISTORY_KEYS, PROCESSED_GAMES

    run_time = int(time())
    script_name = os.path.basename(os.path.splitext(sys.argv[0])[0])
    log_file = f'{script_name}.log.txt'
    default_history_file = f'{script_name}.history.json'
    log(log_file, {'# new run': run_time})

    arg_parser = argparse.ArgumentParser(
        description=f'Claim free itch.io games in an itch.io sale/collection or reddit thread. '
                    f'Writes the results (game links, claimed games, ..) to history_file. Logs to {log_file}')
    arg_parser.add_argument('history_file', nargs='?', help=f'a json file generated by a previous run of this script (default: {default_history_file})')
    arg_parser.add_argument('--show-history', action='store_true', help='show a summary of the history in history_file and exit')
    arg_parser.add_argument('--recheck', action='store_true', help='reload game links from SOURCES')
    arg_parser.add_argument('--recheck-groups', action='store_true', help='reload game links from discovered itch collections / sales')
    arg_parser.add_argument('--enable-images', action='store_true', help='load images in the browser while claiming games')
    arg_parser.add_argument('--ignore', action='store_true', help='continue even if an error occurs while handling a game')
    args = arg_parser.parse_args()

    if args.history_file is not None:
        history_file = args.history_file
    else:
        history_file = default_history_file
    history = load_history(history_file)
    log(log_file, {'history_file': history_file})
    log(log_file, {k: len(v) for k, v in history.items()})

    if args.show_history:
        print_summary(history_file, history)
        sys.exit(0)

    # getting game links
    itch_groups = set(filter(re.compile(PATTERNS['itch_group']).match, history['has_more']))
    check_sources = not os.path.exists(history_file) or args.recheck
    check_groups = len(itch_groups) > 0 or args.recheck_groups
    if check_sources or check_groups:
        print('will reload game urls from the internet')
        # keep getting newly discovered sales/collections
        first_pass = True
        while True:
            target_sources = set()
            itch_groups = set(filter(re.compile(PATTERNS['itch_group']).match, history['has_more']))
            if first_pass:
                if check_sources:
                    target_sources.update(SOURCES)
                if args.recheck_groups:
                    itch_groups.update(history['checked_groups'])
            else:
                if len(itch_groups) == 0:
                    break
                else:
                    print('getting links from newly discovered sales/collections')
            target_sources.update(itch_groups)
            get_urls_and_update_history(history, target_sources, itch_groups)
            first_pass = False
            log(log_file, {'## got links': time(), 'sources': target_sources, 'urls': history['urls'], 'has_more': history['has_more']})
    else:
        print('using game urls saved in the history file')
        print(' pass the option --recheck and/or --recheck-groups to reload game urls from the internet')

    # claiming games
    url = None
    sleep_time = 15
    try:
        ignore = set().union(*map(history.get, PROCESSED_GAMES))
        valid = history['urls'].difference(ignore)
        if len(valid) > 0:
            with create_driver(args.enable_images) as driver:
                driver.get('https://itch.io/login')
                # manually log in
                input('A new Firefox window was opened. Log in to itch, then press Enter to continue')
                for i, url in enumerate(valid):
                    print(f"{i+1}/{len(valid)} ({len(history['urls'])})")
                    if url not in ignore:
                        try:
                            result = claim(url, driver)
                        except ParsingError as pe:
                            if not args.ignore:
                                raise
                            history['error'].add(pe.url)
                            print(f'Unknown Error: skipping {pe.url}')
                        else:
                            if url in history['error']:
                                history['error'].remove(url)
                                history['old_error'].add(url)
                            if 'claimed' in result:
                                history['claimed'].add(url)
                            if 'web' in result:
                                history['web'].add(url)
                            if 'has_more' in result:
                                history['has_more'].add(url)
                            if 'buy' in result:
                                history['buy'].add(url)
                            if 'removed' in result:
                                history['removed'].add(url)
                            if 'always_free' in result:
                                history['always_free'].add(url)
                                continue
                            if 'dl_only' in result:
                                history['dl_only'].add(url)
                            print(f' sleeping for {sleep_time}s')
                        sleep(sleep_time)
    except ParsingError as pe:
        history['error'].add(pe.url)
        raise
    except Exception:
        if url is not None:
            history['error'].add(url)
        raise
    finally:
        print()
        save_history(history_file, history)
        print_summary(history_file, history)


if __name__ == '__main__':
    main()