'''
ClaimItch/0.6

requirements:
- python (tested on 3.8)
- requests
- beautiful soup
- lxml
- selenium
- firefox
- geckodriver

files and variables:
- SOURCES variable:   includes itch sales/collections or reddit threads you want to check, pass --recheck to recheck them
- history file:       includes the results of the current run so they can be used in future runs
                      see the HISTORY_KEYS variable
- log file

todo - functionality:
- better interface for SOURCES
- when discovering a game connected to a sale, check out the sale
- download non-claimable games?
- login?
- follow discovered reddit threads?

todo - coding:
- debug mode that enables breakpoints
- log exceptions and urls on error
- use classes?
- edge case: a non-writable config location - the script would do the work but lose the history
- intersection between SOURCES and discovered collections in has_more?
- confirm that the keys before & after don't need to be checked in reddit's json
- proper log
- proper config
- claim() return values
- "selenium.common.exceptions.ElementNotInteractableException: Message: Element <a class="button buy_btn" href=".."> could not be scrolled into view"
- selenium's performance?
- less strict parsing / navigation (use .lower) / fuller regex (to work with match and search)
- pylint
- a claimable game was recorded as dl_only, was it changed? https://melessthanthree.itch.io/lucah
'''
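
# Example invocations (a sketch; assumes this file is saved as claim_itch.py):
#   python claim_itch.py                  # claim games, using claim_itch.history.json
#   python claim_itch.py --show-history   # print a summary of the history file and exit
#   python claim_itch.py --recheck        # reload game urls from SOURCES
#   python claim_itch.py my.history.json --recheck-groups --enable-images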

import os
import sys
import re
import json
import html
import argparse
import requests
from time import sleep, time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException


# add any itch sale/collection or reddit thread to this set
SOURCES = {
    'https://itch.io/c/757294/games-to-help-you-stay-inside',
    'https://itch.io/c/759545/self-isolation-on-a-budget',
    'https://old.reddit.com/r/FreeGameFindings/comments/fka4be/itchio_mega_thread/',
    'https://old.reddit.com/r/GameDeals/comments/fkq5c3/itchio_a_collecting_compiling_almost_every_single',
    'https://itch.io/c/537762/already-claimed-will-be-on-sale-again',
    'https://old.reddit.com/r/FreeGameFindings/comments/fxhotl/itchio_mega_thread/'
}


PATTERNS = {
    'itch_collection': r'.+itch\.io/c/.+',
    'itch_sale': r'.+itch\.io/s/.+',
    'itch_group': r'.+itch\.io/[sc]/\d+/.+', # sale or collection
    'reddit_thread': r'.+(?P<thread>reddit\.com/r/.+/comments/.+)/.+',
    'itch_game': r'(http://|https://)?(?P<game>.+\.itch\.io/[^/?]+)'
}
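
# A quick illustration of how the patterns are used (the sale id 12345 is made up):
#   re.match(PATTERNS['itch_game'], 'https://melessthanthree.itch.io/lucah')['game']
#       -> 'melessthanthree.itch.io/lucah'
#   re.match(PATTERNS['itch_group'], 'https://itch.io/s/12345/some-sale')
#       -> matches (sales and collections share the itch.io/[sc]/<id>/<slug> shape)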


USER_AGENT = 'ClaimItch/0.6'


HISTORY_KEYS = [
    'urls',           # discovered game urls
    'claimed',        # claimed games
    'has_more',       # a sale, collection, or game that is connected to more sales
    'checked_groups', # a sale/collection that was checked for games, pass --recheck-groups to recheck it
    'dl_only',        # game is not claimable
    'dl_only_old',    # downloadable game you want to skip for now
    'web',            # game is not claimable or downloadable, web game
    'downloaded',     # games that were downloaded (edit this manually)
    'buy',            # game is not free
    'removed',        # game does not exist
]

PROCESSED_GAMES = ('claimed', 'dl_only', 'dl_only_old', 'downloaded', 'buy', 'removed', 'web')
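
# The history file is plain JSON, one list per key in HISTORY_KEYS (see
# save_history below). Illustrative shape, with a made-up game url:
# {
#   "urls": ["https://someone.itch.io/some-game"],
#   "claimed": [],
#   ...
# }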


class ParsingError(Exception):
    def __init__(self, url, *args, **kwargs):
        # breakpoint()
        self.url = url
        super().__init__(url, *args, **kwargs)


def extract_from_itch_group(group_page):
    '''
    INPUT  html sale or collection page
    OUTPUT urls of all games, and urls of games that are marked as connected to more sales
    '''
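    # The itch.io markup this expects looks roughly like (simplified sketch,
    # using only the selectors queried below):
    #   <div class="game_cell">
    #     <a href="https://someone.itch.io/some-game">...</a>
    #     <div class="blurb_outer">...</div>  <!-- only on games tied to more sales -->
    #   </div>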
    soup = BeautifulSoup(group_page, 'lxml')
    urls, more = set(), set()
    games = soup.find_all('div', class_='game_cell')
    for game in games:
        url = game.find('a').get('href')
        urls.add(url)
        if game.find('div', class_='blurb_outer') is not None:
            more.add(url)
    return urls, more


def get_from_itch_group(group_url, sleep_time=15, max_page=None, sale=False):
    '''
    INPUT  itch.io collection url
    OUTPUT see extract_from_itch_group
    '''
    if sale:
        max_page = 1 # sales don't seem to have pages
    page = 1
    urls = set()
    has_more = set()
    while max_page is None or page <= max_page:
        print(f' getting page {page}')
        params = {'page': page} if not sale else None
        res = requests.get(group_url, params=params)
        if res.status_code == 404:
            break
        elif res.status_code != 200:
            # breakpoint()
            res.raise_for_status()
        page += 1
        new_urls, new_more = extract_from_itch_group(res.text)
        urls.update(new_urls)
        has_more.update(new_more)
        print(f' sleeping for {sleep_time}s')
        sleep(sleep_time)
    print(f' got {len(urls)} games')
    return urls, has_more


def get_from_reddit_thread(url, sleep_time=15):
    '''
    INPUT  reddit thread url
    OUTPUT itch.io game urls, itch.io groups (sales, collections)
    '''
    global USER_AGENT, PATTERNS

    # https://www.reddit.com/dev/api#GET_comments_{article}
    # https://github.com/reddit-archive/reddit/wiki/JSON
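    # The .json endpoint returns a list of Listing objects whose children are
    # the submission ('t3'), comments ('t1'), or 'more' stubs pointing at
    # additional comment chains. Roughly (illustrative):
    #   [{"kind": "Listing", "data": {"children": [{"kind": "t1", "data": {"body_html": "..."}}]}}]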
    base_url = f"https://{re.match(PATTERNS['reddit_thread'], url)['thread']}" # does not end with /
    urls = set()
    has_more = set()

    chains = ['']
    while len(chains) > 0:
        current_chain = chains.pop()
        print(f' getting a comment chain {current_chain}')
        json_url = base_url + current_chain + '.json?threaded=false'
        res = requests.get(json_url, headers={'User-Agent': USER_AGENT})
        if res.status_code != 200:
            res.raise_for_status()
        data = res.json()
        for listing in data:
            if listing['kind'].lower() != 'listing':
                raise ParsingError(json_url)
            children = listing['data']['children']
            for child in children:
                text = None
                if child['kind'] == 't3':
                    text = child['data']['selftext_html']
                elif child['kind'] == 't1':
                    text = child['data']['body_html']
                elif child['kind'] == 'more':
                    chains.extend(['/thread/' + chain for chain in child['data']['children']])
                else:
                    raise ParsingError(json_url)
                if text is not None and len(text) > 0:
                    soup = BeautifulSoup(html.unescape(text), 'lxml')
                    new_urls = set(a.get('href') for a in soup.find_all('a') if a.get('href')) # skip anchors without an href
                    urls.update(url for url in new_urls if re.match(PATTERNS['itch_game'], url))
                    has_more.update(url for url in new_urls if re.match(PATTERNS['itch_group'], url))
        print(f' sleeping for {sleep_time}s')
        sleep(sleep_time)
    print(f' got {len(urls)} games | {len(has_more)} collections/sales')
    return urls, has_more


def get_urls(url, sleep_time=15, max_page=None):
    global PATTERNS

    print(f'getting games from {url}')
    if re.match(PATTERNS['itch_collection'], url):
        return get_from_itch_group(url, sleep_time, max_page)
    elif re.match(PATTERNS['itch_sale'], url):
        return get_from_itch_group(url, sleep_time, sale=True)
    elif re.match(PATTERNS['reddit_thread'], url):
        return get_from_reddit_thread(url, sleep_time)
    else:
        # breakpoint()
        raise NotImplementedError(f'{url} is not supported')
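
# Usage sketch (url taken from SOURCES above; performs network requests):
#   game_urls, groups = get_urls('https://itch.io/c/757294/games-to-help-you-stay-inside')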


def claim(url, driver):
    '''
    INPUTS
      url     game url
      driver  a webdriver for a browser that is logged in to itch.io
    OUTPUT
      status
        'claimed'           success
        'dl_only'           cannot be claimed
        'web'               cannot be claimed or downloaded, web game
        'buy'               not for sale
        'claimed has_more'  success, and indicates that the game is connected to another sale
        'removed'           game does not exist
    '''
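    # NOTE: this uses the Selenium 3 find_element_by_* API (per the
    # requirements above); Selenium 4 replaces these calls with
    # find_element(By.CSS_SELECTOR, ...).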
    global PATTERNS

    url = f"https://{re.search(PATTERNS['itch_game'], url)['game']}"
    print(f'handling {url}')

    driver.get(url)
    original_window = driver.current_window_handle
    assert len(driver.window_handles) == 1

    # removed game
    try:
        driver.find_element_by_css_selector('div.not_found_game_page')
        return 'removed'
    except NoSuchElementException:
        pass

    # already owned
    try:
        if 'You own this' in driver.find_element_by_css_selector('div.purchase_banner_inner h2').get_attribute('textContent'):
            print(f' already claimed: {url}')
            return 'claimed'
    except NoSuchElementException:
        pass

    # check if claimable, download only, or a web game
    try:
        buy = driver.find_element_by_css_selector('div.buy_row a.buy_btn')
    except NoSuchElementException:
        try:
            buy = driver.find_element_by_css_selector('section.game_download a.buy_btn')
        except NoSuchElementException:
            try:
                driver.find_element_by_css_selector('div.uploads')
                print(f' download only: {url}')
                return 'dl_only'
            except NoSuchElementException:
                try:
                    driver.find_element_by_css_selector('div.html_embed_widget')
                    print(f' web game: {url}')
                    return 'web'
                except NoSuchElementException as nse_e:
                    raise ParsingError(url) from nse_e

    if 'Download Now' in buy.get_attribute('textContent'):
        print(f' download only: {url}')
        return 'dl_only'
    elif 'buy now' in buy.get_attribute('textContent').lower():
        print(f' buy: {url}')
        return 'buy'
    elif 'pre-order' in buy.get_attribute('textContent').lower():
        print(f' buy (pre-order): {url}')
        return 'buy'
    # claim
    elif 'Download or claim' in buy.get_attribute('textContent'):
        #buy.location_once_scrolled_into_view
        #buy.click()
        driver.get(f'{url}/purchase')

        try:
            no_thanks = driver.find_element_by_css_selector('a.direct_download_btn')
        except NoSuchElementException as nse_e:
            raise ParsingError(url) from nse_e

        if 'No thanks, just take me to the downloads' in no_thanks.get_attribute('textContent'):
            no_thanks.click()

            # in case the download page opens in a new window
            original_window = switch_to_new_window(driver, original_window)

            try:
                claim_btn = driver.find_element_by_css_selector('div.claim_to_download_box form button')
            except NoSuchElementException as nse_e:
                raise ParsingError(url) from nse_e

            if 'claim' in claim_btn.get_attribute('textContent').lower():
                claim_btn.click()

                try:
                    message = driver.find_element_by_css_selector('div.game_download_page div.inner_column p')
                except NoSuchElementException as nse_e:
                    raise ParsingError(url) from nse_e

                if 'for the promotion' in message.get_attribute('textContent'):
                    print(f' just claimed | part of a sale: {url}')
                    return 'claimed has_more'
                elif 'You claimed this game' in message.get_attribute('textContent'):
                    print(f' just claimed: {url}')
                    return 'claimed'
                else:
                    raise ParsingError(url)
            else:
                raise ParsingError(url)
        else:
            raise ParsingError(url)
    else:
        raise ParsingError(url)


def create_driver(enable_images=False):
    options = webdriver.firefox.options.Options()
    if not enable_images:
        options.set_preference('permissions.default.image', 2)
    if os.path.exists('geckodriver.exe'):
        driver = webdriver.Firefox(options=options, executable_path='geckodriver.exe')
    else:
        # geckodriver should be in PATH
        driver = webdriver.Firefox(options=options)
    driver.implicitly_wait(10)
    return driver
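
# A minimal usage sketch, mirroring what main() does below (opens a real
# Firefox window; the script needs a visible window so you can log in to
# itch.io manually, and the context manager quits the browser on exit):
#   with create_driver() as driver:
#       driver.get('https://itch.io/login')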


def switch_to_new_window(driver, original_window):
    '''If a new window was opened, close the original window and switch to the new one'''
    sleep(1)
    if len(driver.window_handles) > 1:
        new_handle = None
        for window_handle in driver.window_handles:
            if window_handle != original_window:
                new_handle = window_handle
                break
        driver.close()
        driver.switch_to.window(new_handle)
        return new_handle
    return original_window


def log(name, data):
    with open(name, 'a') as f:
        for k, v in data.items():
            f.write(f'{k}: {v}\n')


def load_history(name):
    global HISTORY_KEYS

    try:
        with open(name, 'r') as f:
            data = json.load(f)
        print(f'loaded history from file {name}')
    except FileNotFoundError:
        data = dict()
        print(f'new history file will be created: {name}')
    history = {k: set(data.get(k, [])) for k in HISTORY_KEYS}
    return history


def save_history(name, data):
    print(f'writing history to file {name}')
    with open(name, 'w') as f:
        json.dump({k: list(v) for k, v in data.items()}, f, indent=2)


def print_summary(history_file, history):
    global SOURCES, PATTERNS, PROCESSED_GAMES

    print('\nSUMMARY')

    if not os.path.exists(history_file):
        print(f'No history is stored in {history_file}')
        return

    print(f'History stored in {history_file}')
    print()

    print(f'Using {len(SOURCES)} main sources (use --recheck to recheck them)')
    print(f"Discovered {len(history['urls'])} games")
    print(f"Claimed {len(history['claimed'])} games")
    not_processed = history['urls'].difference(*map(history.get, PROCESSED_GAMES))
    print(f"{len(not_processed)} games should be claimed on the next run")
    print()

    itch_groups = set(filter(re.compile(PATTERNS['itch_group']).match, history['has_more']))
    itch_games = set(filter(re.compile(PATTERNS['itch_game']).match, history['has_more']))
    print(f"{len(itch_groups)} discovered collections / sales should be checked on the next run")
    print(f"{len(history['checked_groups'])} discovered collections / sales were checked (use --recheck-groups to recheck them)")
    print(f"{len(itch_games)} discovered games are connected to sales that may not have been checked")
    print(f"{len(history['removed'])} games were removed or invalid")
    print()

    print(f"Play {len(history['web'])} non-claimable and non-downloadable games online:")
    for url in history['web']:
        print(f'  {url}')
    print()

    print(f"Download {len(history['dl_only'])} non-claimable games manually:")
    for url in history['dl_only']:
        print(f'  {url}')
    print(f"{len(history['downloaded'])} games were marked as downloaded (to mark games: move them in the history file from 'dl_only' to 'downloaded')")
    print(f"{len(history['dl_only_old'])} downloadable games were skipped (moved to 'dl_only_old')")
    print()

    print(f"Buy {len(history['buy'])} non-free games:")
    for url in history['buy']:
        print(f'  {url}')
    print()


def get_urls_and_update_history(history, sources, itch_groups):
    '''
    INPUT
      history      a dict that will be updated as `sources` are processed
      sources      sources to get links from
      itch_groups  itch sales/collections in `sources` that should be marked as checked in `history`
    '''
    for i, source in enumerate(sources):
        print(f'{i+1}/{len(sources)}')
        new_urls, new_more = get_urls(source)
        history['urls'].update(new_urls)
        history['has_more'].update(new_more)
    history['checked_groups'].update(itch_groups)
    history['has_more'].difference_update(history['checked_groups'])


def main():
    global SOURCES, HISTORY_KEYS, PROCESSED_GAMES

    run_time = int(time())
    script_name = os.path.basename(os.path.splitext(sys.argv[0])[0])
    log_file = f'{script_name}.log.txt'
    default_history_file = f'{script_name}.history.json'
    log(log_file, {'# new run': run_time})

    arg_parser = argparse.ArgumentParser(
        description=f'Claim free itch.io games in an itch.io sale/collection or reddit thread. \
                    Writes the results (game links, claimed games, ..) to history_file. Logs to {log_file}')
    arg_parser.add_argument('history_file', nargs='?', help=f'a json file generated by a previous run of this script (default: {default_history_file})')
    arg_parser.add_argument('--show-history', action='store_true', help='show summary of history in history_file and exit')
    arg_parser.add_argument('--recheck', action='store_true', help='reload game links from SOURCES')
    arg_parser.add_argument('--recheck-groups', action='store_true', help='reload game links from discovered itch collections / sales')
    arg_parser.add_argument('--enable-images', action='store_true', help='load images in the browser while claiming games')
    args = arg_parser.parse_args()

    if args.history_file is not None:
        history_file = args.history_file
    else:
        history_file = default_history_file
    history = load_history(history_file)
    log(log_file, {'history_file': history_file})
    log(log_file, {k: len(v) for k, v in history.items()})

    if args.show_history:
        print_summary(history_file, history)
        sys.exit(0)

    # getting game links
    itch_groups = set(filter(re.compile(PATTERNS['itch_group']).match, history['has_more']))
    check_sources = not os.path.exists(history_file) or args.recheck
    check_groups = len(itch_groups) > 0 or args.recheck_groups
    if check_sources or check_groups:
        print('will reload game urls from the internet')
        # keep getting newly discovered sales/collections
        first_pass = True
        while True:
            target_sources = set()
            itch_groups = set(filter(re.compile(PATTERNS['itch_group']).match, history['has_more']))
            if first_pass:
                if check_sources:
                    target_sources.update(SOURCES)
                if args.recheck_groups:
                    itch_groups.update(history['checked_groups'])
            else:
                if len(itch_groups) == 0:
                    break
                else:
                    print('getting links from newly discovered sales/collections')
            target_sources.update(itch_groups)
            get_urls_and_update_history(history, target_sources, itch_groups)
            first_pass = False
            log(log_file, {'## got links': time(), 'sources': target_sources, 'urls': history['urls'], 'has_more': history['has_more']})
    else:
        print('using game urls saved in the history file')
        print(' pass the option --recheck and/or --recheck-groups to reload game urls from the internet')

    # claiming games
    url = None
    sleep_time = 15
    try:
        ignore = set().union(*map(history.get, PROCESSED_GAMES))
        valid = history['urls'].difference(ignore)
        if len(valid) > 0:
            with create_driver(args.enable_images) as driver:
                driver.get('https://itch.io/login')
                # manually log in
                input('A new Firefox window was opened. Log in to itch then press enter to continue')
                for i, url in enumerate(valid):
                    print(f"{i+1}/{len(valid)} ({len(history['urls'])})")
                    if url not in ignore:
                        result = claim(url, driver)
                        if 'claimed' in result:
                            history['claimed'].add(url)
                        if 'dl_only' in result:
                            history['dl_only'].add(url)
                        if 'web' in result:
                            history['web'].add(url)
                        if 'has_more' in result:
                            history['has_more'].add(url)
                        if 'buy' in result:
                            history['buy'].add(url)
                        if 'removed' in result:
                            history['removed'].add(url)
                        print(f' sleeping for {sleep_time}s')
                        sleep(sleep_time)
    except ParsingError as pe:
        history['error'].add(pe.url)
    except Exception:
        history['error'].add(url)
        raise
    finally:
        print()
        save_history(history_file, history)
        print_summary(history_file, history)


if __name__ == '__main__':
    main()