Advertisement
Guest User

Untitled

a guest
Nov 18th, 2016
88
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 38.31 KB | None | 0 0
  1. #!/usr/bin/python
  2. # -*- coding: utf-8 -*-
  3.  
  4. '''
  5. Search Architecture:
  6. - Have a list of accounts
  7. - Create an "overseer" thread
  8. - Search Overseer:
  9.   - Tracks incoming new location values
  10.   - Tracks "paused state"
  11.   - During a pause or on a new location, clears the current search queue
  12.   - Starts search_worker threads
  13. - Search Worker Threads each:
  14.   - Have a unique API login
  15.   - Listens to the same Queue for areas to scan
  16.   - Can re-login as needed
  17.   - Pushes finds to db queue and webhook queue
  18. '''
  19.  
  20. import logging
  21. import math
  22. import os
  23. import random
  24. import time
  25. import geopy
  26. import geopy.distance
  27. import requests
  28. import geocoder
  29.  
  30. from datetime import datetime
  31. from threading import Thread
  32. from queue import Queue, Empty
  33.  
  34. from pgoapi import PGoApi
  35. from pgoapi.utilities import f2i
  36. from pgoapi import utilities as util
  37. from pgoapi.exceptions import AuthException
  38.  
  39. from .models import parse_map, GymDetails, parse_gyms, MainWorker, WorkerStatus
  40. from .fakePogoApi import FakePogoApi
  41. from .utils import now
  42. from .transform import get_new_coords
  43. import schedulers
  44.  
  45. import terminalsize
  46.  
  47. log = logging.getLogger(__name__)
  48.  
  49. TIMESTAMP = '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
  50.  
  51.  
  52. # Apply a location jitter
  53. def jitterLocation(location=None, maxMeters=10):
  54.     origin = geopy.Point(location[0], location[1])
  55.     b = random.randint(0, 360)
  56.     d = math.sqrt(random.random()) * (float(maxMeters) / 1000)
  57.     destination = geopy.distance.distance(kilometers=d).destination(origin, b)
  58.     return (destination.latitude, destination.longitude, location[2])
  59.  
  60.  
  61. # Thread to handle user input
  62. def switch_status_printer(display_type, current_page):
  63.     # Get a reference to the root logger
  64.     mainlog = logging.getLogger()
  65.     # Disable logging of the first handler - the stream handler, and disable it's output
  66.     mainlog.handlers[0].setLevel(logging.CRITICAL)
  67.  
  68.     while True:
  69.         # Wait for the user to press a key
  70.         command = raw_input()
  71.  
  72.         if command == '':
  73.             # Switch between logging and display.
  74.             if display_type[0] != 'logs':
  75.                 # Disable display, enable on screen logging
  76.                 mainlog.handlers[0].setLevel(logging.DEBUG)
  77.                 display_type[0] = 'logs'
  78.                 # If logs are going slowly, sometimes it's hard to tell you switched.  Make it clear.
  79.                 print 'Showing logs...'
  80.             elif display_type[0] == 'logs':
  81.                 # Enable display, disable on screen logging (except for critical messages)
  82.                 mainlog.handlers[0].setLevel(logging.CRITICAL)
  83.                 display_type[0] = 'workers'
  84.         elif command.isdigit():
  85.                 current_page[0] = int(command)
  86.                 mainlog.handlers[0].setLevel(logging.CRITICAL)
  87.                 display_type[0] = 'workers'
  88.         elif command.lower() == 'f':
  89.                 mainlog.handlers[0].setLevel(logging.CRITICAL)
  90.                 display_type[0] = 'failedaccounts'
  91.  
  92.  
  93. # Thread to print out the status of each worker
  94. def status_printer(threadStatus, search_items_queue_array, db_updates_queue, wh_queue, account_queue, account_failures):
  95.     display_type = ["workers"]
  96.     current_page = [1]
  97.  
  98.     # Start another thread to get user input
  99.     t = Thread(target=switch_status_printer,
  100.                name='switch_status_printer',
  101.                args=(display_type, current_page))
  102.     t.daemon = True
  103.     t.start()
  104.  
  105.     while True:
  106.         time.sleep(1)
  107.  
  108.         if display_type[0] == 'logs':
  109.             # In log display mode, we don't want to show anything
  110.             continue
  111.  
  112.         # Create a list to hold all the status lines, so they can be printed all at once to reduce flicker
  113.         status_text = []
  114.  
  115.         if display_type[0] == 'workers':
  116.  
  117.             # Get the terminal size
  118.             width, height = terminalsize.get_terminal_size()
  119.             # Queue and overseer take 2 lines.  Switch message takes up 2 lines.  Remove an extra 2 for things like screen status lines.
  120.             usable_height = height - 6
  121.             # Prevent people running terminals only 6 lines high from getting a divide by zero
  122.             if usable_height < 1:
  123.                 usable_height = 1
  124.  
  125.             # Calculate total skipped items
  126.             skip_total = 0
  127.             for item in threadStatus:
  128.                 if 'skip' in threadStatus[item]:
  129.                     skip_total += threadStatus[item]['skip']
  130.  
  131.             # Print the queue length
  132.             search_items_queue_size = 0
  133.             for i in range(0, len(search_items_queue_array)):
  134.                 search_items_queue_size += search_items_queue_array[i].qsize()
  135.  
  136.             status_text.append('Queues: {} search items, {} db updates, {} webhook.  Total skipped items: {}. Spare accounts available: {}. Accounts on hold: {}'.format(search_items_queue_size, db_updates_queue.qsize(), wh_queue.qsize(), skip_total, account_queue.qsize(), len(account_failures)))
  137.  
  138.             # Print status of overseer
  139.             status_text.append('{} Overseer: {}'.format(threadStatus['Overseer']['scheduler'], threadStatus['Overseer']['message']))
  140.  
  141.             # Calculate the total number of pages.  Subtracting 1 for the overseer.
  142.             total_pages = math.ceil((len(threadStatus) - 1) / float(usable_height))
  143.  
  144.             # Prevent moving outside the valid range of pages
  145.             if current_page[0] > total_pages:
  146.                 current_page[0] = total_pages
  147.             if current_page[0] < 1:
  148.                 current_page[0] = 1
  149.  
  150.             # Calculate which lines to print
  151.             start_line = usable_height * (current_page[0] - 1)
  152.             end_line = start_line + usable_height
  153.             current_line = 1
  154.  
  155.             # Find the longest username and proxy
  156.             userlen = 4
  157.             proxylen = 5
  158.             for item in threadStatus:
  159.                 if threadStatus[item]['type'] == 'Worker':
  160.                     userlen = max(userlen, len(threadStatus[item]['user']))
  161.                     if 'proxy_display' in threadStatus[item]:
  162.                         proxylen = max(proxylen, len(str(threadStatus[item]['proxy_display'])))
  163.  
  164.             # How pretty
  165.             status = '{:10} | {:5} | {:' + str(userlen) + '} | {:' + str(proxylen) + '} | {:7} | {:6} | {:5} | {:7} | {:10}'
  166.  
  167.             # Print the worker status
  168.             status_text.append(status.format('Worker ID', 'Start', 'User', 'Proxy', 'Success', 'Failed', 'Empty', 'Skipped', 'Message'))
  169.             for item in sorted(threadStatus):
  170.                 if(threadStatus[item]['type'] == 'Worker'):
  171.                     current_line += 1
  172.  
  173.                     # Skip over items that don't belong on this page
  174.                     if current_line < start_line:
  175.                         continue
  176.                     if current_line > end_line:
  177.                         break
  178.  
  179.                     status_text.append(status.format(item, time.strftime('%H:%M', time.localtime(threadStatus[item]['starttime'])), threadStatus[item]['user'], threadStatus[item]['proxy_display'], threadStatus[item]['success'], threadStatus[item]['fail'], threadStatus[item]['noitems'], threadStatus[item]['skip'], threadStatus[item]['message']))
  180.  
  181.         elif display_type[0] == 'failedaccounts':
  182.             status_text.append('-----------------------------------------')
  183.             status_text.append('Accounts on hold:')
  184.             status_text.append('-----------------------------------------')
  185.  
  186.             # Find the longest account name
  187.             userlen = 4
  188.             for account in account_failures:
  189.                 userlen = max(userlen, len(account['account']['username']))
  190.  
  191.             status = '{:' + str(userlen) + '} | {:10} | {:20}'
  192.             status_text.append(status.format('User', 'Hold Time', 'Reason'))
  193.  
  194.             for account in account_failures:
  195.                 status_text.append(status.format(account['account']['username'], time.strftime('%H:%M:%S', time.localtime(account['last_fail_time'])), account['reason']))
  196.  
  197.         # Print the status_text for the current screen
  198.         status_text.append('Page {}/{}. Page number to switch pages. F to show on hold accounts. <ENTER> alone to switch between status and log view'.format(current_page[0], total_pages))
  199.         # Clear the screen
  200.         os.system('cls' if os.name == 'nt' else 'clear')
  201.         # Print status
  202.         print "\n".join(status_text)
  203.  
  204.  
  205. # The account recycler monitors failed accounts and places them back in the account queue 2 hours after they failed.
  206. # This allows accounts that were soft banned to be retried after giving them a chance to cool down.
  207. def account_recycler(accounts_queue, account_failures, args):
  208.     while True:
  209.         # Run once a minute
  210.         time.sleep(60)
  211.         log.info('Account recycler running. Checking status of {} accounts'.format(len(account_failures)))
  212.  
  213.         # Create a new copy of the failure list to search through, so we can iterate through it without it changing
  214.         failed_temp = list(account_failures)
  215.  
  216.         # Search through the list for any item that last failed before 2 hours ago
  217.         ok_time = now() - args.account_rest_interval
  218.         for a in failed_temp:
  219.             if a['last_fail_time'] <= ok_time:
  220.                 # Remove the account from the real list, and add to the account queue
  221.                 log.info('Account {} returning to active duty.'.format(a['account']['username']))
  222.                 account_failures.remove(a)
  223.                 accounts_queue.put(a['account'])
  224.             else:
  225.                 log.info('Account {} needs to cool off for {} seconds due to {}'.format(a['account']['username'], a['last_fail_time'] - ok_time, a['reason']))
  226.  
  227.  
  228. def worker_status_db_thread(threads_status, name, db_updates_queue):
  229.     log.info("Clearing previous statuses for '%s' worker", name)
  230.     WorkerStatus.delete().where(WorkerStatus.worker_name == name).execute()
  231.  
  232.     while True:
  233.         workers = {}
  234.         overseer = None
  235.         for status in threads_status.values():
  236.             if status['type'] == 'Overseer':
  237.                 overseer = {
  238.                     'worker_name': name,
  239.                     'message': status['message'],
  240.                     'method': status['scheduler'],
  241.                     'last_modified': datetime.utcnow()
  242.                 }
  243.             if status['type'] == 'Worker':
  244.                 workers[status['user']] = {
  245.                     'username': status['user'],
  246.                     'worker_name': name,
  247.                     'success': status['success'],
  248.                     'fail': status['fail'],
  249.                     'no_items': status['noitems'],
  250.                     'skip': status['skip'],
  251.                     'last_modified': datetime.utcnow(),
  252.                     'message': status['message']
  253.                 }
  254.         if overseer is not None:
  255.             db_updates_queue.put((MainWorker, {0: overseer}))
  256.             db_updates_queue.put((WorkerStatus, workers))
  257.         time.sleep(3)
  258.  
  259.  
# The main search loop that keeps an eye on the over all process.
#
# args                - parsed command-line options
# new_location_queue  - queue of incoming scan locations; only the newest is kept
# pause_bit           - threading.Event; while set, scanning is paused
# heartb              - single-element list with the last heartbeat timestamp,
#                       used for the --on-demand inactivity timeout
# encryption_lib_path - path to the encryption library, passed to each worker
# db_updates_queue    - queue consumed by the database updater
# wh_queue            - queue consumed by the webhook sender
def search_overseer_thread(args, new_location_queue, pause_bit, heartb, encryption_lib_path, db_updates_queue, wh_queue):

    log.info('Search overseer starting')

    # One search queue and one scheduler per hive (see --beehive below).
    search_items_queue_array = []
    scheduler_array = []
    account_queue = Queue()
    # Shared status dicts, rendered by status_printer and persisted by
    # worker_status_db_thread.
    threadStatus = {}

    '''
   Create a queue of accounts for workers to pull from. When a worker has failed too many times,
   it can get a new account from the queue and reinitialize the API. Workers should return accounts
   to the queue so they can be tried again later, but must wait a bit before doing so to
   prevent accounts from being cycled through too quickly.
   '''
    for i, account in enumerate(args.accounts):
        account_queue.put(account)

    # Create a list for failed accounts
    account_failures = []

    threadStatus['Overseer'] = {
        'message': 'Initializing',
        'type': 'Overseer',
        'scheduler': args.scheduler
    }

    # Optional live console status display.
    if(args.print_status):
        log.info('Starting status printer thread')
        t = Thread(target=status_printer,
                   name='status_printer',
                   args=(threadStatus, search_items_queue_array, db_updates_queue, wh_queue, account_queue, account_failures))
        t.daemon = True
        t.start()

    # Create account recycler thread
    log.info('Starting account recycler thread')
    t = Thread(target=account_recycler, name='account-recycler', args=(account_queue, account_failures, args))
    t.daemon = True
    t.start()

    # Optional worker-status persistence to the database.
    if args.status_name is not None:
        log.info('Starting status database thread')
        t = Thread(target=worker_status_db_thread,
                   name='status_worker_db',
                   args=(threadStatus, args.status_name, db_updates_queue))
        t.daemon = True
        t.start()

    search_items_queue = Queue()
    # Create the appropriate type of scheduler to handle the search queue.
    scheduler = schedulers.SchedulerFactory.get_scheduler(args.scheduler, [search_items_queue], threadStatus, args)

    scheduler_array.append(scheduler)
    search_items_queue_array.append(search_items_queue)

    # Create specified number of search_worker_thread
    log.info('Starting search worker threads')
    for i in range(0, args.workers):
        log.debug('Starting search worker thread %d', i)

        # In beehive mode every worker after the first gets its own
        # scheduler + queue (its own hive of the scan grid).
        if args.beehive and i > 0:
            search_items_queue = Queue()
            # Create the appropriate type of scheduler to handle the search queue.
            scheduler = schedulers.SchedulerFactory.get_scheduler(args.scheduler, [search_items_queue], threadStatus, args)

            scheduler_array.append(scheduler)
            search_items_queue_array.append(search_items_queue)

        # Set proxy for each worker, using round robin
        proxy_display = 'No'
        proxy_url = False

        if args.proxy:
            proxy_display = proxy_url = args.proxy[i % len(args.proxy)]
            # Unless --proxy-display is FULL, show only the proxy's index.
            if args.proxy_display.upper() != 'FULL':
                proxy_display = i % len(args.proxy)

        workerId = 'Worker {:03}'.format(i)
        threadStatus[workerId] = {
            'type': 'Worker',
            'message': 'Creating thread...',
            'success': 0,
            'fail': 0,
            'noitems': 0,
            'skip': 0,
            'user': '',
            'proxy_display': proxy_display,
            'proxy_url': proxy_url,
            'location': False,
            'last_scan_time': 0,
        }

        t = Thread(target=search_worker_thread,
                   name='search-worker-{}'.format(i),
                   args=(args, account_queue, account_failures, search_items_queue, pause_bit,
                         encryption_lib_path, threadStatus[workerId],
                         db_updates_queue, wh_queue))
        t.daemon = True
        t.start()

    # A place to track the current location
    current_location = False

    # The real work starts here but will halt on pause_bit.set()
    while True:

        # Auto-pause when no heartbeat has arrived within the on-demand window.
        if args.on_demand_timeout > 0 and (now() - args.on_demand_timeout) > heartb[0]:
            pause_bit.set()
            log.info("Searching paused due to inactivity...")

        # Wait here while scanning is paused
        while pause_bit.is_set():
            for i in range(0, len(scheduler_array)):
                scheduler_array[i].scanning_paused()
            time.sleep(1)

        # If a new location has been passed to us, get the most recent one
        if not new_location_queue.empty():
            log.info('New location caught, moving search grid')
            try:
                # Drain the queue; only the last (newest) location matters.
                while True:
                    current_location = new_location_queue.get_nowait()
            except Empty:
                pass

            step_distance = 0.07

            # Gym/pokestop-only scans can use a much larger step.
            if args.no_pokemon:
                step_distance = 0.9

            locations = _generate_locations(current_location, step_distance, args.step_limit, len(scheduler_array))

            # Re-center every hive's scheduler on its new location.
            for i in range(0, len(scheduler_array)):
                scheduler_array[i].location_changed(locations[i])

        # If there are no search_items_queue either the loop has finished (or been
        # cleared above) -- either way, time to fill it back up
        for i in range(0, len(search_items_queue_array)):
            if search_items_queue_array[i].empty():
                log.debug('Search queue empty, scheduling more items to scan')
                scheduler_array[i].schedule()
            else:
                # Peek (without removing) the next queue item for the status line.
                nextitem = search_items_queue_array[i].queue[0]
                threadStatus['Overseer']['message'] = 'Processing search queue, next item is {:6f},{:6f}'.format(nextitem[1][0], nextitem[1][1])
                # If times are specified, print the time of the next queue item, and how many seconds ahead/behind realtime
                if nextitem[2]:
                    threadStatus['Overseer']['message'] += ' @ {}'.format(time.strftime('%H:%M:%S', time.localtime(nextitem[2])))
                    if nextitem[2] > now():
                        threadStatus['Overseer']['message'] += ' ({}s ahead)'.format(nextitem[2] - now())
                    else:
                        threadStatus['Overseer']['message'] += ' ({}s behind)'.format(now() - nextitem[2])

        # Now we just give a little pause here
        time.sleep(1)
  416.  
  417.  
  418. # Generates the list of locations to scan
  419. def _generate_locations(current_location, step_distance, step_limit, worker_count):
  420.     NORTH = 0
  421.     EAST = 90
  422.     SOUTH = 180
  423.     WEST = 270
  424.  
  425.     xdist = math.sqrt(3) * step_distance  # dist between column centers
  426.     ydist = 3 * (step_distance / 2)  # dist between row centers
  427.  
  428.     results = []
  429.  
  430.     results.append((current_location[0], current_location[1], 0))
  431.  
  432.     loc = current_location
  433.     ring = 1
  434.  
  435.     while len(results) < worker_count:
  436.  
  437.         loc = get_new_coords(loc, ydist * (step_limit - 1), NORTH)
  438.         loc = get_new_coords(loc, xdist * (1.5 * step_limit - 0.5), EAST)
  439.         results.append((loc[0], loc[1], 0))
  440.  
  441.         for i in range(ring):
  442.             loc = get_new_coords(loc, ydist * step_limit, NORTH)
  443.             loc = get_new_coords(loc, xdist * (1.5 * step_limit - 1), WEST)
  444.             results.append((loc[0], loc[1], 0))
  445.  
  446.         for i in range(ring):
  447.             loc = get_new_coords(loc, ydist * (step_limit - 1), SOUTH)
  448.             loc = get_new_coords(loc, xdist * (1.5 * step_limit - 0.5), WEST)
  449.             results.append((loc[0], loc[1], 0))
  450.  
  451.         for i in range(ring):
  452.             loc = get_new_coords(loc, ydist * (2 * step_limit - 1), SOUTH)
  453.             loc = get_new_coords(loc, xdist * 0.5, WEST)
  454.             results.append((loc[0], loc[1], 0))
  455.  
  456.         for i in range(ring):
  457.             loc = get_new_coords(loc, ydist * (step_limit), SOUTH)
  458.             loc = get_new_coords(loc, xdist * (1.5 * step_limit - 1), EAST)
  459.             results.append((loc[0], loc[1], 0))
  460.  
  461.         for i in range(ring):
  462.             loc = get_new_coords(loc, ydist * (step_limit - 1), NORTH)
  463.             loc = get_new_coords(loc, xdist * (1.5 * step_limit - 0.5), EAST)
  464.             results.append((loc[0], loc[1], 0))
  465.  
  466.         # Back to start
  467.         for i in range(ring - 1):
  468.             loc = get_new_coords(loc, ydist * (2 * step_limit - 1), NORTH)
  469.             loc = get_new_coords(loc, xdist * 0.5, EAST)
  470.             results.append((loc[0], loc[1], 0))
  471.  
  472.         loc = get_new_coords(loc, ydist * (2 * step_limit - 1), NORTH)
  473.         loc = get_new_coords(loc, xdist * 0.5, EAST)
  474.  
  475.         ring += 1
  476.  
  477.     return results
  478.  
  479.  
  480. def search_worker_thread(args, account_queue, account_failures, search_items_queue, pause_bit, encryption_lib_path, status, dbq, whq):
  481.  
  482.     log.debug('Search worker thread starting')
  483.  
  484.     # The outer forever loop restarts only when the inner one is intentionally exited - which should only be done when the worker is failing too often, and probably banned.
  485.     # This reinitializes the API and grabs a new account from the queue.
  486.     while True:
  487.         try:
  488.             status['starttime'] = now()
  489.  
  490.             # Get account
  491.             status['message'] = 'Waiting to get new account from the queue'
  492.             log.info(status['message'])
  493.             account = account_queue.get()
  494.             status['message'] = 'Switching to account {}'.format(account['username'])
  495.             status['user'] = account['username']
  496.             log.info(status['message'])
  497.  
  498.             stagger_thread(args, account)
  499.  
  500.             # New lease of life right here
  501.             status['fail'] = 0
  502.             status['success'] = 0
  503.             status['noitems'] = 0
  504.             status['skip'] = 0
  505.             status['location'] = False
  506.             status['last_scan_time'] = 0
  507.  
  508.             # only sleep when consecutive_fails reaches max_failures, overall fails for stat purposes
  509.             consecutive_fails = 0
  510.             consecutive_empties = 0
  511.  
  512.             # Create the API instance this will use
  513.             if args.mock != '':
  514.                 api = FakePogoApi(args.mock)
  515.             else:
  516.                 api = PGoApi()
  517.  
  518.             if status['proxy_url']:
  519.                 log.debug("Using proxy %s", status['proxy_url'])
  520.                 api.set_proxy({'http': status['proxy_url'], 'https': status['proxy_url']})
  521.  
  522.             api.activate_signature(encryption_lib_path)
  523.  
  524.             # The forever loop for the searches
  525.             while True:
  526.  
  527.                 # If this account has been messing up too hard, let it rest
  528.                 if consecutive_fails >= args.max_failures:
  529.                     status['message'] = 'Account {} failed more than {} scans; possibly bad account. Switching accounts...'.format(account['username'], args.max_failures)
  530.                     log.warning(status['message'])
  531.                     account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'failures'})
  532.                     break  # exit this loop to get a new account and have the API recreated
  533.  
  534.                 while pause_bit.is_set():
  535.                     status['message'] = 'Scanning paused'
  536.                     time.sleep(2)
  537.  
  538.                 # If this account has been running too long, let it rest
  539.                 if (args.account_search_interval is not None):
  540.                     if (status['starttime'] <= (now() - args.account_search_interval)):
  541.                         status['message'] = 'Account {} is being rotated out to rest.'.format(account['username'])
  542.                         log.info(status['message'])
  543.                         account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'rest interval'})
  544.                         break
  545.  
  546.                 # Grab the next thing to search (when available)
  547.                 status['message'] = 'Waiting for item from queue'
  548.                 step, step_location, appears, leaves = search_items_queue.get()
  549.  
  550.                 # too soon?
  551.                 if appears and now() < appears + 10:  # adding a 10 second grace period
  552.                     first_loop = True
  553.                     paused = False
  554.                     while now() < appears + 10:
  555.                         if pause_bit.is_set():
  556.                             paused = True
  557.                             break  # why can't python just have `break 2`...
  558.                         remain = appears - now() + 10
  559.                         status['message'] = 'Early for {:6f},{:6f}; waiting {}s...'.format(step_location[0], step_location[1], remain)
  560.                         if first_loop:
  561.                             log.info(status['message'])
  562.                             first_loop = False
  563.                         time.sleep(1)
  564.                     if paused:
  565.                         search_items_queue.task_done()
  566.                         continue
  567.  
  568.                 # too late?
  569.                 if leaves and now() > (leaves - args.min_seconds_left):
  570.                     search_items_queue.task_done()
  571.                     status['skip'] += 1
  572.                     # it is slightly silly to put this in status['message'] since it'll be overwritten very shortly after. Oh well.
  573.                     status['message'] = 'Too late for location {:6f},{:6f}; skipping'.format(step_location[0], step_location[1])
  574.                     log.info(status['message'])
  575.                     # No sleep here; we've not done anything worth sleeping for. Plus we clearly need to catch up!
  576.                     continue
  577.                
  578.                 #set altitude
  579.                 altitude = geocoder.elevation([step_location[0], step_location[1]])
  580.                 step_location = (step_location[0], step_location[1], altitude.meters)
  581.  
  582.                 # Let the api know where we intend to be for this loop
  583.                 # doing this before check_login so it does not also have to be done there
  584.                 # when the auth token is refreshed
  585.                 api.set_position(*step_location)
  586.  
  587.                 # Ok, let's get started -- check our login status
  588.                 check_login(args, account, api, step_location, status['proxy_url'])
  589.  
  590.                 # putting this message after the check_login so the messages aren't out of order
  591.                 status['message'] = 'Searching at {:6f},{:6f}'.format(step_location[0], step_location[1])
  592.                 log.info(status['message'])
  593.  
  594.                 # Make the actual request (finally!)
  595.                 response_dict = map_request(api, step_location, args.jitter)
  596.  
  597.                 # G'damnit, nothing back. Mark it up, sleep, carry on
  598.                 if not response_dict:
  599.                     status['fail'] += 1
  600.                     consecutive_fails += 1
  601.                     status['message'] = 'Invalid response at {:6f},{:6f}, abandoning location'.format(step_location[0], step_location[1])
  602.                     log.error(status['message'])
  603.                     time.sleep(args.scan_delay)
  604.                     continue
  605.  
  606.                 # Got the response, parse it out, send todo's to db/wh queues
  607.                 try:
  608.                                         # Captcha check
  609.                     if args.captcha_solving:
  610.                         captcha_url = response_dict['responses']['CHECK_CHALLENGE']['challenge_url']
  611.                         if len(captcha_url) > 1:
  612.                             status['message'] = 'Account {} is encountering a captcha, starting 2captcha sequence'.format(account['username'])
  613.                             log.warning(status['message'])
  614.                             captcha_token = token_request(args, status, captcha_url)
  615.                             if 'ERROR' in captcha_token:
  616.                                 log.warning("Unable to resolve captcha, please check your 2captcha API key and/or wallet balance")
  617.                                 account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'catpcha failed to verify'})
  618.                                 break
  619.                             else:
  620.                                 status['message'] = 'Retrieved captcha token, attempting to verify challenge for {}'.format(account['username'])
  621.                                 log.info(status['message'])
  622.                                 response = api.verify_challenge(token=captcha_token)
  623.                                 if 'success' in response['responses']['VERIFY_CHALLENGE']:
  624.                                     status['message'] = "Account {} successfully uncaptcha'd".format(account['username'])
  625.                                     log.info(status['message'])
  626.                                 else:
  627.                                     status['message'] = "Account {} failed verifyChallenge, putting away account for now".format(account['username'])
  628.                                     log.info(status['message'])
  629.                                     account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'catpcha failed to verify'})
  630.                                     break
  631.  
  632.                     # Parse 'GET_MAP_OBJECTS'
  633.                     parsed = parse_map(args, response_dict, step_location, dbq, whq, api)
  634.                     search_items_queue.task_done()
  635.                     if parsed['count'] > 0:
  636.                          status['success'] += 1
  637.                          consecutive_empties = 0
  638.                     else:
  639.                          status['noitems'] += 1
  640.                          consecutive_empties += 1
  641.                     consecutive_fails = 0
  642.                     status['message'] = 'Search at {:6f},{:6f} completed with {} finds'.format(step_location[0], step_location[1], parsed['count'])
  643.                     log.debug(status['message'])
  644.                 except KeyError:
  645.                     parsed = False
  646.                     status['fail'] += 1
  647.                     consecutive_fails += 1
  648.                     status['message'] = 'Map parse failed at {:6f},{:6f}, abandoning location. {} may be banned.'.format(step_location[0], step_location[1], account['username'])
  649.                     log.exception(status['message'])
  650.  
  651.                 # Get detailed information about gyms
  652.                 if args.gym_info and parsed:
  653.                     # build up a list of gyms to update
  654.                     gyms_to_update = {}
  655.                     for gym in parsed['gyms'].values():
  656.                         # Can only get gym details within 1km of our position
  657.                         distance = calc_distance(step_location, [gym['latitude'], gym['longitude']])
  658.                         if distance < 1:
  659.                             # check if we already have details on this gym (if not, get them)
  660.                             try:
  661.                                 record = GymDetails.get(gym_id=gym['gym_id'])
  662.                             except GymDetails.DoesNotExist as e:
  663.                                 gyms_to_update[gym['gym_id']] = gym
  664.                                 continue
  665.  
  666.                             # if we have a record of this gym already, check if the gym has been updated since our last update
  667.                             if record.last_scanned < gym['last_modified']:
  668.                                 gyms_to_update[gym['gym_id']] = gym
  669.                                 continue
  670.                             else:
  671.                                 log.debug('Skipping update of gym @ %f/%f, up to date', gym['latitude'], gym['longitude'])
  672.                                 continue
  673.                         else:
  674.                             log.debug('Skipping update of gym @ %f/%f, too far away from our location at %f/%f (%fkm)', gym['latitude'], gym['longitude'], step_location[0], step_location[1], distance)
  675.  
  676.                     if len(gyms_to_update):
  677.                         gym_responses = {}
  678.                         current_gym = 1
  679.                         status['message'] = 'Updating {} gyms for location {},{}...'.format(len(gyms_to_update), step_location[0], step_location[1])
  680.                         log.debug(status['message'])
  681.  
  682.                         for gym in gyms_to_update.values():
  683.                             status['message'] = 'Getting details for gym {} of {} for location {},{}...'.format(current_gym, len(gyms_to_update), step_location[0], step_location[1])
  684.                             time.sleep(random.random() + 2)
  685.                             response = gym_request(api, step_location, gym)
  686.  
  687.                             # make sure the gym was in range. (sometimes the API gets cranky about gyms that are ALMOST 1km away)
  688.                             if response['responses']['GET_GYM_DETAILS']['result'] == 2:
  689.                                 log.warning('Gym @ %f/%f is out of range (%dkm), skipping', gym['latitude'], gym['longitude'], distance)
  690.                             else:
  691.                                 gym_responses[gym['gym_id']] = response['responses']['GET_GYM_DETAILS']
  692.  
  693.                             # increment which gym we're on (for status messages)
  694.                             current_gym += 1
  695.  
  696.                         status['message'] = 'Processing details of {} gyms for location {},{}...'.format(len(gyms_to_update), step_location[0], step_location[1])
  697.                         log.debug(status['message'])
  698.  
  699.                         if gym_responses:
  700.                             parse_gyms(args, gym_responses, whq)
  701.  
  702.                 # Record the time and place the worker left off at
  703.                 status['last_scan_time'] = now()
  704.                 status['location'] = step_location
  705.  
  706.                 # Always delay the desired amount after "scan" completion
  707.                 status['message'] += ', sleeping {}s until {}'.format(args.scan_delay, time.strftime('%H:%M:%S', time.localtime(time.time() + args.scan_delay)))
  708.                 time.sleep(args.scan_delay)
  709.  
  710.         # catch any process exceptions, log them, and continue the thread
  711.         except Exception as e:
  712.             status['message'] = 'Exception in search_worker using account {}. Restarting with fresh account. See logs for details.'.format(account['username'])
  713.             time.sleep(args.scan_delay)
  714.             log.error('Exception in search_worker under account {} Exception message: {}'.format(account['username'], e))
  715.             account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'exception'})
  716.  
  717.  
  718. def check_login(args, account, api, position, proxy_url):
  719.  
  720.     # Logged in? Enough time left? Cool!
  721.     if api._auth_provider and api._auth_provider._ticket_expire:
  722.         remaining_time = api._auth_provider._ticket_expire / 1000 - time.time()
  723.         if remaining_time > 60:
  724.             log.debug('Credentials remain valid for another %f seconds', remaining_time)
  725.             return
  726.  
  727.     # Try to login (a few times, but don't get stuck here)
  728.     i = 0
  729.     while i < args.login_retries:
  730.         try:
  731.             if proxy_url:
  732.                 api.set_authentication(provider=account['auth_service'], username=account['username'], password=account['password'], proxy_config={'http': proxy_url, 'https': proxy_url})
  733.             else:
  734.                 api.set_authentication(provider=account['auth_service'], username=account['username'], password=account['password'])
  735.             break
  736.         except AuthException:
  737.             if i >= args.login_retries:
  738.                 raise TooManyLoginAttempts('Exceeded login attempts')
  739.             else:
  740.                 i += 1
  741.                 log.error('Failed to login to Pokemon Go with account %s. Trying again in %g seconds', account['username'], args.login_delay)
  742.                 time.sleep(args.login_delay)
  743.  
  744.     log.debug('Login for account %s successful', account['username'])
  745.     time.sleep(20)
  746.  
  747.  
  748. def map_request(api, position, jitter=False):
  749.     # create scan_location to send to the api based off of position, because tuples aren't mutable
  750.     if jitter:
  751.         # jitter it, just a little bit.
  752.         scan_location = jitterLocation(position)
  753.         log.debug('Jittered to: %f/%f/%f', scan_location[0], scan_location[1], scan_location[2])
  754.     else:
  755.         # Just use the original coordinates
  756.         scan_location = position
  757.  
  758.     try:
  759.         cell_ids = util.get_cell_ids(scan_location[0], scan_location[1])
  760.         timestamps = [0, ] * len(cell_ids)
  761.         req = api.create_request()
  762.         response = req.check_challenge()
  763.         response = req.get_hatched_eggs()
  764.         response = req.get_inventory()
  765.         response = req.check_awarded_badges()
  766.         response = req.download_settings()
  767.         response = req.get_buddy_walked()
  768.         response = req.get_map_objects(latitude=f2i(scan_location[0]),
  769.                                        longitude=f2i(scan_location[1]),
  770.                                        since_timestamp_ms=timestamps,
  771.                                        cell_id=cell_ids)
  772.         response = req.call()
  773.         return response
  774.     except Exception as e:
  775.         log.warning('Exception while downloading map: %s', e)
  776.         return False
  777.  
  778.  
  779. def gym_request(api, position, gym):
  780.     try:
  781.         log.debug('Getting details for gym @ %f/%f (%fkm away)', gym['latitude'], gym['longitude'], calc_distance(position, [gym['latitude'], gym['longitude']]))
  782.         x = api.get_gym_details(gym_id=gym['gym_id'],
  783.                                 player_latitude=f2i(position[0]),
  784.                                 player_longitude=f2i(position[1]),
  785.                                 gym_latitude=gym['latitude'],
  786.                                 gym_longitude=gym['longitude'])
  787.  
  788.         # print pretty(x)
  789.         return x
  790.  
  791.     except Exception as e:
  792.         log.warning('Exception while downloading gym details: %s', e)
  793.         return False
  794.  
  795.  
  796.  
  797. def token_request(args, status, url):
  798.      s = requests.Session()
  799.      # Fetch the CAPTCHA_ID from 2captcha
  800.      try:
  801.          captcha_id = s.post("http://2captcha.com/in.php?key={}&method=userrecaptcha&googlekey={}&pageurl={}".format(args.captcha_key, args.captcha_dsk, url)).text.split('|')[1]
  802.          captcha_id = str(captcha_id)
  803.      # IndexError implies that the retuned response was a 2captcha error
  804.      except IndexError:
  805.          return 'ERROR'
  806.      status['message'] = 'Retrieved captcha ID: {}; now retrieving token'.format(captcha_id)
  807.      log.info(status['message'])
  808.      # Get the response, retry every 5 seconds if its not ready
  809.      recaptcha_response = s.get("http://2captcha.com/res.php?key={}&action=get&id={}".format(args.captcha_key, captcha_id)).text
  810.      while 'CAPCHA_NOT_READY' in recaptcha_response:
  811.          log.info("Captcha token is not ready, retrying in 5 seconds")
  812.          time.sleep(5)
  813.          recaptcha_response = s.get("http://2captcha.com/res.php?key={}&action=get&id={}".format(args.captcha_key, captcha_id)).text
  814.      token = str(recaptcha_response.split('|')[1])
  815.      return token
  816.  
  817. def calc_distance(pos1, pos2):
  818.     R = 6378.1  # km radius of the earth
  819.  
  820.     dLat = math.radians(pos1[0] - pos2[0])
  821.     dLon = math.radians(pos1[1] - pos2[1])
  822.  
  823.     a = math.sin(dLat / 2) * math.sin(dLat / 2) + \
  824.         math.cos(math.radians(pos1[0])) * math.cos(math.radians(pos2[0])) * \
  825.         math.sin(dLon / 2) * math.sin(dLon / 2)
  826.  
  827.     c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
  828.     d = R * c
  829.  
  830.     return d
  831.  
  832.  
  833. # Delay each thread start time so that logins only occur ~1s
  834. def stagger_thread(args, account):
  835.     if args.accounts.index(account) == 0:
  836.         return  # No need to delay the first one
  837.     delay = args.accounts.index(account) + ((random.random() - .5) / 2)
  838.     log.debug('Delaying thread startup for %.2f seconds', delay)
  839.     time.sleep(delay)
  840.  
  841.  
class TooManyLoginAttempts(Exception):
    """Raised by check_login() when login keeps failing with AuthException
    after the configured number of retries (args.login_retries)."""
    pass
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement