#!/usr/bin/python
# -*- coding: utf-8 -*-

'''
Search Architecture:
 - Have a list of accounts.
 - Create an "overseer" thread.
 - Search Overseer:
   - Tracks incoming new location values.
   - Tracks the "paused" state.
   - During a pause or on a new location, clears the current search queue.
   - Starts the search_worker threads.
 - Search Worker Threads each:
   - Have a unique API login.
   - Listen to the same Queue for areas to scan.
   - Can re-login as needed.
   - Push finds to the db queue and webhook queue.
'''
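
# Rough data flow, summarizing the architecture above:
#   new_location_queue --> search_overseer_thread --> search_items_queue(s)
#   account_queue <--> search_worker_thread(s) --> db_updates_queue, wh_queue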

import logging
import math
import os
import sys
import traceback
import random
import time
import geopy
import geopy.distance
import requests

from datetime import datetime
from threading import Thread
# This module is Python 2 (print statements, raw_input), so the stdlib
# queue module is the capitalized Queue.
from Queue import Queue, Empty

from pgoapi import PGoApi
from pgoapi.utilities import f2i
from pgoapi import utilities as util
from pgoapi.exceptions import AuthException

from pgoapi.hash_server import HashServer

from .models import parse_map, GymDetails, parse_gyms, MainWorker, WorkerStatus
from .fakePogoApi import FakePogoApi
from .utils import now
from .transform import get_new_coords
import schedulers

from .proxy import get_new_proxy

import terminalsize

log = logging.getLogger(__name__)

TIMESTAMP = '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'

# Apply a location jitter.
def jitterLocation(location=None, maxMeters=10):
    origin = geopy.Point(location[0], location[1])
    b = random.randint(0, 360)
    d = math.sqrt(random.random()) * (float(maxMeters) / 1000)
    destination = geopy.distance.distance(kilometers=d).destination(origin, b)
    return (destination.latitude, destination.longitude, location[2])
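
# Illustrative call (hypothetical coordinates, shown for clarity only):
#   jitterLocation((40.7580, -73.9855, 10.0)) -> e.g. (40.758062, -73.985446, 10.0)
# The sqrt() on random() spreads points uniformly over the disc's area
# rather than clustering them near the center; the altitude (third element)
# is passed through unchanged.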


# Thread to handle user input.
def switch_status_printer(display_type, current_page):
    # Get a reference to the root logger.
    mainlog = logging.getLogger()
    # Raise the first handler (the stream handler) to CRITICAL to suppress
    # its on-screen output.
    mainlog.handlers[0].setLevel(logging.CRITICAL)

    while True:
        # Wait for the user to press a key.
        command = raw_input()

        if command == '':
            # Switch between logging and display.
            if display_type[0] != 'logs':
                # Disable display, enable on-screen logging.
                mainlog.handlers[0].setLevel(logging.DEBUG)
                display_type[0] = 'logs'
                # If logs are coming in slowly, it can be hard to tell that
                # you switched. Make it clear.
                print 'Showing logs...'
            elif display_type[0] == 'logs':
                # Enable display, disable on-screen logging (except for
                # critical messages).
                mainlog.handlers[0].setLevel(logging.CRITICAL)
                display_type[0] = 'workers'
        elif command.isdigit():
            current_page[0] = int(command)
            mainlog.handlers[0].setLevel(logging.CRITICAL)
            display_type[0] = 'workers'
        elif command.lower() == 'f':
            mainlog.handlers[0].setLevel(logging.CRITICAL)
            display_type[0] = 'failedaccounts'


# Thread to print out the status of each worker.
def status_printer(threadStatus, search_items_queue_array, db_updates_queue, wh_queue, account_queue, account_failures):
    display_type = ["workers"]
    current_page = [1]

    # Start another thread to get user input.
    t = Thread(target=switch_status_printer,
               name='switch_status_printer',
               args=(display_type, current_page))
    t.daemon = True
    t.start()

    while True:
        time.sleep(1)

        if display_type[0] == 'logs':
            # In log display mode, we don't want to show anything.
            continue

        # Create a list to hold all the status lines, so they can be printed
        # all at once to reduce flicker.
        status_text = []

        if display_type[0] == 'workers':

            # Get the terminal size.
            width, height = terminalsize.get_terminal_size()
            # Queue and overseer take 2 lines. The switch message takes up 2
            # lines. Remove an extra 2 for things like screen status lines.
            usable_height = height - 6
            # Prevent people running terminals only 6 lines high from getting
            # a divide by zero.
            if usable_height < 1:
                usable_height = 1

            # Calculate total skipped items.
            skip_total = 0
            for item in threadStatus:
                if 'skip' in threadStatus[item]:
                    skip_total += threadStatus[item]['skip']

            # Print the queue length.
            search_items_queue_size = 0
            for i in range(0, len(search_items_queue_array)):
                search_items_queue_size += search_items_queue_array[i].qsize()

            status_text.append('Queues: {} search items, {} db updates, {} webhook. Total skipped items: {}. Spare accounts available: {}. Accounts on hold: {}'
                               .format(search_items_queue_size, db_updates_queue.qsize(), wh_queue.qsize(), skip_total, account_queue.qsize(), len(account_failures)))

            # Print the status of the overseer.
            status_text.append('{} Overseer: {}'.format(threadStatus['Overseer']['scheduler'], threadStatus['Overseer']['message']))

            # Calculate the total number of pages. Subtracting for the overseer.
            total_pages = math.ceil((len(threadStatus) - 1 - threadStatus['Overseer']['message'].count('\n')) /
                                    float(usable_height))

            # Prevent moving outside the valid range of pages.
            if current_page[0] > total_pages:
                current_page[0] = total_pages
            if current_page[0] < 1:
                current_page[0] = 1

            # Calculate which lines to print.
            start_line = usable_height * (current_page[0] - 1)
            end_line = start_line + usable_height
            current_line = 1

            # Find the longest username and proxy.
            userlen = 4
            proxylen = 5
            for item in threadStatus:
                if threadStatus[item]['type'] == 'Worker':
                    userlen = max(userlen, len(threadStatus[item]['username']))
                    if 'proxy_display' in threadStatus[item]:
                        proxylen = max(proxylen, len(str(threadStatus[item]['proxy_display'])))

            # How pretty.
            status = '{:10} | {:5} | {:' + str(userlen) + '} | {:' + str(proxylen) + '} | {:7} | {:6} | {:5} | {:7} | {:10}'

            # Print the worker status.
            status_text.append(status.format('Worker ID', 'Start', 'User', 'Proxy', 'Success', 'Failed', 'Empty', 'Skipped', 'Message'))
            for item in sorted(threadStatus):
                if threadStatus[item]['type'] == 'Worker':
                    current_line += 1

                    # Skip over items that don't belong on this page.
                    if current_line < start_line:
                        continue
                    if current_line > end_line:
                        break

                    status_text.append(status.format(item, time.strftime('%H:%M', time.localtime(threadStatus[item]['starttime'])), threadStatus[item]['username'], threadStatus[item]['proxy_display'], threadStatus[item]['success'], threadStatus[item]['fail'], threadStatus[item]['noitems'], threadStatus[item]['skip'], threadStatus[item]['message']))

        elif display_type[0] == 'failedaccounts':
            status_text.append('-----------------------------------------')
            status_text.append('Accounts on hold:')
            status_text.append('-----------------------------------------')

            # Find the longest account name.
            userlen = 4
            for account in account_failures:
                userlen = max(userlen, len(account['account']['username']))

            status = '{:' + str(userlen) + '} | {:10} | {:20}'
            status_text.append(status.format('User', 'Hold Time', 'Reason'))

            for account in account_failures:
                status_text.append(status.format(account['account']['username'], time.strftime('%H:%M:%S', time.localtime(account['last_fail_time'])), account['reason']))

        # Print the status_text for the current screen.
        status_text.append('Page {}/{}. Enter a page number to switch pages, F to show accounts on hold, or <ENTER> alone to switch between status and log view.'.format(current_page[0], total_pages))
        # Clear the screen.
        os.system('cls' if os.name == 'nt' else 'clear')
        # Print status.
        print "\n".join(status_text)


# The account recycler monitors failed accounts and places them back in the
# account queue once the configured rest interval (-ari/--account-rest-interval)
# has elapsed. This allows accounts that were soft-banned to be retried after
# giving them a chance to cool down.
def account_recycler(accounts_queue, account_failures, args):
    while True:
        # Run once a minute.
        time.sleep(60)
        log.info('Account recycler running. Checking status of {} accounts'.format(len(account_failures)))

        # Create a new copy of the failure list to search through, so we can
        # iterate through it without it changing.
        failed_temp = list(account_failures)

        # Search through the list for any item whose last failure was more
        # than -ari/--account-rest-interval seconds ago.
        ok_time = now() - args.account_rest_interval
        for a in failed_temp:
            if a['last_fail_time'] <= ok_time:
                # Remove the account from the real list, and add it to the account queue.
                log.info('Account {} returning to active duty.'.format(a['account']['username']))
                account_failures.remove(a)
                accounts_queue.put(a['account'])
            else:
                if 'notified' not in a:
                    log.info('Account {} needs to cool off for {} minutes due to {}'.format(a['account']['username'], round((a['last_fail_time'] - ok_time) / 60, 0), a['reason']))
                    a['notified'] = True
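
# For reference, each account_failures entry used throughout this module has
# the shape:
#   {'account': <account dict with username/password/auth_service>,
#    'last_fail_time': <epoch seconds from now()>,
#    'reason': <short string, e.g. 'failures', 'empty scans', 'exception'>}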


def worker_status_db_thread(threads_status, name, db_updates_queue):

    while True:
        workers = {}
        overseer = None
        for status in threads_status.values():
            if status['type'] == 'Overseer':
                overseer = {
                    'worker_name': name,
                    'message': status['message'],
                    'method': status['scheduler'],
                    'last_modified': datetime.utcnow()
                }
            elif status['type'] == 'Worker':
                workers[status['username']] = WorkerStatus.db_format(status, name)
        if overseer is not None:
            db_updates_queue.put((MainWorker, {0: overseer}))
            db_updates_queue.put((WorkerStatus, workers))
        time.sleep(3)


# The main search loop that keeps an eye on the overall process.
def search_overseer_thread(args, new_location_queue, pause_bit, heartb, db_updates_queue, wh_queue):

    log.info('Search overseer starting')

    search_items_queue_array = []
    scheduler_array = []
    account_queue = Queue()
    threadStatus = {}

    '''
    Create a queue of accounts for workers to pull from. When a worker has
    failed too many times, it can get a new account from the queue and
    reinitialize the API. Workers should return accounts to the queue so
    they can be tried again later, but must wait a bit before doing so to
    prevent accounts from being cycled through too quickly.
    '''
    for i, account in enumerate(args.accounts):
        account_queue.put(account)

    # Create a list for failed accounts.
    account_failures = []

    threadStatus['Overseer'] = {
        'message': 'Initializing',
        'type': 'Overseer',
        'scheduler': args.scheduler
    }

    if args.print_status:
        log.info('Starting status printer thread')
        t = Thread(target=status_printer,
                   name='status_printer',
                   args=(threadStatus, search_items_queue_array, db_updates_queue, wh_queue, account_queue, account_failures))
        t.daemon = True
        t.start()

    # Create the account recycler thread.
    log.info('Starting account recycler thread')
    t = Thread(target=account_recycler, name='account-recycler', args=(account_queue, account_failures, args))
    t.daemon = True
    t.start()

    if args.status_name is not None:
        log.info('Starting status database thread')
        t = Thread(target=worker_status_db_thread,
                   name='status_worker_db',
                   args=(threadStatus, args.status_name, db_updates_queue))
        t.daemon = True
        t.start()

    # Create the specified number of search_worker_thread threads.
    log.info('Starting search worker threads')
    for i in range(0, args.workers):
        log.debug('Starting search worker thread %d', i)

        if i == 0 or (args.beehive and i % args.workers_per_hive == 0):
            search_items_queue = Queue()
            # Create the appropriate type of scheduler to handle the search queue.
            scheduler = schedulers.SchedulerFactory.get_scheduler(args.scheduler, [search_items_queue], threadStatus, args)

            scheduler_array.append(scheduler)
            search_items_queue_array.append(search_items_queue)

        # Set a proxy for each worker, using round robin.
        proxy_display = 'No'
        proxy_url = False  # Will be assigned inside a search thread.

        workerId = 'Worker {:03}'.format(i)
        threadStatus[workerId] = {
            'type': 'Worker',
            'message': 'Creating thread...',
            'success': 0,
            'fail': 0,
            'noitems': 0,
            'skip': 0,
            'username': '',
            'proxy_display': proxy_display,
            'proxy_url': proxy_url
        }

        t = Thread(target=search_worker_thread,
                   name='search-worker-{}'.format(i),
                   args=(args, account_queue, account_failures, search_items_queue, pause_bit,
                         threadStatus[workerId],
                         db_updates_queue, wh_queue, scheduler))
        t.daemon = True
        t.start()

    # A place to track the current location.
    current_location = False

    # The real work starts here but will halt on pause_bit.set().
    while True:

        if args.on_demand_timeout > 0 and (now() - args.on_demand_timeout) > heartb[0]:
            pause_bit.set()
            log.info('Searching paused due to inactivity...')

        # Wait here while scanning is paused.
        while pause_bit.is_set():
            for i in range(0, len(scheduler_array)):
                scheduler_array[i].scanning_paused()
            time.sleep(1)

        # If a new location has been passed to us, get the most recent one.
        if not new_location_queue.empty():
            log.info('New location caught, moving search grid')
            try:
                while True:
                    current_location = new_location_queue.get_nowait()
            except Empty:
                pass

            step_distance = 0.9 if args.no_pokemon else 0.07

            locations = generate_hive_locations(current_location, step_distance, args.step_limit, len(scheduler_array))

            for i in range(0, len(scheduler_array)):
                scheduler_array[i].location_changed(locations[i], db_updates_queue)

        # If a search_items_queue is empty, the loop has either finished or
        # been cleared above -- either way, it's time to fill it back up.
        for i in range(0, len(scheduler_array)):
            if scheduler_array[i].time_to_refresh_queue():
                threadStatus['Overseer']['message'] = 'Search queue {} empty, scheduling more items to scan'.format(i)
                log.debug('Search queue %d empty, scheduling more items to scan', i)
                try:  # Can't have the scheduler die because of a DB deadlock.
                    scheduler_array[i].schedule()
                except Exception as e:
                    log.error('Schedule creation had an Exception: {}'.format(e))
                    traceback.print_exc(file=sys.stdout)
                    time.sleep(10)
            else:
                threadStatus['Overseer']['message'] = scheduler_array[i].get_overseer_message()

        # Now we just give a little pause here.
        time.sleep(1)


# Generates the list of locations to scan.
def generate_hive_locations(current_location, step_distance, step_limit, hive_count):
    NORTH = 0
    EAST = 90
    SOUTH = 180
    WEST = 270

    xdist = math.sqrt(3) * step_distance  # Distance between column centers.
    ydist = 3 * (step_distance / 2)  # Distance between row centers.

    results = []

    results.append((current_location[0], current_location[1], 0))

    loc = current_location
    ring = 1

    while len(results) < hive_count:

        loc = get_new_coords(loc, ydist * (step_limit - 1), NORTH)
        loc = get_new_coords(loc, xdist * (1.5 * step_limit - 0.5), EAST)
        results.append((loc[0], loc[1], 0))

        for i in range(ring):
            loc = get_new_coords(loc, ydist * step_limit, NORTH)
            loc = get_new_coords(loc, xdist * (1.5 * step_limit - 1), WEST)
            results.append((loc[0], loc[1], 0))

        for i in range(ring):
            loc = get_new_coords(loc, ydist * (step_limit - 1), SOUTH)
            loc = get_new_coords(loc, xdist * (1.5 * step_limit - 0.5), WEST)
            results.append((loc[0], loc[1], 0))

        for i in range(ring):
            loc = get_new_coords(loc, ydist * (2 * step_limit - 1), SOUTH)
            loc = get_new_coords(loc, xdist * 0.5, WEST)
            results.append((loc[0], loc[1], 0))

        for i in range(ring):
            loc = get_new_coords(loc, ydist * step_limit, SOUTH)
            loc = get_new_coords(loc, xdist * (1.5 * step_limit - 1), EAST)
            results.append((loc[0], loc[1], 0))

        for i in range(ring):
            loc = get_new_coords(loc, ydist * (step_limit - 1), NORTH)
            loc = get_new_coords(loc, xdist * (1.5 * step_limit - 0.5), EAST)
            results.append((loc[0], loc[1], 0))

        # Back to start.
        for i in range(ring - 1):
            loc = get_new_coords(loc, ydist * (2 * step_limit - 1), NORTH)
            loc = get_new_coords(loc, xdist * 0.5, EAST)
            results.append((loc[0], loc[1], 0))

        loc = get_new_coords(loc, ydist * (2 * step_limit - 1), NORTH)
        loc = get_new_coords(loc, xdist * 0.5, EAST)

        ring += 1

    return results
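
# Each pass through the while loop above adds one complete hexagonal ring of
# hives: 1 corner append, five loops of `ring` appends, and `ring - 1` more
# on the way back to the start, i.e. 6 * ring locations per ring. For
# example, hive_count = 7 yields the center hive plus one ring of 6, and
# hive_count = 19 adds a second ring of 12 on top of that.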


def search_worker_thread(args, account_queue, account_failures, search_items_queue, pause_bit, status, dbq, whq, scheduler):

    log.debug('Search worker thread starting')

    # The outer forever loop restarts only when the inner one is intentionally
    # exited - which should only be done when the worker is failing too often,
    # and probably banned. This reinitializes the API and grabs a new account
    # from the queue.
    while True:
        try:
            status['starttime'] = now()

            # Get an account.
            status['message'] = 'Waiting to get new account from the queue'
            log.info(status['message'])
            # Make sure the scheduler is ready with valid locations.
            while not scheduler.ready:
                time.sleep(1)

            account = account_queue.get()
            status.update(WorkerStatus.get_worker(account['username'], scheduler.scan_location))
            status['message'] = 'Switching to account {}'.format(account['username'])
            log.info(status['message'])

            stagger_thread(args, account)

            # New lease of life right here.
            status['fail'] = 0
            status['success'] = 0
            status['noitems'] = 0
            status['skip'] = 0

            # Sleep when consecutive_fails reaches max_failures; the overall
            # fail count is kept for stat purposes.
            consecutive_fails = 0

            # Sleep when consecutive_noitems reaches max_empty; the overall
            # noitems count is kept for stat purposes.
            consecutive_noitems = 0

            # Create the API instance this will use.
            if args.mock != '':
                api = FakePogoApi(args.mock)
                log.info('Created mock API instance.')
            else:
                api = PGoApi()
                if args.hash_key:
                    api.activate_hash_server(args.hash_key)
                    log.info('Created API instance using hash server.')

            # New account - new proxy.
            if args.proxy:
                # If a proxy is not assigned yet, or if proxy rotation is
                # enabled, query for a new proxy.
                if (not status['proxy_url']) or \
                   ((args.proxy_rotation is not None) and (args.proxy_rotation != 'none')):

                    proxy_num, status['proxy_url'] = get_new_proxy(args)
                    if args.proxy_display.upper() != 'FULL':
                        status['proxy_display'] = proxy_num
                    else:
                        status['proxy_display'] = status['proxy_url']

            if status['proxy_url']:
                log.debug('Using proxy %s', status['proxy_url'])
                api.set_proxy({'http': status['proxy_url'], 'https': status['proxy_url']})

            # The forever loop for the searches.
            while True:

                while pause_bit.is_set():
                    status['message'] = 'Scanning paused'
                    time.sleep(2)

                if args.hash_key:
                    log.info('Hashes: {r}/{m}'.format(r=HashServer.status.get('remaining'), m=HashServer.status.get('maximum')))

                # If this account has been messing up too hard, let it rest.
                if (args.max_failures > 0) and (consecutive_fails >= args.max_failures):
                    status['message'] = 'Account {} failed more than {} scans; possibly a bad account. Switching accounts...'.format(account['username'], args.max_failures)
                    log.warning(status['message'])
                    account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'failures'})
                    break  # Exit this loop to get a new account and have the API recreated.

                # If this account hasn't found anything for too long, let it rest.
                if (args.max_empty > 0) and (consecutive_noitems >= args.max_empty):
                    status['message'] = 'Account {} returned empty scans more than {} times; possibly the IP is banned. Switching accounts...'.format(account['username'], args.max_empty)
                    log.warning(status['message'])
                    account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'empty scans'})
                    break  # Exit this loop to get a new account and have the API recreated.

                # If the proxy in use disappears from the "live list" after
                # background checking, switch accounts, but do NOT freeze the
                # account (it's not an account failure).
                if args.proxy and status['proxy_url'] not in args.proxy:
                    status['message'] = 'Account {} proxy {} is no longer in the live list. Switching accounts...'.format(account['username'], status['proxy_url'])
                    log.warning(status['message'])
                    account_queue.put(account)  # Experimental: return the account directly to the queue.
                    break  # Exit this loop to get a new account and have the API recreated.

                # If this account has been running too long, let it rest.
                if args.account_search_interval is not None:
                    if status['starttime'] <= (now() - args.account_search_interval):
                        status['message'] = 'Account {} is being rotated out to rest.'.format(account['username'])
                        log.info(status['message'])
                        account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'rest interval'})
                        break

                # Grab the next thing to search (when available).
                step, step_location, appears, leaves, messages = scheduler.next_item(status)
                status['message'] = messages['wait']

                # Using step as a flag for no valid next location returned.
                if step == -1:
                    time.sleep(scheduler.delay(status['last_scan_date']))
                    continue

                # Too soon?
                if appears and now() < appears + 10:  # Adding a 10 second grace period.
                    first_loop = True
                    paused = False
                    while now() < appears + 10:
                        if pause_bit.is_set():
                            paused = True
                            break  # Why can't python just have `break 2`...
                        status['message'] = messages['early']
                        if first_loop:
                            log.info(status['message'])
                            first_loop = False
                        time.sleep(1)
                    if paused:
                        scheduler.task_done(status)
                        continue

                # Too late?
                if leaves and now() > (leaves - args.min_seconds_left):
                    scheduler.task_done(status)
                    status['skip'] += 1
                    # It is slightly silly to put this in status['message']
                    # since it'll be overwritten very shortly after. Oh well.
                    status['message'] = messages['late']
                    log.info(status['message'])
                    # No sleep here; we've not done anything worth sleeping
                    # for. Plus we clearly need to catch up!
                    continue

                status['message'] = messages['search']
                log.debug(status['message'])

                # Let the api know where we intend to be for this loop.
                # Doing this before check_login so it does not also have to
                # be done there when the auth token is refreshed.
                api.set_position(*step_location)

                # Ok, let's get started -- check our login status.
                status['message'] = 'Logging in...'
                check_login(args, account, api, step_location, status['proxy_url'])

                # Putting this message after check_login so the messages
                # aren't out of order.
                status['message'] = messages['search']
                log.info(status['message'])

                # Make the actual request. (Finally!)
                scan_date = datetime.utcnow()
                response_dict = map_request(api, step_location, args.jitter)
                status['last_scan_date'] = datetime.utcnow()

                # Record the time and place the worker made the request at.
                status['latitude'] = step_location[0]
                status['longitude'] = step_location[1]
                dbq.put((WorkerStatus, {0: WorkerStatus.db_format(status)}))

                # G'damnit, nothing back. Mark it up, sleep, carry on.
                if not response_dict:
                    status['fail'] += 1
                    consecutive_fails += 1
                    status['message'] = messages['invalid']
                    log.error(status['message'])
                    time.sleep(scheduler.delay(status['last_scan_date']))
                    continue

                # Got the response; check for a captcha, parse it out, then
                # send the todos to the db/wh queues.
                try:
                    # Captcha check.
                    if args.captcha_solving:
                        captcha_url = response_dict['responses']['CHECK_CHALLENGE']['challenge_url']
                        if len(captcha_url) > 1:
                            status['message'] = 'Account {} is encountering a captcha, starting 2captcha sequence'.format(account['username'])
                            log.warning(status['message'])
                            captcha_token = token_request(args, status, captcha_url)
                            if 'ERROR' in captcha_token:
                                log.warning('Unable to resolve captcha, please check your 2captcha API key and/or wallet balance')
                                account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'captcha failed to verify'})
                                break
                            else:
                                status['message'] = 'Retrieved captcha token, attempting to verify challenge for {}'.format(account['username'])
                                log.info(status['message'])
                                response = api.verify_challenge(token=captcha_token)
                                if 'success' in response['responses']['VERIFY_CHALLENGE']:
                                    status['message'] = "Account {} successfully uncaptcha'd".format(account['username'])
                                    log.info(status['message'])
                                    scan_date = datetime.utcnow()
                                    # Make another request for the same coordinate since the previous one was captcha'd.
                                    response_dict = map_request(api, step_location, args.jitter)
                                    status['last_scan_date'] = datetime.utcnow()
                                else:
                                    status['message'] = 'Account {} failed verifyChallenge, putting away account for now'.format(account['username'])
                                    log.info(status['message'])
                                    account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'captcha failed to verify'})
                                    break

                    parsed = parse_map(args, response_dict, step_location, dbq, whq, api, scan_date)
                    scheduler.task_done(status, parsed)
                    if parsed['count'] > 0:
                        status['success'] += 1
                        consecutive_noitems = 0
                    else:
                        status['noitems'] += 1
                        consecutive_noitems += 1
                    consecutive_fails = 0
                    status['message'] = 'Search at {:6f},{:6f} completed with {} finds'.format(step_location[0], step_location[1], parsed['count'])
                    log.debug(status['message'])
                except Exception as e:
                    parsed = False
                    status['fail'] += 1
                    consecutive_fails += 1
                    # Deliberately not resetting consecutive_noitems here, so
                    # the empty-scan counter survives parse errors.
                    status['message'] = 'Map parse failed at {:6f},{:6f}, abandoning location. {} may be banned.'.format(step_location[0], step_location[1], account['username'])
                    log.exception('{}. Exception message: {}'.format(status['message'], e))

                # Get detailed information about gyms.
                if args.gym_info and parsed:
                    # Build up a list of gyms to update.
                    gyms_to_update = {}
                    for gym in parsed['gyms'].values():
                        # Can only get gym details within 1km of our position.
                        distance = calc_distance(step_location, [gym['latitude'], gym['longitude']])
                        if distance < 1:
                            # Check if we already have details on this gym.
                            # (If not, get them.)
                            try:
                                record = GymDetails.get(gym_id=gym['gym_id'])
                            except GymDetails.DoesNotExist:
                                gyms_to_update[gym['gym_id']] = gym
                                continue

                            # If we have a record of this gym already, check
                            # if the gym has been updated since our last
                            # update.
                            if record.last_scanned < gym['last_modified']:
                                gyms_to_update[gym['gym_id']] = gym
                                continue
                            else:
                                log.debug('Skipping update of gym @ %f/%f, up to date', gym['latitude'], gym['longitude'])
                                continue
                        else:
                            log.debug('Skipping update of gym @ %f/%f, too far away from our location at %f/%f (%fkm)', gym['latitude'], gym['longitude'], step_location[0], step_location[1], distance)

                    if len(gyms_to_update):
                        gym_responses = {}
                        current_gym = 1
                        status['message'] = 'Updating {} gyms for location {},{}...'.format(len(gyms_to_update), step_location[0], step_location[1])
                        log.debug(status['message'])

                        for gym in gyms_to_update.values():
                            status['message'] = 'Getting details for gym {} of {} for location {:6f},{:6f}...'.format(current_gym, len(gyms_to_update), step_location[0], step_location[1])
                            time.sleep(random.random() + 2)
                            response = gym_request(api, step_location, gym)

                            # Make sure the gym was in range. (Sometimes the
                            # API gets cranky about gyms that are ALMOST 1km
                            # away.)
                            if response['responses']['GET_GYM_DETAILS']['result'] == 2:
                                # Recompute the distance for this gym; the
                                # value from the earlier loop is stale.
                                distance = calc_distance(step_location, [gym['latitude'], gym['longitude']])
                                log.warning('Gym @ %f/%f is out of range (%dkm), skipping', gym['latitude'], gym['longitude'], distance)
                            else:
                                gym_responses[gym['gym_id']] = response['responses']['GET_GYM_DETAILS']

                            # Increment which gym we're on. (For status messages.)
                            current_gym += 1

                        status['message'] = 'Processing details of {} gyms for location {:6f},{:6f}...'.format(len(gyms_to_update), step_location[0], step_location[1])
                        log.debug(status['message'])

                        if gym_responses:
                            parse_gyms(args, gym_responses, whq, dbq)

                # Delay the desired amount after "scan" completion.
                delay = scheduler.delay(status['last_scan_date'])
                status['message'] += ', sleeping {}s until {}'.format(delay, time.strftime('%H:%M:%S', time.localtime(time.time() + delay)))

                time.sleep(delay)

        # Catch any process exceptions, log them, and continue the thread.
        except Exception as e:
            log.error('Exception in search_worker under account {}. Exception message: {}'.format(account['username'], e))
            status['message'] = 'Exception in search_worker using account {}. Restarting with fresh account. See logs for details.'.format(account['username'])
            traceback.print_exc(file=sys.stdout)
            account_failures.append({'account': account, 'last_fail_time': now(), 'reason': 'exception'})
            time.sleep(args.scan_delay)


def check_login(args, account, api, position, proxy_url):

    # Logged in? Enough time left? Cool!
    if api._auth_provider and api._auth_provider._ticket_expire:
        remaining_time = api._auth_provider._ticket_expire / 1000 - time.time()
        if remaining_time > 60:
            log.debug('Credentials remain valid for another %f seconds', remaining_time)
            return

    # Try to login. (A few times, but don't get stuck here.)
    i = 0
    while i < args.login_retries:
        try:
            if proxy_url:
                api.set_authentication(provider=account['auth_service'], username=account['username'], password=account['password'], proxy_config={'http': proxy_url, 'https': proxy_url})
            else:
                api.set_authentication(provider=account['auth_service'], username=account['username'], password=account['password'])
            break
        except AuthException:
            i += 1
            # Give up once the configured number of retries is exhausted.
            if i >= args.login_retries:
                raise TooManyLoginAttempts('Exceeded login attempts')
            log.error('Failed to login to Pokemon Go with account %s. Trying again in %g seconds', account['username'], args.login_delay)
            time.sleep(args.login_delay)

    log.debug('Login for account %s successful', account['username'])
    time.sleep(20)


def map_request(api, position, jitter=False):
    # Create scan_location to send to the api based off of position, because
    # tuples aren't mutable.
    if jitter:
        # Jitter it, just a little bit.
        scan_location = jitterLocation(position)
        log.debug('Jittered to: %f/%f/%f', scan_location[0], scan_location[1], scan_location[2])
    else:
        # Just use the original coordinates.
        scan_location = position

    try:
        cell_ids = util.get_cell_ids(scan_location[0], scan_location[1])
        timestamps = [0, ] * len(cell_ids)
        req = api.create_request()
        response = req.get_map_objects(latitude=f2i(scan_location[0]),
                                       longitude=f2i(scan_location[1]),
                                       since_timestamp_ms=timestamps,
                                       cell_id=cell_ids)
        response = req.check_challenge()
        response = req.get_hatched_eggs()
        response = req.get_inventory()
        response = req.check_awarded_badges()
        response = req.download_settings()
        response = req.get_buddy_walked()
        response = req.call()
        return response

    except Exception as e:
        log.warning('Exception while downloading map: %s', e)
        return False
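
# Note: each chained call above queues another sub-request on the same RPC
# envelope, and req.call() sends them as a single batch. The returned dict
# nests each sub-request's payload under response['responses'], e.g.
# response['responses']['CHECK_CHALLENGE'] as read by the captcha check in
# search_worker_thread.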


def gym_request(api, position, gym):
    try:
        log.debug('Getting details for gym @ %f/%f (%fkm away)', gym['latitude'], gym['longitude'], calc_distance(position, [gym['latitude'], gym['longitude']]))
        req = api.create_request()
        x = req.get_gym_details(gym_id=gym['gym_id'],
                                player_latitude=f2i(position[0]),
                                player_longitude=f2i(position[1]),
                                gym_latitude=gym['latitude'],
                                gym_longitude=gym['longitude'])
        x = req.check_challenge()
        x = req.get_hatched_eggs()
        x = req.get_inventory()
        x = req.check_awarded_badges()
        x = req.download_settings()
        x = req.get_buddy_walked()
        x = req.call()
        # Print pretty(x) here to debug the response.
        return x

    except Exception as e:
        log.warning('Exception while downloading gym details: %s', e)
        return False


def token_request(args, status, url):
    s = requests.Session()
    # Fetch the CAPTCHA_ID from 2captcha.
    try:
        captcha_id = s.post("http://2captcha.com/in.php?key={}&method=userrecaptcha&googlekey={}&pageurl={}".format(args.captcha_key, args.captcha_dsk, url)).text.split('|')[1]
        captcha_id = str(captcha_id)
    # An IndexError implies that the returned response was a 2captcha error.
    except IndexError:
        return 'ERROR'
    status['message'] = 'Retrieved captcha ID: {}; now retrieving token'.format(captcha_id)
    log.info(status['message'])
    # Get the response, retrying every 5 seconds if it's not ready.
    recaptcha_response = s.get("http://2captcha.com/res.php?key={}&action=get&id={}".format(args.captcha_key, captcha_id)).text
    while 'CAPCHA_NOT_READY' in recaptcha_response:
        log.info('Captcha token is not ready, retrying in 5 seconds')
        time.sleep(5)
        recaptcha_response = s.get("http://2captcha.com/res.php?key={}&action=get&id={}".format(args.captcha_key, captcha_id)).text
    token = str(recaptcha_response.split('|')[1])
    return token
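
# 2captcha replies are pipe-delimited: in.php answers something like
# 'OK|2122988149' and res.php 'OK|<token>', hence the split('|')[1] calls.
# Error replies such as 'ERROR_WRONG_USER_KEY' contain no '|', which is what
# raises the IndexError handled above for the first request. (And yes,
# 'CAPCHA_NOT_READY' really is spelled that way by the 2captcha API.)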


def calc_distance(pos1, pos2):
    R = 6378.1  # Radius of the earth in km.

    dLat = math.radians(pos1[0] - pos2[0])
    dLon = math.radians(pos1[1] - pos2[1])

    a = math.sin(dLat / 2) * math.sin(dLat / 2) + \
        math.cos(math.radians(pos1[0])) * math.cos(math.radians(pos2[0])) * \
        math.sin(dLon / 2) * math.sin(dLon / 2)

    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    d = R * c

    return d
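
# This is the standard haversine formula. Worked example:
# calc_distance((0, 0), (1, 0)) spans one degree of latitude, so
# c = radians(1) ~= 0.01745 and d = 6378.1 * 0.01745 ~= 111.3 km -- the
# familiar "~111 km per degree" rule of thumb.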


# Delay each thread's startup so that worker logins are spaced out by the
# login delay.
def stagger_thread(args, account):
    if args.accounts.index(account) == 0:
        return  # No need to delay the first one.
    delay = args.accounts.index(account) * args.login_delay + ((random.random() - .5) / 2)
    log.debug('Delaying thread startup for %.2f seconds', delay)
    time.sleep(delay)
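
# For example, with args.login_delay = 6, the account at index 2 in
# args.accounts sleeps roughly 12 s (plus up to +/-0.25 s of random fuzz)
# before its first login, so logins land about one login delay apart.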


class TooManyLoginAttempts(Exception):
    pass