import csv
import datetime
import statistics

import requests

import api_config

# Extract configuration from the local api_config module
username = api_config.username
password = api_config.password
auth_token = api_config.auth_token
event_ids = api_config.event_ids
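
# A minimal api_config.py might look like the sketch below. All values are
# placeholders, not real credentials; auth_token appears to be the
# base64-encoded "consumer_key:consumer_secret" pair that StubHub's API
# expected for Basic authentication.
#
#   username = 'you@example.com'
#   password = 'your-stubhub-password'
#   auth_token = 'BASE64_CONSUMER_KEY_AND_SECRET'
#   event_ids = ['9873482', '9873483']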

# Prepare information for API requests
login_url = 'https://api.stubhub.com/login'
inventory_url = 'https://api.stubhub.com/search/inventory/v2'

headers = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'Authorization': 'Basic ' + auth_token,
}
body = {
    'grant_type': 'password',
    'username': username,
    'password': password,
    'scope': 'PRODUCTION',
}

r = requests.post(login_url, headers=headers, data=body)
r.raise_for_status()  # fail fast if the login request is rejected

token_response = r.json()
access_token = token_response['access_token']
user_GUID = r.headers['X-StubHub-User-GUID']

# Reuse the headers dict for the inventory requests, swapping the Basic
# credentials for the bearer token
headers['Authorization'] = 'Bearer ' + access_token
headers['Accept'] = 'application/json'
cur_time = datetime.datetime.now()
csv_rows = []

for event_id in event_ids:
    prices = []

    is_scraping_incomplete = True
    params = {'eventid': event_id, 'rows': 250, 'start': 1}

    while is_scraping_incomplete:
        try:
            inventory = requests.get(inventory_url, headers=headers, params=params)
            inv = inventory.json()
            listings = inv['listing']
        except (requests.RequestException, ValueError, KeyError):
            # Stop paging on a network error, a non-JSON response, or a
            # response with no 'listing' key instead of looping forever
            break

        cur_prices = [listing['currentPrice']['amount'] for listing in listings]
        prices = prices + cur_prices

        # Scraping is complete once a page returns fewer than the maximum
        # of 250 listings
        is_scraping_incomplete = len(cur_prices) == 250

        # advance the starting position to the next page of results
        params['start'] += 250

    if not prices:
        continue  # nothing retrieved for this event, so no stats to record

    csv_rows.append([cur_time, min(prices), max(prices),
                     sum(prices) / len(prices), statistics.median(prices),
                     event_id])

# Add a header row only when prices.csv does not exist yet
try:
    open('prices.csv').close()
except FileNotFoundError:
    header_row = ['time', 'min', 'max', 'avg', 'median', 'event']
    csv_rows.insert(0, header_row)

with open('prices.csv', 'a', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(csv_rows)
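
# For reference, each appended CSV row looks like this (values below are
# illustrative placeholders, not real scrape results):
#
#   time,min,max,avg,median,event
#   2019-07-23 12:00:00.000000,45.0,310.0,98.7,82.5,9873482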