import csv
import threading

from bs4 import BeautifulSoup
from urllib.request import urlopen as uReq


def scrape(n):
    # Each worker reads its own URL list (URLS1.txt ... URLS10.txt)
    # and writes its results to a matching output file.
    with open('URLS{}.txt'.format(n), newline='') as f_urls, \
            open('Output{}.txt'.format(n), 'w', newline='') as f_output:
        csv_urls = csv.reader(f_urls)
        txt_output = csv.writer(f_output)

        for line in csv_urls:
            try:
                # Fetch the page; the URL is the first column of the row.
                uClient = uReq(line[0])
                page_html = uClient.read()
                uClient.close()

                page_soup = BeautifulSoup(page_html, "html.parser")

                # The username sits in a <span> inside the "user_bar_l" div.
                containers = page_soup.findAll("div", {"id": "user_bar_l"})
                username = containers[0].span.text

                # The listing count is the ninth <b> element on the page.
                containers = page_soup.findAll("b")
                listings = containers[8].text

                print(username + "," + listings)
                txt_output.writerow([username, listings])

            except Exception:
                print("===============Error, Skipping this URL!===============")


def main():
    # Launch ten workers, one per URL file.
    for i in range(10):
        threading.Thread(target=scrape, args=(i + 1,)).start()


if __name__ == '__main__':
    main()
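
A minimal helper sketch, not part of the original paste: scrape() assumes pre-split input files URLS1.txt ... URLS10.txt already exist, one URL per CSV row. The snippet below shows one way to produce them from a hypothetical master file all_urls.txt (one URL per line), dealing URLs round-robin across the ten files to match the ten threads started in main().

import csv


def split_urls(master='all_urls.txt', workers=10):
    # Read the master list, skipping blank lines.
    # 'all_urls.txt' is an assumed filename, not from the original script.
    with open(master) as f:
        urls = [u.strip() for u in f if u.strip()]

    # Deal URLs round-robin into URLS1.txt ... URLS10.txt,
    # one URL per CSV row, which is the layout scrape() reads.
    for n in range(workers):
        with open('URLS{}.txt'.format(n + 1), 'w', newline='') as out:
            writer = csv.writer(out)
            for url in urls[n::workers]:
                writer.writerow([url])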