# Multi-threaded scraper: worker n reads URLs from URLS{n}.txt and writes
# "username,listings" rows to Output{n}.txt.
import csv
import threading

from bs4 import BeautifulSoup
from urllib.request import urlopen as uReq


def scrape(n):
    with open('URLS{}.txt'.format(n), newline='') as f_urls, \
            open('Output{}.txt'.format(n), 'w', newline='') as f_output:
        csv_urls = csv.reader(f_urls)
        txt_output = csv.writer(f_output)
        for line in csv_urls:
            try:
                uClient = uReq(line[0])
                page_html = uClient.read()
                uClient.close()
                page_soup = BeautifulSoup(page_html, "html.parser")
                # The username is the <span> inside the div with id "user_bar_l".
                container = page_soup.find("div", {"id": "user_bar_l"})
                username = container.span.text
                # The listings count is taken from the ninth <b> tag on the
                # page; this positional index breaks if the layout changes.
                listings = page_soup.find_all("b")[8].text
                print(username + "," + listings)
                txt_output.writerow([username, listings])
            except Exception:
                print("===============Error, Skipping this URL!===============")


def main():
    # One worker thread per numbered URL file (URLS1.txt .. URLS10.txt).
    for i in range(10):
        threading.Thread(target=scrape, args=(i + 1,)).start()


if __name__ == '__main__':
    main()
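
The script expects ten input files, URLS1.txt through URLS10.txt, each a CSV with one URL per row in the first column. A minimal sketch of a helper that distributes a master URL list across those files round-robin; the master file name urls_all.txt is an assumption, not part of the original paste:

# Hypothetical helper (not in the original paste): split a master URL list
# into the ten URLS{n}.txt files the scraper reads, round-robin.
import csv

with open('urls_all.txt', newline='') as f:  # assumed master-list name
    urls = [row[0] for row in csv.reader(f) if row]

for n in range(1, 11):
    with open('URLS{}.txt'.format(n), 'w', newline='') as f:
        writer = csv.writer(f)
        # Worker n gets every tenth URL, starting at offset n - 1.
        writer.writerows([u] for u in urls[n - 1::10])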