Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- # import libraries
- import urllib2
- import sys
- import csv
- import re
- from datetime import datetime
- from bs4 import BeautifulSoup
- from urllib2 import Request, urlopen, HTTPError, URLError
# ANSI escape codes for colored terminal output.
CBLUE = '\33[34m'
CRED = '\033[91m'
CGRN = '\033[92m'
CEND = '\033[0m'

data = []  # NOTE(review): never used anywhere in this script — candidate for removal

# Require the phone-number argument; exit with a usage hint instead of
# dying with a raw IndexError traceback.
if len(sys.argv) < 2:
    sys.exit(CRED + '[x] ' + CEND + 'Usage: %s <phone-number>' % sys.argv[0])
phone = sys.argv[1]
def decodeEmail(e):
    """Decode a Cloudflare-obfuscated email address.

    The first hex byte of *e* is an XOR key; each following hex pair is
    one character of the address XORed with that key. A trailing odd
    nibble, if present, is ignored.
    """
    key = int(e[:2], 16)
    return "".join(
        chr(int(e[pos:pos + 2], 16) ^ key)
        for pos in range(2, len(e) - 1, 2)
    )
#######################################################################
# truepeoplesearch.com scraper: reverse-lookup `phone`, print each match's
# name/age/location, then follow its detail page for last address and
# any Cloudflare-obfuscated email addresses.
print("\n[?]" + CBLUE + " truepeoplesearch.com" + CEND)
page = 'https://www.truepeoplesearch.com/results?phoneno=' + phone
# Single UA constant instead of the same literal repeated per request.
UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'
try:
    request = urllib2.Request(page)
    request.add_header('User-Agent', UA)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'lxml')
    # NOTE(review): absence of the record-count div is treated as "results
    # present" — confirm against the site's current markup.
    if not soup.find('div', attrs={'class': 'row pl-1 record-count'}):
        information = []
        for person in soup.find_all('div', class_='card card-block shadow-form card-summary'):
            url = person['data-detail-link']
            # Guard every find(): a card missing a field would otherwise
            # raise an uncaught AttributeError on .text / .find_next.
            name_tag = person.find('div', class_='h4')
            name = name_tag.text.replace("\n", "").strip() if name_tag else "Unknown"
            age_tag = person.find('span', text='Age ')
            age = age_tag.find_next('span').text.replace("\n", "").strip() if age_tag else "Unknown"
            loc_tag = person.find('span', text='Lives in ')
            location = loc_tag.find_next('span').text if loc_tag else "Unknown"
            information.append([name, age, location, url])
        for name, age, location, url in information:
            print(CGRN + "[+] " + CRED + "CallerID: " + CEND + name + CRED + " Age: " + CEND + age)
            print(CGRN + "[+] " + CRED + "Location: " + CEND + location)
            # Fetch the per-person detail page.
            spider = "https://www.truepeoplesearch.com" + url
            request = urllib2.Request(spider)
            request.add_header('User-Agent', UA)
            page2 = urllib2.urlopen(request)
            soup2 = BeautifulSoup(page2, 'lxml')
            addy = soup2.find('a', href=re.compile(r'/results\?streetaddress=.*'))
            if addy:  # detail page may have no address link — skip instead of crashing
                location = addy.text.strip().replace("\n", ", ")
                print(CGRN + "[+] " + CRED + "LastAddr: " + CEND + location)
            for emails in soup2.find_all('a', class_='__cf_email__'):
                email = decodeEmail(emails['data-cfemail'])
                print(CGRN + "[+] " + CRED + "EmailAdr: " + CEND + email)
            print("")
    else:
        print(CRED + "[+] " + CEND + "No Data")  # no results found
# HTTPError is a subclass of URLError, so it must be handled first.
except HTTPError:
    print(CRED + '[x] ' + CEND + 'No Data.')
except URLError:
    print(CRED + '[x] ' + CEND + 'We failed to reach a server.')
#######################################################################
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement