Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # -*- coding: utf-8 -*-
- import urllib2
- import sys
- import csv
- import re
- from datetime import datetime
- from bs4 import BeautifulSoup
- from urllib2 import Request, urlopen, HTTPError, URLError
- CBLUE = '\33[34m'
- CRED = '\033[91m'
- CGRN = '\033[92m'
- CEND = '\033[0m'
- print ""
- print ".▄▄ · ▄ •▄ ▪ ▄▄▄·▄▄▄▄▄▄▄▄ ▄▄▄· ▄▄· ▄▄▄ .▄▄▄ "
- print "▐█ ▀. █▌▄▌▪██ ▐█ ▄█•██ ▀▄ █·▐█ ▀█ ▐█ ▌▪▀▄.▀·▀▄ █·"
- print "▄▀▀▀█▄▐▀▀▄·▐█· ██▀· ▐█.▪▐▀▀▄ ▄█▀▀█ ██ ▄▄▐▀▀▪▄▐▀▀▄ "
- print "▐█▄▪▐█▐█.█▌▐█▌▐█▪·• ▐█▌·▐█•█▌▐█ ▪▐▌▐███▌▐█▄▄▌▐█•█▌"
- print " ,.-~*´¨¯¨`*·~-.¸-(by: illwill)-,.-~*´¨¯¨`*·~-.¸ \n"
- if len (sys.argv) <= 1 :
- print(CRED + "Usage:" + CEND +" python "+sys.argv[0]+" <phonenumber> <filename>")
- print(CRED + "Example:" + CEND +" python "+sys.argv[0]+" 2036665555 result")
- print(CRED + "(you can omit filename if you dont want output to .csv)" + CEND + "\n")
- sys.exit (1)
- else:
- phone = sys.argv[1]
- data = []
- dashphone = '{}-{}-{}'.format(phone[0:3], phone[3:6], phone[6:]) #if site needs dashes in phone number
- UserAgent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'
#######################################################################
# decode hidden cloudflare email obfuscation function used on some sites
# https://support.cloudflare.com/hc/en-us/articles/200170016-What-is-Email-Address-Obfuscation-
#######################################################################
def decodeEmail(e):
    """Decode a Cloudflare-obfuscated email string.

    The first hex byte of *e* is an XOR key; every following hex pair
    is one XOR-ed character of the plaintext address.
    """
    key = int(e[:2], 16)
    return "".join(chr(int(e[i:i + 2], 16) ^ key) for i in range(2, len(e) - 1, 2))
#######################################################################
# 411.info scraper - returns CallerID and Location                    #
#######################################################################
page = 'https://411.info/reverse/?r=' + phone
print("\n[?]" + CBLUE + " 411.info" + CEND)
try:
    # Send the same browser User-Agent the other scrapers use so the
    # site serves the normal page (the original omitted it here only).
    request = urllib2.Request(page)
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')
    name_box = soup.find('div', attrs={'class': 'cname'})
    name = name_box.text.strip()
    print(CGRN + "[+] " + CRED + "CallerID: " + CEND + name)

    def get_addrs():
        """Collect every postal address shown for this number."""
        addrs = []
        for itemText in soup.find_all('div', attrs={'class': 'adr_b'}):
            street = str(itemText.find('span', itemprop='streetAddress').text).replace("\t", "").strip()
            town = itemText.find('span', itemprop='addressLocality').text
            state = itemText.find('span', itemprop='addressRegion').text
            __zip = itemText.find('span', itemprop='postalCode').text
            addrs.append(street + ", " + town + ", " + state + ", " + __zip)
        return addrs

    for location in get_addrs():
        print(CGRN + "[+] " + CRED + "Location: " + CEND + location)
        data.append((name, location))  # saved for the optional CSV export
except AttributeError:
    # soup.find() returned None: page had no result markup for this number
    print (CRED + '[x] ' + CEND + 'No Data.')
except HTTPError:
    print (CRED + '[x] ' + CEND + 'No Data.')
except URLError:
    print (CRED + '[x] ' + CEND + 'We failed to reach a server.')
#######################################################################
#usphonebook.com scraper - returns CallerID/Location/PhoneType/Carrier#
#######################################################################
page = 'https://www.usphonebook.com/' + dashphone + '/detailed'  # this site wants the dashed form
print("\n[?]" + CBLUE + " usphonebook.com" + CEND)
try:
    request = urllib2.Request(page)
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')
    # Caller name is split into given/family name spans; join them.
    name_parts = []
    for prop in ("givenName", "familyName"):
        name_parts.append(soup.find("strong", itemprop=prop).text)
    s = ' '.join(name_parts)
    print(CGRN + "[+] " + CRED + "CallerID: " + CEND + s)
    # BUG FIX: the original assigned this ResultSet to the module-level
    # `data` list, destroying every result gathered so far and later
    # breaking the CSV export loop. Use a local name instead.
    fields = soup.find_all('p', attrs={'class': 'ls_contacts__text'})
    street_addr = str(fields[0].text).strip()
    town_zip = str(fields[1].text).replace('\n\n', ' ').replace('\n', '')
    location = street_addr + town_zip
    phonetype = str(fields[2].text).strip()
    carrier = str(fields[3].text).strip()
    # BUG FIX: this line was mislabeled "CallerID" in the original.
    print(CGRN + "[+] " + CRED + "Location: " + CEND + location)
    print(CGRN + "[+] " + CRED + "PhoneType: " + CEND + phonetype)
    print(CGRN + "[+] " + CRED + "Carrier: " + CEND + carrier)
    # save the result tuple (original appended an empty location)
    data.append((s, location))
except AttributeError:
    # soup.find() returned None: no result markup on the page
    print (CRED + '[x] ' + CEND + 'No Data.')
except HTTPError:
    print (CRED + '[x] ' + CEND + 'No Data.')
except URLError:
    print (CRED + '[x] ' + CEND + 'We failed to reach a server.')
#######################################################################
# Whocalld.com scraper - returns CallerID/Location/PhoneType          #
#######################################################################
page = 'https://whocalld.com/+1' + phone
print("\n[?]" + CBLUE + " WhoCalld.com" + CEND)
try:
    request = urllib2.Request(page)
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')
    name_box = soup.find('h2', attrs={'class': 'name'})  # CallerID name if available
    name = name_box.text.strip()
    if name:
        print(CGRN + "[+] " + CRED + "CallerID: " + CEND + name)
    location_box = soup.find('h3', attrs={'class': 'location'})  # location if available
    location = location_box.text
    if location:
        print(CGRN + "[+] " + CRED + "Location: " + CEND + location)
    phone_type = soup.find("img").attrs['alt']  # phone type if available
    if phone_type:
        # BUG FIX: label was misspelled "PhonType" in the original.
        print(CGRN + "[+] " + CRED + "PhoneType: " + CEND + phone_type)
    data.append((name, location))  # saved for the optional CSV export
    # Spider the separate ?carrier page for carrier details.
    request = urllib2.Request('https://whocalld.com/+1' + phone + '?carrier')
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')
    carrier = soup.find('span', attrs={'class': 'carrier'}).text
    city = soup.find('span', attrs={'class': 'city'}).text
    state = soup.find('span', attrs={'class': 'state'}).text
    time = soup.find('span', attrs={'class': 'time'}).text
    print (CGRN + "[+] " + CRED + "Carrier: " + CEND + "%s - %s, %s on %s" % (carrier, city, state, time))
except AttributeError:
    # a soup.find() returned None: result markup missing for this number
    print (CRED + '[x] ' + CEND + 'No Data.')
except HTTPError:
    print (CRED + '[x] ' + CEND + 'No Data.')  # Nothing returned.
except URLError:
    print (CRED + '[x] ' + CEND + 'We failed to reach a server.')  # website down or offline
#######################################################################
#advancedbackgroundchecks.com scraper return CallerID,Location,Emails #
#######################################################################
print("\n[?]" + CBLUE + " advancedbackgroundchecks.com" + CEND)
page = 'https://www.advancedbackgroundchecks.com/phone/' + phone
try:
    request = urllib2.Request(page)
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')

    def get_names():
        """Caller names from the current result page."""
        names = []
        for result in soup.find_all('div', attrs={'class': 'divCol name'}):
            names.append(result.find('div', attrs={'class': 'resultName'}).text.strip())
        return names

    def get_addrs():
        """Comma-joined street addresses from the current result page."""
        addrs = []
        for itemText in soup.find_all('div', attrs={'class': 'divCol address'}):
            street = itemText.find('span', itemprop='streetAddress').text
            town = itemText.find('span', itemprop='addressLocality').text
            state = itemText.find('span', itemprop='addressRegion').text
            __zip = itemText.find('span', itemprop='postalCode').text
            addrs.append(street + "," + town + "," + state + "," + __zip)
        return addrs

    def get_urls():
        """Unique detail-page links to spider for email addresses."""
        urls = []
        for row in soup.find_all('div', class_='row detailRow noclick'):
            for a in row.find_all('a'):
                urls.append(a.get('href'))
        # BUG FIX: original also appended the de-duplicated list back
        # into `urls` (dead code) before copying it element-by-element.
        return list(set(urls))

    def get_emails():
        """Email addresses embedded in search links on a detail page.

        NOTE(review): the original called .get("href") on these <div>
        tags, which yields None and raised a TypeError silenced by a
        bare except; the None-guard below preserves the best-effort
        behavior without swallowing unrelated errors.
        """
        emails = []
        for link in soup.find_all('div', class_='row link recWData'):
            href = link.get("href")
            if href and "/search/results.aspx?type=email&q=" in href:
                emails.append(href.split("q=")[1])
        return emails

    if not soup.find('div', attrs={'class': 'NoResults'}):
        for name, location in zip(get_names(), get_addrs()):
            print(CGRN + "[+] " + CRED + "CallerID: " + CEND + name)
            print(CGRN + "[+] " + CRED + "Location: " + CEND + location)
        ####SPIDER THE URLS#####
        for a in get_urls():
            spider = "https://www.advancedbackgroundchecks.com" + a
            request = urllib2.Request(spider)
            request.add_header('User-Agent', UserAgent)  # was a duplicated UA literal
            page = urllib2.urlopen(request)
            soup = BeautifulSoup(page, 'html.parser')  # rebind so get_emails() sees the detail page
            for email in get_emails():
                if email:
                    print(CGRN + "[+] " + CRED + "EmailAdr: " + CEND + email + " - src: " + spider)
    else:
        print(CRED + "[+] " + CEND + "No Data")
except HTTPError:
    print (CRED + '[x] ' + CEND + 'No Data.')
except URLError:
    print (CRED + '[x] ' + CEND + 'We failed to reach a server.')
#######################################################################
#truepeoplesearch.com scraper
print("\n[?]" + CBLUE + " truepeoplesearch.com" + CEND)
page = 'https://www.truepeoplesearch.com/results?phoneno=' + phone
try:
    request = urllib2.Request(page)
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'lxml')
    if not soup.find('div', attrs={'class': 'row pl-1 record-count'}):  # any results? if so continue
        information = []
        for person in soup.find_all('div', class_='card card-block shadow-form card-summary'):
            url = person['data-detail-link']
            name = person.find('div', class_='h4').text.replace("\n", "").strip()
            age = person.find('span', text='Age ').find_next('span').text.replace("\n", "").strip()
            location = person.find('span', text='Lives in ').find_next('span').text
            information.append([name, age, location, url])
        for name, age, location, url in information:
            print (CGRN + "[+] " + CRED + "CallerID: " + CEND + name + CRED + " Age: " + CEND + age)
            print (CGRN + "[+] " + CRED + "Location: " + CEND + location)
            data.append((name, location))  # save for CSV like the other scrapers (original never did)
            # Spider each person's detail page for address and emails.
            spider = "https://www.truepeoplesearch.com" + url
            request = urllib2.Request(spider)
            request.add_header('User-Agent', UserAgent)  # was a duplicated UA literal
            page2 = urllib2.urlopen(request)
            soup2 = BeautifulSoup(page2, 'lxml')
            addy = soup2.find('a', href=re.compile(r'/results\?streetaddress=.*'))
            if addy is not None:  # BUG FIX: original crashed with AttributeError when absent
                print (CGRN + "[+] " + CRED + "LastAddr: " + CEND + addy.text.strip().replace("\n", ", "))
            for emails in soup2.find_all('a', class_='__cf_email__'):
                # Cloudflare-obfuscated addresses; see decodeEmail above.
                print(CGRN + "[+] " + CRED + "EmailAdr: " + CEND + decodeEmail(emails['data-cfemail']))
            print ("")
    else:
        print(CRED + "[+] " + CEND + "No Data")  # we aint found shit.
except HTTPError:
    print (CRED + '[x] ' + CEND + 'No Data.')
except URLError:
    print (CRED + '[x] ' + CEND + 'We failed to reach a server.')
#######################################################################
# Optional CSV export: appends one (name, location, timestamp) row per
# result collected in `data` when a filename was given on argv[2].
if len(sys.argv) == 3:
    filename = sys.argv[2] + '.csv'
    print (CGRN + "\n[+] " + CRED + " Finished.\n" + CEND + CGRN + "[+] " + CRED + " Saving file as: " + filename + CEND)
    # 'ab': the Python 2 csv module requires binary mode so it controls
    # line endings itself; plain 'a' produces blank rows on Windows.
    with open(filename, 'ab') as csv_file:
        writer = csv.writer(csv_file)
        for name, location in data:
            writer.writerow([name, location, datetime.now()])
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement