illwill

Skiptracer

Mar 8th, 2018
# -*- coding: utf-8 -*-
import urllib2
import sys
import csv
import re
from datetime import datetime
from bs4 import BeautifulSoup
from urllib2 import Request, urlopen, HTTPError, URLError

CBLUE = '\033[34m'
CRED = '\033[91m'
CGRN = '\033[92m'
CEND = '\033[0m'
print ""
print ".▄▄ · ▄ •▄ ▪   ▄▄▄·▄▄▄▄▄▄▄▄   ▄▄▄·  ▄▄· ▄▄▄ .▄▄▄  "
print "▐█ ▀. █▌▄▌▪██ ▐█ ▄█•██  ▀▄ █·▐█ ▀█ ▐█ ▌▪▀▄.▀·▀▄ █·"
print "▄▀▀▀█▄▐▀▀▄·▐█· ██▀· ▐█.▪▐▀▀▄ ▄█▀▀█ ██ ▄▄▐▀▀▪▄▐▀▀▄ "
print "▐█▄▪▐█▐█.█▌▐█▌▐█▪·• ▐█▌·▐█•█▌▐█ ▪▐▌▐███▌▐█▄▄▌▐█•█▌"
print " ,.-~*´¨¯¨`*·~-.¸-(by: illwill)-,.-~*´¨¯¨`*·~-.¸ \n"
if len(sys.argv) <= 1:
    print(CRED + "Usage:" + CEND + "   python " + sys.argv[0] + " <phonenumber> <filename>")
    print(CRED + "Example:" + CEND + " python " + sys.argv[0] + " 2036665555 result")
    print(CRED + "(you can omit filename if you don't want output to .csv)" + CEND + "\n")
    sys.exit(1)
else:
    phone = sys.argv[1]
    data = []        # (name, location) tuples collected by each scraper for the .csv output
    dashphone = '{}-{}-{}'.format(phone[0:3], phone[3:6], phone[6:])  # if site needs dashes in phone number
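    # worked example of the slicing above: with phone = '2036665555',
    # phone[0:3] = '203', phone[3:6] = '666', phone[6:] = '5555', so dashphone = '203-666-5555'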

UserAgent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'

#######################################################################
# decode the hidden Cloudflare email obfuscation used on some sites
# https://support.cloudflare.com/hc/en-us/articles/200170016-What-is-Email-Address-Obfuscation-
#######################################################################
def decodeEmail(e):
    de = ""
    k = int(e[:2], 16)                     # first byte of the hex string is the XOR key

    for i in range(2, len(e)-1, 2):
        de += chr(int(e[i:i+2], 16) ^ k)   # XOR each following byte with the key to recover a character

    return de
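# worked example with made-up values: decodeEmail('422a2b') == 'hi'
# (key 0x42; 0x2a ^ 0x42 = 0x68 'h', 0x2b ^ 0x42 = 0x69 'i');
# real data-cfemail attribute values decode the same way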
#######################################################################
#       411.info scraper - returns CallerID and Location              #
#######################################################################
page = 'https://411.info/reverse/?r='+phone
print("\n[?]"+ CBLUE + " 411.info" + CEND)

try:
    page = urllib2.urlopen(page)
    soup = BeautifulSoup(page, 'html.parser')
    name_box = soup.find('div', attrs={'class': 'cname'})
    name = name_box.text.strip()
    print(CGRN+"[+] "+CRED + "CallerID: " + CEND + name)

    def get_addrs():
        addrs = []
        for itemText in soup.find_all('div', attrs={'class':'adr_b'}):
            street = str(itemText.find('span', itemprop='streetAddress').text).replace("\t","").strip()
            town = itemText.find('span', itemprop='addressLocality').text
            state = itemText.find('span', itemprop='addressRegion').text
            __zip = itemText.find('span', itemprop='postalCode').text
            location = street+", "+ town+", "+ state+", "+ __zip
            addrs.append(location)
        return addrs
    for location in get_addrs():
        print(CGRN+"[+] "+CRED + "Location: " + CEND + location)
        data.append((name, location))    # record every address found, not just the last one

except HTTPError:
    print (CRED + '[x] '+CEND+'No Data.')
except URLError:
    print (CRED + '[x] '+CEND+'We failed to reach a server.')
#######################################################################
#usphonebook.com scraper - returns CallerID/Location/PhoneType/Carrier#
#######################################################################
page = 'https://www.usphonebook.com/'+ dashphone + '/detailed'        #this site wants the number with dashes
print("\n[?]"+ CBLUE + " usphonebook.com" + CEND)
try:
    request = urllib2.Request(page)
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')
    l = []
    name_box = soup.find("strong", itemprop="givenName")
    l.append(name_box.text)
    name_box = soup.find("strong", itemprop="familyName")
    l.append(name_box.text)
    s = ' '.join(l)
    print(CGRN+"[+] "+CRED + "CallerID:  " + CEND + s)

    # use a separate name here so the global `data` list used for the .csv isn't clobbered
    details = soup.find_all('p', attrs={'class':'ls_contacts__text'})
    street_addr = str(details[0].text).strip()
    town_zip = str(details[1].text).replace('\n\n', ' ').replace('\n', '')
    location = street_addr + town_zip
    phonetype = str(details[2].text).strip()
    carrier = str(details[3].text).strip()
    print(CGRN+"[+] "+CRED + "Location:  " + CEND + location)
    print(CGRN+"[+] "+CRED + "PhoneType: " + CEND + phonetype)
    print(CGRN+"[+] "+CRED + "Carrier:   " + CEND + carrier)

    # save the result as a tuple
    data.append((s, location))
except HTTPError as e:
    print (CRED + '[x] '+CEND+'No Data.')
except URLError as e:
    print (CRED + '[x] '+CEND+'We failed to reach a server.')

#######################################################################
#      Whocalld.com scraper - returns CallerID/Location/PhoneType     #
#######################################################################
page = 'https://whocalld.com/+1'+phone
print("\n[?]"+ CBLUE + " WhoCalld.com" + CEND)
try:
    request = urllib2.Request(page)
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')

    name_box = soup.find('h2', attrs={'class': 'name'})           #Grab the callerID name if avail
    name = name_box.text.strip()
    if name:
        print(CGRN+"[+] "+CRED + "CallerID: " + CEND + name)

    location_box = soup.find('h3', attrs={'class':'location'})    #Grab the location if avail
    location = location_box.text
    if location:
        print(CGRN+"[+] "+CRED + "Location: " + CEND + location)

    phone_type = soup.find("img").attrs['alt']                    #Grab the phone type if avail
    if phone_type:
        print(CGRN+"[+] "+CRED + "PhoneType: " + CEND + phone_type)
    data.append((name, location))

    request = urllib2.Request('https://whocalld.com/+1'+phone+'?carrier') #spider the carrier page
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')
    carrier = soup.find('span', attrs={'class':'carrier'}).text          #get carrier details
    city = soup.find('span', attrs={'class':'city'}).text
    state = soup.find('span', attrs={'class':'state'}).text
    time = soup.find('span', attrs={'class':'time'}).text
    print (CGRN+"[+] "+CRED + "Carrier:  " + CEND + "%s - %s, %s on %s" % (carrier, city, state, time))

except HTTPError:
    print (CRED + '[x] '+CEND+'No Data.')                         #Nothing returned.
except URLError:
    print (CRED + '[x] '+CEND+'We failed to reach a server.')     #website down or offline

#######################################################################
#advancedbackgroundchecks.com scraper - returns CallerID/Location/Emails
#######################################################################
print("\n[?]"+ CBLUE + " advancedbackgroundchecks.com" + CEND)
page = 'https://www.advancedbackgroundchecks.com/phone/'+phone
try:
    request = urllib2.Request(page)
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')

    def get_names():
        names = []
        for result in soup.find_all('div', attrs={'class': 'divCol name'}):
            name_box = result.find('div', attrs={'class': 'resultName'})
            names.append(name_box.text.strip())
        return names

    def get_addrs():
        addrs = []
        for itemText in soup.find_all('div', attrs={'class':'divCol address'}):
            street = itemText.find('span', itemprop='streetAddress').text
            town = itemText.find('span', itemprop='addressLocality').text
            state = itemText.find('span', itemprop='addressRegion').text
            __zip = itemText.find('span', itemprop='postalCode').text
            location = street+","+ town+","+ state+","+ __zip
            addrs.append(location)
        return addrs

    def get_urls():
        urls = []
        for row in soup.find_all('div', class_='row detailRow noclick'):
            for a in row.find_all('a'):
                urls.append(a.get('href'))
        return list(set(urls))                   #de-duplicate before spidering

    def get_emails():
        emails = []
        links = soup.find_all('div', class_='row link recWData')
        try:
            for link in links:
                href = link.get("href")          #guard against elements with no href
                if href and "/search/results.aspx?type=email&q=" in href:
                    emails.append(href.split("q=")[1])
        except:
            emails.append("")
        return emails
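    # worked example of the split above (made-up href, not real data):
    # "/search/results.aspx?type=email&q=jdoe@example.com".split("q=")[1] == "jdoe@example.com"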

    if not soup.find('div', attrs={'class':'NoResults'}):
        zipped_list = zip(get_names(), get_addrs())
        for name, location in zipped_list:
            print(CGRN+"[+] "+CRED + "CallerID: " + CEND + name)
            print(CGRN+"[+] "+CRED + "Location: " + CEND + location)

        ####SPIDER THE URLS#####
        url = get_urls()
        for a in url:
            spider = "https://www.advancedbackgroundchecks.com" + a
            #print(CGRN+"[+] "+CRED + "NEXT_URL: " + CEND + spider)
            request = urllib2.Request(spider)
            request.add_header('User-Agent', UserAgent)
            page = urllib2.urlopen(request)
            soup = BeautifulSoup(page, 'html.parser')
            for email in get_emails():
                if email:
                    print(CGRN+"[+] "+CRED + "EmailAdr: " + CEND + email + " - src: " + spider)

    else:
        print(CRED + "[+] " + CEND + "No Data")

except HTTPError as e:
    print (CRED + '[x] '+CEND+'No Data.')
except URLError as e:
    print (CRED + '[x] '+CEND+'We failed to reach a server.')
#######################################################################
#truepeoplesearch.com scraper - returns CallerID/Age/Location/Emails  #
#######################################################################
print("\n[?]"+ CBLUE + " truepeoplesearch.com" + CEND)
page = 'https://www.truepeoplesearch.com/results?phoneno='+phone
try:
    request = urllib2.Request(page)
    request.add_header('User-Agent', UserAgent)
    page = urllib2.urlopen(request)
    soup = BeautifulSoup(page, 'lxml')

    if not soup.find('div', attrs={'class':'row pl-1 record-count'}):     #check if any results, if so continue
        information = []
        for person in soup.find_all('div', class_='card card-block shadow-form card-summary'):
            url = person['data-detail-link']
            name = person.find('div', class_='h4').text.replace("\n","").strip()
            age = person.find('span', text='Age ').find_next('span').text.replace("\n","").strip()
            location = person.find('span', text='Lives in ').find_next('span').text
            information.append([name, age, location, url])

        for name, age, location, url in information:
            print (CGRN+"[+] "+CRED + "CallerID: "+ CEND+ name +CRED + " Age: "+ CEND+ age)
            print (CGRN+"[+] "+CRED + "Location: "+ CEND+ location)

            spider = "https://www.truepeoplesearch.com" + url             #spider each person's detail page
            request = urllib2.Request(spider)
            request.add_header('User-Agent', UserAgent)
            page2 = urllib2.urlopen(request)
            soup2 = BeautifulSoup(page2, 'lxml')
            addy = soup2.find('a', href=re.compile(r'/results\?streetaddress=.*'))
            location = addy.text.strip().replace("\n",", ")
            print (CGRN+"[+] "+CRED + "LastAddr: "+ CEND+ location)
            for emails in soup2.find_all('a', class_='__cf_email__'):
                email = emails['data-cfemail']
                email = decodeEmail(email)
                print(CGRN+"[+] "+CRED + "EmailAdr: " + CEND+ email)
            print ("")
    else:
        print(CRED + "[+] " + CEND + "No Data")                         #we ain't found shit.

except HTTPError:
    print (CRED + '[x] '+CEND+'No Data.')
except URLError:
    print (CRED + '[x] '+CEND+'We failed to reach a server.')
#######################################################################
#            save results to .csv if a filename was given             #
#######################################################################
if len(sys.argv) == 3:
    filename = sys.argv[2] + '.csv'
    print (CGRN+"\n[+] "+ CRED + " Finished.\n"+CEND+CGRN+"[+] "+ CRED + " Saving file as: " + filename + CEND)
    with open(filename, 'a') as csv_file:
        writer = csv.writer(csv_file)
        for name, location in data:
            writer.writerow([name, location, datetime.now()])
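# illustrative output row (example values only): each .csv line is name, location, timestamp, e.g.
#   John Doe,"123 Main St, Hartford, CT, 06101",2018-03-08 12:34:56.789012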