Guest User

007ex

a guest
Jul 28th, 2015
187
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 2.39 KB | None | 0 0
  1. #!/usr/bin/python
  2. import urllib2
  3. import re
  4. import os
  5. import random
  6. import string
  7. import itertools
  8. from urlparse import urlparse
  9.  
# ASCII-art banner displayed when the tool starts (runtime output — keep verbatim).
logo = """
##########################################################
# ___ ___ _____ _____ _ _ _ #
# / _ \ / _ \___ | /__ \___ ___ | | /\ /(_) |_ #
# | | | | | | | / /____ / /\/ _ \ / _ \| |/ //_/ | __| #
# | |_| | |_| |/ /_____/ / | (_) | (_) | / __ \| | |_ #
# \___/ \___//_/ \/ \___/ \___/|_\/ \/|_|\__| #
# #
# [-] Developed & Coded by Bond Benz #
# #
##########################################################
"""
  22.  
# Main-menu text; options 1-6 are presumably dispatched by code past the end
# of this paste — TODO confirm against the full script.
menu = """
[1] - Website Server Extractor
[2] - Simple Dorker
[3] - Password Generator
[4] - Wordlist Generator
[5] - About me
[6] - Exit
"""
  31.  
  32.  
  33. def aboutme():
  34. print '''
  35. ##########################################################
  36. # ____ _____ _ _ ____ ____ ____ _ _ ____ #
  37. # ( _ \( _ )( \( )( _ \( _ \( ___)( \( )(_ ) #
  38. # ) _ < )(_)( ) ( )(_) )) _ < )__) ) ( / /_ #
  39. # (____/(_____)(_)\_)(____/(____/(____)(_)\_)(____) #
  40. # #
  41. # E-mail : s0-z.x@hotmail.com #
  42. # Facebook : https://www.facebook.com/0x55547987 #
  43. # Team : 1337Day Algeria #
  44. # #
  45. # #
  46. ##########################################################
  47. '''
  48.  
  49.  
  50. def Dorker():
  51. print "[+] Rule N1 : Don't Put inurl: or intext: or something like that"
  52. print "[+] Rule N2 : Don't Use Space In The Dork or the script wont work"
  53. dork = raw_input('Dork : ')
  54. print "[~] Result : "
  55. page = 1
  56. sites = list()
  57. while page <= 150:
  58.  
  59. url = "http://www.bing.com/search?q="+dork+"+node&go=Valider&qs=ds&form=QBRE&first=" + str(page)
  60. req = urllib2.Request(url)
  61. read = urllib2.urlopen(req).read()
  62. extract = re.findall('<div class="b_title"><h2><a href="(.*?)" h=', read)
  63. page += 1
  64.  
  65. for url in extract:
  66. split = urlparse(url)
  67. site = split.netloc
  68. if site not in sites:
  69. print site
  70. sites.append(site)
Add Comment
Please, Sign In to add comment