Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #coding: utf-8;
- #Credit: Creuilcreuil;
import argparse
import io
import os
import shlex
import subprocess
import urllib.request

from tld import get_tld
# Root directory under which one report folder per scanned domain is created.
ROOT_DIR = 'Websites';
def create_dir(directory, quiet=False):
    """Create *directory* (including parents) if it does not already exist.

    Args:
        directory: Path of the directory to create.
        quiet: When True, suppress progress messages.
    """
    if not os.path.exists(directory):
        if not quiet:
            print("Creating new directory, '{}'.".format(directory))
        os.makedirs(directory)
    else:
        if not quiet:
            # Typo fix: message previously read "allready exists".
            print("Directory already exists, '{}'.".format(directory))
def write_file(path, data, quiet=False):
    """Persist *data* to the text file at *path*, overwriting any content.

    Args:
        path: Destination file path.
        data: Text to write.
        quiet: When True, suppress the progress message.
    """
    if not quiet:
        print("Writing '{}'.".format(path))
    with open(path, 'w') as out_handle:
        out_handle.write(data)
def get_domain_name(url, quiet=False):
    """Return the registered domain of *url* via the third-party tld package.

    Args:
        url: Full URL to extract the domain from.
        quiet: When True, suppress the progress message.
    """
    if not quiet:
        print("Getting domain name,'{}'.".format(url))
    domain = get_tld(url)
    return domain
- def get_ip_address(url, quiet=False):
- if not quiet:
- print( "Getting ip address from '{}'.".format( url ) );
- process = os.popen( 'host {}'.format( url ) );
- result = str( process.read() );
- marker = result.find( 'has address' ) + 12;
- return result[marker:].splitlines()[0];
def get_nmap(options, ip, quiet=False):
    """Run an nmap scan against *ip* and return the scan's stdout.

    Args:
        options: nmap command-line options as a single string (e.g. '-F').
        ip: Target address to scan.
        quiet: When True, suppress the progress message.

    Returns:
        nmap's standard output as a string.
    """
    if not quiet:
        print("Performing 'nmap' scan on {}".format(ip))
    # Argument-list invocation avoids shell injection through *ip*;
    # shlex.split keeps multi-flag option strings working.
    completed = subprocess.run(['nmap'] + shlex.split(options) + [ip],
                               capture_output=True, text=True)
    return completed.stdout
def get_robots_txt(url, quiet=False):
    """Download and return the robots.txt of *url* as a UTF-8 string.

    Args:
        url: Site root URL; a trailing '/' is appended if missing.
        quiet: When True, suppress the progress message.

    Raises:
        urllib.error.URLError: If the HTTP request fails.
    """
    if not quiet:
        # Typo fix: message previously said 'robot.txt'.
        print("Downloading 'robots.txt' from '{}'".format(url))
    if not url.endswith('/'):
        url += '/'
    # Context manager closes the connection (the original leaked it).
    with urllib.request.urlopen('{}robots.txt'.format(url), data=None) as request:
        data = io.TextIOWrapper(request, encoding='utf-8')
        return data.read()
def get_whois(domain_name, quiet=False):
    """Return the raw output of the system 'whois' tool for *domain_name*.

    Args:
        domain_name: Domain to query.
        quiet: When True, suppress the progress message.
    """
    if not quiet:
        print("Getting 'whois' info from '{}'.".format(domain_name))
    pipe = os.popen('whois {}'.format(domain_name))
    whois_text = str(pipe.read())
    return whois_text
def gather_info(url, quiet=False):
    """Collect domain, IP, nmap, robots.txt and whois data for *url* and
    hand the results to create_report for writing to disk.

    Args:
        url: Target website URL.
        quiet: When True, suppress per-step progress messages.
    """
    print("Gathering info from '{}'.".format(url))
    domain_name = get_domain_name(url, quiet)
    ip_address = get_ip_address(domain_name, quiet)
    # Dict literals evaluate in order, so the scans still run in the
    # original sequence: nmap, robots.txt, whois.
    report = {
        'domain_name': domain_name,
        'ip_address': ip_address,
        'nmap': get_nmap('-F', ip_address, quiet),
        'robots_txt': get_robots_txt(url, quiet),
        'whois': get_whois(domain_name, quiet),
    }
    create_report(report, quiet)
def create_report(data, quiet):
    """Write each entry of *data* to '<ROOT_DIR>/<domain>/<key>.txt'.

    Args:
        data: Mapping of section name -> text; must contain 'domain_name'.
        quiet: When True, suppress per-file progress messages.
    """
    # os.path.join instead of hand-built '{}/{}' paths.
    project_dir = os.path.join(ROOT_DIR, data['domain_name'])
    create_dir(project_dir, quiet)
    # Typo fix: messages previously read "Savign".
    print("Saving report in '{}'.".format(project_dir))
    for key, value in data.items():
        file_path = os.path.join(project_dir, '{}.txt'.format(key))
        if not quiet:
            print('Saving {}'.format(file_path))
        write_file(file_path, value, quiet)
    print("Done with '{}'.\n".format(data['domain_name']))
def Main():
    """Parse CLI arguments and gather info for one URL or a URL-list file.

    Positional argument: url_list (a URL, or with -l/--list a file of URLs,
    one per line). -q/--quiet silences per-step progress messages.
    """
    create_dir(ROOT_DIR)
    parser = argparse.ArgumentParser()
    parser.add_argument('url_list', help='website to gather info.', type=str)
    parser.add_argument('-l', '--list', help='<url> is website list.', action='store_true')
    parser.add_argument('-q', '--quiet', help='silent mode.', action='store_true')
    args = parser.parse_args()
    if args.url_list and not args.list:
        # Bug fix: the positional argument is named 'url_list'; the original
        # read args.url, which raised AttributeError on the single-URL path.
        gather_info(args.url_list, args.quiet)
    elif args.url_list and args.list:
        print("Loading website list '{}'".format(args.url_list))
        with open(args.url_list, 'r') as file:
            data = file.read().split('\n')[:-1]
        for line in data:
            gather_info(line, args.quiet)
        print("Done with website list '{}'.\n".format(args.url_list))


if __name__ == '__main__':
    Main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement