#!/data/data/com.termux/files/usr/bin/env python
import argparse
import asyncio
import json
import logging
from urllib.parse import urlparse

import aiohttp
from bs4 import BeautifulSoup

logging.basicConfig(level=logging.INFO)
async def fetch_subdomains_expert(session, domain, depth):
    """Recursively enumerate subdomains of ``domain`` via crt.sh certificate transparency logs."""
    subdomains = set()
    if depth == 0:
        return subdomains
    try:
        response = await session.get(f'https://crt.sh/?q=%.{domain}&output=json', timeout=5)
        data = await response.json()
        # A single crt.sh entry can list several names (one per line) in name_value.
        new_subdomains = {name.strip()
                          for entry in data
                          for name in entry['name_value'].splitlines()}
        subdomains.update(new_subdomains)
        tasks = [fetch_subdomains_expert(session, new_domain, depth - 1)
                 for new_domain in new_subdomains]
        # Merge the subdomains found by the recursive lookups into the result set.
        for nested in await asyncio.gather(*tasks):
            subdomains.update(nested)
    except (aiohttp.ClientError, asyncio.TimeoutError) as e:
        logging.error(f"Error in fetch_subdomains_expert: {e}")
    return subdomains
async def fetch_subdomains_async(url, depth):
    """Standalone helper: open a session and enumerate subdomains for a URL or bare domain."""
    subdomains = set()
    try:
        # Accept both full URLs ("https://example.com") and bare domains ("example.com").
        base_domain = urlparse(url).netloc or url
        async with aiohttp.ClientSession() as session:
            subdomains = await fetch_subdomains_expert(session, base_domain, depth)
    except (aiohttp.ClientError, asyncio.TimeoutError) as e:
        logging.error(f"Error in fetch_subdomains_async: {e}")
    return list(subdomains)
async def gather_technology_info_expert(session, subdomain):
    tech_info = {}
    try:
        response = await session.get(f'http://{subdomain}', timeout=5)
        soup = BeautifulSoup(await response.text(), 'html.parser')
        tech_info['title'] = soup.title.text if soup.title else None
        tech_info['meta_tags'] = [meta['name'] for meta in soup.find_all('meta', {'name': True})]
        tech_info['scripts'] = [script['src'] for script in soup.find_all('script', {'src': True})]
    except (aiohttp.ClientError, asyncio.TimeoutError) as e:
        logging.error(f"Error in gather_technology_info_expert: {e}")
    return tech_info
async def scan_website_expert(url, depth, output_format, output_file, concurrency,
                              rate_limit, custom_headers, interactive_mode, command):
    command = command or []
    try:
        # Accept both full URLs ("https://example.com") and bare domains ("example.com").
        base_domain = urlparse(url).netloc or url
        async with aiohttp.ClientSession(headers=custom_headers) as session:
            subdomains = await fetch_subdomains_expert(session, base_domain, depth)

            # Fingerprint each discovered host, with at most `concurrency` requests in flight.
            semaphore = asyncio.Semaphore(concurrency)

            async def bounded_tech_info(subdomain):
                async with semaphore:
                    return await gather_technology_info_expert(session, subdomain)

            tech_info_list = await asyncio.gather(*(bounded_tech_info(s) for s in subdomains))

            # Process commands
            if "links" in command:
                links = set()
                for subdomain in subdomains:
                    try:
                        response = await session.get(f'http://{subdomain}', timeout=5)
                        soup = BeautifulSoup(await response.text(), 'html.parser')
                        links.update({a['href'] for a in soup.find_all('a', href=True)})
                    except (aiohttp.ClientError, asyncio.TimeoutError):
                        continue
                print("\nLinks:")
                print("\n".join(links))

            if "status_codes" in command:
                status_codes = {}
                for subdomain in subdomains:
                    try:
                        response = await session.get(f'http://{subdomain}', timeout=5)
                        status_codes[subdomain] = response.status
                    except (aiohttp.ClientError, asyncio.TimeoutError):
                        status_codes[subdomain] = "Error"
                print("\nHTTP Status Codes:")
                for subdomain, status in status_codes.items():
                    print(f"{subdomain}: {status}")

            if "keywords" in command:
                keywords = {"security", "vulnerability"}  # Customize keywords as needed
                print("\nKeywords Found:")
                for subdomain in subdomains:
                    try:
                        response = await session.get(f'http://{subdomain}', timeout=5)
                        soup = BeautifulSoup(await response.text(), 'html.parser')
                        text = soup.get_text().lower()
                        if any(keyword in text for keyword in keywords):
                            print(f"{subdomain}: Keywords Found")
                    except (aiohttp.ClientError, asyncio.TimeoutError):
                        continue

            if output_file:
                with open(output_file, 'w') as file:
                    if output_format == 'json':
                        json.dump({'subdomains': list(subdomains),
                                   'technology_info': tech_info_list}, file, indent=2)
                    # Add support for other output formats as needed
    except Exception as e:
        logging.error(f"Error in scan_website_expert: {e}")
def print_gui():
    print("\033[93m==============================================================")
    print("\033[1m                    GhostSec Web Scanner                      ")
    print("\033[93m==============================================================\033[0m")
    print("\033[92mUsage: ./your_script_name.py example.com [options]\033[0m\n")
    print("\033[1mOptions:\033[0m")
    print("  -h, --help                       Show this help message and exit")
    print("  --depth DEPTH                    Depth of subdomain enumeration (default: 1)")
    print("  --output-format {json}           Output format (default: json)")
    print("  --output-file OUTPUT_FILE        Output file path")
    print("  --concurrency CONCURRENCY        Number of concurrent tasks (default: 5)")
    print("  --rate-limit RATE_LIMIT          Rate limit in requests per minute (default: 0)")
    print("  --custom-headers CUSTOM_HEADERS  Custom HTTP headers as a JSON object")
    print("  --interactive-mode               Enable interactive mode")
    print("\n\033[1mCommand Options:\033[0m")
    print("  --command COMMANDS               Comma-separated commands to run: links, status_codes, keywords")
    print("\n\033[1mExamples:\033[0m")
    print("  ./your_script_name.py example.com --depth 2 --output-format json --output-file results.json --command links,status_codes")
    print("  ./your_script_name.py example.com --interactive-mode --command keywords")
    print("\n\033[93m==============================================================\033[0m")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='GhostSec Web Scanner - Scan a website for subdomains and technology information.',
        epilog="Example: ./your_script_name.py example.com --depth 2 --output-format json --output-file results.json --command links,status_codes"
    )
    parser.add_argument('url', help='Target website URL')
    parser.add_argument('--depth', type=int, default=1, help='Depth of subdomain enumeration')
    parser.add_argument('--output-format', choices=['json'], default='json', help='Output format')
    parser.add_argument('--output-file', help='Output file path')
    parser.add_argument('--concurrency', type=int, default=5, help='Number of concurrent tasks')
    parser.add_argument('--rate-limit', type=int, default=0, help='Rate limit in requests per minute')
    parser.add_argument('--custom-headers', type=json.loads, default={}, help='Custom HTTP headers as a JSON object')
    parser.add_argument('--interactive-mode', action='store_true', help='Enable interactive mode')
    parser.add_argument('--command', default='',
                        help='Comma-separated commands to run: links, status_codes, keywords')
    args = parser.parse_args()

    if args.url in ("-h", "--help"):
        print_gui()
    else:
        # Split the comma-separated command string into individual commands.
        commands = [c.strip() for c in args.command.split(',') if c.strip()]
        try:
            asyncio.run(scan_website_expert(
                args.url, args.depth, args.output_format, args.output_file,
                args.concurrency, args.rate_limit, args.custom_headers,
                args.interactive_mode, commands
            ))
        except KeyboardInterrupt:
            print("\nScan aborted by user.")
        except Exception as e:
            print(f"An error occurred: {e}")