#!/usr/bin/env python
# Created By Abraham Aranguren <name.surname@gmail.com> Twitter: @7a_ http://7-a.org
# Requires lxml, installation instructions here: http://lxml.de/installation.html
# Installation in Backtrack 5: /usr/bin/easy_install --allow-hosts=lxml.de,*.python.org lxml
# Tip for Ubuntu courtesy of Mario Heiderich: Python2.7-dev is needed to compile this lib properly
# Clean HTML reference: http://lxml.de/lxmlhtml.html#cleaning-up-html
# Library documentation: http://lxml.de/api/lxml.html.clean.Cleaner-class.html
from lxml.html.clean import Cleaner, clean_html
import lxml.html
from urlparse import urlparse
# Tag whitelist handed to lxml's Cleaner via allow_tags; any tag not listed here is stripped.
ALLOWED_TAGS = ('html', 'body', 'a', 'p', 'h1', 'h2', 'h3', 'h4', 'div', 'span', 'i', 'b', 'u', 'table', 'tbody', 'tr', 'td', 'th', 'strong', 'em', 'sup', 'sub', 'ul', 'ol', 'li')
# URL schemes considered safe; CleanURLs blanks any link whose scheme is not in this list
# (note: relative URLs parse with an empty scheme and are therefore blanked too).
ALLOWED_URL_SCHEMES = [ 'http', 'https', 'ftp', 'mailto', 'sftp', 'shttp' ]
class HTMLSanitiser:
    """Whitelist-based sanitiser for untrusted third-party HTML.

    Combines scheme-based URL filtering (CleanURLs) with two lxml cleaning
    passes (clean_html defaults, then a tag/attribute whitelist) so that only
    the tags in ALLOWED_TAGS and links using ALLOWED_URL_SCHEMES survive.
    """

    def __init__(self):
        # remove_unknown_tags must stay False for allow_tags to take effect
        # (lxml rejects the combination of both); safe_attrs_only drops
        # non-whitelisted attributes such as event handlers (onclick, ...).
        # NOTE(review): scripts/javascript are False here, which on its own
        # would leave script content alone — the pipeline relies on the
        # clean_html() pass in CleanThirdPartyHTML (default Cleaner) to remove
        # those. Confirm before reordering or removing that pass.
        self.Cleaner = Cleaner(
            scripts=False, javascript=False, comments=False, links=False,
            meta=True, page_structure=False, processing_instructions=False,
            embedded=False, frames=False, forms=False, annoying_tags=False,
            remove_unknown_tags=False, safe_attrs_only=True,
            allow_tags=ALLOWED_TAGS)

    def IsValidURL(self, URL):
        """Return True if URL's scheme is in the ALLOWED_URL_SCHEMES whitelist.

        Relative URLs have an empty scheme and are rejected.
        """
        ParsedURL = urlparse(URL)
        return ParsedURL.scheme in ALLOWED_URL_SCHEMES

    def CleanURLs(self, HTML):
        """Blank out every link in HTML whose URL scheme is not whitelisted.

        Largely inspired from:
        http://stackoverflow.com/questions/5789127/how-to-replace-links-using-lxml-and-iterlinks
        Returns the serialised document (lxml wraps fragments in html/body).
        """
        ParsedHTML = lxml.html.document_fromstring(HTML)
        for Element, Attribute, Link, Pos in ParsedHTML.iterlinks():
            if not self.IsValidURL(Link):
                # Fix: the original Link.replace(Link, '') was a roundabout
                # (and allocation-happy) way to produce '' — set the link
                # attribute to the empty string directly.
                Element.set(Attribute, '')
        return lxml.html.tostring(ParsedHTML)

    def CleanThirdPartyHTML(self, HTML):
        """Run the full pipeline: 1st clean URLs, 2nd clean_html defaults, 3rd apply the tag whitelist."""
        return self.Cleaner.clean_html(clean_html(self.CleanURLs(HTML)))
# For testing as a standalone script:
Sanitiser = HTMLSanitiser()
with open('input.txt') as file:
print Sanitiser.CleanThirdPartyHTML(file.read())