
Untrusted HTML XSS/CSS challenge 4

7a_
Jan 27th, 2012
#!/usr/bin/env python
# Created by Abraham Aranguren <name.surname@gmail.com> Twitter: @7a_ http://7-a.org
# Requires lxml, installation instructions here: http://lxml.de/installation.html
# Installation in Backtrack 5: /usr/bin/easy_install --allow-hosts=lxml.de,*.python.org lxml
# Tip for Ubuntu courtesy of Mario Heiderich: python2.7-dev is needed to compile this lib properly
# Clean HTML reference: http://lxml.de/lxmlhtml.html#cleaning-up-html
# Library documentation: http://lxml.de/api/lxml.html.clean.Cleaner-class.html
from lxml.html.clean import Cleaner, clean_html
import lxml.html
from urlparse import urlparse

ALLOWED_TAGS = ('html', 'body', 'a', 'p', 'h1', 'h2', 'h3', 'h4', 'div', 'table', 'tbody', 'tr', 'td', 'th', 'strong', 'em', 'sup', 'sub', 'ul', 'ol', 'li')
ALLOWED_URL_SCHEMES = ('http', 'https', 'ftp', 'mailto', 'sftp', 'shttp')

class HTMLSanitiser:
    def __init__(self):
        self.Cleaner = Cleaner(scripts=False, javascript=False, comments=False, links=False, meta=True, page_structure=False, processing_instructions=False, embedded=False, frames=False, forms=False, annoying_tags=False, remove_unknown_tags=False, safe_attrs_only=True, allow_tags=ALLOWED_TAGS)

    def IsValidURL(self, URL):
        # Note: relative URLs have an empty scheme and therefore fail this check too
        return urlparse(URL).scheme in ALLOWED_URL_SCHEMES

    def CleanURLs(self, HTML):
        # Largely inspired by: http://stackoverflow.com/questions/5789127/how-to-replace-links-using-lxml-and-iterlinks
        ParsedHTML = lxml.html.document_fromstring(HTML)
        for element, attribute, link, pos in ParsedHTML.iterlinks():
            if not self.IsValidURL(link):
                element.set(attribute, '')  # blank out links with disallowed schemes
        return lxml.html.tostring(ParsedHTML)

    def CleanThirdPartyHTML(self, HTML):
        # 1st apply the tag whitelist, 2nd run lxml's default cleaner, 3rd strip disallowed URL schemes
        return self.CleanURLs(clean_html(self.Cleaner.clean_html(HTML)))

# For testing as a standalone script:
if __name__ == '__main__':
    Sanitiser = HTMLSanitiser()
    with open('input.txt') as file:
        print Sanitiser.CleanThirdPartyHTML(file.read())
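
For a quick check without creating input.txt, a snippet along these lines can be run with the class above in scope. The payload string is a made-up example, not part of the challenge; with this configuration, safe_attrs_only should drop the onclick handler, and the javascript: href should come back blanked, whether by lxml's default cleaner or by the scheme whitelist in CleanURLs:

# Hypothetical smoke test, assuming HTMLSanitiser from the script above is in scope
Sanitiser = HTMLSanitiser()
Payload = '<p onclick="alert(1)">hello</p><a href="javascript:alert(1)">click me</a>'
# Expected: onclick stripped (safe_attrs_only=True) and the javascript: href
# emptied (disallowed scheme), leaving only whitelisted tags and attributes
print Sanitiser.CleanThirdPartyHTML(Payload)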