# mirror.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tutorial by Shirobi

import datetime
import hashlib
import logging
import pickle
import re
import time
import urllib
import wsgiref.handlers

from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.runtime import apiproxy_errors

import transform_content
################################################################################

DEBUG = False
EXPIRATION_DELTA_SECONDS = 3600
EXPIRATION_RECENT_URLS_SECONDS = 90

## DEBUG = True
## EXPIRATION_DELTA_SECONDS = 10
## EXPIRATION_RECENT_URLS_SECONDS = 1

HTTP_PREFIX = "http://"
HTTPS_PREFIX = "https://"

IGNORE_HEADERS = frozenset([
  'set-cookie',
  'expires',
  'cache-control',

  # Ignore hop-by-hop headers
  'connection',
  'keep-alive',
  'proxy-authenticate',
  'proxy-authorization',
  'te',
  'trailers',
  'transfer-encoding',
  'upgrade',
])

TRANSFORMED_CONTENT_TYPES = frozenset([
  "text/html",
  "text/css",
])

MIRROR_HOSTS = frozenset([
  'mirrorr.com',
  'mirrorrr.com',
  'www.mirrorr.com',
  'www.mirrorrr.com',
  'www1.mirrorrr.com',
  'www2.mirrorrr.com',
  'www3.mirrorrr.com',
])

MAX_CONTENT_SIZE = 10 ** 6

MAX_URL_DISPLAY_LENGTH = 50

################################################################################

def get_url_key_name(url):
  """Returns a stable cache key name for a URL.

  A SHA-256 hex digest is used instead of the raw URL because key names
  can only be 500 bytes in length, while URLs may be up to 2KB.
  """
  url_hash = hashlib.sha256()
  url_hash.update(url)
  return "hash_" + url_hash.hexdigest()

################################################################################

class EntryPoint(db.Model):
  """A URL that a user requested directly as an entry point to this mirror."""

  translated_address = db.TextProperty(required=True)
  last_updated = db.DateTimeProperty(auto_now=True)
  display_address = db.TextProperty()  # Set at render time; never stored.


class MirroredContent(object):
  """A mirrored page: response status, headers, and (transformed) body."""

  def __init__(self, original_address, translated_address,
               status, headers, data, base_url):
    self.original_address = original_address
    self.translated_address = translated_address
    self.status = status
    self.headers = headers
    self.data = data
    self.base_url = base_url

  @staticmethod
  def get_by_key_name(key_name):
    # Cached pages live only in memcache; returns None on a cache miss.
    return memcache.get(key_name)

  @staticmethod
  def fetch_and_store(key_name, base_url, translated_address, mirrored_url):
    """Fetch and cache a page.

    Args:
      key_name: Hash to use to store the cached page.
      base_url: The hostname of the page that's being mirrored.
      translated_address: The URL of the mirrored page on this site.
      mirrored_url: The URL of the original page. Hostname should match
        the base_url.

    Returns:
      A new MirroredContent object if the page was successfully retrieved,
      or None if any errors occurred or the content could not be retrieved.
    """
    # Refuse to mirror one of our own hostnames, which would cause a
    # request loop.
    if base_url in MIRROR_HOSTS:
      logging.warning('Encountered recursive request for "%s"; ignoring',
                      mirrored_url)
      return None

    logging.debug("Fetching '%s'", mirrored_url)
    try:
      response = urlfetch.fetch(mirrored_url)
    except (urlfetch.Error, apiproxy_errors.Error):
      logging.exception("Could not fetch URL")
      return None

    # Drop ignored and hop-by-hop headers; everything else is passed
    # through to the client with lowercased names.
    adjusted_headers = {}
    for key, value in response.headers.iteritems():
      adjusted_key = key.lower()
      if adjusted_key not in IGNORE_HEADERS:
        adjusted_headers[adjusted_key] = value

    content = response.content
    page_content_type = adjusted_headers.get("content-type", "")
    for content_type in TRANSFORMED_CONTENT_TYPES:
      # startswith() because the header may carry a charset suffix,
      # e.g. 'text/html; charset=UTF-8'.
      if page_content_type.startswith(content_type):
        content = transform_content.TransformContent(base_url, mirrored_url,
                                                     content)
        break

    # If the transformed content is over 1MB, truncate it (yikes!)
    if len(content) > MAX_CONTENT_SIZE:
      logging.warning('Content is over 1MB; truncating')
      content = content[:MAX_CONTENT_SIZE]

    new_content = MirroredContent(
      base_url=base_url,
      original_address=mirrored_url,
      translated_address=translated_address,
      status=response.status_code,
      headers=adjusted_headers,
      data=content)
    # memcache.add is a no-op if the key is already present (e.g. a
    # concurrent request cached this page first); we log and move on.
    if not memcache.add(key_name, new_content, time=EXPIRATION_DELTA_SECONDS):
      logging.error('memcache.add failed: key_name = "%s", '
                    'original_url = "%s"', key_name, mirrored_url)

    return new_content

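# Typical flow (a reading aid, inferred from the handlers below):
# MirrorHandler hashes the full original URL into key_name, tries
# MirroredContent.get_by_key_name() first, and only calls
# fetch_and_store() on a cache miss, so each page is fetched from the
# origin at most once per EXPIRATION_DELTA_SECONDS window.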
################################################################################

class BaseHandler(webapp.RequestHandler):
  def get_relative_url(self):
    slash = self.request.url.find("/", len(self.request.scheme + "://"))
    if slash == -1:
      return "/"
    return self.request.url[slash:]
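# For example, a request for "http://<this-app>/example.com/page?q=1"
# yields the relative URL "/example.com/page?q=1": everything from the
# first slash that follows the scheme and host.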


class HomeHandler(BaseHandler):
  def get(self):
    # Handle the input form to redirect the user to a relative URL.
    form_url = self.request.get("url")
    if form_url:
      # Accept URLs that still have a leading 'http://'.
      inputted_url = urllib.unquote(form_url)
      if inputted_url.startswith(HTTP_PREFIX):
        inputted_url = inputted_url[len(HTTP_PREFIX):]
      return self.redirect("/" + inputted_url)

    latest_urls = memcache.get('latest_urls')
    if latest_urls is None:
      latest_urls = EntryPoint.gql("ORDER BY last_updated DESC").fetch(25)

      # Generate a display address that truncates the URL and adds an
      # ellipsis. This is never actually saved in the Datastore.
      for entry_point in latest_urls:
        entry_point.display_address = \
          entry_point.translated_address[:MAX_URL_DISPLAY_LENGTH]
        if len(entry_point.display_address) == MAX_URL_DISPLAY_LENGTH:
          entry_point.display_address += '...'

      if not memcache.add('latest_urls', latest_urls,
                          time=EXPIRATION_RECENT_URLS_SECONDS):
        logging.error('memcache.add failed: latest_urls')

    # Do this dictionary construction here to decouple presentation from
    # how we store data.
    secure_url = None
    if self.request.scheme == "http":
      secure_url = "https://mirrorrr.appspot.com"
    context = {
      "latest_urls": latest_urls,
      "secure_url": secure_url,
    }
    self.response.out.write(template.render("main.html", context))

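# Note the round trip (a reading aid, not new behavior): submitting
# "http://example.com/foo" to the form above redirects to
# "/example.com/foo", which the catch-all route below hands to
# MirrorHandler with base_url == "example.com".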

class MirrorHandler(BaseHandler):
  def get(self, base_url):
    assert base_url

    # Log the user-agent and referrer, to see who is linking to us.
    logging.debug('User-Agent = "%s", Referrer = "%s"',
                  self.request.user_agent,
                  self.request.referer)
    logging.debug('Base_url = "%s", url = "%s"', base_url, self.request.url)

    translated_address = self.get_relative_url()[1:]  # remove leading /
    mirrored_url = HTTP_PREFIX + translated_address

    # Use a sha256 hash of the mirrored URL for the key name, since key
    # names can only be 500 bytes in length; URLs may be up to 2KB.
    key_name = get_url_key_name(mirrored_url)
    logging.info("Handling request for '%s' = '%s'", mirrored_url, key_name)

    content = MirroredContent.get_by_key_name(key_name)
    cache_miss = False
    if content is None:
      logging.debug("Cache miss")
      cache_miss = True
      content = MirroredContent.fetch_and_store(key_name, base_url,
                                                translated_address,
                                                mirrored_url)
    if content is None:
      return self.error(404)

    # Store the entry point down here, once we know the request is good and
    # there was a cache miss (i.e., the page expired). If the referrer
    # wasn't local, or it was '/', then this is an entry point. These
    # headers may be absent entirely, so default them to '' before matching.
    user_agent = self.request.user_agent or ''
    referer = self.request.referer or ''
    if (cache_miss and
        'Googlebot' not in user_agent and
        'Slurp' not in user_agent and
        (not referer.startswith(self.request.host_url) or
         referer == self.request.host_url + "/")):
      # Ignore favicons as entry points; browsers request them for every
      # new site, so we special-case them here.
      if not self.request.url.endswith("favicon.ico"):
        logging.info("Inserting new entry point")
        entry_point = EntryPoint(
          key_name=key_name,
          translated_address=translated_address)
        try:
          entry_point.put()
        except (db.Error, apiproxy_errors.Error):
          logging.exception("Could not insert EntryPoint")

    for key, value in content.headers.iteritems():
      self.response.headers[key] = value
    if not DEBUG:
      self.response.headers['cache-control'] = \
        'max-age=%d' % EXPIRATION_DELTA_SECONDS

    self.response.out.write(content.data)


app = webapp.WSGIApplication([
  (r"/", HomeHandler),
  (r"/main", HomeHandler),
  (r"/([^/]+).*", MirrorHandler)
], debug=DEBUG)
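# Route order matters here: "/" and "/main" must precede the catch-all
# r"/([^/]+).*", whose captured group (the first path segment, i.e. the
# mirrored hostname) is passed to MirrorHandler.get() as base_url.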


def main():
  wsgiref.handlers.CGIHandler().run(app)


if __name__ == "__main__":
  main()
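# Deployment note (a sketch, not part of the original file): on the
# legacy App Engine Python 2 CGI runtime, a module like this was
# typically wired up with an app.yaml along these lines, where the
# application id is a placeholder:
#
#   application: your-app-id
#   version: 1
#   runtime: python
#   api_version: 1
#
#   handlers:
#   - url: /.*
#     script: mirror.py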