
cRPG update 1.00

a guest
Apr 10th, 2016
# -*- coding: utf-8 -*-
# Python 3
# utf-8
# ___________________________________________________________________________
#
#   Name:        crpg_update
#   Description: -
#   Version:     1.00
#   Project:     cRPG
#
#   Version Last to First: yyyy/mm/dd x.xx Author: Description
#   NB: Change __version__
#
#   2016/04/10 1.00 Élio: Creation
# ___________________________________________________________________________

"""cRPG update"""

import argparse # Parser for command-line options, arguments and sub-commands
import gzip # Support for gzip files
import hashlib # Secure hashes and message digests
import os.path # Common pathname manipulations
import sys # System-specific parameters and functions
import urllib.error # Exception classes raised by urllib.request
import urllib.parse # Parse URLs into components
import urllib.request # Extensible library for opening URLs

__version__ = '1.00'
APPLICATION_NAME = "cRPG update {}".format(__version__)

def binary_prefix_format(value: int) -> str: # {
    """IEC binary prefix format
    IEC (SI) names: Yobi (Yotta), Zebi (Zetta), Exbi (Exa), Pebi (Peta), Tebi (Tera), Gibi (Giga), Mebi (Mega), Kibi (kilo)
    """
    for val, unit in zip(
        (1 << 80, 1 << 70, 1 << 60, 1 << 50, 1 << 40, 1 << 30, 1 << 20, 1 << 10),
        ("Yi", "Zi", "Ei", "Pi", "Ti", "Gi", "Mi", "Ki")
    ): # {
        if value >= val:
            return "{:0.2f} {}".format(value / val, unit)
    # } for
    else: return "{:0.0f}".format(value)
# } binary_prefix_format

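# Example outputs (each value follows from the thresholds and format strings above):
#   binary_prefix_format(512)       -> "512"
#   binary_prefix_format(1536)      -> "1.50 Ki"
#   binary_prefix_format(100 << 10) -> "100.00 Ki"
#   binary_prefix_format(5 << 20)   -> "5.00 Mi"
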
def url_read(url: str, verbose=True) -> bytes: # {
    """Read URL data file
    """
    url_o = urllib.parse.urlparse(url)
    try: # {
        with urllib.request.urlopen(url) as f: # {
            if f.reason != "OK" or not f.readable(): # { Validation: PASSED 2016/04/10
                exc_value = Exception("Unable to retrieve file: {}".format(
                    os.path.basename(url_o.path),
                ))
                print(exc_value, url)
                sys.exit(exc_value) # TODO: Handle error
            # }
            meta = f.info()
            data_length = int(meta.get_all('Content-Length')[0])
            index = 0
            progress = 0
            progress_old = -1
            data = bytes()
            while index < data_length: # {
                data_chunk = f.read(100 << 10) # 100 KiB
                if not data_chunk: break # Guard: stop if the connection ends before data_length bytes arrive
                data += data_chunk

                index += len(data_chunk)
                progress = index / data_length
                if ((progress - progress_old) >= 0.1) or progress == 1: # {
                    msg = "\tDownloading file... {}: {} / {} bytes".format(
                        "{:0.0%}".format(progress).rjust(4),
                        binary_prefix_format(index),
                        binary_prefix_format(data_length),
                    )
                    if verbose: print(msg)
                    progress_old = progress
                # } if
            # } while
            if verbose: print("")
        # } with
        return data
    # } try
    except urllib.error.HTTPError as exc_value: # {
        print(exc_value, url)
        sys.exit(exc_value) # TODO: Handle error
        # os.path.basename(url_o.path)
        # exc_value.code
        # exc_value.reason
    # } except urllib.error.HTTPError
# } url_read

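# Usage sketch for url_read() (the URL here is a placeholder, not the real update server):
#   data = url_read("http://example.com/cRPG/filelist.txt", verbose=True)
# Progress is printed in ~10% steps, with byte counts formatted by binary_prefix_format().
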
def url_unparse(scheme: str, netloc: str, path: str, params: str="", query: str="", fragment: str="") -> str: # {
    return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
# } url_unparse

def file_open(file: str) -> bytes: # {
    try: # {
        with open(file, 'rb') as f: # {
            # TODO: Segment packets
            return f.read()
        # }
    # } try
    except (IOError, OSError) as exc_value: # {
        sys.exit(exc_value) # TODO: Handle error
    # } except (IOError, OSError)
# } file_open

def compute_file_md5_checksum(file_path): # {
    """Compute file MD5 checksum
    """
    m = hashlib.md5()
    with open(file_path, 'rb') as f: # {
        m.update(f.read())
    # } with
    return m.hexdigest()
# } compute_file_md5_checksum

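# compute_file_md5_checksum() above reads the whole file into memory before hashing.
# A minimal alternative sketch (not used by the script) that hashes large module files
# in 1 MiB chunks instead:
def compute_file_md5_checksum_chunked(file_path, chunk_size=1 << 20): # {
    """Compute file MD5 checksum without loading the whole file into memory
    """
    m = hashlib.md5()
    with open(file_path, 'rb') as f: # {
        for chunk in iter(lambda: f.read(chunk_size), b""): # {
            m.update(chunk)
        # } for
    # } with
    return m.hexdigest()
# } compute_file_md5_checksum_chunked
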
def retrieve_file(url_o, filename, output_path, iscompressed=True, verbose=True): # {
    # Read URL file
    url_path_file = url_unparse(
        url_o.scheme,
        url_o.netloc,
        os.path.normpath(os.path.join(url_o.path, filename + ["", ".gz"][iscompressed])).replace("\\", "/"),
    )
    if verbose: print("Downloading: {}".format(filename))
    data = url_read(url_path_file, verbose)
    if iscompressed: # {
        # Decompress file
        data = gzip.decompress(data)
    # }
    file_path = os.path.normpath(os.path.join(output_path, filename))
    # Create directory
    if not os.path.exists(os.path.dirname(file_path)): os.makedirs(os.path.dirname(file_path))
    # Store file
    with open(file_path, "wb") as f: # {
        f.write(data)
    # }
    return data
# } retrieve_file

if __name__ == '__main__': # {
    input_path_name = "cRPG"
    parser = argparse.ArgumentParser(description=APPLICATION_NAME)
    parser.add_argument( # Positional argument
        action='store',
        # nargs,
        # const,
        # default,
        type=str,
        # choices,
        help="{} directory".format(input_path_name),
        # metavar,
        dest='input_directory'
    )
    arguments = parser.parse_args()
    input_path = arguments.input_directory
    input_url = "" ## Removed
    #
    input_path = os.path.normpath(input_path)
    # Check input path
    if not os.path.isdir(input_path) or not os.path.exists(input_path): # {
        exc_value = Exception("Invalid {} path: {}".format(
            input_path_name,
            input_path,
        ))
        sys.exit(exc_value)
    # }

    input_path_version = os.path.join(input_path, "version.txt")
    data = file_open(input_path_version)
    local_version = data.splitlines()[0].decode('utf-8', errors='ignore')
    print("Local version:  {}".format(local_version))

    input_url_o = urllib.parse.urlparse(input_url)
    url_path_version = url_unparse(
        input_url_o.scheme,
        input_url_o.netloc,
        os.path.join(input_url_o.path, "version.txt"),
    )
    data = url_read(url_path_version, False)
    server_version = data.splitlines()[0].decode('utf-8', errors='ignore')
    print("Server version: {}\n".format(server_version))

    update_required = (local_version != server_version)
    if update_required: print("{} update is required\n".format(input_path_name))
    else: print("{} is up to date (banner files are still checked)\n".format(input_path_name))

    filelist = retrieve_file(input_url_o, "filelist.txt", input_path, iscompressed=False, verbose=False)
    filelist = filelist.decode('utf-8', errors='ignore').splitlines()
    index = 0
    progress = 0
    progress_old = -1
    for line in filelist: # { for each line of filelist
        file_path, file_md5_checksum = line.split(", ")
        if "version.txt" in file_path: index += 1; continue # Skip troll
        if "filelist.txt" in file_path: index += 1; continue # Skip troll
        if not update_required and "std_banners" not in file_path: index += 1; continue

        local_file_path = os.path.normpath(os.path.join(input_path, file_path))

        local_file_md5_checksum = None
        if os.path.exists(local_file_path): # {
            local_file_md5_checksum = compute_file_md5_checksum(local_file_path)
        # }
        if local_file_md5_checksum != file_md5_checksum: # {
            # print("{}: {}".format(file_path.ljust(50), file_md5_checksum))
            # print("{}: {}".format("Local".ljust(50), local_file_md5_checksum))
            retrieve_file(input_url_o, file_path, input_path, iscompressed=True, verbose=True)
        # }
        index += 1
        progress = index / len(filelist)
        if ((progress - progress_old) >= 0.1) or progress == 1: # {
            msg = "Updating files... {}: {} / {}".format(
                "{:0.0%}".format(progress).rjust(4),
                index,
                len(filelist),
            )
            print(msg)
            progress_old = progress
        # } if
    # } for each line of filelist
    retrieve_file(input_url_o, "version.txt", input_path, iscompressed=False, verbose=False)
    retrieve_file(input_url_o, "filelist.txt", input_path, iscompressed=False, verbose=False)
# } __main__
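
# Usage sketch (assuming the script is saved as crpg_update.py; the path is a placeholder,
# and input_url above was removed from the paste, so it must be set before running):
#
#   python crpg_update.py /path/to/cRPG
#
# The script compares the local version.txt against the server copy, then re-downloads
# every file whose MD5 checksum in filelist.txt no longer matches the local file.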