Advertisement
creamygoat

lookfat.py FAT12/16 volume inspector

Mar 29th, 2018
704
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 62.42 KB | None | 0 0
  1. #!/usr/bin/python3
  2.  
  3. """Examines a FAT12 or FAT16 volume image.
  4.  
  5. NAME
  6.  lookfat
  7.  
  8. SYNOPSIS
  9. lookfat [-a ADDRS] [-b] [-c] [-B] [-d] [-e] [-E] [-f] [-h] [-i] [-I ID]
  10.        [-j] [-l] [-m] [-p PATH] [-R] [-r] [-s] [-t N] [-u] [-v] [-V]
  11.        [-w W] [-x] [-z] VOL-IMAGE
  12.  
  13. DESCRIPTION
  14.  This script was written specifically to aid the diagnosis of a corrupted
  15.  filesystem on a DS203 oscilloscope, hardware version 2.72, though it
  16.  may be useful for other small volumes.
  17.  
  18. Author:
  19.  Daniel Neville (Blancmange), creamygoat@gmail.com
  20.  
  21. Copyright:
  22.  None
  23.  
  24. Licence:
  25.  Public domain
  26.  
  27. """
  28.  
  29. #------------------------------------------------------------------------------
  30. # Imports
  31. #------------------------------------------------------------------------------
  32.  
  33.  
  34. import sys
  35. import traceback
  36. import os
  37. import pipes
  38. import argparse
  39. from enum import Enum
  40.  
  41.  
  42. #------------------------------------------------------------------------------
  43. # Constants
  44. #------------------------------------------------------------------------------
  45.  
  46.  
VERSION = '1.0.0.0'

# Bit flags recording where errors lie relative to a directory: in the
# directory's own entries, in its immediate children, or anywhere in the
# subtree below it. Stored in FATDir.error_depth_flags.
DIR_ERRORS_OWN = 0x01
DIR_ERRORS_CHILDREN = 0x02
DIR_ERRORS_DESCENDENTS = 0x04

# Bit flags selecting what a directory listing displays.
DIR_SHOW_FLAT = 0x01
DIR_SHOW_HEX = 0x02
DIR_SHOW_SPANS = 0x04
DIR_SHOW_JUNK = 0x08
DIR_SHOW_CLAIMS = 0x10
DIR_SHOW_ERRORS = 0x20
DIR_SHOW_ONLY_ERRORS = 0x40
  60.  
  61.  
  62. #------------------------------------------------------------------------------
  63. # Exceptions
  64. #------------------------------------------------------------------------------
  65.  
  66.  
  67. class Error (Exception):
  68.   pass
  69.  
  70. class ArgError (Error):
  71.   pass
  72.  
  73. class FileError(Error):
  74.   pass
  75.  
  76. class DataError(Error):
  77.   pass
  78.  
  79. class CmdError(Error):
  80.   pass
  81.  
  82.  
  83. #------------------------------------------------------------------------------
  84. # Classes
  85. #------------------------------------------------------------------------------
  86.  
  87.  
class FATRawFile (object):
  """Common record for a file or directory found in a FAT volume.

  Holds the fields shared by FATFile and FATDir, including the chain of
  data clusters collected from the FAT and any errors found while
  walking the volume.
  """

  def __init__(self):
    self.is_dir = False            # True only for FATDir instances
    self.name = ""                 # DOS 8.3 name, "BASE.EXT" form
    self.pathname = ""             # Full path from the root, "/"-separated
    self.attributes = 0x00         # Directory entry attribute byte (00ADVSHR)
    self.size = 0                  # Size in bytes; for dirs, clusters * cluster size
    self.valid = False             # True when the cluster chain checked out
    self.clusters = []             # Data cluster indices, in chain order
    self.collision_cluster = None  # First cluster found already claimed, if any
    self.secondary_claims = []     # (own_offset, claimant_id, claimant_offset)
    self.alloc_id = 0              # Allocation ID assigned during the FAT walk
    self.owner_id = None           # Allocation ID of the containing directory
    self.errors = []               # Human-readable error strings
  103.  
  104.  
  105. #------------------------------------------------------------------------------
  106.  
  107.  
class FATFile (FATRawFile):
  """A regular file found in a FAT volume."""

  def __init__(self):
    super().__init__()
  112.  
  113.  
  114. #------------------------------------------------------------------------------
  115.  
  116.  
  117. class FATDir (FATRawFile):
  118.  
  119.   #----------------------------------------------------------------------------
  120.  
  121.   def __init__(self):
  122.     super().__init__()
  123.     self.is_dir = True
  124.     self.parent_cluster = 0
  125.     self.subdirs = []
  126.     self.files = []
  127.     self.volume_name = ""
  128.     self.junk_entries = []
  129.     self.error_depth_flags = 0
  130.     self.last_alloc_id = 0
  131.  
  132.   #----------------------------------------------------------------------------
  133.  
  134.   def find(self, pathname):
  135.  
  136.     result = None
  137.  
  138.     S = "".join((ch for ch in pathname))
  139.  
  140.     if S == "":
  141.  
  142.       result = self
  143.  
  144.     else:
  145.  
  146.       # First, set the current directory being searched.
  147.       cdir = self
  148.  
  149.       # Note that the path fragments may include an empty string
  150.       # at the end, indicating the use of an optional slash, which
  151.       # is sometimes used to specifically indicate a directory in
  152.       # some contexts.
  153.       P = S.split("/")
  154.  
  155.       while len(P) > 1:
  156.         # The remaining path definitely indicates that
  157.         # a subdirectory must be searched.
  158.  
  159.         found = False
  160.         for f in cdir.subdirs:
  161.           if f.name == P[0]:
  162.             found = True
  163.             P = P[1:]
  164.             if P[0] == "":
  165.               # A trailing slash was used.
  166.               # f is the directory successfully found
  167.               # cdir is its container
  168.               result = f
  169.               P = []
  170.             cdir = f
  171.             break
  172.         if not found:
  173.           cdir = None
  174.           break
  175.  
  176.       if result is None and cdir is not None:
  177.         # The scope of the search is now in a single directory.
  178.         # There is no trailing slash in the search string, so the
  179.         # user could be trying to refer to a directory or a file.
  180.  
  181.         # First, look at the files.
  182.         for f in cdir.files:
  183.           if f.name == P[0]:
  184.             result = f
  185.             break
  186.  
  187.         if result is None:
  188.           # Now search for subdirectories, perhaps one which
  189.           # (illegally) shares a name with a file.
  190.           for f in cdir.subdirs:
  191.             if f.name == P[0]:
  192.               result = f
  193.               break
  194.  
  195.     return result
  196.  
  197.   #----------------------------------------------------------------------------
  198.  
  199.  
  200. #------------------------------------------------------------------------------
  201. # Span functions
  202. #------------------------------------------------------------------------------
  203.  
  204.  
  205. def merge_span(spans, new_span):
  206.  
  207.   low_spans = []
  208.   high_spans = []
  209.   merged_span = new_span
  210.  
  211.   for span in spans:
  212.     if span[1] < new_span[0]:
  213.       low_spans.append(span)
  214.     elif span[0] > new_span[1]:
  215.       high_spans.append(span)
  216.     else:
  217.       merged_span = (
  218.         min(span[0], merged_span[0]),
  219.         max(span[1], merged_span[1]),
  220.       )
  221.  
  222.   return tuple(low_spans) + tuple((merged_span,)) + tuple(high_spans)
  223.  
  224.  
  225. #------------------------------------------------------------------------------
  226. # Helper functions
  227. #------------------------------------------------------------------------------
  228.  
  229.  
  230. def div_ru(dividend, divisor):
  231.  
  232.   return -((-dividend) // divisor)
  233.  
  234.  
  235. #------------------------------------------------------------------------------
  236.  
  237.  
  238. def le_uint(le_bytes, offset=None, length=None):
  239.  
  240.   ix = 0 if offset is None else offset
  241.   rem_length = len(le_bytes) - ix if length is None else length
  242.   result = 0
  243.   shift = 0
  244.  
  245.   if rem_length > 256:
  246.     raise Error("Cannot handle integers longer than 256 bytes!")
  247.  
  248.   while rem_length > 0:
  249.     result += le_bytes[ix] << shift
  250.     ix += 1
  251.     shift += 8
  252.     rem_length -= 1
  253.  
  254.   return result
  255.  
  256.  
  257. #------------------------------------------------------------------------------
  258. # FATVolumeMetrics
  259. #------------------------------------------------------------------------------
  260.  
  261.  
class FATVolumeMetrics (object):
  """Geometry and format metrics decoded from a FAT boot sector.

  All offsets are byte offsets from the beginning of the volume. The
  object is constructed even from implausible input; the "hopeless"
  attribute records whether the boot sector looked at all valid.
  """

  #----------------------------------------------------------------------------

  def __init__(self, boot_sector):
    # boot_sector: the raw bytes of the volume's first sector (at least
    # 0x200 bytes are read).

    bs = boot_sector

    # Raw BIOS Parameter Block and extended boot record fields.
    self.bs_sig = bs[0x1FE: 0x200]
    # True when the sector begins with a plausible x86 jump instruction.
    # (Attribute name is "has_kump" as written; presumably "has_jump" was
    # intended — kept as-is since external code may reference it.)
    self.has_kump = bs[0x000] == 0xE9 or (bs[0] == 0xEB and bs[2] == 0x90)
    self.oem_name = bs[0x003 : 0x00B].decode("iso8859_1").rstrip()
    self.num_reserved_sectors = le_uint(bs, 0x00E, 2)
    ns_16 = le_uint(bs, 0x013, 2)
    self.media_descriptor = bs[0x015]
    self.num_fats = bs[0x010]
    self.num_root_dir_entries = le_uint(bs, 0x011, 2)
    self.sector_size = le_uint(bs, 0x00B, 2)
    self.sectors_per_fat = le_uint(bs, 0x016, 2)
    self.sectors_per_cluster = bs[0x00D]
    self.sectors_per_track = le_uint(bs, 0x018, 2)
    self.num_heads = le_uint(bs, 0x01A, 2)
    self.num_hidden_sectors = le_uint(bs, 0x01C, 4)
    ns_32 = le_uint(bs, 0x020, 4)
    self.drive_number = bs[0x024]
    self.flags = bs[0x025]
    self.ext_boot_sig = bs[0x026]
    self.serial_number = le_uint(bs, 0x027, 4)
    self.partition_label = bs[0x02B : 0x036].decode("iso8859_1").rstrip()
    self.fs_name = bs[0x036 : 0x03C].decode("iso8859_1").rstrip()

    spc = self.sectors_per_cluster

    # The total sector count may be stored in the 16-bit field, the
    # 32-bit field or (unusually) both.
    if ns_16 != 0:
      if ns_32 != 0:
        self.num_sectors = min(ns_16, ns_32)
      else:
        self.num_sectors = ns_16
    else:
      self.num_sectors = ns_32

    # Volume layout (in sectors):
    # [reserved][FATs][root directory][data area][unreachable slack]
    self.root_dir_size_in_sectors = div_ru(
      self.num_root_dir_entries * 32, self.sector_size
    )
    fats_so = self.num_reserved_sectors
    root_so = fats_so + self.num_fats * self.sectors_per_fat
    data_so = root_so + self.root_dir_size_in_sectors
    self.num_clusters = (self.num_sectors - data_so) // spc
    data_end_so = data_so + self.num_clusters * spc

    # The same layout expressed as byte offsets.
    self.fat_offset = fats_so * self.sector_size
    self.fat_size = self.sectors_per_fat * self.sector_size
    self.root_dir_offset = root_so * self.sector_size
    self.da_offset = data_so * self.sector_size
    self.end_offset = self.num_sectors * self.sector_size
    self.cluster_size = spc * self.sector_size
    self.num_unreachable_sectors = self.num_sectors - data_end_so

    # Data clusters are numbered from 2.
    self.min_c = 0x002
    self.max_c = self.min_c + self.num_clusters - 1

    # c_mask: the significant bits of a FAT entry.
    # cm_base: the lowest entry value with special marker meaning.
    # cme_nibble_values_set: bitmask over the low nibble selecting which
    #   high values mark the last cluster in a chain.
    # cfw: field width in hex digits for printing cluster values.
    if self.fs_name == "FAT12":
      self.c_mask = 0xFFF
      self.cm_base = 0xFF0
      self.cme_nibble_values_set = 0xFF01
      self.cfw = 3
    elif self.fs_name == "FAT16":
      self.c_mask = 0xFFFF
      self.cm_base = 0xFFF0
      self.cme_nibble_values_set = 0xFF00
      self.cfw = 4
    else:
      # Anything else is assumed to use FAT32 field sizes.
      self.c_mask = 0x0FFFFFFF
      self.cm_base = 0x0FFFFFF0
      self.cme_nibble_values_set = 0xFF00
      self.cfw = 8

    # Address field width: an even number of hex digits, at least six,
    # wide enough for the last addressable byte offset.
    self.afw = max(6, 2 * div_ru(len("{:x}".format(self.end_offset - 1)), 2))

    valid_fs_names = ("FAT12", "FAT16", "FAT32")

    is_vaguely_valid = (
      self.fs_name in valid_fs_names
      and self.ext_boot_sig == 0x29
      and self.num_fats > 0
      and self.num_root_dir_entries > 0
      and self.da_offset + self.cluster_size <= self.end_offset
    )

    self.hopeless = not is_vaguely_valid

  #----------------------------------------------------------------------------

  def dc_vol_offset(self, cluster):
    """Return the byte offset in the volume of a data cluster.

    Raises Error when the cluster index is outside the data area.
    """

    if self.min_c <= cluster <= self.max_c:
      return self.da_offset + (cluster - self.min_c) * self.cluster_size
    else:
      raise Error("Data cluster index out of bounds for FAT.")

  #----------------------------------------------------------------------------

  def cluster_from_offset(self, offset_in_vol):
    """Return the data cluster index containing a byte offset.

    Raises Error when the offset lies outside the data area.
    """

    if self.da_offset <= offset_in_vol < self.end_offset:
      return (
        self.min_c
        + (offset_in_vol - self.da_offset) // self.cluster_size
      )
    else:
      raise Error("Offset in volume out of bounds of data area.")

  #----------------------------------------------------------------------------

  def fmt_c(self, cluster):
    """Format a cluster index as fixed-width uppercase hex."""
    return "{:0{}X}".format(cluster, self.cfw)

  #----------------------------------------------------------------------------

  def fmt_a(self, cluster):
    """Format a byte address as fixed-width uppercase hex."""
    return "{:0{}X}".format(cluster, self.afw)

  #----------------------------------------------------------------------------

  def is_valid_data_cref(self, cref):
    """Return True if cref indexes a cluster within the data area."""
    # No masking is performed. The extra bits in FAT32 which might appear
    # in a FAT entry must be masked out already.
    return self.min_c <= cref <= self.max_c

  #----------------------------------------------------------------------------

  def is_last_in_chain_marker(self, cref):
    """Return True if cref is an end-of-chain marker value."""
    # As a last-in-chain marker appears within a FAT entry and has little
    # meaning outside of a FAT, the masking is applied by this function.
    c = cref & self.c_mask
    return (c >= self.cm_base
        and self.cme_nibble_values_set & (1 << (c & 15)) != 0)

  #----------------------------------------------------------------------------

  def cluster_kind(self, cluster_index, cluster_value):
    """Classify a FAT entry, returning a single display character.

    "S": part of the FAT signature (entries 0 and 1)
    ".": free cluster
    "R": reserved value
    "d": link to another cluster in a chain
    "f": link beyond the end of the data area
    "e": last cluster in a chain
    "B": cluster marked bad
    "?": anything unrecognised
    """
    # So far bits 28-31 in FAT32 cluster values are ignored.

    ch = "?"
    cv = cluster_value & self.c_mask

    if cluster_index == 0:
      # The first entry in the FAT holds the media descriptor
      # in the lower eight bits of the cluster value, corresponding
      # to the very first byte of the FAT.
      #
      # The rest of the bits of the cluster value should be all ones.
      # (Equivalently, the cluster value should be greater than or
      # equal to cm_base, the Cluster Marker Base value.)

      if cv >= self.cm_base and cv & 0xFF == self.media_descriptor:
        ch = "S"

    elif cluster_index == 1:
      # The second entry in the FAT holds an end marker. The entry
      # refers to no valid data region in contrast to normal cluster
      # entries in the FAT that are end markers to indicate that their
      # corresponding region within the data area is the last cluster
      # in a chain. This end marker may as well be regarded a part of
      # the signature.

      if self.is_last_in_chain_marker(cv):
        ch = "S"

    elif self.min_c <= cluster_index <= self.max_c:
      # The cluster index corresponds to a FAT entry which corresponds
      # to a Data Cluster, a region within the data area.
      #
      # Now all tests are performed on the cluster value. For non-terminal
      # data clusters, the cluster value is a reference to another cluster's
      # entry in the FAT. Otherwise the value directly refers to the state
      # of the corresponding data cluster.

      if cv == 0:
        # Free cluster

        ch = "."

      elif cv < self.min_c:
        # Reserved value

        ch = "R"

      elif cv < self.cm_base:
        # Non-terminal entry in a cluster chain

        if cv <= self.max_c:
          # Normal cluster in chain
          ch = "d"
        else:
          # Link to data beyond the end of the data area
          # nevertheless addressable by the FAT.
          ch = "f"

      else:
        # High values with special meaning.

        cv4 = cv & 0xF

        if self.cme_nibble_values_set & (1 << cv4):
          # Last cluster in chain
          ch = "e"
        else:
          if cv4 == 0:
            # Reserved value, except in FAT12, where it may be used
            # as an end marker. (The above test catches that.)
            ch = "R"
          elif cv4 < 7:
            # Reserved value
            ch = "R"
          elif cv4 == 7:
            # The data cluster is marked as bad.
            ch = "B"

    return ch
  481.  
  482.   #----------------------------------------------------------------------------
  483.  
  484.  
  485. #------------------------------------------------------------------------------
  486.  
  487.  
  488. def get_fat(m, fat_img, full_fat=False):
  489.  
  490.   result = []
  491.  
  492.   if full_fat:
  493.     if m.fs_name == "FAT12":
  494.       fat_len = (m.fat_size * 2) // 3
  495.     elif m.fs_name == "FAT16":
  496.       fat_len = (m.fat_size) // 2
  497.     else:
  498.       fat_len = (m.fat_size) // 4
  499.   else:
  500.     fat_len = m.max_c + 1
  501.  
  502.   if m.fs_name == "FAT12":
  503.     num_whole_triple_bytes = fat_len // 2
  504.     for i in range(num_whole_triple_bytes):
  505.       x = le_uint(fat_img, 3 * i, 3)
  506.       result.append(x & 0x000FFF)
  507.       result.append(x >> 12)
  508.     if fat_len & 1:
  509.       x = le_uint(fat_img, 3 * num_whole_triple_bytes, 2) & 0x0FFF
  510.       result.append(x)
  511.   elif m.fs_name == "FAT16":
  512.     for i in range(fat_len):
  513.       result.append(le_uint(fat_img, 2 * i, 2))
  514.   else:
  515.     raise Error('Unsupported Filesystem type, "{}".'.format(m.fs_name))
  516.  
  517.   return result
  518.  
  519.  
  520. #------------------------------------------------------------------------------
  521.  
  522.  
  523. def walk_fat(m, fat, volume_file, part_offset=0):
  524.  
  525.   #----------------------------------------------------------------------------
  526.  
  527.   def walk_recursive(
  528.           fdir,
  529.           dir_img, volume_file, part_offset,
  530.           m, fat, id_map, id_list,
  531.           alloc_id):
  532.  
  533.     valid_dos_chars = (
  534.       "0123456789"
  535.       "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
  536.       " !#$%&'()-@^_`{}~"
  537.     )
  538.  
  539.     fdir.alloc_id = alloc_id
  540.     fdir.last_alloc_id = alloc_id
  541.     next_id = alloc_id + 1
  542.  
  543.     num_entries = len(dir_img) // 32
  544.  
  545.     is_root = len(fdir.clusters) == 0
  546.     volume_name = None
  547.  
  548.     for i in range(num_entries):
  549.  
  550.       entry = dir_img[32 * i : 32 * (i + 1)]
  551.       name_field = entry[:0x0B]
  552.       status = name_field[0]
  553.       attributes = entry[0x0B] # 00ADVSHR
  554.       start_c = le_uint(entry, 0x1A, 2)
  555.       file_size = le_uint(entry, 0x1C, 4)
  556.  
  557.       if status in [0x05, 0xE5]:
  558.         status ^= 0xE0
  559.  
  560.       if status == 0:
  561.         # A zero is used to terminator a directory.
  562.  
  563.         break
  564.  
  565.       elif status == 0x2E:
  566.         # Link to current or parent directory (or root)
  567.  
  568.         name = ".." if entry[1] == 0x2E else "."
  569.  
  570.         if is_root:
  571.           fdir.errors.append(
  572.               'Spurious "{}" (Cx{}) found in root.'.format(
  573.               name, m.fmt_c(start_c)))
  574.         else:
  575.           if entry[1] == 0x2E:
  576.             # ".." (link to parent)
  577.             fdir.parent_cluster = start_c
  578.             # Check parentage later.
  579.           else:
  580.             # "." (link to self)
  581.             if start_c != fdir.clusters[0]:
  582.               fdir.errors.append('Bad ".": (Cx{})'.format(m.fmt_c(start_c)))
  583.  
  584.       else:
  585.         # The name field is now worth processing.
  586.  
  587.         S = []
  588.         for x in name_field:
  589.           ch = chr(x) if chr(x) in valid_dos_chars else "?"
  590.           S.append(ch)
  591.         S = "".join(S)
  592.         base_name = S[:8].rstrip()
  593.         ext_name = S[8 : 11].rstrip()
  594.         name = base_name if ext_name == "" else base_name + "." + ext_name
  595.  
  596.         if attributes == 0x0F != 0:
  597.            # VFAT long file name fragment
  598.            # The true attributes are stored in the preceding
  599.            # DOS filename entry for the same file.
  600.  
  601.            fdir.junk_entries.append(name)
  602.  
  603.         elif attributes & 0x08 != 0:
  604.           # Volume Name
  605.  
  606.           vn = S.rstrip()
  607.  
  608.           if is_root:
  609.             if volume_name is None:
  610.               volume_name = vn
  611.               fdir.volume_name = volume_name
  612.             else:
  613.               fdir.errors.append('Redundant volume name: "{}"'.format(vn))
  614.           else:
  615.             fdir.errors.append('Spurious volume name: "{}"'.format(vn))
  616.  
  617.         elif status == 0x05:
  618.            # Deleted file
  619.  
  620.            # In really old versions of DOS, deleted files still reserved
  621.            # space on volume until a garbage collector program was issued.
  622.            # In later versions, deleted files had their cluster chains in
  623.            # the FAT zeroed, which made recovering a deleted file's data
  624.            # from the data clusters a hit-and-miss affair.
  625.            fdir.junk_entries.append(name)
  626.  
  627.         elif attributes & 0x10 != 0 and start_c == 0:
  628.            # A subdirectory with no starting cluster is invalid since it
  629.            # cannot store entries even for "." and "..".
  630.  
  631.            fdir.junk_entries.append(name)
  632.  
  633.         else:
  634.  
  635.           if attributes & 0x10 != 0:
  636.             f = FATDir()
  637.             is_dir = True
  638.             f.name = name
  639.             f.pathname = fdir.pathname + f.name + "/"
  640.           else:
  641.             f = FATFile()
  642.             f.size = file_size
  643.             is_dir = False
  644.             f.name = name
  645.             f.pathname = fdir.pathname + f.name
  646.  
  647.           chain_valid = True
  648.  
  649.           f.attributes = attributes
  650.           f.owner_id = fdir.alloc_id
  651.           f.alloc_id = next_id
  652.           fdir.last_alloc_id = f.alloc_id
  653.  
  654.           id_list.append(f)
  655.           next_id += 1
  656.  
  657.           # In a directory entry, a start cluster references of zero
  658.           # implies a chain length of zero. A zero (illegal) and some
  659.           # values for next-cluster references appearing within a
  660.           # cluster chain acts as a terminator, causing the cluster
  661.           # in which that zero appears to be excluded. In contrast,
  662.           # a last-in-chain marker implies that the cluster is included
  663.           # in the chain but no more clusters follow.
  664.  
  665.           if is_dir:
  666.             if file_size != 0:
  667.               f.errors.append(
  668.                   ("File size for directory is {} "
  669.                   + "but should be zero.").format(FileSize))
  670.               if not m.is_valid_data_cref(start_c):
  671.                 f.errors.append(
  672.                     "Bad subdirectory start cluster: Cx{}".format(
  673.                     m.fmt_c(start_c)))
  674.                 chain_valid = False
  675.           else:
  676.             if file_size != 0:
  677.               if not m.is_valid_data_cref(start_c):
  678.                 f.errors.append(
  679.                     "Bad file start cluster: Cx{}".format(m.fmt_c(start_c)))
  680.                 chain_valid = False
  681.             else:
  682.               if start_c != 0:
  683.                 f.errors.append(
  684.                     "Empty file has non-zero start cluster: Cx{}.".format(
  685.                     m.fmt_c(start_c)))
  686.                 chain_valid = False
  687.  
  688.           rem_file_size = file_size
  689.  
  690.           if chain_valid and (is_dir or rem_file_size > 0):
  691.  
  692.             cluster = 0
  693.             next_c = start_c
  694.  
  695.             while True:
  696.  
  697.               if m.is_valid_data_cref(next_c):
  698.  
  699.                 if not is_dir:
  700.  
  701.                   if rem_file_size > 0:
  702.                     rem_file_size = max(0, rem_file_size - m.cluster_size)
  703.                   else:
  704.                     chain_valid = False
  705.                     f.errors.append(
  706.                         "Last file cluster at Cx{} links to Cx{}.".format(
  707.                         m.fmt_c(cluster), m.fmt_c(next_c)))
  708.                     break
  709.  
  710.                 next_c_id = id_map[next_c]
  711.  
  712.                 if next_c_id == 0:
  713.  
  714.                   cluster = next_c
  715.                   next_c = fat[cluster] & m.c_mask
  716.                   id_map[cluster] = f.alloc_id
  717.                   f.clusters.append(cluster)
  718.  
  719.                 else:
  720.  
  721.                   chain_valid = False
  722.                   f.collision_cluster = next_c
  723.  
  724.                   if cluster > 0:
  725.                     src_str = "Cluster Cx{}".format(m.fmt_c(cluster))
  726.                   else:
  727.                     src_str = "Start"
  728.  
  729.                   owner_id = next_c_id
  730.                   looped = owner_id == f.alloc_id
  731.  
  732.                   if not looped:
  733.                     # Each secondary claim record consists of:
  734.                     # * The file offset (in clusters) where the claim occurs;
  735.                     # * The allocation ID of the claimant and
  736.                     # * The offset (in clusters) within the claimant.
  737.                     fco = len(f.clusters)
  738.                     owner_f = id_list[owner_id]
  739.                     owner_fco = owner_f.clusters.index(f.collision_cluster)
  740.                     owner_f.secondary_claims.append(
  741.                       (owner_fco, f.alloc_id, fco)
  742.                     )
  743.  
  744.                   if looped:
  745.                     em = "Loop: {} -> Cx{}".format(src_str, m.fmt_c(next_c))
  746.                   else:
  747.                     em = "Collision: {} (ID {}) -> Cx{} (ID {})".format(
  748.                         src_str, f.alloc_id, m.fmt_c(next_c), next_c_id)
  749.  
  750.                   f.errors.append(em)
  751.  
  752.                   break
  753.  
  754.               elif m.is_last_in_chain_marker(next_c):
  755.  
  756.                   break
  757.  
  758.               else:
  759.  
  760.                 chain_valid = False
  761.  
  762.                 f.errors.append(
  763.                     "Bad cluster link: Cx{} -> Cx{}".format(
  764.                     m.fmt_c(cluster), m.fmt_c(next_c)))
  765.  
  766.                 break
  767.  
  768.           if is_dir:
  769.  
  770.             # Even though the directory entry should always record a
  771.             # file size of zero for a subdirectory, the file size of
  772.             # a subdirectory is determined by the number of clusters
  773.             # in its chain.
  774.             f.size = len(f.clusters) * m.cluster_size
  775.  
  776.             if chain_valid:
  777.  
  778.               if len(f.clusters) < 1:
  779.                 f.errors.append("Subdirectory has no (valid) clusters.")
  780.  
  781.               chain_valid = False
  782.  
  783.             f.valid = chain_valid
  784.  
  785.             subdir_img = bytearray()
  786.  
  787.             for c in f.clusters:
  788.  
  789.               offset_to_dc = m.dc_vol_offset(c)
  790.               volume_file.seek(part_offset + offset_to_dc)
  791.               dir_frag = volume_file.read(m.cluster_size)
  792.               subdir_img += dir_frag
  793.  
  794.             walk_recursive(
  795.               f,
  796.               subdir_img, volume_file, part_offset,
  797.               m, fat, id_map, id_list,
  798.               f.alloc_id
  799.             )
  800.  
  801.             expected_parent = (fdir.clusters[:1] + [0])[0]
  802.  
  803.             if f.parent_cluster != expected_parent:
  804.               f.errors.append(
  805.                   "Bad parent reference: Cx{}".format(
  806.                   m.fmt_c(f.parent_cluster)))
  807.  
  808.             f.parent_cluster = expected_parent
  809.             fdir.last_alloc_id = f.last_alloc_id
  810.             next_id = fdir.last_alloc_id + 1
  811.             fdir.subdirs.append(f)
  812.             if f.error_depth_flags & DIR_ERRORS_OWN != 0:
  813.               fdir.error_depth_flags |= DIR_ERRORS_CHILDREN
  814.             if f.error_depth_flags != 0:
  815.               fdir.error_depth_flags |= DIR_ERRORS_DESCENDENTS
  816.  
  817.           else:
  818.  
  819.             if chain_valid:
  820.  
  821.               expected_length_in_clusters = div_ru(file_size, m.cluster_size)
  822.               diff = len(f.clusters) - expected_length_in_clusters
  823.  
  824.               if diff != 0:
  825.  
  826.                 chain_valid = False
  827.  
  828.                 if diff < 0:
  829.                   f.errors.append(
  830.                       "Truncated: {} clusters short, {} bytes lost.".format(
  831.                       -diff, rem_file_size))
  832.                 else:
  833.                   # This condition cannot happen if the check
  834.                   # with the remaining file size is working.
  835.                   f.errors.append(
  836.                       "Chain is too long by {} clusters.".format(diff))
  837.  
  838.             if not chain_valid:
  839.               f.errors.append(
  840.                   ("{} clusters collected from " +
  841.                   "invalid chain.").format(len(f.clusters)))
  842.  
  843.             f.valid = chain_valid
  844.             fdir.files.append(f)
  845.             if len(f.errors) > 0:
  846.               fdir.error_depth_flags |= (
  847.                   DIR_ERRORS_CHILDREN | DIR_ERRORS_DESCENDENTS)
  848.  
  849.     if len(fdir.errors) > 0:
  850.       fdir.error_depth_flags |= DIR_ERRORS_OWN
  851.  
  852.     return fdir
  853.  
  854.   #----------------------------------------------------------------------------
  855.  
  856.   volume_file.seek(part_offset + m.root_dir_offset)
  857.   root_dir_img = volume_file.read(m.num_root_dir_entries * 32)
  858.  
  859.   root_dir = FATDir()
  860.   id_map = [0] * len(fat)
  861.   id_list = [root_dir]
  862.  
  863.   walk_recursive(
  864.       root_dir,
  865.       root_dir_img, volume_file, part_offset,
  866.       m, fat, id_map, id_list,
  867.       0
  868.   )
  869.  
  870.   return (root_dir, id_map, id_list)
  871.  
  872.  
  873. #------------------------------------------------------------------------------
  874.  
  875.  
  876. def all_dirs(fdir):
  877.  
  878.   yield fdir
  879.  
  880.   for subdir in fdir.subdirs:
  881.     yield from all_dirs(subdir)
  882.  
  883.  
  884. #------------------------------------------------------------------------------
  885.  
  886.  
  887. def all_dirs_and_files(fdir):
  888.  
  889.   for fdir in all_dirs(fdir):
  890.     for f in [fdir] + fdir.files:
  891.       yield f
  892.  
  893.  
  894. #------------------------------------------------------------------------------
  895.  
  896.  
  897. def secondary_claims_for_file(m, f, id_map, id_list):
  898.  
  899.   for sc in f.secondary_claims:
  900.  
  901.     yield sc
  902.  
  903.     fso, claimant_id, claimant_fco = sc
  904.     claimant_f = id_list[claimant_id]
  905.  
  906.     for claimant_sc in secondary_claims_for_file(
  907.         m, claimant_f, id_map, id_list):
  908.  
  909.       r_fco, r_id, r_fco = claimant_sc
  910.       r_f = id_list[r_id]
  911.       x = r_fco + (claimant_fco - r_fco)
  912.  
  913.       # So far, (fco, r_id, x) would be the composite remote claim
  914.       # for this file. First, we should check the remote file's length.
  915.       # A file does not claim a cluster just because its cluster chain
  916.       # ultimately leads to that cluster. Reading a file always requires
  917.       # knowledge of the file size in bytes.
  918.  
  919.       rclen = div_ru(r_f.file_size, m.cluster_size)
  920.  
  921.       if x < rclen:
  922.         yield (fco, r_id, x)
  923.  
  924.  
  925. #------------------------------------------------------------------------------
  926.  
  927.  
  928. def analyse_addr_in_volume(
  929.         m, addr, fat, id_map, id_list):
  930.  
  931.   da_end = m.da_offset + m.num_clusters * m.cluster_size
  932.  
  933.   cluster_ix = None
  934.   desc = "(Nowhere)"
  935.   f = None
  936.   c = None
  937.   offset = None
  938.  
  939.   if not m.da_offset <= addr < da_end:
  940.     # Annoying cases
  941.  
  942.     if addr < 0:
  943.       # Negative address
  944.  
  945.       desc = "Before volume"
  946.       offset = addr
  947.  
  948.     elif addr < m.fat_offset:
  949.       # Boot sector
  950.  
  951.       offset = addr
  952.       desc = "Boot sector".format(offset)
  953.  
  954.     elif addr < m.root_dir_offset:
  955.       # FATs
  956.  
  957.       ix = (addr - m.fat_offset) // m.fat_size
  958.       offset = addr - m.fat_size * ix
  959.       desc = "FAT #{}".format(ix)
  960.  
  961.     elif addr < m.da_offset:
  962.  
  963.       a = addr - m.root_dir_offset
  964.       ix = a // 32
  965.  
  966.       if ix < m.num_root_dir_entries:
  967.         f = id_list[0]
  968.         offset = a & 31
  969.         desc = "Root directory item {}".format(ix)
  970.       else:
  971.         offset = a
  972.         desc = "Root directory"
  973.  
  974.     else:
  975.  
  976.       if addr == da_end:
  977.         desc = "End of data area"
  978.         offset == None
  979.       else:
  980.         desc = "Beyond end of data area"
  981.         offset = addr - da_end
  982.  
  983.   else:
  984.     # The interesting case in which the supplied address is
  985.     # within the data area.
  986.  
  987.     c = ((addr - m.da_offset) // m.cluster_size) + m.min_c
  988.     c_addr = m.da_offset + (c - m.min_c) * m.cluster_size
  989.  
  990.     item_index = c
  991.     offset = addr - c_addr
  992.  
  993.     if c > m.max_c:
  994.  
  995.       desc = "Unreachable cluster"
  996.  
  997.     else:
  998.  
  999.       aid = id_map[c]
  1000.  
  1001.       if aid == 0:
  1002.         # This cluster is not referenced by the directory tree.
  1003.         # Not properly, at least.
  1004.  
  1005.         k = m.cluster_kind(c, fat[c])
  1006.  
  1007.         if k == ".":
  1008.           desc = "Free cluster"
  1009.         elif k in "def":
  1010.           desc = "Orphan cluster"
  1011.         elif k == "B":
  1012.           desc = "Bad cluster"
  1013.         else:
  1014.           desc = "Unknown cluster"
  1015.  
  1016.       else:
  1017.         # We have a properly owned cluster.
  1018.  
  1019.         f = id_list[aid]
  1020.         fcn = f.clusters.index(c)  # File cluster number
  1021.         offset = fcn * m.cluster_size + offset
  1022.         desc = f.pathname
  1023.  
  1024.   return (c, f, offset, desc)
  1025.  
  1026.  
  1027. #------------------------------------------------------------------------------
  1028.  
  1029.  
  1030. def get_fat_usage(m, fat, id_map=None):
  1031.  
  1032.   num_used = 0
  1033.   num_free = 0
  1034.   num_reserved = 0
  1035.   num_orphaned = 0
  1036.   num_bad = 0
  1037.   num_invalid_ce = 0
  1038.  
  1039.   for c in range(m.min_c, m.max_c + 1):
  1040.     ch = m.cluster_kind(c, fat[c])
  1041.     if ch == ".":
  1042.       num_free += 1
  1043.     elif ch in "def":
  1044.       if ch =="f":
  1045.         num_invalid_ce += 1
  1046.       if id_map is not None and id_map[c] > 0:
  1047.         num_used += 1
  1048.       else:
  1049.         num_orphaned += 1
  1050.     elif ch in ["B"]:
  1051.       num_bad += 1
  1052.     elif ch == "R":
  1053.       num_reserved += 1
  1054.  
  1055.   result = {
  1056.     "Total": m.num_clusters,
  1057.     "Used": num_used,
  1058.     "Free": num_free,
  1059.     "Reserved": num_reserved,
  1060.     "Bad": num_bad,
  1061.     "Orphaned": num_orphaned,
  1062.     "InvalidCE": num_invalid_ce
  1063.   }
  1064.  
  1065.   return result
  1066.  
  1067.  
  1068. #------------------------------------------------------------------------------
  1069. # Report generating functions
  1070. #------------------------------------------------------------------------------
  1071.  
  1072.  
  1073. def metrics_report(m):
  1074.  
  1075.   yield 'Size: {} sectors ({} bytes)'.format(
  1076.       m.num_sectors, m.num_sectors * m.sector_size)
  1077.   yield 'OEM: "{}"'.format(m.oem_name)
  1078.   yield 'Partition Label: "{}"'.format(m.partition_label)
  1079.   yield 'Media Descriptor: 0x{:02X}'.format(m.media_descriptor)
  1080.   yield 'EBPB Signature: 0x{:02X}'.format(m.ext_boot_sig)
  1081.   yield 'Serial Number: {:04X}-{:04X}'.format(
  1082.       m.serial_number >> 16, m.serial_number & 0xFFFF)
  1083.   yield 'Filesystem: "{}"'.format(m.fs_name)
  1084.   yield 'Sector Size: {} bytes'.format(m.sector_size)
  1085.   yield 'Cluster Size: {} sectors'.format(m.sectors_per_cluster)
  1086.   yield 'FAT Size: {} bytes'.format(m.sector_size)
  1087.   yield 'Number of FATs: {}, each {} sectors long'.format(
  1088.       m.num_fats, m.sectors_per_fat)
  1089.   yield 'Flags: 0b{0:08b} (0x{0:02X})'.format(m.flags)
  1090.   yield 'Data Area: {} clusters ({} bytes)'.format(
  1091.       m.num_clusters, m.num_clusters * m.cluster_size)
  1092.   yield 'Unreachable Sectors at End of Data Area: {}'.format(
  1093.       m.num_unreachable_sectors)
  1094.  
  1095.   if m.num_fats > 0:
  1096.     yield "FAT #0 (default): 0x{}".format(m.fmt_a(m.fat_offset))
  1097.   for i in range(1, m.num_fats):
  1098.     yield "Alternate FAT #{}: 0x{}".format(
  1099.         i, m.fmt_a(m.fat_offset + i * m.fat_size))
  1100.  
  1101.   yield "Valid Cluster Entry Range: Cx{}..Cx{}".format(
  1102.       m.fmt_c(m.min_c), m.fmt_c(m.max_c))
  1103.   yield "Root Dir:  0x{}".format(m.fmt_a(m.root_dir_offset))
  1104.   yield "Data Area: 0x{}".format(m.fmt_a(m.da_offset))
  1105.   yield "Data end:  0x{}".format(m.fmt_a(m.end_offset))
  1106.  
  1107.  
  1108. #------------------------------------------------------------------------------
  1109.  
  1110.  
  1111. def spans_report(m, clusters, file_size, collision_cluster):
  1112.  
  1113.   def cluster_spans(clusters):
  1114.     first_c = None
  1115.     last_c = None
  1116.     cix = 0
  1117.     while cix < num_whole_clusters:
  1118.       c = clusters[cix]
  1119.       if last_c is None:
  1120.         first_c = c
  1121.       elif c != last_c + 1:
  1122.         yield (first_c, last_c)
  1123.         first_c = c
  1124.       last_c = c
  1125.       cix += 1
  1126.     if last_c is not None:
  1127.       yield (first_c, last_c)
  1128.  
  1129.   num_whole_clusters = min(len(clusters), file_size // m.cluster_size)
  1130.   rem_bytes = file_size - num_whole_clusters * m.cluster_size
  1131.  
  1132.   cr_fw = 6 + 2 * m.cfw
  1133.   file_offset = 0
  1134.  
  1135.   for first, last in cluster_spans(clusters[:num_whole_clusters]):
  1136.  
  1137.     span_size = (last + 1 - first) * m.cluster_size
  1138.     vol_offset = m.dc_vol_offset(first)
  1139.  
  1140.     fo_str = m.fmt_a(file_offset)
  1141.     cr_str = "Cx" + m.fmt_c(first)
  1142.     if first != last:
  1143.       cr_str += "..Cx" + m.fmt_c(last)
  1144.     vr_str = "{}:{}".format(
  1145.         m.fmt_a(vol_offset),
  1146.         m.fmt_a(vol_offset + span_size))
  1147.  
  1148.     yield "{} {:{}} {}".format(fo_str, cr_str, cr_fw, vr_str)
  1149.     file_offset += span_size
  1150.  
  1151.   for c in clusters[num_whole_clusters:]:
  1152.  
  1153.     span_size = rem_bytes
  1154.     vol_offset = m.dc_vol_offset(c)
  1155.  
  1156.     fo_str = m.fmt_a(file_offset)
  1157.     cr_str = "Cx{} (part)".format(m.fmt_c(c))
  1158.     vr_str = "{}:{}".format(
  1159.         m.fmt_a(vol_offset),
  1160.         m.fmt_a(vol_offset + span_size))
  1161.  
  1162.     yield "{} {:{}} {}".format(fo_str, cr_str, cr_fw, vr_str)
  1163.     file_offset += span_size
  1164.  
  1165.   fo_str = m.fmt_a(file_offset)
  1166.  
  1167.   if collision_cluster is not None:
  1168.     fault_str = " Collision at Cx{}".format(m.fmt_c(collision_cluster))
  1169.   elif len(clusters) * m.cluster_size < file_size:
  1170.     fault_str = " Truncated"
  1171.   else:
  1172.     fault_str = ""
  1173.  
  1174.   yield "{}{}".format(fo_str, fault_str)
  1175.  
  1176.  
  1177. #------------------------------------------------------------------------------
  1178.  
  1179.  
  1180. def secondary_claims_report(m, f, id_map, id_list, disp_hex=False):
  1181.  
  1182.   def fmt_o(offset):
  1183.     return ("0x{:X}" if disp_hex else "{}").format(offset)
  1184.  
  1185.   for sc in secondary_claims_for_file(m, f, id_map, id_list):
  1186.  
  1187.     fco, claimant_id, claimant_fco = sc
  1188.     claimant_f = id_list[claimant_id]
  1189.     collision_cluster = f.clusters[fco]
  1190.  
  1191.     yield (
  1192.       "Cx{} {}: Byte {} is byte {} in {}".format(
  1193.         m.fmt_c(collision_cluster),
  1194.         m.fmt_a(m.dc_vol_offset(collision_cluster)),
  1195.         fmt_o(m.cluster_size * fco),
  1196.         fmt_o(m.cluster_size * claimant_fco),
  1197.         claimant_f.pathname
  1198.       )
  1199.     )
  1200.  
  1201.  
  1202. #------------------------------------------------------------------------------
  1203.  
  1204.  
def dir_report(
        m, fdir, ffile, opts,
        id_map, id_list,
        level=0, max_level=None,
        indent_str="  "):
  """Yield human-readable listing lines for directory fdir.

  Args:
    m: Volume metrics object.
    fdir: Directory object to report on.
    ffile: If not None, restrict the report to this single child item.
    opts: Bitwise OR of DIR_SHOW_* flags selecting flat/nested layout,
        hex offsets, cluster spans, claims, junk entries and errors.
    id_map: Per-cluster allocation IDs.
    id_list: Allocation ID -> file/directory object.
    level: Current recursion depth (controls indenting in nested mode).
    max_level: Recursion limit; None or a negative value means unlimited.
    indent_str: String repeated per level in nested mode.
  """

  def attrs_to_str(a):
    # 00ADVSHR
    # Render the FAT attribute byte as the fixed-order "AVSHR" flag string.

    def flag(attrs, bit, ch):
      return ch if attrs & (1 << bit) != 0 else "-"
    return "".join([
      flag(a, 5, "A"),
      flag(a, 3, "V"),
      flag(a, 2, "S"),
      flag(a, 1, "H"),
      flag(a, 0, "R"),
    ])

  def dec_fmt_fn(x):
    # Decimal rendering for byte sizes and offsets.
    return str(x)

  def hex_fmt_fn(x):
    # Hexadecimal rendering for byte sizes and offsets.
    return "0x{:X}".format(x)

  err_prefix = "(!) "

  # Decode the DIR_SHOW_* option flags.
  disp_hex = (opts & DIR_SHOW_HEX) != 0
  show_spans = (opts & DIR_SHOW_SPANS) != 0
  show_claims = (opts & DIR_SHOW_CLAIMS) != 0
  only_errors = (opts & DIR_SHOW_ONLY_ERRORS) != 0
  flat = (opts & DIR_SHOW_FLAT != 0)
  show_junk = (opts & DIR_SHOW_JUNK != 0) and not only_errors
  show_errors = (opts & DIR_SHOW_ERRORS != 0) or only_errors

  if flat:
    i_s = ""
  else:
    i_s = indent_str * level

  # Byte size and offset format function
  bso_fmt_fn = hex_fmt_fn if disp_hex else dec_fmt_fn

  has_own_error = len(fdir.errors) > 0
  has_child_error = (fdir.error_depth_flags & DIR_ERRORS_CHILDREN) != 0
  has_descendent_error = (fdir.error_depth_flags & DIR_ERRORS_DESCENDENTS) != 0
  has_any_error = has_own_error or has_descendent_error
  is_root = fdir.owner_id is None
  do_report_self = (
      (is_root and has_any_error)
      or (flat and has_child_error)
      or (not flat and has_descendent_error))

  if flat:
    # In flat, non-nested mode, a directory heading is
    # required even for the top level.
    if do_report_self or not only_errors:

      if is_root:
        if fdir.volume_name is not None and fdir.volume_name != "":
          pathname_str = "Volume {}".format(fdir.volume_name)
        elif m.partition_label != "":
          pathname_str = "Partition {}".format(m.partition_label)
        else:
          pathname_str = "/"
      else:
        pathname_str = fdir.pathname
      yield(pathname_str + ":")

  # The root's own errors are reported here (non-root directories have
  # their errors reported as children of their parent).
  if is_root and show_errors and ffile is None:
    for e in fdir.errors:
      yield "{}{}{}".format(i_s, err_prefix, str(e))

  # List the subdirectories.
  for d in fdir.subdirs:

    if ((ffile is None or d is ffile)
        and (len(d.errors) > 0 or not only_errors)):

      # First cluster of the subdirectory, or 0 when its chain is empty.
      start_c = (d.clusters[:1] + [0])[0]

      yield "{}{:13}{:>12}  {}  at Cx{} {:7} ID: {}".format(
        i_s,
        d.name + "/",
        "",
        attrs_to_str(d.attributes),
        m.fmt_c(start_c),
        "",
        d.alloc_id,
      )

      if show_spans:
        # A directory has no recorded byte size; treat it as occupying
        # at least one whole cluster.
        dir_file_size = max(1, len(d.clusters)) * m.cluster_size
        for line in spans_report(
            m, d.clusters, dir_file_size, d.collision_cluster):
          yield "{}{}{}".format(i_s, indent_str, line)

      if show_claims:
        for line in secondary_claims_report(
            m, d, id_map, id_list, disp_hex):
          yield "{}{}{}".format(i_s, indent_str, line)

      if show_errors:
        for e in d.errors:
          yield "{}{}{}{}".format(i_s, indent_str, err_prefix, str(e))

    # Only recurse through the displayed list of subdirectories
    # if nested mode is selected. (Non-nested recursion is to
    # occur at the bottom instead.)
    if ffile is None and not flat:
      if max_level is None or max_level < 0 or level < max_level:
        yield from dir_report(
          m, d, None, opts, id_map, id_list, level + 1, max_level, indent_str
        )

  # List the regular files.
  for f in fdir.files:

    size_str = "({})".format(bso_fmt_fn(f.size))

    if len(f.clusters) > 0:
      start_c = f.clusters[0]
      start_c_str = "Cx{}".format(m.fmt_c(start_c))
      if start_c > m.max_c:
        start_c_str = start_c_str + "!"
      # Expected chain length in clusters, rounded up; a mismatch with
      # the collected chain is flagged with "!".
      c_count = div_ru(f.size, m.cluster_size)
      if len(f.clusters) == c_count:
        c_count_str = "{}".format(c_count)
      else:
        c_count_str = "{}/{}!".format(len(f.clusters), c_count)
      c_start_count_str = "at {} ({})".format(start_c_str, c_count_str)
    else:
      c_start_count_str = ""

    if ((ffile is None or f is ffile)
        and (len(f.errors) > 0 or not only_errors)):

      yield "{}{:12} {:>12}  {}  {:{}} ID: {}".format(
        i_s,
        f.name,
        size_str,
        attrs_to_str(f.attributes),
        c_start_count_str, 13 + m.cfw,
        f.alloc_id,
      )

      if show_spans:
        for line in spans_report(
            m, f.clusters, f.size, f.collision_cluster):
          yield "{}{}{}".format(i_s, indent_str, line)

      if show_claims:
        for line in secondary_claims_report(
            m, f, id_map, id_list, disp_hex):
          yield "{}{}{}".format(i_s, indent_str, line)

      if show_errors:
        for e in f.errors:
          yield "{}{}{}{}".format(i_s, indent_str, err_prefix, str(e))

  if ffile is None:
    # No specific file is requested.

    if show_junk:
      # Display the junk entries, including VFAT file names and deleted files.
      for j in fdir.junk_entries:
        yield "{}{:12} <JUNK>".format(i_s, j)

    # The current directory is done.
    # Recursion in flat (non-nested) mode can thus begin here.
    if flat:
      if max_level is None or max_level < 0 or level < max_level:
        for d in fdir.subdirs:
          if ((d.error_depth_flags & DIR_ERRORS_DESCENDENTS != 0)
              or not only_errors):
            yield("")
            yield from dir_report(
                m, d, None, opts,
                id_map, id_list,
                level + 1, max_level, indent_str)
  1384.  
  1385. #------------------------------------------------------------------------------
  1386.  
  1387.  
  1388. def elided_text_lines(
  1389.         addr_line_gen,
  1390.         elide_repeats=True,
  1391.         sep=""):
  1392.  
  1393.   prev_line = None
  1394.   eliding = False
  1395.   for (addr, line) in addr_line_gen():
  1396.     if line is not None:
  1397.       if (
  1398.         elide_repeats and prev_line is not None
  1399.         and line == prev_line[:len(line)]
  1400.       ):
  1401.         if not eliding:
  1402.           eliding = True
  1403.           yield "*"
  1404.       else:
  1405.         eliding = False
  1406.         prev_line = line
  1407.         yield "{}{}{}".format(addr, sep, line)
  1408.     else:
  1409.       yield addr
  1410.  
  1411.  
  1412. #------------------------------------------------------------------------------
  1413.  
  1414.  
  1415. def brief_map_report(
  1416.         m, fat,
  1417.         data_only,
  1418.         elide_repeats=True, columns=64):
  1419.  
  1420.   def al_gen():
  1421.  
  1422.     for line_start_ix in range(0, len(fat), columns):
  1423.  
  1424.       fat_line = fat[line_start_ix : line_start_ix + columns]
  1425.       line = ["?"] * len(fat_line)
  1426.  
  1427.       for i, x in enumerate(fat_line):
  1428.         c = line_start_ix + i
  1429.         ch = m.cluster_kind(c, x)
  1430.         ch = "." if data_only and ch not in "def" else ch
  1431.         line[i] = ch
  1432.  
  1433.       S = "".join(line)
  1434.  
  1435.       yield (m.fmt_c(line_start_ix), S)
  1436.  
  1437.     yield (m.fmt_c(line_start_ix), None)
  1438.  
  1439.   yield from elided_text_lines(al_gen, elide_repeats, ": ")
  1440.  
  1441.  
  1442. #------------------------------------------------------------------------------
  1443.  
  1444.  
  1445. def fancy_brief_map_report(
  1446.         m, fat, root_dir,
  1447.         data_only, sel_ids,
  1448.         id_map,
  1449.         elide_repeats=True, columns=64,
  1450.         be_fancy=True):
  1451.  
  1452.   DATA_BEGIN = 0x01
  1453.   LOOP_BEGIN = 0x02
  1454.   LOOP_END = 0x04
  1455.   COLLISION = 0x08
  1456.   ORPHAN = 0x10
  1457.  
  1458.   def al_gen():
  1459.  
  1460.     aug_map = [0] * len(fat)
  1461.  
  1462.     for c in range(2, len(fat)):
  1463.       if id_map[c] == 0:
  1464.         aug_map[c] |= ORPHAN
  1465.  
  1466.     for f in all_dirs_and_files(root_dir):
  1467.  
  1468.       for sc in f.secondary_claims:
  1469.         aug_map[f.clusters[sc[0]]] |= COLLISION
  1470.  
  1471.       if len(f.clusters) > 0:
  1472.         aug_map[f.clusters[0]] |= DATA_BEGIN
  1473.         if f.collision_cluster in f.clusters:
  1474.           aug_map[f.collision_cluster] |= LOOP_BEGIN
  1475.           aug_map[f.clusters[-1]] |= LOOP_END
  1476.  
  1477.     adj_map_start = {
  1478.       "d": "D",
  1479.       "e": "E",
  1480.       "f": "F",
  1481.     }
  1482.  
  1483.     adj_map_orphan = {
  1484.       "d": "x",
  1485.       "e": "y",
  1486.       "f": "z",
  1487.     }
  1488.  
  1489.     for line_start_ix in range(0, len(fat), columns):
  1490.  
  1491.       fat_line = fat[line_start_ix : line_start_ix + columns]
  1492.       line = ["?"] * len(fat_line)
  1493.  
  1494.       for i, x in enumerate(fat_line):
  1495.  
  1496.         aid = id_map[i]
  1497.  
  1498.         if sel_ids is None or aid in sel_ids:
  1499.  
  1500.           c = line_start_ix + i
  1501.  
  1502.           ch = m.cluster_kind(c, x)
  1503.           ch = "." if data_only and ch not in "def" else ch
  1504.  
  1505.           if be_fancy:
  1506.  
  1507.             a = aug_map[line_start_ix + i]
  1508.  
  1509.             if a & ORPHAN and ch in adj_map_orphan:
  1510.               ch = adj_map_orphan[ch]
  1511.             if a & DATA_BEGIN and ch in adj_map_start:
  1512.               ch = adj_map_start[ch]
  1513.             if a & LOOP_BEGIN:
  1514.               ch = "[" if a & LOOP_END == 0 else "@"
  1515.             elif a & LOOP_END:
  1516.               ch = "]"
  1517.             if a & COLLISION:
  1518.               ch = "*"
  1519.  
  1520.         else:
  1521.  
  1522.           ch = "."
  1523.  
  1524.         line[i] = ch
  1525.  
  1526.       S = "".join(line)
  1527.  
  1528.       yield (m.fmt_c(line_start_ix), S)
  1529.  
  1530.     yield (m.fmt_c(line_start_ix), None)
  1531.  
  1532.   yield from elided_text_lines(al_gen, elide_repeats, ": ")
  1533.  
  1534.  
  1535. #------------------------------------------------------------------------------
  1536.  
  1537.  
  1538. def cluster_report(m, fat, data_only, elide_repeats=True, columns=8):
  1539.  
  1540.   def al_gen():
  1541.     masked_str = "." * m.cfw
  1542.     for line_start_ix in range(0, len(fat), columns):
  1543.       L = []
  1544.       for i, x in enumerate(fat[line_start_ix : line_start_ix + columns]):
  1545.         c = line_start_ix + i
  1546.         if data_only and m.cluster_kind(c, x) not in "def":
  1547.           L.append(masked_str)
  1548.         else:
  1549.           L.append(m.fmt_c(x))
  1550.       S = " ".join(L)
  1551.       yield (m.fmt_c(line_start_ix), S)
  1552.     yield (m.fmt_c(line_start_ix), None)
  1553.  
  1554.   yield from elided_text_lines(al_gen, elide_repeats, ": ")
  1555.  
  1556.  
  1557. #------------------------------------------------------------------------------
  1558.  
  1559.  
  1560. def selective_cluster_report(
  1561.         m, fat,
  1562.         data_only, sel_ids,
  1563.         id_map,
  1564.         elide_repeats=True, columns=8):
  1565.  
  1566.   def al_gen():
  1567.     masked_str = "." * m.cfw
  1568.     for line_start_ix in range(0, len(fat), columns):
  1569.       L  = []
  1570.       for i, x in enumerate(fat[line_start_ix : line_start_ix + columns]):
  1571.         c = line_start_ix + i
  1572.         if sel_ids is not None and id_map[c] not in sel_ids:
  1573.           S = masked_str
  1574.         else:
  1575.           if data_only and m.cluster_kind(c, x) not in "def":
  1576.             S = masked_str
  1577.           else:
  1578.             S = m.fmt_c(x)
  1579.         L.append(S)
  1580.       S = " ".join(L)
  1581.       yield (m.fmt_c(line_start_ix), S)
  1582.     yield (m.fmt_c(line_start_ix), None)
  1583.  
  1584.   yield from elided_text_lines(al_gen, elide_repeats, ": ")
  1585.  
  1586.  
  1587. #------------------------------------------------------------------------------
  1588.  
  1589.  
  1590. def fat_usage_report(m, usage_stats):
  1591.  
  1592.   total = usage_stats["Total"]
  1593.  
  1594.   def output_line(key_name, count):
  1595.     f = float(count) / total
  1596.     return "{:9} {:5} ({:3.1f}%)".format(key_name + ":", count, 100.0 * f)
  1597.  
  1598.   yield "FAT allocation in clusters of {} bytes:".format(m.cluster_size)
  1599.   yield "{:9} {:5}".format("Total:", total)
  1600.  
  1601.   for key in ("Used", "Free", "Orphaned", "Bad"):
  1602.     yield output_line(key, usage_stats[key])
  1603.  
  1604.   for (key, key_output) in (
  1605.       ("Reserved", "Reserved"),
  1606.       ("InvalidCE", "Cluster entries with invalid references"),
  1607.   ):
  1608.     if usage_stats[key] > 0:
  1609.       yield output_line(key_ouput, usage_stats[key])
  1610.  
  1611.  
  1612. #------------------------------------------------------------------------------
  1613.  
  1614.  
  1615. def index_report(m, root_dir, id_list):
  1616.  
  1617.   fmt_w = len(str(root_dir.last_alloc_id))
  1618.  
  1619.   for aid in range(1, len(id_list)):
  1620.  
  1621.     f = id_list[aid]
  1622.  
  1623.     if len(f.clusters) == 0:
  1624.       cs_str = " " * (m.cfw + 2)
  1625.     else:
  1626.       cs_str = "Cx" + m.fmt_c(f.clusters[0])
  1627.  
  1628.     yield "{:>{}} {} {}".format(aid, fmt_w, cs_str, f.pathname)
  1629.  
  1630.  
  1631. #------------------------------------------------------------------------------
  1632.  
  1633.  
  1634. def addr_report(m, addr, fat, id_map, id_list, disp_hex=False):
  1635.  
  1636.   c, f, offset, desc = analyse_addr_in_volume(
  1637.     m, addr, fat, id_map, id_list
  1638.   )
  1639.  
  1640.   def fmt_o(offset):
  1641.     return ("0x{:X}" if disp_hex else "{}").format(offset)
  1642.  
  1643.   astr = "0x{}".format(m.fmt_a(addr))
  1644.  
  1645.   if c is not None:
  1646.     c_addr = m.da_offset + m.cluster_size * (c - m.min_c)
  1647.     relstr = "at" if c_addr == addr else "in"
  1648.     cstr = "({} Cx{})".format(relstr, m.fmt_c(c))
  1649.   else:
  1650.     cstr = ""
  1651.  
  1652.   if offset is not None:
  1653.  
  1654.     ostr = ", byte {}".format(fmt_o(offset))
  1655.  
  1656.   else:
  1657.  
  1658.     ostr = ""
  1659.  
  1660.   if c is None:
  1661.  
  1662.     yield "{}: {}{}".format(astr, desc, ostr)
  1663.  
  1664.   else:
  1665.  
  1666.     if f is None:
  1667.  
  1668.       yield "{} {} {}{}".format(astr, cstr, desc, ostr)
  1669.  
  1670.     else:
  1671.  
  1672.       if offset >= f.size:
  1673.         ostr += (" (overshot)")
  1674.  
  1675.       yield "{} {} {}{}".format(astr, cstr, desc, ostr)
  1676.  
  1677.       for sc in secondary_claims_for_file(m, f, id_map, id_list):
  1678.  
  1679.         fco, rid, rfco = sc
  1680.  
  1681.         collision_cluster = f.clusters[fco]
  1682.  
  1683.         if collision_cluster == c:
  1684.  
  1685.           rf = id_list[rid]
  1686.           x = offset - fco * m.cluster_size
  1687.           rfo = rfco * m.cluster_size + x
  1688.           rfostr = ", byte {}".format(fmt_o(rfo))
  1689.  
  1690.           if rfo >= rf.size:
  1691.             rfostr += (" (overshot)")
  1692.  
  1693.           yield "(!) Same address as {}{}".format(rf.pathname, rfostr)
  1694.  
  1695.  
  1696. #------------------------------------------------------------------------------
  1697. # Main
  1698. #------------------------------------------------------------------------------
  1699.  
  1700.  
  1701. def main():
  1702.  
  def printlines(seq):
    # Send each generated report line to standard output.
    for text in seq:
      print(text)
  1706.  
  def nice_num_columns(console_width, label_w, column_w):
    # Choose a column count that fits the console width, rounded down to
    # a multiple of a power-of-two group size (capped at 16), with a
    # minimum of one group.
    fitting = (console_width - label_w) // column_w
    group = 1
    while group < 16 and group * 2 <= fitting:
      group *= 2
    return group * max(1, fitting // group)
  1715.  
  def ca_addr_strs_from_csv(csv_addrs):
    # Parse a comma-separated list of addresses into a list of strings,
    # "C<n>" for a cluster index or "A<n>" for a byte address.
    #
    # Accepted forms (case-insensitive): "Cx1F" (hex cluster index),
    # "C31" (decimal cluster index), or a plain integer volume address
    # with an optional 0x/0o/0b radix prefix.  Raises
    # argparse.ArgumentTypeError for malformed or out-of-range values.
    result = []
    addr_strs = csv_addrs.split(",")
    for addr_str in addr_strs:
      ass = addr_str.strip().upper()
      if ass[:2] == "CX" and ass[2 : 3] != " ":
        ass = "0x" + ass[2:]
        variant = "C"
        int_base = 16
        min_a = 2
      elif ass[:1] == "C" and ass[1 : 2] != " ":
        ass = ass[1:]
        variant = "C"
        int_base = 10
        min_a = 2
      else:
        variant = "A"
        int_base = None
        min_a = 0
      try:
        if int_base is not None:
          a = int(ass, int_base)
        else:
          # ass has been upper-cased, so radix prefixes appear as "0X",
          # "0O" and "0B".  (The original compared against lower-case
          # "0o"/"0b" prefixes, which could never match, so octal and
          # binary addresses were always rejected.)
          if ass[:2] == "0X":
            a = int(ass, 16)
          elif ass[:2] == "0O":
            a = int(ass, 8)
          elif ass[:2] == "0B":
            a = int(ass, 2)
          else:
            a = int(ass)
      except ValueError:
        raise argparse.ArgumentTypeError(
            '"{}" is not a valid address.'.format(addr_str))
      if a < min_a:
        if variant == "C":
          raise argparse.ArgumentTypeError(
              'A cluster index cannot be less than {}.'.format(min_a))
        else:
          raise argparse.ArgumentTypeError(
              'An address cannot be negative.')
      result.append(variant + str(a))
    return result
  1760.  
  def get_arguments():
    """Build the argument parser, handle -h and -V manually and return
    the parsed command-line arguments."""

    cmd = os.path.basename(sys.argv[0])

    # Standard help is disabled so that -h can share the manual
    # short-circuit handling used to append the examples text below.
    parser = argparse.ArgumentParser(
      prog=cmd,
      add_help=False,
      description="Examines a FAT12 or FAT16 partition."
    )

    # (flags, keyword-arguments) pairs, registered in this order so the
    # help text lists the options exactly as before. The final entry is
    # the mandatory positional volume image argument.
    opt_table = [
      (("-a", "--addrs"), dict(
          metavar="ADDRS", dest="vol_addresses",
          type=ca_addr_strs_from_csv, action="store",
          help="Identify objects at indicated volume offsets.")),
      (("-b", "--brief-fat"), dict(
          dest="show_brief_fat", action="store_true",
          help="Display a summarised FAT cluster map.")),
      (("-c", "--claims"), dict(
          dest="show_claims", action="store_true",
          help="Show secondary claims on each file.")),
      (("-B", "--brief-fancy"), dict(
          dest="show_fancy_brief_fat", action="store_true",
          help="Display starts and loops in FAT map.")),
      (("-d", "--directory"), dict(
          dest="dir_as_file", action="store_true",
          help="Select a directory, not its contents.")),
      (("-e", "--errors"), dict(
          dest="show_errors", action="store_true",
          help="Show errors.")),
      (("-E", "--only-errors"), dict(
          dest="only_errors", action="store_true",
          help="Show only errors.")),
      (("-f", "--fat"), dict(
          dest="show_fat", action="store_true",
          help="Display the FAT cluster entries.")),
      (("-h", "--help"), dict(
          dest="help", action="store_true",
          help="Display this message and exit.")),
      (("-i", "--index"), dict(
          dest="show_index", action="store_true",
          help="List IDs generated in traversal.")),
      (("-I", "--id"), dict(
          metavar="ID", dest="alloc_id",
          type=int, default=None, action="store",
          help="Select a file or directory by its ID.")),
      (("-j", "--junk"), dict(
          dest="show_junk", action="store_true",
          help="Show junk entries in directories.")),
      (("-l", "--list"), dict(
          dest="show_list", action="store_true",
          help="List items (recursively with -r or -R).")),
      (("-m", "--metrics"), dict(
          dest="show_metrics", action="store_true",
          help="Display metrics and volume information.")),
      (("-p", "--path"), dict(
          metavar="PATH", dest="pathname", type=str, action="store",
          help="Select path for list. (See -l)")),
      (("-R", "--recursive"), dict(
          dest="recursive", action="store_true",
          help="Recurse through subdirectories.")),
      (("-r", "--nested"), dict(
          dest="nested", action="store_true",
          help="Recurse with nested indents.")),
      (("-s", "--spans"), dict(
          dest="show_spans", action="store_true",
          help="Show cluster spans.")),
      (("-t", "--table"), dict(
          metavar="N", dest="fat_index",
          type=int, default=0, action="store",
          help="Select the File Allocation Table. (Default = 0)")),
      (("-u", "--vol-usage"), dict(
          dest="show_vol_usage", action="store_true",
          help="Show counts of used and free clusters.")),
      (("-v", "--verbose"), dict(
          dest="verbose", action="store_true",
          help="Disable elision of repeats.")),
      (("-V", "--version"), dict(
          dest="version", action="store_true",
          help="Display version and exit.")),
      (("-w", "--width"), dict(
          metavar="W", dest="display_width",
          type=int, default=80, action="store",
          help="Set the output width in characters.")),
      (("-x", "--hex"), dict(
          dest="display_hex", action="store_true",
          help="Display byte sizes and offsets in hexadecimal.")),
      (("-z", "--orphans"), dict(
          dest="orphans", action="store_true",
          help="Select orphans in cluster maps.")),
      (("fat_image_filename",), dict(
          metavar="VOL-IMAGE", type=str,
          help="Indicate the FAT12/16 volume image to read.")),
    ]

    for flags, kwargs in opt_table:
      parser.add_argument(*flags, **kwargs)

    # Help and version requests are detected before parse_args() so
    # they work even when the mandatory VOL-IMAGE argument is absent.
    if "-h" in sys.argv or "--help" in sys.argv:
      parser.print_help()
      examples = "".join(
        "  " + cmd + " " + switches + " doom.fat12\n"
        for switches in ("-lre", "-bu", "-lrjsec")
      )
      print("\nExamples:\n" + examples)
      sys.exit(0)

    if "-V" in sys.argv or "--version" in sys.argv:
      print(VERSION)
      sys.exit(0)

    return parser.parse_args()
  1894.  
  1895.   #----------------------------------------------------------------------------
  1896.  
  # Exit status and deferred error message; errors are collected here
  # and printed once, prefixed with the program name, before returning.
  result = 0
  err_msg = ''

  cmd = os.path.basename(sys.argv[0])

  try:

    args = get_arguments()
    img_file = open(args.fat_image_filename, "rb")

    try:

      # -p (path) and -I (ID) are two mutually exclusive ways of
      # selecting a file or directory.
      if args.pathname is not None and args.alloc_id is not None:
        raise ArgError("Cannot select by pathname and ID at the same time.")

      # Byte offset of the volume within the image file. Fixed at zero:
      # the image is treated as a bare volume, not a partitioned disk.
      part_offset = 0
      ca_addr_strs = []

      if args.vol_addresses is not None:
        ca_addr_strs = args.vol_addresses

      # Lazily loaded resources (volume metrics, FAT, directory tree
      # and its ID indexes) plus the current selection state. Each is
      # populated below only if some requested output needs it.
      m = None
      fat = None
      root_dir = None
      id_map = None
      id_list = None
      selected_dir = None
      selected_file = None
      given_id = None
      sel_ids = None
      data_clusters_only = False

      # Oddly, the nested style is more natural in the case of a
      # non-recursive directory listing. This is because in the flat
      # kind of listing, the starting directory requires header text
      # in the same form as the subdirectories.
      recursive = (args.nested or args.recursive) and not args.dir_as_file
      nested = args.nested or not recursive

      # Decide which of the above resources are needed.
      do_walk = (
        args.show_list
        or args.show_index
        or args.show_vol_usage
        or len(ca_addr_strs) > 0
        or args.show_fancy_brief_fat
        or args.pathname is not None
        or args.alloc_id is not None
        or args.orphans
      )
      do_get_fat = do_walk or args.show_fat or args.show_brief_fat
      do_get_metrics = do_get_fat or args.show_metrics

      # Fetch the needed resources.

      if do_get_metrics:

        # The metrics are decoded from the volume's boot sector
        # (first 512 bytes).
        img_file.seek(part_offset)
        m = FATVolumeMetrics(img_file.read(512))

      if do_get_fat:

        # -t selects which copy of the File Allocation Table to read.
        if not (0 <= args.fat_index < m.num_fats):
          raise ArgError("FAT index is out of range.")

        img_file.seek(
            part_offset + m.fat_offset + args.fat_index * m.fat_size)
        fat = get_fat(m, img_file.read(m.fat_size))

      if do_walk:

        root_dir, id_map, id_list = walk_fat(
          m, fat, img_file, part_offset
        )

        if args.pathname is not None:

          # Normalise the path so that an empty string means
          # "Select the contents of the root directory".
          S = args.pathname if args.pathname != "" else "/"

          # Normalise the path further by removing any slash
          # at the beginning that is not also at the end.
          if len(S) >= 2 and S[0] == "/" and S[1] != "/":
            S = S[1:]

          # Because the root directory entry has an empty name string,
          # the search function will need an un-normalised root path.
          #
          # Given that directory-not-its-contents option is available,
          # There is no need to be clever with interpreting a trailing
          # slash on the path.
          f = root_dir.find("" if S == "/" else S)

          if f is not None:
            given_id = f.alloc_id
          else:
            raise ArgError('Cannot find "{}".'.format(args.pathname))

        if args.alloc_id is not None:

          # IDs are indexes into id_list, assigned during the walk.
          if not 0 <= args.alloc_id < len(id_list):
            raise ArgError("ID {} not found.".format(args.alloc_id))

          given_id = args.alloc_id

        if given_id is not None:

          f = id_list[given_id]

          if f is root_dir and args.dir_as_file:
            raise ArgError("Cannot select the root directory as a file.")

          if args.dir_as_file or not f.is_dir:
            # A single object is being addressed.

            sel_ids = [f.alloc_id]
            selected_file = f
            selected_dir = id_list[selected_file.owner_id]

          else:
            # A directory's contents is being addressed.

            selected_file = None
            selected_dir = f

            sel_ids = []

            if recursive:
              for f in all_dirs_and_files(selected_dir):
                if f is not selected_dir:
                  sel_ids.append(f.alloc_id)
            else:
              for f in selected_dir.subdirs:
                sel_ids.append(f.alloc_id)
              for f in selected_dir.files:
                sel_ids.append(f.alloc_id)

          # A selection restricts cluster maps to the data area.
          data_clusters_only = True

        if args.orphans:
          # NOTE(review): ID 0 appears to act as the selector for
          # orphaned cluster chains in the map reports — confirm
          # against walk_fat's ID assignment.
          if sel_ids is None:
            sel_ids = [0]
          elif 0 not in sel_ids:
            sel_ids.append(0)
          data_clusters_only = True

      # Produce the required output, hopefully with the resources loaded.

      if args.show_metrics:

        printlines(metrics_report(m))

      if args.show_fat:

        # Fit as many entry columns as the display width allows.
        # (m.cfw is presumably the cluster field width in characters
        # — TODO confirm in FATVolumeMetrics.)
        w = nice_num_columns(args.display_width, m.cfw + 1, 1 + m.cfw)

        if sel_ids is None:
          printlines(cluster_report(
              m, fat,
              data_clusters_only,
              not args.verbose, w))
        else:
          printlines(selective_cluster_report(
              m, fat,
              data_clusters_only, sel_ids,
              id_map,
              not args.verbose, w))

      if args.show_brief_fat or args.show_fancy_brief_fat:

        w = nice_num_columns(args.display_width, m.cfw + 2, 1)

        # A selection forces the fancy form of the brief map, which
        # can highlight the selected IDs.
        if args.show_fancy_brief_fat or sel_ids is not None:

          printlines(fancy_brief_map_report(
              m, fat, root_dir,
              data_clusters_only, sel_ids,
              id_map, not args.verbose, w,
              args.show_fancy_brief_fat))

        else:

          printlines(brief_map_report(
              m, fat,
              data_clusters_only,
              not args.verbose, w))

      if args.show_list:

        d = selected_dir if selected_dir is not None else root_dir

        # Translate command-line switches into DIR_SHOW_* bit flags.
        opts = 0x00
        opts |= DIR_SHOW_FLAT if recursive and not nested else 0
        opts |= DIR_SHOW_HEX if args.display_hex else 0
        opts |= DIR_SHOW_SPANS if args.show_spans else 0
        opts |= DIR_SHOW_JUNK if args.show_junk else 0
        opts |= DIR_SHOW_CLAIMS if args.show_claims else 0
        opts |= DIR_SHOW_ERRORS if args.show_errors else 0
        opts |= DIR_SHOW_ONLY_ERRORS if args.only_errors else 0

        printlines(dir_report(
            m, d, selected_file, opts,
            id_map, id_list,
            0, None if recursive else 0,
            "  "))

      if args.show_index:

        printlines(index_report(m, root_dir, id_list))

      if args.show_vol_usage:

        usage_stats = get_fat_usage(m, fat, id_map)
        printlines(fat_usage_report(m, usage_stats))

      if not args.show_list:

        # Without a listing, the claims, errors and spans options
        # apply to the single selected file or directory, if any.
        if selected_file is not None:
          f = selected_file
        else:
          f = selected_dir

        if f is not None:

          err_prefix = ""

          if args.show_claims:
            printlines(secondary_claims_report(
                m, f, id_map, id_list, args.display_hex))

          if args.show_errors or args.only_errors:
            for e in f.errors:
              print("{}{}".format(err_prefix, str(e)))

          if args.show_spans:
            printlines(spans_report(
                m, f.clusters, f.size, f.collision_cluster))

      if len(ca_addr_strs) > 0:

        # Each address string carries a single-letter prefix added by
        # ca_addr_strs_from_csv: "C" marks a cluster number, which is
        # converted to a volume byte offset; any other prefix means
        # the remainder is already a byte offset.
        for ca_addr_str in ca_addr_strs:

          if ca_addr_str[0] == "C":
            c = int(ca_addr_str[1:])
            addr = m.da_offset + m.cluster_size * (c - m.min_c)
          else:
            addr = int(ca_addr_str[1:])

          printlines(addr_report(
              m, addr, fat, id_map, id_list, args.display_hex))

    finally:

      img_file.close()

  # Each application exception class maps to a distinct exit status
  # (2..5); anything unexpected gets a traceback and status 1.
  except ArgError as E:

    err_msg = 'Error: ' + str(E)
    result = 2

  except FileError as E:

    err_msg = str(E)
    result = 3

  except CmdError as E:

    err_msg = str(E)
    result = 4

  except DataError as E:

    err_msg = str(E)
    result = 5

  except Exception as E:

    exc_type, exc_value, exc_traceback = sys.exc_info()
    err_lines = traceback.format_exc().splitlines()
    err_msg = 'Unhandled exception:\n' + '\n'.join(err_lines)
    result = 1

  if err_msg != '':
    print(cmd + ': ' + err_msg, file=sys.stderr)

  return result
  2184.  
  2185.  
  2186. #------------------------------------------------------------------------------
  2187. # Command line trigger
  2188. #------------------------------------------------------------------------------
  2189.  
  2190.  
  2191. if __name__ == '__main__':
  2192.   sys.exit(main())
  2193.  
  2194.  
  2195. #------------------------------------------------------------------------------
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement