ArchiveManager.java (freenet.client)

// s243a pearltree node: http://www.pearltrees.com/s243a/archivemanager-freenet-client/id12827706
/* This code is part of Freenet. It is distributed under the GNU General
 * Public License, version 2 (or at your option any later version). See
 * http://www.gnu.org/ for further details of the GPL. */
package freenet.client;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

import net.contrapunctus.lzma.LzmaInputStream;

import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;

import com.db4o.ObjectContainer;

import freenet.client.async.ClientContext;
import freenet.keys.FreenetURI;
import freenet.support.ExceptionWrapper;
import freenet.support.LRUMap;
import freenet.support.Logger;
import freenet.support.MutableBoolean;
import freenet.support.Logger.LogLevel;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
import freenet.support.compress.Compressor.COMPRESSOR_TYPE;
import freenet.support.io.BucketTools;
import freenet.support.io.Closer;

/**
 * Cache of recently decoded archives:
 * - Keep up to N ArchiveHandlers in RAM (this can be large; we don't keep the
 * files open due to the limitations of the java.util.zip API).
 * - Keep up to Y bytes (after padding and overheads) of decoded data on disk
 * (the OS is quite capable of determining what to keep in actual RAM).
 *
 * Always take the lock on ArchiveStoreContext before the lock on ArchiveManager, NOT the other way around.
 */
public class ArchiveManager {

    public static final String METADATA_NAME = ".metadata";
    private static boolean logMINOR;

    public enum ARCHIVE_TYPE {
        // WARNING: THIS CLASS IS STORED IN DB4O -- THINK TWICE BEFORE ADD/REMOVE/RENAME FIELDS
        ZIP((short)0, new String[] { "application/zip", "application/x-zip" }), /* eventually get rid of ZIP support at some point */
        TAR((short)1, new String[] { "application/x-tar" });

        public final short metadataID;
        public final String[] mimeTypes;

        /** Cached values(). Never modify or pass this array to outside code! */
        private static final ARCHIVE_TYPE[] values = values();

        private ARCHIVE_TYPE(short metadataID, String[] mimeTypes) {
            this.metadataID = metadataID;
            this.mimeTypes = mimeTypes;
        }

        public static boolean isValidMetadataID(short id) {
            for(ARCHIVE_TYPE current : values)
                if(id == current.metadataID)
                    return true;
            return false;
        }

        /**
         * Is the given MIME type an archive type that we can deal with?
         */
        public static boolean isUsableArchiveType(String type) {
            for(ARCHIVE_TYPE current : values)
                for(String ctype : current.mimeTypes)
                    if(ctype.equalsIgnoreCase(type))
                        return true;
            return false;
        }

        /** If the given MIME type is an archive type that we can deal with,
         * get its archive type (the metadataID corresponds to the ARCHIVE_
         * constants in Metadata).
         */
        public static ARCHIVE_TYPE getArchiveType(String type) {
            for(ARCHIVE_TYPE current : values)
                for(String ctype : current.mimeTypes)
                    if(ctype.equalsIgnoreCase(type))
                        return current;
            return null;
        }

        public static ARCHIVE_TYPE getArchiveType(short type) {
            for(ARCHIVE_TYPE current : values)
                if(current.metadataID == type)
                    return current;
            return null;
        }

        public static ARCHIVE_TYPE getDefault() {
            return TAR;
        }
    }
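
    /* Usage sketch for ARCHIVE_TYPE: dispatching on a container's MIME type.
     * The MIME string is only an example value.
     *
     *   String mime = "application/x-tar";
     *   if(ARCHIVE_TYPE.isUsableArchiveType(mime)) {
     *       ARCHIVE_TYPE type = ARCHIVE_TYPE.getArchiveType(mime); // TAR
     *       short id = type.metadataID;                            // 1, as stored in metadata
     *       assert ARCHIVE_TYPE.isValidMetadataID(id);
     *   }
     */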

    final long maxArchivedFileSize;

    // ArchiveHandlers
    final int maxArchiveHandlers;
    private final LRUMap<FreenetURI, ArchiveStoreContext> archiveHandlers;

    // Data cache
    /** Maximum number of cached ArchiveStoreItems */
    final int maxCachedElements;
    /** Maximum cached data in bytes */
    final long maxCachedData;
    /** Currently cached data in bytes */
    private long cachedData;
    /** Map from ArchiveKey to ArchiveStoreElement */
    private final LRUMap<ArchiveKey, ArchiveStoreItem> storedData;
    /** Bucket Factory */
    private final BucketFactory tempBucketFactory;

    /**
     * Create an ArchiveManager.
     * @param maxHandlers The maximum number of cached ArchiveHandlers, i.e. the
     * maximum number of containers to track.
     * @param maxCachedData The maximum size of the cache directory, in bytes.
     * @param maxArchivedFileSize The maximum extracted size of a single file in any
     * archive.
     * @param maxCachedElements The maximum number of cached elements (an element is a
     * file extracted from an archive; it is stored, encrypted and padded, in a single
     * file).
     * @param tempBucketFactory The bucket factory used to store extracted data.
     */
    public ArchiveManager(int maxHandlers, long maxCachedData, long maxArchivedFileSize, int maxCachedElements, BucketFactory tempBucketFactory) {
        maxArchiveHandlers = maxHandlers;
        // FIXME PERFORMANCE I'm assuming there isn't much locality here, so it's faster to use the FAST_COMPARATOR.
        // This may not be true if there are a lot of sites with many containers all inserted as individual SSKs?
        archiveHandlers = LRUMap.createSafeMap(FreenetURI.FAST_COMPARATOR);
        this.maxCachedElements = maxCachedElements;
        this.maxCachedData = maxCachedData;
        storedData = new LRUMap<ArchiveKey, ArchiveStoreItem>();
        this.maxArchivedFileSize = maxArchivedFileSize;
        this.tempBucketFactory = tempBucketFactory;
        logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
    }
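
    /* Construction sketch. The concrete limits and the BucketFactory are
     * illustrative; a real node wires these from its configuration.
     *
     *   BucketFactory bf = ...; // e.g. the node's temporary bucket factory
     *   ArchiveManager manager = new ArchiveManager(
     *           200,               // maxHandlers: containers to track
     *           32 * 1024 * 1024,  // maxCachedData: 32MiB of extracted data on disk
     *           2 * 1024 * 1024,   // maxArchivedFileSize: 2MiB per extracted file
     *           1024,              // maxCachedElements
     *           bf);
     */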

    /** Add an ArchiveHandler by key */
    private synchronized void putCached(FreenetURI key, ArchiveStoreContext zip) {
        if(logMINOR) Logger.minor(this, "Put cached AH for "+key+" : "+zip);
        archiveHandlers.push(key, zip);
        while(archiveHandlers.size() > maxArchiveHandlers)
            archiveHandlers.popKey(); // dump it
    }

    /** Get an ArchiveHandler by key */
    ArchiveStoreContext getCached(FreenetURI key) {
        if(logMINOR) Logger.minor(this, "Get cached AH for "+key);
        ArchiveStoreContext handler = archiveHandlers.get(key);
        if(handler == null) return null;
        archiveHandlers.push(key, handler);
        return handler;
    }

    /**
     * Get or create an archive store context for a key. This does not need to
     * know how to fetch the key, because the methods called later will ask.
     * It will try to serve from cache, but if that fails, will re-fetch.
     * @param key The key of the archive that we are extracting data from.
     * @param archiveType The archive type, defined in Metadata.
     * @return An archive store context, or null if returnNullIfNotFound is set
     * and no context is cached for the key.
     */
    synchronized ArchiveStoreContext makeContext(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean returnNullIfNotFound) {
        ArchiveStoreContext handler = getCached(key);
        if(handler != null) return handler;
        if(returnNullIfNotFound) return null;
        handler = new ArchiveStoreContext(key, archiveType);
        putCached(key, handler);
        return handler;
    }

    /**
     * Create an archive handler. This does not need to know how to
     * fetch the key, because the methods called later will ask.
     * It will try to serve from cache, but if that fails, will
     * re-fetch.
     * @param key The key of the archive that we are extracting data from.
     * @param archiveType The archive type, defined in Metadata.
     * @return An archive handler.
     */
    public ArchiveHandler makeHandler(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean forceRefetch, boolean persistent) {
        return new ArchiveHandlerImpl(persistent ? key.clone() : key, archiveType, ctype, forceRefetch);
    }

    /**
     * Get a cached, previously extracted, file from an archive.
     * @param key The key used to fetch the archive.
     * @param filename The name of the file within the archive.
     * @return A Bucket containing the data requested, or null.
     * @throws ArchiveFailureException
     */
    public Bucket getCached(FreenetURI key, String filename) throws ArchiveFailureException {
        if(logMINOR) Logger.minor(this, "Fetch cached: "+key+ ' ' +filename);
        ArchiveKey k = new ArchiveKey(key, filename);
        ArchiveStoreItem asi = null;
        synchronized (this) {
            asi = storedData.get(k);
            if(asi == null) return null;
            // Promote to top of LRU
            storedData.push(k, asi);
        }
        if(logMINOR) Logger.minor(this, "Found data");
        return asi.getReaderBucket();
    }
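
    /* Cache-lookup sketch: try the extracted-file cache before fetching.
     * The URI is an example; getCached() returns null on a miss.
     *
     *   FreenetURI uri = new FreenetURI("CHK@..."); // key the container was fetched from
     *   Bucket cached = manager.getCached(uri, "index.html");
     *   if(cached == null) {
     *       // Miss: fetch the container, extractToCache(), then retry.
     *   }
     */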

    /**
     * Remove a file from the cache. Called after it has been removed from its
     * ArchiveHandler.
     * @param item The ArchiveStoreItem to remove.
     */
    synchronized void removeCachedItem(ArchiveStoreItem item) {
        long size = item.spaceUsed();
        storedData.removeKey(item.key);
        // Hard disk space limit = remove it here.
        // Soft disk space limit would be to remove it outside the lock.
        // Soft disk space limit = we go over the limit significantly when we
        // are overloaded.
        cachedData -= size;
        if(logMINOR) Logger.minor(this, "removeCachedItem: "+item);
        item.close();
    }

    /**
     * Extract data to cache. Call synchronized on ctx.
     * @param key The key the data was fetched from.
     * @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP or Metadata.ARCHIVE_TAR.
     * @param data The actual data fetched.
     * @param archiveContext The context for the whole fetch process.
     * @param ctx The ArchiveStoreContext for this key.
     * @param element A particular element that the caller is especially interested in, or null.
     * @param callback A callback to be called if we find that element, or if we don't.
     * @throws ArchiveFailureException If we could not extract the data, or it was too big, etc.
     * @throws ArchiveRestartException If the request needs to be restarted because the archive
     * changed.
     *
     * FIXME: This method *can* be called from the database thread, however it isn't at
     * present (check the call stack). Maybe we should get rid of the ObjectContainer?
     * OTOH maybe extracting inline on the database thread for small containers would be useful?
     */
    public void extractToCache(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, final Bucket data, ArchiveContext archiveContext, ArchiveStoreContext ctx, String element, ArchiveExtractCallback callback, ObjectContainer container, ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
        logMINOR = Logger.shouldLog(LogLevel.MINOR, this);

        MutableBoolean gotElement = element != null ? new MutableBoolean() : null;

        if(logMINOR) Logger.minor(this, "Extracting "+key);
        ctx.removeAllCachedItems(this); // flush cache anyway
        final long expectedSize = ctx.getLastSize();
        final long archiveSize = data.size();
        /* Set if we need to throw an ArchiveRestartException rather than returning
         * success, after we have unpacked everything.
         */
        boolean throwAtExit = false;
        if((expectedSize != -1) && (archiveSize != expectedSize)) {
            throwAtExit = true;
            ctx.setLastSize(archiveSize);
        }
        byte[] expectedHash = ctx.getLastHash();
        if(expectedHash != null) {
            byte[] realHash;
            try {
                realHash = BucketTools.hash(data);
            } catch (IOException e) {
                throw new ArchiveFailureException("Error reading archive data: "+e, e);
            }
            if(!Arrays.equals(realHash, expectedHash))
                throwAtExit = true;
            ctx.setLastHash(realHash);
        }

        if(archiveSize > archiveContext.maxArchiveSize)
            throw new ArchiveFailureException("Archive too big ("+archiveSize+" > "+archiveContext.maxArchiveSize+")!");
        else if(archiveSize <= 0)
            throw new ArchiveFailureException("Archive too small! ("+archiveSize+')');
        else if(logMINOR)
            Logger.minor(this, "Container size (possibly compressed): "+archiveSize+" for "+data);

        InputStream is = null;
        try {
            final ExceptionWrapper wrapper;
            if((ctype == null) || (ARCHIVE_TYPE.ZIP == archiveType)) {
                if(logMINOR) Logger.minor(this, "No compression");
                is = data.getInputStream();
                wrapper = null;
            } else if(ctype == COMPRESSOR_TYPE.BZIP2) {
                if(logMINOR) Logger.minor(this, "dealing with BZIP2");
                is = new BZip2CompressorInputStream(data.getInputStream());
                wrapper = null;
            } else if(ctype == COMPRESSOR_TYPE.GZIP) {
                if(logMINOR) Logger.minor(this, "dealing with GZIP");
                is = new GZIPInputStream(data.getInputStream());
                wrapper = null;
            } else if(ctype == COMPRESSOR_TYPE.LZMA_NEW) {
                // LZMA internally uses pipe streams, so we may as well do it here.
                // In fact we need to for LZMA_NEW, because of the properties bytes.
                PipedInputStream pis = new PipedInputStream();
                final PipedOutputStream pos = new PipedOutputStream();
                pis.connect(pos);
                wrapper = new ExceptionWrapper();
                context.mainExecutor.execute(new Runnable() {

                    @Override
                    public void run() {
                        InputStream is = null;
                        try {
                            Compressor.COMPRESSOR_TYPE.LZMA_NEW.decompress(is = data.getInputStream(), pos, data.size(), expectedSize);
                        } catch (CompressionOutputSizeException e) {
                            Logger.error(this, "Failed to decompress archive: "+e, e);
                            wrapper.set(e);
                        } catch (IOException e) {
                            Logger.error(this, "Failed to decompress archive: "+e, e);
                            wrapper.set(e);
                        } finally {
                            try {
                                pos.close();
                            } catch (IOException e) {
                                Logger.error(this, "Failed to close PipedOutputStream: "+e, e);
                            }
                            Closer.close(is);
                        }
                    }

                });
                is = pis;
            } else if(ctype == COMPRESSOR_TYPE.LZMA) {
                if(logMINOR) Logger.minor(this, "dealing with LZMA");
                is = new LzmaInputStream(data.getInputStream());
                wrapper = null;
            } else {
                wrapper = null;
            }

            if(ARCHIVE_TYPE.ZIP == archiveType)
                handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit, container, context);
            else if(ARCHIVE_TYPE.TAR == archiveType)
                handleTARArchive(ctx, key, is, element, callback, gotElement, throwAtExit, container, context);
            else
                throw new ArchiveFailureException("Unknown or unsupported archive algorithm " + archiveType);
            if(wrapper != null) {
                Exception e = wrapper.get();
                if(e != null) throw new ArchiveFailureException("An exception occurred while decompressing: "+e.getMessage(), e);
            }
        } catch (IOException ioe) {
            throw new ArchiveFailureException("An IOException occurred: "+ioe.getMessage(), ioe);
        } finally {
            Closer.close(is);
        }
    }
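
    /* Callback sketch for extractToCache(). Only the two callback methods
     * actually invoked in this file (gotBucket and notInArchive) are shown;
     * the full ArchiveExtractCallback interface may declare more.
     *
     *   ArchiveExtractCallback cb = new ArchiveExtractCallback() {
     *       public void gotBucket(Bucket data, ObjectContainer container, ClientContext context) {
     *           // The requested element was found and extracted.
     *       }
     *       public void notInArchive(ObjectContainer container, ClientContext context) {
     *           // The archive was unpacked but did not contain the element.
     *       }
     *   };
     *   manager.extractToCache(uri, ARCHIVE_TYPE.TAR, COMPRESSOR_TYPE.GZIP,
     *           fetchedData, archiveContext, storeContext, "index.html", cb, container, context);
     */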

    private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit, ObjectContainer container, ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
        if(logMINOR) Logger.minor(this, "Handling a TAR Archive");
        TarArchiveInputStream tarIS = null;
        try {
            tarIS = new TarArchiveInputStream(data);

            // MINOR: Assumes the first entry in the tarball is a directory.
            ArchiveEntry entry;

            byte[] buf = new byte[32768];
            HashSet<String> names = new HashSet<String>();
            boolean gotMetadata = false;

            outerTAR: while(true) {
                try {
                    entry = tarIS.getNextEntry();
                } catch (IllegalArgumentException e) {
                    // Annoyingly, it can throw this on some corruptions...
                    throw new ArchiveFailureException("Error reading archive: "+e.getMessage(), e);
                }
                if(entry == null) break;
                if(entry.isDirectory()) continue;
                String name = stripLeadingSlashes(entry.getName());
                if(names.contains(name)) {
                    Logger.error(this, "Duplicate key "+name+" in archive "+key);
                    continue;
                }
                long size = entry.getSize();
                if(name.equals(".metadata"))
                    gotMetadata = true;
                if(size > maxArchivedFileSize && !name.equals(element)) {
                    addErrorElement(ctx, key, name, "File too big: "+size+" greater than current archived file size limit "+maxArchivedFileSize, true);
                } else {
                    // Read the element
                    long realLen = 0;
                    Bucket output = tempBucketFactory.makeBucket(size);
                    OutputStream out = output.getOutputStream();

                    try {
                        int readBytes;
                        while((readBytes = tarIS.read(buf)) > 0) {
                            out.write(buf, 0, readBytes);
                            // Accumulate the real extracted length and bound it.
                            realLen += readBytes;
                            if(realLen > maxArchivedFileSize) {
                                addErrorElement(ctx, key, name, "File too big: "+realLen+" greater than current archived file size limit "+maxArchivedFileSize, true);
                                out.close();
                                out = null;
                                output.free();
                                continue outerTAR;
                            }
                        }

                    } finally {
                        if(out != null) out.close();
                    }
                    if(size <= maxArchivedFileSize) {
                        addStoreElement(ctx, key, name, output, gotElement, element, callback, container, context);
                        names.add(name);
                        trimStoredData();
                    } else {
                        // We are here because they asked for this file.
                        callback.gotBucket(output, container, context);
                        gotElement.value = true;
                        addErrorElement(ctx, key, name, "File too big: "+size+" greater than current archived file size limit "+maxArchivedFileSize, true);
                    }
                }
            }

            // If no metadata, generate some
            if(!gotMetadata) {
                generateMetadata(ctx, key, names, gotElement, element, callback, container, context);
                trimStoredData();
            }
            if(throwAtExit) throw new ArchiveRestartException("Archive changed on re-fetch");

            if(element != null && !gotElement.value)
                callback.notInArchive(container, context);

        } catch (IOException e) {
            throw new ArchiveFailureException("Error reading archive: "+e.getMessage(), e);
        } finally {
            Closer.close(tarIS);
        }
    }

    private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit, ObjectContainer container, ClientContext context) throws ArchiveFailureException, ArchiveRestartException {
        if(logMINOR) Logger.minor(this, "Handling a ZIP Archive");
        ZipInputStream zis = null;
        try {
            zis = new ZipInputStream(data);

            // MINOR: Assumes the first entry in the zip is a directory.
            ZipEntry entry;

            byte[] buf = new byte[32768];
            HashSet<String> names = new HashSet<String>();
            boolean gotMetadata = false;

            outerZIP: while(true) {
                entry = zis.getNextEntry();
                if(entry == null) break;
                if(entry.isDirectory()) continue;
                String name = stripLeadingSlashes(entry.getName());
                if(names.contains(name)) {
                    Logger.error(this, "Duplicate key "+name+" in archive "+key);
                    continue;
                }
                long size = entry.getSize();
                if(name.equals(".metadata"))
                    gotMetadata = true;
                if(size > maxArchivedFileSize && !name.equals(element)) {
                    addErrorElement(ctx, key, name, "File too big: "+size+" greater than current archived file size limit "+maxArchivedFileSize, true);
                } else {
                    // Read the element
                    long realLen = 0;
                    Bucket output = tempBucketFactory.makeBucket(size);
                    OutputStream out = output.getOutputStream();
                    try {
                        int readBytes;
                        while((readBytes = zis.read(buf)) > 0) {
                            out.write(buf, 0, readBytes);
                            // Accumulate the real extracted length and bound it.
                            realLen += readBytes;
                            if(realLen > maxArchivedFileSize) {
                                addErrorElement(ctx, key, name, "File too big: "+realLen+" greater than current archived file size limit "+maxArchivedFileSize, true);
                                out.close();
                                out = null;
                                output.free();
                                continue outerZIP;
                            }
                        }

                    } finally {
                        if(out != null) out.close();
                    }
                    if(size <= maxArchivedFileSize) {
                        addStoreElement(ctx, key, name, output, gotElement, element, callback, container, context);
                        names.add(name);
                        trimStoredData();
                    } else {
                        // We are here because they asked for this file.
                        callback.gotBucket(output, container, context);
                        gotElement.value = true;
                        addErrorElement(ctx, key, name, "File too big: "+size+" greater than current archived file size limit "+maxArchivedFileSize, true);
                    }
                }
            }

            // If no metadata, generate some
            if(!gotMetadata) {
                generateMetadata(ctx, key, names, gotElement, element, callback, container, context);
                trimStoredData();
            }
            if(throwAtExit) throw new ArchiveRestartException("Archive changed on re-fetch");

            if(element != null && !gotElement.value)
                callback.notInArchive(container, context);

        } catch (IOException e) {
            throw new ArchiveFailureException("Error reading archive: "+e.getMessage(), e);
        } finally {
            if(zis != null) {
                try {
                    zis.close();
                } catch (IOException e) {
                    Logger.error(this, "Failed to close stream: "+e, e);
                }
            }
        }
    }

    private String stripLeadingSlashes(String name) {
        while(name.length() > 1 && name.charAt(0) == '/')
            name = name.substring(1);
        return name;
    }
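
    /* Behaviour sketch for stripLeadingSlashes():
     *   "/index.html" -> "index.html"
     *   "//a/b"       -> "a/b"
     *   "/"           -> "/"   (the length check keeps the last character)
     */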

    /**
     * Generate fake metadata for an archive which doesn't have any.
     * @param ctx The context object.
     * @param key The key from which the archive we are unpacking was fetched.
     * @param names Set of names in the archive.
     * @param gotElement Flag indicating whether we have already found the element
     * the caller asked for.
     * @param element2 The name of the element the caller is especially interested in, or null.
     * @param callback Callback to be called if the generated metadata turns out to be
     * the element the caller asked for.
     * @throws ArchiveFailureException
     */
    private ArchiveStoreItem generateMetadata(ArchiveStoreContext ctx, FreenetURI key, Set<String> names, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback, ObjectContainer container, ClientContext context) throws ArchiveFailureException {
        /* What we have to do is to:
         * - Construct a filesystem tree of the names.
         * - Turn each level of the tree into a Metadata object, including those below it, with
         * simple manifests and archive internal redirects.
         * - Turn the master Metadata object into binary metadata, with all its subsidiaries.
         * - Create a .metadata entry containing this data.
         */
        // Root directory.
        // String -> either itself, or another HashMap
        HashMap<String, Object> dir = new HashMap<String, Object>();
        for (String name : names) {
            addToDirectory(dir, name, "");
        }
        Metadata metadata = new Metadata(dir, "");
        int x = 0;
        Bucket bucket = null;
        while(true) {
            try {
                bucket = BucketTools.makeImmutableBucket(tempBucketFactory, metadata.writeToByteArray());
                return addStoreElement(ctx, key, ".metadata", bucket, gotElement, element2, callback, container, context);
            } catch (MetadataUnresolvedException e) {
                try {
                    x = resolve(e, x, bucket, ctx, key, gotElement, element2, callback, container, context);
                } catch (IOException e1) {
                    throw new ArchiveFailureException("Failed to create metadata: "+e1, e1);
                }
            } catch (IOException e1) {
                Logger.error(this, "Failed to create metadata: "+e1, e1);
                throw new ArchiveFailureException("Failed to create metadata: "+e1, e1);
            }
        }
    }

    private int resolve(MetadataUnresolvedException e, int x, Bucket bucket, ArchiveStoreContext ctx, FreenetURI key, MutableBoolean gotElement, String element2, ArchiveExtractCallback callback, ObjectContainer container, ClientContext context) throws IOException, ArchiveFailureException {
        for(Metadata m: e.mustResolve) {
            byte[] buf;
            try {
                buf = m.writeToByteArray();
            } catch (MetadataUnresolvedException e1) {
                // Recurse on the newly thrown exception, not the one we are already resolving.
                x = resolve(e1, x, bucket, ctx, key, gotElement, element2, callback, container, context);
                continue;
            }
            OutputStream os = bucket.getOutputStream();
            try {
                os.write(buf);
            } finally {
                os.close();
            }
            addStoreElement(ctx, key, ".metadata-"+(x++), bucket, gotElement, element2, callback, container, context);
        }
        return x;
    }

    private void addToDirectory(HashMap<String, Object> dir, String name, String prefix) throws ArchiveFailureException {
        int x = name.indexOf('/');
        if(x < 0) {
            if(dir.containsKey(name)) {
                throw new ArchiveFailureException("Invalid archive: contains "+prefix+name+" twice");
            }
            dir.put(name, name);
        } else {
            String before = name.substring(0, x);
            String after;
            if(x == name.length()-1) {
                // Last char
                after = "";
            } else
                after = name.substring(x+1, name.length());
            Object o = dir.get(before);
            if (o == null) {
                dir.put(before, o = new HashMap<String, Object>());
            } else if (o instanceof String) {
                throw new ArchiveFailureException("Invalid archive: contains "+name+" as both file and dir");
            }
            addToDirectory(Metadata.forceMap(o), after, prefix + before + '/');
        }
    }
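
    /* Sketch of the tree addToDirectory() builds for generateMetadata().
     * For the names { "index.html", "img/logo.png" } the map ends up as:
     *
     *   { "index.html" -> "index.html",
     *     "img"        -> { "logo.png" -> "logo.png" } }
     *
     * i.e. files map to their own name and subdirectories map to nested
     * HashMaps, which Metadata then turns into simple manifests.
     */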

    /**
     * Add an error element to the cache. This happens when a single file in the archive
     * is invalid (usually because it is too large).
     * @param ctx The ArchiveStoreContext which must be notified about this element's creation.
     * @param key The key from which the archive was fetched.
     * @param name The name of the file within the archive.
     * @param error The error message to be included on the eventual exception thrown,
     * if anyone tries to extract the data for this element.
     */
    private void addErrorElement(ArchiveStoreContext ctx, FreenetURI key, String name, String error, boolean tooBig) {
        ErrorArchiveStoreItem element = new ErrorArchiveStoreItem(ctx, key, name, error, tooBig);
        if(logMINOR) Logger.minor(this, "Adding error element: "+element+" for "+key+ ' ' +name);
        ArchiveStoreItem oldItem;
        synchronized (this) {
            oldItem = storedData.get(element.key);
            storedData.push(element.key, element);
            if(oldItem != null) {
                oldItem.close();
                cachedData -= oldItem.spaceUsed();
                if(logMINOR) Logger.minor(this, "Dropping old store element from archive cache: "+oldItem);
            }
        }
    }

    /**
     * Add a store element.
     * @param callbackName If set, the name of the file for which we must call the callback if this file happens to
     * match.
     * @param gotElement Flag indicating whether we've already found the file for the callback. If so we must not call
     * it again.
     * @param callback Callback to be called if we do find it. We must getReaderBucket() before adding the data to the
     * LRU, otherwise it may be deleted before it reaches the client.
     * @throws ArchiveFailureException If a failure occurred resulting in the data not being readable. Only happens if
     * callback != null.
     */
    private ArchiveStoreItem addStoreElement(ArchiveStoreContext ctx, FreenetURI key, String name, Bucket temp, MutableBoolean gotElement, String callbackName, ArchiveExtractCallback callback, ObjectContainer container, ClientContext context) throws ArchiveFailureException {
        RealArchiveStoreItem element = new RealArchiveStoreItem(ctx, key, name, temp);
        if(logMINOR) Logger.minor(this, "Adding store element: "+element+" ( "+key+ ' ' +name+" size "+element.spaceUsed()+" )");
        ArchiveStoreItem oldItem;
        // Let it throw, if it does something is drastically wrong
        Bucket matchBucket = null;
        if((!gotElement.value) && name.equals(callbackName)) {
            matchBucket = element.getReaderBucket();
        }
        synchronized (this) {
            oldItem = storedData.get(element.key);
            storedData.push(element.key, element);
            cachedData += element.spaceUsed();
            if(oldItem != null) {
                cachedData -= oldItem.spaceUsed();
                if(logMINOR) Logger.minor(this, "Dropping old store element from archive cache: "+oldItem);
                oldItem.close();
            }
        }
        if(matchBucket != null) {
            callback.gotBucket(matchBucket, container, context);
            gotElement.value = true;
        }
        return element;
    }

    /**
     * Drop any stored data beyond the limits.
     * Synchronizes on this ArchiveManager internally; callers need not hold any lock.
     */
    private void trimStoredData() {
        synchronized(this) {
            while(true) {
                ArchiveStoreItem item;
                if(cachedData <= maxCachedData && storedData.size() <= maxCachedElements) return;
                if(storedData.isEmpty()) {
                    // Race condition? cachedData out of sync?
                    Logger.error(this, "storedData is empty but still over limit: cachedData="+cachedData+" / "+maxCachedData);
                    return;
                }
                item = storedData.popValue();
                long space = item.spaceUsed();
                cachedData -= space;
                // Hard limits = delete file within lock, soft limits = delete outside of lock
                // Here we use a hard limit
                if(logMINOR)
                    Logger.minor(this, "Dropping "+item+" : cachedData="+cachedData+" of "+maxCachedData+" stored items : "+storedData.size()+" of "+maxCachedElements);
                item.close();
            }
        }
    }

    public static void init(ObjectContainer container, ClientContext context, final long nodeDBHandle) {
        ArchiveHandlerImpl.init(container, context, nodeDBHandle);
    }

    public boolean objectCanNew(ObjectContainer container) {
        Logger.error(this, "Not storing ArchiveManager in database", new Exception("error"));
        return false;
    }

    public boolean objectCanUpdate(ObjectContainer container) {
        Logger.error(this, "Trying to store an ArchiveManager!", new Exception("error"));
        return false;
    }

    public boolean objectCanActivate(ObjectContainer container) {
        Logger.error(this, "Trying to store an ArchiveManager!", new Exception("error"));
        return false;
    }

    public boolean objectCanDeactivate(ObjectContainer container) {
        Logger.error(this, "Trying to store an ArchiveManager!", new Exception("error"));
        return false;
    }

}