  1. /**
  2.  Runtime Memory Management and Garbage Collection
  3.  
  4.  Authors:
  5.     Walter Bright
  6.     David Friedman
  7.     Sean Kelly
  8.     Jeremie Pelletier
  9.  
  10.  References:
  11.     $(LINK http://en.wikipedia.org/wiki/Garbage_collection_%28computer_science%29)
  12.     $(LINK http://www.iecc.com/gclist/GC-algorithms.html)
  13.  
  14.  License:
  15.     Copyright (C) 2000-2006 by Digital Mars, www.digitalmars.com
  16.     Written by Walter Bright
  17.  
  18.     This software is provided 'as-is', without any express or implied
  19.     warranty. In no event will the authors be held liable for any damages
  20.     arising from the use of this software.
  21.  
  22.     Permission is granted to anyone to use this software for any purpose,
  23.     including commercial applications, and to alter it and redistribute it
  24.     freely, in both source and binary form, subject to the following
  25.     restrictions:
  26.  
  27.     o   The origin of this software must not be misrepresented; you must not
  28.         claim that you wrote the original software. If you use this software
  29.         in a product, an acknowledgment in the product documentation would be
  30.         appreciated but is not required.
  31.     o   Altered source versions must be plainly marked as such, and must not
  32.         be misrepresented as being the original software.
  33.     o   This notice may not be removed or altered from any source distribution.
  34. */
  35. module dlib.Memory;
  36.  
  37. /*debug = Memory;
  38. debug = MemoryInternal;
  39. debug = MemoryExtendedInfo;
  40. debug = MemoryPool;*/
  41. //debug = MemoryBreakOnAlloc;
  42. //debug = MemoryBreakOnAllocPool;
  43. //debug = MemoryBreakOnCollection;
  44.  
  45. version(X86) {
  46.     version = StackGrowsDown;
  47.  
  48.     enum PageSize = 0x1000;
  49. }
  50. else static assert(0, "Unsupported architecture.");
  51.  
  52. //version = MemoryProfile;
  53.  
  54. import std.c.stdlib : calloc, malloc, realloc, free;
  55. import std.c.string : memset, memcpy, memmove;
  56.  
  57. version(Windows) {
  58.     import sys.windows.Memory;
  59. }
  60. else version(Posix) {
  61.     import sys.posix.mman;
  62.     import sys.posix.pthread;
  63. }
  64. else static assert(0, "Unsupported platform.");
  65.  
  66. import dlib.Main;
  67. import dlib.Monitor;
  68.  
  69. // TODO!!! threading disabled until 'shared' gets decent semantics
  70. //import dlib.Thread;
  71.  
  72. version(DigitalMars) {
  73.     import std.intrinsic;
  74.     import dlib.dmd.Memory;
  75. }
  76. else static assert(0, "Unsupported compiler.");
  77.  
  78. debug(MemoryExtendedInfo) enum {
  79.     TracePool           = q{Trace("\tpool=%p", pool);},
  80.     TraceHasPointers    = q{Trace("\tHas Pointers");}
  81. }
  82. else enum {
  83.     TracePool           = "",
  84.     TraceHasPointers    = ""
  85. }
  86.  
  87. // The memory module is the first one initialized by the runtime; at this point
  88. // the monitor module, which depends on this one, has not yet been initialized.
  89. // This monitor is therefore allocated in the static data segment, and assigned
  90. // to Memory.classinfo.__monitor during initialization.
  91. private __gshared Monitor _memoryMonitor;
  92.  
  93. version(Windows) {
  94.     extern(C): // From DigitalMars' C runtime
  95.     extern invariant void* _xi_a;   // &_xi_a just happens to be start of data segment
  96.     extern invariant void* _edata;  // &_edata is start of BSS segment
  97.     extern invariant void* _end;    // &_end is past end of BSS
  98. }
  99. else version(Posix) {
  100.     extern(C): // From glibc runtime
  101.     extern invariant void* __data_start;
  102.     extern invariant void* _end;
  103. }
  104. else static assert(0);
  105.  
  106. /**
  107.  Memory initialization method
  108. */
  109. enum AllocInit {
  110.     ZeroFill,   /// Fill memory with zeros
  111.     TypeInfo,   /// Initialize memory from TypeInfo.init
  112.     None        /// Don't initialize memory
  113. }
  114.  
  115. /**
  116.  The garbage collector automatic collection strategy.
  117. */
  118. enum CollectionStrategy {
  119.     Aggressive, /// Favor running collections over allocating new pools
  120.     Lazy        /// Favor allocating new pools over running collections
  121. }
  122.  
  123. /**
  124.  Internal memory manager initialization
  125. */
  126. package void MemoryInit() { with(Memory) {
  127.     Memory.classinfo.__monitor = &_memoryMonitor;
  128.  
  129.     version(Windows) {
  130.         MutexInit(&_memoryMonitor.mutex);
  131.  
  132.         AddRange(&_xi_a, &_end);
  133.     }
  134.     else version(Posix) {
  135.         pthread_mutex_init(&_memoryMonitor.mutex, null);
  136.  
  137.         AddRange(&__data_start, &_end);
  138.     }
  139.     else static assert(0);
  140. } }
  141.  
  142. /**
  143.  Internal memory manager destruction
  144. */
  145. package void MemoryDestroy() { with(Memory) {
  146.     _nRanges = 0;
  147.     RunCollector(true);
  148.  
  149.     free(_ranges);
  150.  
  151.     foreach(i; 0 .. _nPools) free(_pools[i]);
  152.     free(_pools);
  153.  
  154.     MutexDestroy(&_memoryMonitor.mutex);
  155. } }
  156.  
  157. /**
  158.  TODO: find a way to use multiple Memory instances, so for example threads can
  159.  be optionally created with their own memory manager to handle unshared data.
  160.  This could also be used to implement short-lived heaps where you can dump
  161.  the entire pool at once without affecting the rest of the program.
  162.  
  163.  These Memory instances should not have pointers into other memory instances,
  164.  but may have pointers into the shared Memory. The shared memory may only have
  165.  pointers into itself.
  166. */
  167. final abstract class Memory {
  168.  
  169. // ----------------------------------------------------------------------------
  170. // P u b l i c  I n t e r f a c e
  171. // ----------------------------------------------------------------------------
  172.  
  173. /**
  174.  Allocates memory for an object described by the given class info. The
  175.  allocated memory is always initialized using the classinfo initializer. The
  176.  object's constructor is not called here!
  177.  
  178.  Note:
  179.     It is recommended to use "new ClassName" whenever possible, which will end up
  180.     here with the proper classinfo, and then call the appropriate constructor
  181.     automatically.
  182.  
  183.  Params:
  184.     ci = The ClassInfo for the object to instantiate.
  185.  
  186.  Returns:
  187.     A reference to the allocated object is returned.
  188. */
  189. static shared Object Alloc(in ClassInfo ci)
  190. in {
  191.     assert(ci && ci.init);
  192. }
  193. out(o) {
  194.     assert(o);
  195. }
  196. body {
  197.     debug(Memory) Trace("Memory.Alloc(ci=%p) %.*s", ci, ci.name);
  198.  
  199.     void* p = void;
  200.  
  201.     if(ci.flags & ClassInfo.COM) {
  202.         debug(MemoryExtendedInfo) Trace("\tCOM Object");
  203.  
  204.         p = malloc(ci.init.length);
  205.         if(!p) OutOfMemoryError();
  206.     }
  207.     else synchronized(Memory.classinfo) {
  208.         Pool* pool = void;
  209.         p = Alloc(ci.init.length, &pool);
  210.  
  211.         mixin(TracePool);
  212.  
  213.         uint bit = FindBit(p, pool);
  214.  
  215.         if(!pool.hasFinalizer.nbits)
  216.             pool.hasFinalizer.Alloc(pool.hasPointers.nbits);
  217.  
  218.         pool.hasFinalizer.Set(bit);
  219.  
  220.         if(!(ci.flags & ClassInfo.NoPointers)) {
  221.             mixin(TraceHasPointers);
  222.  
  223.             pool.hasPointers.Set(bit);
  224.         }
  225.     }
  226.  
  227.     memcpy(p, ci.init.ptr, ci.init.length);
  228.     return cast(Object)p;
  229. }
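// Illustrative sketch (not part of the original module): how a "new" expression
// relates to this entry point. Per the note above, the runtime is expected to call
// Memory.Alloc(ci) with the proper ClassInfo and then run the constructor itself.
// The class used below is hypothetical.
version(none) unittest {
    class Foo { int x = 42; }

    // What "new Foo" boils down to: allocate and copy the class initializer...
    Foo f = cast(Foo)Memory.Alloc(Foo.classinfo);

    // ...then the runtime invokes the matching constructor. Alloc() itself never
    // calls it, so here f.x only holds the value from the classinfo initializer.
    assert(f.x == 42);
}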
  230.  
  231. /**
  232.  Allocates memory for an array described by the given TypeInfo.
  233.  
  234.  Note:
  235.     It is recommended to use "new mytype[mylength]" whenever possible, which
  236.     will end up here with all the proper parameters.
  237.  
  238.  Params:
  239.     ti =        Type of the array's elements.
  240.     length =    Number of elements to allocate.
  241.     init =      How to initialize memory.
  242.     size =      If nonzero, overrides the typeinfo size.
  243.  
  244.  Returns:
  245.     A pointer to the allocated array memory is returned.
  246. */
  247. static shared void* Alloc(in TypeInfo ti, size_t length = 1,
  248.     AllocInit init = AllocInit.ZeroFill, size_t size = 0)
  249. in {
  250.     assert(ti);
  251.     assert(length);
  252.     assert(size || ti.size);
  253. }
  254. out(p) {
  255.     assert(p);
  256. }
  257. body {
  258.     debug(Memory) Trace("Memory.Alloc(ti=%p, length=%u, init=%u, size=%u)",
  259.         ti, length, init, size);
  260.  
  261.     void* p = void;
  262.     size = !size ? ti.size * length : size * length;
  263.  
  264.     synchronized(Memory.classinfo) {
  265.         Pool* pool = void;
  266.         p = Alloc(size, &pool);
  267.  
  268.         mixin(TracePool);
  269.  
  270.         if(ti.flags & TypeInfo.HasPointers) {
  271.             mixin(TraceHasPointers);
  272.  
  273.             pool.hasPointers.Set(FindBit(p, pool));
  274.         }
  275.     }
  276.  
  277.     MemoryInit(ti, p, size, init);
  278.     return p;
  279. }
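// Illustrative sketch (not part of the original module): per the note above, an
// array allocation such as "new int[256]" is expected to reach this overload as
// roughly Alloc(typeid(int), 256, AllocInit.ZeroFill); element types without
// pointers never get the hasPointers bit, so the collector never scans the data.
version(none) unittest {
    // 256 ints = 1024 bytes, which lands in the B_1024 block class.
    int* data = cast(int*)Memory.Alloc(typeid(int), 256, AllocInit.ZeroFill);
    assert(data[0] == 0 && data[255] == 0);
}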
  280.  
  281. /**
  282.  Allocates uninitialized memory.
  283.  
  284.  Note:
  285.     It is strongly recommended that you use "new" instead; the dlib
  286.     implementation will call the proper allocation routine with the proper
  287.     typeinfo.
  288.  
  289.  Params:
  290.     size =          Size of the allocation, in bytes.
  291.     hasPointers =   Whether the memory contains pointers or not.
  292.  
  293.  Returns:
  294.     A pointer to the allocated memory is returned.
  295. */
  296. static shared void* Alloc(size_t size, bool hasPointers = true)
  297. in {
  298.     assert(size);
  299. }
  300. out(p) {
  301.     assert(p);
  302. }
  303. body {
  304.     debug(Memory) Trace("Memory.Alloc(size=%u, hasPointers=%b)", size, hasPointers & 1);
  305.  
  306.     // BUG!! When this method is synchronized:
  307.     // Error: cannot goto forward into different try block level
  308.     synchronized(Memory.classinfo) {
  309.         Pool* pool = void;
  310.         void* p = Alloc(size, &pool);
  311.  
  312.         mixin(TracePool);
  313.  
  314.         if(hasPointers) {
  315.             mixin(TraceHasPointers);
  316.  
  317.             pool.hasPointers.Set(FindBit(p, pool));
  318.         }
  319.  
  320.         return p;
  321.     }
  322. }
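// Illustrative sketch (not part of the original module): raw allocations default
// to hasPointers = true; passing false keeps the block out of the collector's
// scan set, which is the right choice for plain byte buffers.
version(none) unittest {
    void* buf = Memory.Alloc(4096, false); // exactly one B_4096 block, never scanned
    void* big = Memory.Alloc(4097);        // one byte over: becomes a 2-page allocation
    Memory.Free(buf);
    Memory.Free(big);
}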
  323.  
  324. /**
  325.  Reallocates memory for an array.
  326.  
  327.     Resize the given allocated memory, possibly relocating it. The length
  328.     parameter is used to specify the slice of the old memory to copy and/or
  329.     initialize.
  330.  
  331.  Note:
  332.     It is recommended that you use the array properties and operators instead
  333.     of calling this method. The runtime will determine the proper parameters
  334.     to call this method.
  335.  
  336.  Params:
  337.     old =       Pointer to the memory that is to be reallocated.
  338.     ti =        Type of the array's elements.
  339.     length =    Current array length.
  340.     newLength = New number of array elements.
  341.     init =      How to initialize new memory.
  342.     size =      If nonzero, overrides the typeinfo's size.
  343.  
  344.  Returns:
  345.     A pointer to the new memory is returned; the old pointer is no longer
  346.     valid and should not be used.
  347. */
  348. static shared void* Realloc(in void* old, in TypeInfo ti, size_t length = 1,
  349.     size_t newLength = 1, AllocInit init = AllocInit.ZeroFill, size_t size = 0)
  350. in {
  351.     assert(!length || old);
  352.     assert(ti);
  353.     assert(newLength);
  354.     assert(init != AllocInit.TypeInfo || ti.init);
  355.     assert(size || ti.size);
  356. }
  357. out(p) {
  358.     assert(p);
  359. }
  360. body {
  361.     debug(Memory) Trace("Memory.Realloc(old=%p, ti=%p, length=%u, newLength=%u, "
  362.         "init=%u, size=%u)", old, ti, length, newLength, init, size);
  363.  
  364.     if(!old) return Memory.Alloc(ti, newLength, init, size);
  365.  
  366.     void* p = void;
  367.     size_t newSize = size ? size * newLength : ti.size * newLength;
  368.     size = size ? size * length : ti.size * length;
  369.  
  370.     synchronized(Memory.classinfo) {
  371.         Pool* pool = void;
  372.         p = Realloc(old, newSize, size, &pool);
  373.  
  374.         mixin(TracePool);
  375.  
  376.         if(p != old && ti.flags & TypeInfo.HasPointers) {
  377.             mixin(TraceHasPointers);
  378.  
  379.             pool.hasPointers.Set(FindBit(p, pool));
  380.         }
  381.     }
  382.  
  383.     if(newSize > size) MemoryInit(ti, p + size, newSize - size, init);
  384.     return p;
  385. }
  386.  
  387. /**
  388.  Reallocates memory.
  389.  
  390.  Note:
  391.     If the memory is relocated, the new location is automatically marked as
  392.     having pointers, since there is no typeinfo available.
  393.  
  394.  Params:
  395.     old =   Pointer to the memory that is to be reallocated.
  396.     size =  Size of the new allocation, in bytes.
  397.  
  398.  Returns:
  399.     A pointer to the allocated memory is returned; if it is different from the
  400.     one given by the old parameter, then the old pointer is no longer safe to use.
  401. */
  402. static shared void* Realloc(in void* old, size_t size, bool hasPointers = true)
  403. in {
  404.     assert(size);
  405. }
  406. out(p) {
  407.     assert(p);
  408. }
  409. body {
  410.     debug(Memory) Trace("Memory.Realloc(old=%p, size=%u, hasPointers=%b)",
  411.         old, size, hasPointers & 1);
  412.  
  413.     if(!old) return Memory.Alloc(size);
  414.  
  415.     synchronized(Memory.classinfo) {
  416.         Pool* pool = void;
  417.         void* p = Realloc(old, size, 0, &pool);
  418.  
  419.         mixin(TracePool);
  420.  
  421.         if(p != old && hasPointers) {
  422.             mixin(TraceHasPointers);
  423.  
  424.             pool.hasPointers.Set(FindBit(p, pool));
  425.         }
  426.  
  427.         return p;
  428.     }
  429. }
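// Illustrative sketch (not part of the original module): growing a raw allocation.
// While the new size still fits the current block (or page span), Realloc returns
// the same pointer; once it outgrows it, the data is copied and the old block freed.
version(none) unittest {
    void* p = Memory.Alloc(20);            // rounded up to a 32-byte block
    void* q = Memory.Realloc(p, 30);       // still fits in 32 bytes: same pointer
    assert(p == q);
    void* r = Memory.Realloc(q, 100);      // needs a 128-byte block: relocated
    Memory.Free(r);
}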
  430.  
  431. /**
  432.  Frees memory
  433.  
  434.  Params:
  435.     p = Pointer to the memory that is to be freed.
  436. */
  437. static synchronized void Free(in void* p)
  438. in {
  439.     assert(p);
  440. }
  441. body {
  442.     debug(Memory) Trace("Memory.Free(p=%p)", p);
  443.  
  444.     Free(p, null);
  445. }
  446.  
  447. /**
  448.  Find the allocated size of the given pointer
  449.  
  450.  Params:
  451.     p = Pointer to the allocated memory.
  452.  
  453.  Returns:
  454.     The size of the allocated memory block.
  455. */
  456. static synchronized size_t SizeOf(in void* p) {
  457.     debug(Memory) Trace("Memory.SizeOf(p=%p)", p);
  458.  
  459.     if(!p) return 0;
  460.  
  461.     if(p is _cache) return _cacheSize;
  462.  
  463.     size_t size = FindAllocSize(p);
  464.  
  465.     if(cast(size_t)p & (size - 1) & (PageSize - 1)) {
  466.         size = 0;
  467.     }
  468.     else {
  469.         _cache = p;
  470.         _cacheSize = size;
  471.     }
  472.  
  473.     return size;
  474. }
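// Illustrative sketch (not part of the original module): SizeOf reports the size
// of the underlying block class or page span, not the size originally requested,
// and returns 0 for interior pointers that are not block-aligned.
version(none) unittest {
    void* p = Memory.Alloc(20);
    assert(Memory.SizeOf(p) == 32);     // 20 bytes rounds up to the B_32 class
    assert(Memory.SizeOf(p + 4) == 0);  // interior pointer: not a block start
    Memory.Free(p);
}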
  475.  
  476. /**
  477.  Adds a range of external memory to the root set. This range will be scanned by
  478.  the garbage collector; anything in it pointing to a valid address within
  479.  allocated memory marks that allocation as in-use.
  480.  
  481.  Params:
  482.     bottom =    The start address of the memory range.
  483.     top =       The end address of the memory range.
  484. */
  485. static synchronized void AddRange(in void* bottom, in void* top)
  486. in {
  487.     assert(bottom);
  488.     assert(top);
  489. }
  490. body {
  491.     debug(Memory) Trace("Memory.AddRange(bottom=%p, top=%p)", bottom, top);
  492.  
  493.     if(_nRanges == _rangesLength) {
  494.         _rangesLength = _rangesLength ? _rangesLength * 2 : 16;
  495.         _ranges = cast(Range*)realloc(_ranges, _rangesLength * Range.sizeof);
  496.  
  497.         if(!_ranges) OutOfMemoryError();
  498.     }
  499.  
  500.     _ranges[_nRanges].bottom = bottom;
  501.     _ranges[_nRanges].top = top;
  502.     _nRanges++;
  503. }
  504.  
  505. /**
  506.  Removes a range of external memory from the root set.
  507.  
  508.  Params:
  509.     bottom =    The start address of the memory range.
  510. */
  511. static synchronized void RemoveRange(in void* bottom)
  512. in {
  513.     assert(bottom);
  514. }
  515. body {
  516.     debug(Memory) Trace("Memory.RemoveRange(bottom=%p)", bottom);
  517.  
  518.     foreach(i; 0 .. _nRanges) {
  519.         if(_ranges[i].bottom == bottom) {
  520.             _nRanges--;
  521.             memmove(_ranges + i, _ranges + i + 1, (_nRanges - i) * Range.sizeof);
  522.             return;
  523.         }
  524.     }
  525. }
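// Illustrative sketch (not part of the original module): memory that lives outside
// the collector (malloc'd, static, mapped) but stores pointers into collected
// memory must be registered as a root range, and unregistered before it goes away.
version(none) unittest {
    void** roots = cast(void**)malloc(16 * (void*).sizeof);
    Memory.AddRange(roots, roots + 16);

    roots[0] = Memory.Alloc(64); // now kept alive by the registered range

    Memory.RemoveRange(roots);   // ...until the range is removed
    free(roots);
}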
  526.  
  527. /**
  528.  Change the garbage collector's strategy.
  529.  
  530.  Params:
  531.     strategy =  The new strategy to use.
  532.  
  533.  Returns:
  534.     The previous strategy is returned.
  535. */
  536. static synchronized CollectionStrategy SetCollectionStrategy(CollectionStrategy strategy) {
  537.     debug(Memory) Trace("Memory.SetCollectionStrategy(strategy=%u)", strategy);
  538.  
  539.     CollectionStrategy old = _collectionStrategy;
  540.     _collectionStrategy = strategy;
  541.     return old;
  542. }
  543.  
  544. /**
  545.  Runs the garbage collector
  546.  
  547.  Params:
  548.     complete =  Set to true to run the collector in aggressive mode;
  549.                 it is much slower, but can free much more memory.
  550.  
  551.  Returns:
  552.     The total bytes freed.
  553. */
  554. static synchronized size_t RunCollector(bool complete = false) {
  555.     debug(Memory) Trace("Memory.RunCollector(complete=%b)", complete & 1);
  556.  
  557.     return CollectGarbage(complete);
  558. }
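// Illustrative sketch (not part of the original module): the strategy only decides
// what the allocator tries first when it runs out of memory; collections can still
// be forced by hand at known quiet points.
version(none) unittest {
    // Lazy: prefer mapping a new pool, collect only if that fails.
    CollectionStrategy previous = Memory.SetCollectionStrategy(CollectionStrategy.Lazy);

    size_t freed = Memory.RunCollector(true); // full (recursive) collection right now

    Memory.SetCollectionStrategy(previous);   // restore the previous behaviour
}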
  559.  
  560. version(MemoryProfile) {
  561.     static uint used() {
  562.         return _used;
  563.     }
  564.     static uint total() {
  565.         return _total;
  566.     }
  567. }
  568.  
  569. // ----------------------------------------------------------------------------
  570. // I n t e r n a l
  571. // ----------------------------------------------------------------------------
  572.  
  573. private:
  574.  
  575. enum {
  576.     PtrShift    = 4,
  577.  
  578.     CommitSize  = PageSize * 16,
  579.     PoolSize    = PageSize * 256,
  580. }
  581.  
  582. static if(PageSize == 0x1000) enum PageShift = 12;
  583. else static assert(0);
  584.  
  585. typedef ubyte BlockIndex;
  586. enum : BlockIndex {
  587.     B_16, B_32, B_64, B_128, B_256, B_512, B_1024, B_2048, B_4096,
  588.     B_PAGE,
  589.     B_PAGEPLUS,
  590.     B_FREE,
  591.     B_UNCOMMITTED,
  592.     B_MAX = B_PAGE
  593. }
  594.  
  595. immutable uint[B_MAX] BlockIndexSize =
  596.     [0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000];
  597.  
  598. // Free blocks are cast to this structure and pushed onto the freelist for
  599. // their block index
  600. struct FreeList {
  601.     FreeList*   next;
  602.     Pool*       pool; // Cache the block's pool for faster allocations
  603. }
  604. static assert(FreeList.sizeof <= 16);
  605.  
  606. struct Range {
  607.     const(void)* bottom;
  608.     const(void)* top;
  609. }
  610.  
  611. static __gshared:
  612.  
  613. CollectionStrategy _collectionStrategy;
  614.  
  615. Range* _ranges;
  616. uint _nRanges;
  617. uint _rangesLength;
  618.  
  619. const(void)* _cache;
  620. size_t _cacheSize;
  621.  
  622. FreeList*[B_MAX] _freeList;
  623.  
  624. Pool** _pools;
  625. uint _nPools;
  626. uint _poolsLength;
  627.  
  628. const(void)* _minAddr;
  629. const(void)* _maxAddr;
  630.  
  631. version(MemoryProfile) {
  632.     uint _used;
  633.     uint _total;
  634. }
  635.  
  636. /**
  637.  Internal memory allocation
  638.  
  639.  Params:
  640.     size =  The allocation size.
  641.     pPool = Set to the pool owning the returned allocation.
  642.  
  643.  Returns:
  644.     A pointer to the allocated memory is returned; it is never initialized.
  645. */
  646. static void* Alloc(size_t size, Pool** pPool)
  647. in {
  648.     assert(size);
  649.     assert(pPool);
  650. }
  651. out(p) {
  652.     assert(p);
  653. }
  654. body {
  655.     debug(MemoryInternal) Trace("Memory.Alloc(size=%u, pPool=%p)", size, pPool);
  656.  
  657.     debug(MemoryBreakOnAlloc) asm { int 3; }
  658.  
  659.     void* p = void;
  660.     BlockIndex blockIndex = FindBlockIndex(size);
  661.  
  662.     /*
  663.      Block allocation
  664.     */
  665.     if(blockIndex < B_PAGE) {
  666.         if(!_freeList[blockIndex]) {
  667.             Pool* pool = void;
  668.             uint blockSize = BlockIndexSize[blockIndex];
  669.  
  670.             bool AllocPage() {
  671.                 uint n = pool.AllocPages(1);
  672.                 if(n == uint.max) return false;
  673.  
  674.                 pool.pageTable[n] = blockIndex;
  675.  
  676.                 void* p = pool.baseAddr + n * PageSize;
  677.                 void* top = p + PageSize;
  678.                 FreeList** block = &_freeList[blockIndex];
  679.  
  680.                 for(; p < top; p += blockSize) {
  681.                     (cast(FreeList*)p).next = *block;
  682.                     *block = cast(FreeList*)p;
  683.                     (*block).pool = pool;
  684.                 }
  685.  
  686.                 return true;
  687.             }
  688.  
  689.             foreach(i; 0 .. _nPools) {
  690.                 pool = _pools[i];
  691.                 if(AllocPage()) goto BlockAlloc;
  692.             }
  693.  
  694.             final switch(_collectionStrategy) {
  695.             case CollectionStrategy.Aggressive:
  696.                 if(CollectGarbage(true) < blockSize || !_freeList[blockIndex])
  697.                     if((pool = AllocPool()) == null || !AllocPage())
  698.                         OutOfMemoryError();
  699.  
  700.                 break;
  701.  
  702.             case CollectionStrategy.Lazy:
  703.                 if((pool = AllocPool()) == null || !AllocPage())
  704.                     if(CollectGarbage(true) < blockSize || !_freeList[blockIndex])
  705.                         OutOfMemoryError();
  706.  
  707.                 break;
  708.             }
  709.         }
  710.  
  711.     BlockAlloc:
  712.         p = _freeList[blockIndex];
  713.         *pPool = _freeList[blockIndex].pool;
  714.         _freeList[blockIndex] = _freeList[blockIndex].next;
  715.  
  716.         version(MemoryProfile) _used += BlockIndexSize[blockIndex];
  717.     }
  718.  
  719.     /*
  720.      Page allocation
  721.     */
  722.     else {
  723.         Pool* pool = void;
  724.         uint numPages = (size + PageSize - 1) >> PageShift;
  725.         uint page = void;
  726.  
  727.         bool AllocPagesScan() {
  728.             foreach(i; 0 .. _nPools) {
  729.                 pool = _pools[i];
  730.                 page = pool.AllocPages(numPages);
  731.  
  732.                 if(page != uint.max) return true;
  733.             }
  734.  
  735.             return false;
  736.         }
  737.  
  738.         if(!AllocPagesScan()) {
  739.             final switch(_collectionStrategy) {
  740.             case CollectionStrategy.Aggressive:
  741.                 if(CollectGarbage(true) < numPages * PageSize || !AllocPagesScan())
  742.                     if((pool = AllocPool(numPages)) == null || (page = pool.AllocPages(numPages)) == uint.max)
  743.                         OutOfMemoryError();
  744.  
  745.                 break;
  746.  
  747.             case CollectionStrategy.Lazy:
  748.                 if((pool = AllocPool(numPages)) == null || (page = pool.AllocPages(numPages)) == uint.max)
  749.                     if(CollectGarbage(true) < numPages * PageSize || !AllocPagesScan())
  750.                         OutOfMemoryError();
  751.  
  752.                 break;
  753.             }
  754.         }
  755.  
  756.         pool.pageTable[page] = B_PAGE;
  757.         pool.MarkPages(page + 1, numPages - 1, B_PAGEPLUS);
  758.  
  759.         p = pool.baseAddr + page * PageSize;
  760.         *pPool = pool;
  761.  
  762.         version(MemoryProfile) _used += PageSize * numPages;
  763.     }
  764.  
  765.     return p;
  766. }
  767.  
  768. /**
  769.  Internal memory reallocation
  770.  
  771.  Params:
  772.     old     = Pointer to the current allocation.
  773.     size    = The requested allocation size.
  774.     curSize = The known used allocation size, defaults to current size if 0.
  775.     pool    = Returns the pool of the reallocated memory.
  776.  
  777.  Returns:
  778.     A pointer to the new allocation is returned.
  779. */
  780. static void* Realloc(in void* old, size_t size, size_t curSize, Pool** pPool)
  781. in {
  782.     assert(old);
  783.     assert(pPool);
  784. }
  785. out(p) {
  786.     assert(p);
  787. }
  788. body {
  789.     debug(MemoryInternal) Trace("Memory.Realloc(old=%p, size=%u, curSize=%u, pPool=%p)",
  790.         old, size, curSize, pPool);
  791.  
  792.     debug(MemoryBreakOnAlloc) asm { int 3; }
  793.  
  794.     void* p = void;
  795.  
  796.     Pool* pool = FindPool(old);
  797.  
  798.     // Trying to realloc memory that isn't ours; this can happen when, for
  799.     // example, the source of a concatenation is inside the static data
  800.     // segment.
  801.     if(!pool) {
  802.         p = Memory.Alloc(size, pPool);
  803.         memcpy(p, old, curSize);
  804.         return p;
  805.     }
  806.  
  807.     size_t sizeof = FindAllocSize(old, pool);
  808.     size_t pageAlign = (sizeof + PageSize-1) & ~(PageSize-1);
  809.  
  810.     if(!curSize) curSize = sizeof;
  811.  
  812.     /*
  813.      Block re-allocations:
  814.         block => block
  815.         block => page
  816.         page  => block
  817.     */
  818.     if(size <= PageSize || pageAlign <= PageSize) {
  819.         // No block index change
  820.         if((sizeof == 16 || size > (sizeof >> 1)) && size <= sizeof && pageAlign <= PageSize) {
  821.             *pPool = pool;
  822.             return cast(void*)old;
  823.         }
  824.  
  825.         p = Alloc(size, pPool);
  826.         memcpy(p, old, curSize);
  827.         Free(old, pool);
  828.     }
  829.  
  830.     /*
  831.      Page re-allocations:
  832.         page => page
  833.     */
  834.     else {
  835.         // No page count change
  836.         if(size > (pageAlign - PageSize) && size <= pageAlign) {
  837.             *pPool = pool;
  838.             return cast(void*)old;
  839.         }
  840.  
  841.         uint page = FindPage(old, pool);
  842.         uint numPages = pageAlign >> PageShift;
  843.  
  844.         // Adding pages
  845.         if(size > curSize) {
  846.             uint newPages = (size - curSize + PageSize - 2) >> PageShift;
  847.  
  848.             // Try and extend pages in place
  849.             uint i = void, pageIndex = void;
  850.             for(i = numPages, pageIndex = page + i; i < newPages; i++, pageIndex++) {
  851.                 if(pageIndex == pool.ncommitted)
  852.                     if(pool.CommitPages(newPages - i) == uint.max) break;
  853.  
  854.                 if(pool.pageTable[pageIndex] != B_FREE) break;
  855.             }
  856.  
  857.             if(i == newPages) {
  858.                 pool.MarkPages(page, newPages - page, B_PAGEPLUS);
  859.                 *pPool = pool;
  860.  
  861.                 version(MemoryProfile) _used += PageSize * newPages;
  862.             }
  863.             else {
  864.                 p = Alloc(size, pPool);
  865.                 memcpy(p, old, curSize);
  866.  
  867.                 pool.MarkPages(page, numPages, B_FREE);
  868.  
  869.                 version(MemoryProfile) _used -= PageSize * numPages;
  870.             }
  871.         }
  872.  
  873.         // Freeing pages
  874.         else {
  875.             uint freePages = (curSize - size + PageSize - 2) >> PageShift;
  876.             pool.MarkPages(page + numPages - freePages, freePages, B_FREE);
  877.             *pPool = pool;
  878.  
  879.             version(MemoryProfile) _used -= PageSize * freePages;
  880.         }
  881.     }
  882.  
  883.     if(_cache == old && p != old) {
  884.         _cache = null;
  885.         _cacheSize = 0;
  886.     }
  887.  
  888.     return p;
  889. }
  890.  
  891. /**
  892.  Internal memory release.
  893.  
  894.  Params:
  895.     p =     Pointer to the memory that is to be freed.
  896.     pool =  Pointer to the pool containing the memory, can be null.
  897. */
  898. static void Free(in void* p, Pool* pool)
  899. in {
  900.     assert(p);
  901. }
  902. body {
  903.     debug(MemoryInternal) Trace("Memory.Free(p=%p, pool=%p)", p, pool);
  904.  
  905.     debug(MemoryBreakOnAlloc) asm { int 3; }
  906.  
  907.     if(!pool) pool = FindPool(p);
  908.     if(!pool) throw new Error("Memory.Free(): Bad pointer.");
  909.  
  910.     uint page = FindPage(p, pool);
  911.     uint bit = FindBit(p, pool);
  912.  
  913.     pool.hasPointers.Clear(bit);
  914.  
  915.     if(pool.hasFinalizer.nbits && pool.hasFinalizer.TestClear(bit)) {
  916.         debug(MemoryExtendedInfo) Trace("\tCall finalizer");
  917.  
  918.         version(DigitalMars) _d_callfinalizer(cast(Object)cast(void*)p);
  919.         else static assert(0);
  920.     }
  921.  
  922.     BlockIndex blockIndex = pool.pageTable[page];
  923.  
  924.     // Block allocation, back on the freelist
  925.     if(blockIndex < B_PAGE) {
  926.         FreeList* block = cast(FreeList*)p;
  927.         block.next = _freeList[blockIndex];
  928.         block.pool = pool;
  929.         _freeList[blockIndex] = block;
  930.  
  931.         version(MemoryProfile) _used -= BlockIndexSize[blockIndex];
  932.     }
  933.  
  934.     // Page allocation, free pages
  935.     else {
  936.         int numPages = 1;
  937.  
  938.         while(++page != pool.ncommitted && pool.pageTable[page] == B_PAGEPLUS)
  939.             numPages++;
  940.  
  941.         pool.MarkPages(page - numPages, numPages, B_FREE);
  942.  
  943.         version(MemoryProfile) _used -= PageSize * numPages;
  944.     }
  945. }
  946.  
  947. /**
  948.  Initialize array memory
  949.  
  950.  Params:
  951.     ti =    Type of the array's elements
  952.     p =     Pointer to the allocated memory to initialize.
  953.     size =  Length of the memory to initialize.
  954.     init =  How to initialize.
  955. */
  956. static void MemoryInit(in TypeInfo ti, void* p, size_t size, AllocInit init)
  957. in {
  958.     assert(ti);
  959.     assert(p);
  960.     assert(size);
  961. }
  962. body {
  963.     if(init == AllocInit.ZeroFill) {
  964.         memset(p, 0, size);
  965.     }
  966.     else if(init == AllocInit.TypeInfo) {
  967.         size_t length = ti.init.length;
  968.         const void* ptr = ti.init.ptr;
  969.  
  970.         if(!ptr)
  971.             memset(p, 0, size);
  972.         else if(length == 1)
  973.             memset(p, *cast(ubyte*)ptr, size);
  974.         else if(length == uint.sizeof)
  975.             foreach(i; 0 .. size / uint.sizeof)
  976.                 (cast(size_t*)p)[i] = *cast(size_t*)ptr;
  977.         else
  978.             for(size_t i = 0; i < size; i += length)
  979.                 memcpy(p + i, ptr, length);
  980.     }
  981. }
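// Illustrative sketch (not part of the original module): what the three AllocInit
// modes mean for a struct whose .init is not all zeros. The struct is hypothetical.
version(none) unittest {
    struct S { int x = 7; }

    // ZeroFill ignores the type's initializer entirely.
    S* a = cast(S*)Memory.Alloc(typeid(S), 4, AllocInit.ZeroFill);
    assert(a[0].x == 0);

    // TypeInfo replicates the typeid(S) initializer across every element.
    S* b = cast(S*)Memory.Alloc(typeid(S), 4, AllocInit.TypeInfo);
    assert(b[0].x == 7 && b[3].x == 7);

    // None leaves the memory exactly as the allocator returned it.
    S* c = cast(S*)Memory.Alloc(typeid(S), 4, AllocInit.None);
}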
  982.  
  983. /**
  984.  Finds the block index for a given allocation size
  985.  
  986.  Params:
  987.     size =  The allocation size
  988.  
  989.  Returns:
  990.     Index of the smallest block size that will contain the allocation.
  991. */
  992. static BlockIndex FindBlockIndex(size_t size)
  993. in {
  994.     assert(size);
  995. }
  996. out(i) {
  997.     assert(i >= 0 && i <= B_PAGE);
  998. }
  999. body {
  1000.     static __gshared size_t sizeCache;
  1001.     static __gshared BlockIndex cache;
  1002.  
  1003.     if(size == sizeCache) return cache;
  1004.  
  1005.     sizeCache = size;
  1006.     if(size <= 64) {
  1007.         if(size <= 16) return cache = B_16;
  1008.         if(size <= 32) return cache = B_32;
  1009.         else return cache = B_64;
  1010.     }
  1011.     else if(size <= 256) {
  1012.         if(size <= 128) return cache = B_128;
  1013.         else return cache = B_256;
  1014.     }
  1015.     else if(size <= 1024) {
  1016.         if(size <= 512) return cache = B_512;
  1017.         return cache = B_1024;
  1018.     }
  1019.     else {
  1020.         if(size <= 2048) return cache = B_2048;
  1021.         if(size <= 4096) return cache = B_4096;
  1022.     }
  1023.  
  1024.     return cache = B_PAGE;
  1025. }
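// Illustrative sketch (not part of the original module): the size-class mapping
// implemented above. Anything above 4096 bytes is served from whole pages.
version(none) unittest {
    assert(FindBlockIndex(1)    == B_16);
    assert(FindBlockIndex(16)   == B_16);
    assert(FindBlockIndex(17)   == B_32);
    assert(FindBlockIndex(100)  == B_128);
    assert(FindBlockIndex(4096) == B_4096);
    assert(FindBlockIndex(4097) == B_PAGE);
}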
  1026.  
  1027. /**
  1028.  Find the memory pool containing pointer p
  1029.  
  1030.  Params:
  1031.     p = Pointer to allocated memory.
  1032.  
  1033.  Returns:
  1034.     Pointer to the pool containing the memory.
  1035.     Returns null if not found.
  1036. */
  1037. static Pool* FindPool(in void* p) {
  1038.     return p >= _minAddr && p < _maxAddr ?
  1039.         (_nPools == 1 ? _pools[0] : BinarySearch(p, 0, _nPools)) :
  1040.         null;
  1041. }
  1042.  
  1043. /**
  1044.  Specialized binary search for FindPool()
  1045.  TODO!!! why on earth did I use a recursive search instead of an iterative one here?
  1046. */
  1047. Pool* BinarySearch(in void* p, uint low, uint high) {
  1048.     if(high < low) return null;
  1049.  
  1050.     uint mid = low + ((high - low) / 2);
  1051.  
  1052.     Pool* pool = _pools[mid];
  1053.     if(p >= pool.baseAddr && p < pool.topAddr) return pool;
  1054.  
  1055.     return pool.baseAddr > p ? BinarySearch(p, low, mid - 1) : BinarySearch(p, mid + 1, high);
  1056. }
  1057.  
  1058. /**
  1059.  Find the memory page containing pointer p inside the given pool
  1060.  
  1061.  Params:
  1062.     p =     Pointer to the allocation we want the page of.
  1063.     pool =  Pool containing the pointer.
  1064.  
  1065.  Return:
  1066.     The page number is returned.
  1067. */
  1068. static uint FindPage(in void* p, in Pool* pool) {
  1069.     return cast(uint)((p - pool.baseAddr) >> PageShift);
  1070. }
  1071.  
  1072. /**
  1073.  Find the bit index of pointer p inside the given pool
  1074. */
  1075. static uint FindBit(in void* p, in Pool* pool) {
  1076.     return cast(uint)((p - pool.baseAddr) >> PtrShift);
  1077. }
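// Illustrative sketch (not part of the original module): the page/bit arithmetic,
// assuming a pool that the pointer belongs to. One mark bit covers 16 bytes
// (1 << PtrShift), so a 4096-byte page spans 256 bits. The base address below is
// hypothetical.
version(none) unittest {
    Pool pool;
    pool.baseAddr = cast(void*)0x100000; // hypothetical, page-aligned base

    void* p = pool.baseAddr + 2 * PageSize + 0x40; // 0x40 into the third page

    assert(FindPage(p, &pool) == 2);
    assert(FindBit(p, &pool) == 2 * (PageSize >> PtrShift) + 4);
}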
  1078.  
  1079. /**
  1080.  Find the block size of an allocation
  1081. */
  1082. static size_t FindAllocSize(in void* p, Pool* pool = null) {
  1083.     if(!pool) pool = FindPool(p);
  1084.     if(!pool) throw new Error("FindAllocSize: Bad Pointer.");
  1085.  
  1086.     uint page = FindPage(p, pool);
  1087.     BlockIndex blockIndex = pool.pageTable[page];
  1088.  
  1089.     // Block size
  1090.     if(blockIndex < B_PAGE)
  1091.         return BlockIndexSize[blockIndex];
  1092.  
  1093.     // Page size
  1094.     else foreach(i; page + 1 .. pool.ncommitted)
  1095.         if(pool.pageTable[i] != B_PAGEPLUS)
  1096.             return (i - page) * PageSize;
  1097.  
  1098.     return (pool.ncommitted - page) * PageSize;
  1099. }
  1100.  
  1101. /**
  1102.  Allocate a new memory pool
  1103. */
  1104. static Pool* AllocPool(uint pages = 1) {
  1105.     debug(MemoryInternal) Trace("Memory.AllocPool(pages=%u)", pages);
  1106.  
  1107.     debug(MemoryBreakOnAllocPool) asm { int 3; }
  1108.  
  1109.     pages = (pages + (CommitSize / PageSize) - 1) & ~(CommitSize / PageSize - 1);
  1110.  
  1111.     if(pages < PoolSize / PageSize) {
  1112.         pages = PoolSize / PageSize;
  1113.     }
  1114.     else {
  1115.         uint n = pages + (pages >> 1);
  1116.         if(n < size_t.max / PageSize) pages = n;
  1117.     }
  1118.  
  1119.     if(_nPools) {
  1120.         uint n = (_nPools > 8 ? 8 : _nPools) * PoolSize / PageSize;
  1121.         if(pages < n) pages = n;
  1122.     }
  1123.  
  1124.     debug(MemoryExtendedInfo) Trace("\tCreating with %u pages", pages);
  1125.  
  1126.     Pool* pool = cast(Pool*)calloc(1, Pool.sizeof);
  1127.     if(!pool) return null;
  1128.  
  1129.     pool.Alloc(pages);
  1130.     if(!pool.baseAddr) goto Error;
  1131.  
  1132.     if(_nPools == _poolsLength) {
  1133.         _poolsLength = _poolsLength ? _poolsLength * 2 : 16;
  1134.         _pools = cast(Pool**)realloc(_pools, _poolsLength * (Pool*).sizeof);
  1135.  
  1136.         if(!_pools) goto Error;
  1137.     }
  1138.  
  1139.     uint i;
  1140.     for(; i != _nPools; i++) if(pool.topAddr <= _pools[i].baseAddr) break;
  1141.  
  1142.     memmove(_pools + i + 1, _pools + i, (_nPools - i) * (Pool*).sizeof);
  1143.  
  1144.     _pools[i] = pool;
  1145.     _nPools++;
  1146.  
  1147.     _minAddr = _pools[0].baseAddr;
  1148.     _maxAddr = _pools[_nPools - 1].topAddr;
  1149.  
  1150.     return pool;
  1151.  
  1152. Error:
  1153.     debug(MemoryExtendedInfo) Trace("\tFailed!");
  1154.  
  1155.     pool.Free();
  1156.     free(pool);
  1157.     return null;
  1158. }
  1159.  
  1160. /+ TODO!!! threading support on hold until 'shared' gets decent semantics
  1161.  
  1162.     if(!IsRuntimeShutdown && RuntimeThread.isMultiThreaded) {
  1163.         // We require exclusive execution while we scan threads,
  1164.         // once the stacks are all marked, we can resume the threads and finish collecting
  1165.         RuntimeThread.PauseAll();
  1166.         scope(exit) RuntimeThread.ResumeAll();
  1167.  
  1168.         version(Windows) CONTEXT context;
  1169.  
  1170.         foreach(t; RuntimeThread) {
  1171.             version(Windows) {
  1172.                 context.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
  1173.                 if(!.GetThreadContext(t.handle, &context)) SystemException();
  1174.  
  1175.                 MarkRangePointers(cast(void*)context.Esp, t.stackBottom);
  1176.                 MarkRangePointers(&context.Edi, &context.Eip);
  1177.             }
  1178.             else version(Posix) {
  1179.                 assert(0);
  1180.                 // The registers are already stored on the stack
  1181.                 //if(t.isSelf()) t.stackTop = Thread.getESP();
  1182.  
  1183.                 //version (STACKGROWSDOWN) mark(t.stackTop, t.stackBottom);
  1184.                 //else mark(t.stackBottom, t.stackTop);
  1185.             }
  1186.             else static assert(0);
  1187.         }
  1188.     }
  1189.  
  1190.     // Only running a single thread, easy
  1191.     else {
  1192.         RuntimeThread current = RuntimeThread.current;
  1193.         version(StackGrowsDown) MarkRangePointers(current.stackTop, current.stackBottom);
  1194.         else MarkRangePointers(current.stackBottom, current.stackTop);
  1195.     }
  1196. +/
  1197.  
  1198. /**
  1199.  Wrapper used to ensure the registers are also part of the root set.
  1200. */
  1201. static size_t CollectGarbage(bool recursive = false) {
  1202.     debug(MemoryInternal) Trace("CollectGarbage(recursive=%b)", recursive & 1);
  1203.  
  1204.     debug(MemoryBreakOnCollection) asm { int 3; }
  1205.  
  1206.     const(void)* stackTop = void;
  1207.  
  1208.     version(X86) asm {
  1209.         pushad;
  1210.         mov stackTop[EBP], ESP;
  1211.     }
  1212.     else static assert(0);
  1213.  
  1214.     assert(stackTop);
  1215.  
  1216.     size_t free = recursive ?
  1217.         DoCollectGarbage!true(stackTop) : DoCollectGarbage!false(stackTop);
  1218.  
  1219.     debug(MemoryExtendedInfo) Trace("\t%u bytes free", free);
  1220.  
  1221.     version(MemoryProfile) _used -= _total - free;
  1222.  
  1223.     version(X86) asm {
  1224.         popad;
  1225.     }
  1226.     else static assert(0);
  1227.  
  1228.     return free;
  1229. }
  1230.  
  1231. /**
  1232.  Scans the known roots, usually the static data segment and the threads' stacks,
  1233.  for reachable objects in the working set, and releases the unreachable ones.
  1234.  
  1235.  Params:
  1236.     recursive =     Whether to run the collector until no more memory can be freed.
  1237.     stackTop =      The top of the calling thread's stack.
  1238.     initialFree =   Only used if recursive is true to determine whether to recurse or not.
  1239.  
  1240.  Returns:
  1241.     The total amount of free memory in the working set is returned.
  1242. */
  1243. static size_t DoCollectGarbage(bool recursive)(in void* stackTop, size_t initialFree = 0) {
  1244.     debug(MemoryInternal) Trace("Memory.DoCollectGarbage!%.*s(stackTop=%p, initialFree=%u)",
  1245.         recursive ? "true" : "false", stackTop, initialFree);
  1246.  
  1247.     const(void)* p = void, top = void;
  1248.     BlockIndex blockIndex = void;
  1249.     uint page = void, blockSize = void, bit = void, stride = void, maxBit = void;
  1250.     uint freePages = void;
  1251.     bool hasFinalizers = void;
  1252.     FreeList* block = void;
  1253.     size_t free;
  1254.  
  1255.     _freeList[] = null;
  1256.     _cache = null;
  1257.     _cacheSize = 0;
  1258.  
  1259.     enum InitPage = q{
  1260.         blockIndex = pool.pageTable[page];
  1261.         bit = page * (PageSize >> PtrShift);
  1262.     };
  1263.     enum InitBlock = q{
  1264.         blockSize = BlockIndexSize[blockIndex];
  1265.         stride = blockSize >> PtrShift;
  1266.         p = pool.baseAddr + page * PageSize;
  1267.         top = p + PageSize;
  1268.     };
  1269.  
  1270.     foreach(pool; _pools[0 .. _nPools]) pool.mark.Zero();
  1271.  
  1272.     // Scan the roots and working set for reachable objects.
  1273.     foreach(range; _ranges[0 .. _nRanges])
  1274.         MarkRangePointers(range.bottom, range.top);
  1275.  
  1276.     MarkRangePointers(stackTop, STACKBOTTOM); // TODO!!! temporary hack until 'shared' works
  1277.  
  1278.     foreach(pool; _pools[0 .. _nPools]) {
  1279.         for(page = 0; page != pool.ncommitted; page++) {
  1280.             mixin(InitPage);
  1281.  
  1282.             if(blockIndex < B_PAGE) {
  1283.                 mixin(InitBlock);
  1284.  
  1285.                 for(; p != top; p += blockSize, bit += stride)
  1286.                     if(pool.hasPointers.Test(bit))
  1287.                         MarkRangePointers(p, p + blockSize);
  1288.             }
  1289.             else if(blockIndex == B_PAGE) {
  1290.                 if(!pool.hasPointers.Test(bit)) continue;
  1291.    
  1292.                 p = pool.baseAddr + page * PageSize;
  1293.                 top = p + PageSize;
  1294.  
  1295.                 while(page + 1 != pool.ncommitted && pool.pageTable[page + 1] == B_PAGEPLUS) {
  1296.                     page++;
  1297.                     top += PageSize;
  1298.                 }
  1299.  
  1300.                 MarkRangePointers(p, top);
  1301.             }
  1302.         }
  1303.     }
  1304.  
  1305.     enum BuildFreeList = q{
  1306.         blockSize = BlockIndexSize[blockIndex];
  1307.         bit = page * (PageSize >> PtrShift);
  1308.         p = pool.baseAddr + page * PageSize;
  1309.         top = p + PageSize;
  1310.  
  1311.         for(; p != top; p += blockSize, bit += stride) {
  1312.             if(pool.mark.Test(bit)) continue;
  1313.  
  1314.             block = cast(FreeList*)p;
  1315.             block.next = _freeList[blockIndex];
  1316.             block.pool = pool;
  1317.  
  1318.             _freeList[blockIndex] = block;
  1319.         }
  1320.     };
  1321.     enum FreePool = q{
  1322.         uint i;
  1323.         for(; i != _nPools; i++) if(pool.topAddr <= _pools[i].topAddr) break;
  1324.  
  1325.         memmove(_pools + i, _pools + i + 1, (_nPools - i - 1) * (Pool*).sizeof);
  1326.  
  1327.         pool.Free();
  1328.         _nPools--;
  1329.     };
  1330.  
  1331. // Sweep the working set and finalize every unreachable object. When
  1332. // recursive is false the freelist is also rebuilt here.
  1333.     foreach(pool; _pools[0 .. _nPools]) {
  1334.         static if(!recursive) freePages = 0;
  1335.  
  1336.         for(page = 0; page != pool.ncommitted; page++) {
  1337.             mixin(InitPage);
  1338.  
  1339.             hasFinalizers = cast(bool)pool.hasFinalizer.nbits;
  1340.  
  1341.             if(blockIndex < B_PAGE) {
  1342.                 mixin(InitBlock);
  1343.  
  1344.                 static if(!recursive) maxBit = 0;
  1345.  
  1346.                 for(; p != top; p += blockSize, bit += stride) {
  1347.                     if(pool.mark.Test(bit)) {
  1348.                         static if(!recursive) if(!maxBit) maxBit = bit;
  1349.                         continue;
  1350.                     }
  1351.  
  1352.                     pool.hasPointers.Clear(bit);
  1353.  
  1354.                     if(hasFinalizers && pool.hasFinalizer.TestClear(bit))
  1355.                         _d_callfinalizer(cast(Object)p);
  1356.  
  1357.                     free += blockSize;
  1358.                 }
  1359.  
  1360.                 static if(!recursive) {
  1361.                     if(maxBit == bit + (PageSize >> PtrShift)) {
  1362.                         pool.pageTable[page] = B_FREE;
  1363.                         freePages++;
  1364.                     }
  1365.                     else {
  1366.                         mixin(BuildFreeList);
  1367.                     }
  1368.  
  1369.                     continue;
  1370.                 }
  1371.             }
  1372.             else if(blockIndex == B_PAGE) {
  1373.                 if(pool.mark.Test(bit)) continue;
  1374.  
  1375.                 p = pool.baseAddr + page * PageSize;
  1376.                 pool.hasPointers.Clear(bit);
  1377.  
  1378.                 if(hasFinalizers && pool.hasFinalizer.TestClear(bit))
  1379.                     _d_callfinalizer(cast(Object)p);
  1380.  
  1381.                 do {
  1382.                     pool.pageTable[page] = B_FREE;
  1383.                     free += PageSize;
  1384.                     freePages++;
  1385.                 } while(++page != pool.ncommitted && pool.pageTable[page] == B_PAGEPLUS);
  1386.  
  1387.                 static if(!recursive) continue;
  1388.             }
  1389.  
  1390.             static if(!recursive) if(blockIndex == B_FREE) freePages++;
  1391.         }
  1392.  
  1393.         static if(!recursive) if(freePages == pool.ncommitted) {
  1394.             mixin(FreePool);
  1395.         }
  1396.     }
  1397.  
  1398. // Run the collector recursively until it does not free any more memory; the
  1399. // freelist is rebuilt on the last iteration.
  1400.     static if(recursive) {
  1401.         if(free != initialFree) return DoCollectGarbage!true(stackTop, free);
  1402.  
  1403.         foreach(pool; _pools[0 .. _nPools]) {
  1404.             freePages = 0;
  1405.  
  1406.             for(page = 0; page != pool.ncommitted; page++) {
  1407.                 blockIndex = pool.pageTable[page];
  1408.  
  1409.                 if(blockIndex >= B_PAGE) {
  1410.                     freePages++;
  1411.                     continue;
  1412.                 }
  1413.  
  1414.                 bit = page * (PageSize >> PtrShift);
  1415.                 maxBit = bit + (PageSize >> PtrShift);
  1416.                 stride = BlockIndexSize[blockIndex] >> PtrShift; // blockSize may be stale at this point
  1417.  
  1418.                 for(; bit != maxBit; bit += stride)
  1419.                     if(pool.mark.Test(bit)) break;
  1420.  
  1421.                 if(bit == maxBit) {
  1422.                     pool.pageTable[page] = B_FREE;
  1423.                     freePages++;
  1424.                     continue;
  1425.                 }
  1426.  
  1427.                 mixin(BuildFreeList);
  1428.             }
  1429.  
  1430.             if(freePages == pool.ncommitted) {
  1431.                 mixin(FreePool);
  1432.             }
  1433.         }
  1434.     }
  1435.  
  1436.     return free;
  1437. }
  1438.  
  1439. /**
  1440.  Mark all pointers to valid allocations within the given memory range.
  1441.     Marked allocations are not freed.
  1442. */
  1443. static void MarkRangePointers(in void* bottom, in void* top) {
  1444.     void** pBottom = cast(void**)bottom;
  1445.     void** pTop = cast(void**)top;
  1446.  
  1447.     Pool* pool = void;
  1448.     void* p = void;
  1449.     uint bit = void;
  1450.     uint page = void;
  1451.     BlockIndex blockIndex = void;
  1452.  
  1453.     for(; pBottom < pTop; pBottom++) {
  1454.         p = *pBottom;
  1455.  
  1456.         if(p >= _minAddr && p <= _maxAddr && (pool = FindPool(p)) != null) {
  1457.             page = FindPage(p, pool);
  1458.             blockIndex = pool.pageTable[page];
  1459.  
  1460.             if(blockIndex < B_PAGE) {
  1461.                 bit = FindBit(cast(void*)(cast(size_t)p & ~(BlockIndexSize[blockIndex] - 1)), pool);
  1462.             }
  1463.             else if(blockIndex == B_PAGE) {
  1464.                 bit = page * (PageSize >> PtrShift);
  1465.             }
  1466.             else if(blockIndex == B_PAGEPLUS) {
  1467.                 while(pool.pageTable[--page] == B_PAGEPLUS) {}
  1468.                 bit = page * (PageSize >> PtrShift);
  1469.             }
  1470.             else {
  1471.                 continue;
  1472.             }
  1473.  
  1474.             pool.mark.Set(bit);
  1475.         }
  1476.     }
  1477. }
  1478.  
  1479. // TODO: temporary hack until threading gets back
  1480. static void* STACKBOTTOM() {
  1481.     version(X86) asm {
  1482.         naked;
  1483.         mov EAX, FS:4;
  1484.         ret;
  1485.     }
  1486.     else static assert(0);
  1487. }
  1488.  
  1489. /**
  1490.  Internal memory pool
  1491. */
  1492. struct Pool {
  1493.     void* baseAddr;         /// The base address of the pool's memory
  1494.     void* topAddr;          /// The top address of the pool's memory
  1495.  
  1496.     Bits mark;              /// Marked allocations during collection, these are still in use
  1497.     Bits hasFinalizer;      /// Allocations which need to be finalized before they are freed
  1498.     Bits hasPointers;       /// Allocations containing memory pointers; these are scanned during collection
  1499.  
  1500.     uint npages;            /// Total number of memory pages within this pool
  1501.     uint ncommitted;        /// Number of memory pages currently committed
  1502.     BlockIndex* pageTable;  /// Lookup table for the used block index of every page
  1503.  
  1504.     /**
  1505.      Allocate a new pool
  1506.  
  1507.      Params:
  1508.         npages =    The number of pages to allocate in the pool.
  1509.  
  1510.      Returns:
  1511.         $(B true) on success, $(B false) otherwise.
  1512.     */
  1513.     bool Alloc(uint npages) {
  1514.         debug(MemoryPool) Trace("Memory.Pool[%p].Alloc(npages=%u)", &this, npages);
  1515.  
  1516.         size_t poolSize = npages * PageSize;
  1517.         assert(poolSize >= PoolSize);
  1518.  
  1519.         version(Windows) {
  1520.             baseAddr = VirtualAlloc(null, poolSize, MEM_RESERVE, PAGE_READWRITE);
  1521.             if(!baseAddr) OutOfMemoryError();
  1522.         }
  1523.         else version(Posix) {
  1524.             baseAddr = mmap(null, poolSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
  1525.             if(baseAddr == MAP_FAILED) OutOfMemoryError();
  1526.         }
  1527.         else static assert(0);
  1528.  
  1529.         assert((cast(uint)baseAddr & (PageSize - 1)) == 0);
  1530.  
  1531.         if(!baseAddr) return false;
  1532.  
  1533.         topAddr = baseAddr + poolSize;
  1534.  
  1535.         mark.Alloc(poolSize / 16);
  1536.         hasPointers.Alloc(poolSize / 16);
  1537.  
  1538.         pageTable = cast(BlockIndex*)malloc(npages);
  1539.         if(!pageTable) OutOfMemoryError();
  1540.         memset(pageTable, B_UNCOMMITTED, npages);
  1541.  
  1542.         this.npages = npages;
  1543.         ncommitted = 0;
  1544.  
  1545.         version(MemoryProfile) _total += poolSize;
  1546.  
  1547.         return true;
  1548.     }
  1549.  
  1550.     /**
  1551.      Free the pool
  1552.     */
  1553.     void Free() {
  1554.         debug(MemoryPool) Trace("Memory.Pool[%p].Free()", &this);
  1555.  
  1556.         version(MemoryProfile) _total -= PageSize * npages;
  1557.  
  1558.         if(baseAddr) {
  1559.             bool result;
  1560.  
  1561.             if(npages) {
  1562.                 if(ncommitted) {
  1563.                     version(Windows) result = cast(bool)VirtualFree(baseAddr, ncommitted * PageSize, MEM_DECOMMIT);
  1564.                     else version(Posix) { /* nothing to do */ }
  1565.                     else static assert(0);
  1566.    
  1567.                     assert(result);
  1568.                     ncommitted = 0;
  1569.                 }
  1570.  
  1571.                 version(Windows) result = cast(bool)VirtualFree(baseAddr, 0, MEM_RELEASE);
  1572.                 else version(Posix) result = munmap(baseAddr, npages * PageSize) == 0; // munmap needs the mapping length and returns 0 on success
  1573.                 else static assert(0);
  1574.  
  1575.                 assert(result);
  1576.                 npages = 0;
  1577.             }
  1578.  
  1579.             baseAddr = null;
  1580.             topAddr = null;
  1581.         }
  1582.  
  1583.         if(pageTable) free(pageTable);
  1584.  
  1585.         mark.Free();
  1586.         hasFinalizer.Free();
  1587.         hasPointers.Free();
  1588.     }
  1589.  
  1590.     /**
  1591.      Allocate memory from available pool pages.
  1592.  
  1593.      Params:
  1594.         npages =    The number of pages for the allocation.
  1595.  
  1596.      Returns:
  1597.         The index of the first page is returned, or uint.max if the pool does
  1598.         not contain enough free pages for the allocation.
  1599.     */
  1600.     uint AllocPages(uint npages = 1) {
  1601.         debug(MemoryPool) Trace("Memory.Pool[%p].AllocPages(npages=%u)", &this, npages);
  1602.  
  1603.         for(uint i, j; i != ncommitted; i++) {
  1604.             if(pageTable[i] != B_FREE) {
  1605.                 j = 0;
  1606.                 continue;
  1607.             }
  1608.  
  1609.             if(++j == npages) return i - j + 1;
  1610.         }
  1611.  
  1612.         return CommitPages(npages);
  1613.     }
  1614.  
  1615.     /**
  1616.      Commit memory pages from the pool.
  1617.  
  1618.      Params:
  1619.         npages =    The number of pages to commit.
  1620.  
  1621.      Returns:
  1622.         The index of the first newly committed page is returned, or uint.max if
  1623.         the pool does not have enough uncommitted pages left.
  1624.     */
  1625.     uint CommitPages(uint npages) {
  1626.         debug(MemoryPool) Trace("Memory.Pool[%p].CommitPages(npages=%u)", &this, npages);
  1627.  
  1628.         if(ncommitted + npages <= this.npages) {
  1629.             uint commit = (npages + (CommitSize / PageSize) - 1) & ~(CommitSize / PageSize - 1);
  1630.  
  1631.             if(ncommitted + commit > this.npages) return uint.max;
  1632.  
  1633.             version(Windows)
  1634.                 bool result = cast(bool)VirtualAlloc(baseAddr + ncommitted * PageSize,
  1635.                     commit * PageSize, MEM_COMMIT, PAGE_READWRITE);
  1636.             else version(Posix)
  1637.                 enum result = true;
  1638.             else
  1639.                 static assert(0);
  1640.  
  1641.             if(result) {
  1642.                 memset(pageTable + ncommitted, B_FREE, commit);
  1643.  
  1644.                 uint i = ncommitted;
  1645.                 ncommitted += commit;
  1646.  
  1647.                 while(i && pageTable[i - 1] == B_FREE) i--;
  1648.                 return i;
  1649.             }
  1650.         }
  1651.  
  1652.         return uint.max;
  1653.     }
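    // Illustrative sketch (not part of the original module): commits are rounded up
    // to CommitSize (16 pages at a time), so a request for a single page still
    // commits a 64KB chunk and leaves the remaining 15 pages marked B_FREE.
    version(none) unittest {
        Pool pool;
        pool.Alloc(PoolSize / PageSize);                  // reserve a fresh pool

        uint first = pool.CommitPages(1);
        assert(first == 0);
        assert(pool.ncommitted == CommitSize / PageSize); // 16 pages committed

        pool.Free();
    }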
  1654.  
  1655.     /**
  1656.      Mark a set of pages with the given blockindex.
  1657.  
  1658.      Params:
  1659.         page =          The first page to mark
  1660.         npages =        The number of pages to mark
  1661.         blockIndex =    The block index to mark pages with
  1662.     */
  1663.     void MarkPages(uint page, uint npages, uint blockIndex) {
  1664.         memset(&pageTable[page], blockIndex, npages);
  1665.     }
  1666.  
  1667.     /**
  1668.      Comparison operator to sort the pool table.
  1669.     */
  1670.     int opCmp(in Pool* pool) const {
  1671.         if(baseAddr < pool.baseAddr) return -1;
  1672.         else return cast(int)(baseAddr > pool.baseAddr);
  1673.     }
  1674. } // Pool
  1675.  
  1676. /**
  1677.  Internal bit array
  1678. */
  1679. struct Bits {
  1680.     static if(size_t.sizeof == 4) enum {
  1681.         BITS_PER_WORD   = 32,
  1682.         BITS_SHIFT      = 5,
  1683.         BITS_MASK       = 31,
  1684.         EXTRA_WORDS     = 2
  1685.     }
  1686.     else static if(size_t.sizeof == 8) enum {
  1687.         BITS_PER_WORD   = 64,
  1688.         BITS_SHIFT      = 6,
  1689.         BITS_MASK       = 63,
  1690.         EXTRA_WORDS     = 4
  1691.     }
  1692.     else static assert(0);
  1693.  
  1694.     size_t* _data;  /// The bits data
  1695.     uint    nwords; /// Number of data words
  1696.     uint    nbits;  /// Number of data bits
  1697.  
  1698.     invariant() {
  1699.         assert(!_data || nwords * size_t.sizeof * 8 >= nbits);
  1700.     }
  1701.  
  1702.     /**
  1703.      Get the pointer to the data bits
  1704.     */
  1705.     size_t* data()
  1706.     in {
  1707.         assert(_data);
  1708.     }
  1709.     body {
  1710.         return _data + 1;
  1711.     }
  1712.  
  1713.     /**
  1714.      Allocate bits
  1715.     */
  1716.     void Alloc(uint nbits)
  1717.     in {
  1718.         assert(!_data);
  1719.     }
  1720.     body {
  1721.         this.nbits = nbits;
  1722.         nwords = (nbits + BITS_MASK) >> BITS_SHIFT;
  1723.         _data = cast(size_t*)calloc(nwords + EXTRA_WORDS, size_t.sizeof);
  1724.         if(!_data) OutOfMemoryError();
  1725.     }
  1726.  
  1727.     /**
  1728.      Free bits
  1729.     */
  1730.     void Free() {
  1731.         if(_data) {
  1732.             free(_data);
  1733.             _data = null;
  1734.         }
  1735.     }
  1736.  
  1737.     /**
  1738.      Zero bits
  1739.     */
  1740.     void Zero() {
  1741.         memset(data, 0, nwords * size_t.sizeof);
  1742.     }
  1743.  
  1744.     /**
  1745.      Copy bits from another array
  1746.     */
  1747.     void Copy(Bits* f)
  1748.     in {
  1749.         assert(nwords == f.nwords);
  1750.     }
  1751.     body {
  1752.         memcpy(data, f.data, nwords * size_t.sizeof);
  1753.     }
  1754.  
  1755.     /**
  1756.      Set bit
  1757.     */
  1758.     void Set(uint i)
  1759.     in {
  1760.         assert(i < nbits);
  1761.     }
  1762.     body {
  1763.         data[i >> BITS_SHIFT] |= (cast(size_t)1 << (i & BITS_MASK)); // size_t-wide mask so 64-bit words work too
  1764.     }
  1765.  
  1766.     /**
  1767.      Clear bit
  1768.     */
  1769.     void Clear(uint i)
  1770.     in {
  1771.         assert(i < nbits);
  1772.     }
  1773.     body {
  1774.         data[i >> BITS_SHIFT] &= ~(cast(size_t)1 << (i & BITS_MASK));
  1775.     }
  1776.  
  1777.     /**
  1778.      Test bit
  1779.     */
  1780.     uint Test(uint i)
  1781.     in {
  1782.         assert(i < nbits);
  1783.     }
  1784.     body {
  1785.         return (data[i >> BITS_SHIFT] & (cast(size_t)1 << (i & BITS_MASK))) != 0;
  1786.     }
  1787.  
  1788.     /**
  1789.      Test & Clear bit
  1790.     */
  1791.     uint TestClear(uint i)
  1792.     in {
  1793.         assert(i < nbits);
  1794.     }
  1795.     body {
  1796.         version(DigitalMars) {
  1797.             return btr(data, i);
  1798.         }
  1799.         else version(X86) {
  1800.             asm {
  1801.                 naked;
  1802.                 mov     EAX, _data[EAX];
  1803.                 mov     ECX, i-4[ESP];
  1804.                 btr     4[EAX], ECX;
  1805.                 sbb     EAX, EAX;
  1806.                 ret     4;
  1807.             }
  1808.         }
  1809.         else {
  1810.             size_t result;
  1811.             size_t* p = &data[i >> BITS_SHIFT]; // data is size_t*, so use a matching pointer type
  1812.             size_t mask = cast(size_t)1 << (i & BITS_MASK);
  1813.             result = *p & mask;
  1814.             *p &= ~mask;
  1815.             return result != 0;
  1816.         }
  1817.     }
  1818. } // Bits
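// Illustrative sketch (not part of the original module): how a bit index maps onto
// the word array. Bit i lives in word i >> BITS_SHIFT under the mask for bit
// i & BITS_MASK; data() skips the first word of the calloc'd buffer.
version(none) unittest {
    Bits bits;
    bits.Alloc(256 * 8); // e.g. one bit per 16 bytes of a 32KB span

    bits.Set(517);
    assert(bits.Test(517));
    assert(bits.data[517 >> Bits.BITS_SHIFT] != 0); // with 32-bit words: word 16, bit 5

    assert(bits.TestClear(517));
    assert(!bits.Test(517));

    bits.Free();
}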
  1819.  
  1820. } // Memory
  1821.  