Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- /**
- Runtime Memory Management and Garbage Collection
- Authors:
- Walter Bright
- David Friedman
- Sean Kelly
- Jeremie Pelletier
- References:
- $(LINK http://en.wikipedia.org/wiki/Garbage_collection_%28computer_science%29)
- $(LINK http://www.iecc.com/gclist/GC-algorithms.html)
- License:
- Copyright (C) 2000-2006 by Digital Mars, www.digitalmars.com
- Written by Walter Bright
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, in both source and binary form, subject to the following
- restrictions:
- o The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- o Altered source versions must be plainly marked as such, and must not
- be misrepresented as being the original software.
- o This notice may not be removed or altered from any source distribution.
- */
module dlib.Memory;

// Debug switches for this module; uncomment selectively when tracing GC behavior.
/*debug = Memory;
debug = MemoryInternal;
debug = MemoryExtendedInfo;
debug = MemoryPool;*/
//debug = MemoryBreakOnAlloc;
//debug = MemoryBreakOnAllocPool;
//debug = MemoryBreakOnCollection;

version(X86) {
    version = StackGrowsDown;   // x86 stacks grow toward lower addresses
    enum PageSize = 0x1000;     // 4 KiB hardware page
}
else static assert(0, "Unsupported architecture.");

//version = MemoryProfile;

import std.c.stdlib : calloc, malloc, realloc, free;
import std.c.string : memset, memcpy, memmove;

version(Windows) {
    import sys.windows.Memory;
}
else version(Posix) {
    import sys.posix.mman;
    import sys.posix.pthread;
}
else static assert(0, "Unsupported platform.");

import dlib.Main;
import dlib.Monitor;
// TODO!!! threading disabled until 'shared' gets decent semantics
//import dlib.Thread;

version(DigitalMars) {
    import std.intrinsic;
    import dlib.dmd.Memory;
}
else static assert(0, "Unsupported compiler.");

// String mixins injected at allocation sites; expand to trace calls only when
// extended debug info is enabled, otherwise to nothing.
debug(MemoryExtendedInfo) enum {
    TracePool = q{Trace("\tpool=%p", pool);},
    TraceHasPointers = q{Trace("\tHas Pointers");}
}
else enum {
    TracePool = "",
    TraceHasPointers = ""
}
// The memory module is the first initialized by the runtime, at this point
// the monitor module, depending on this one, has not yet initialized.
// This monitor is therefore allocated in the static data segment, and assigned
// to Memory.classinfo.__monitor during initialization.
private __gshared Monitor _memoryMonitor;

// Linker-provided symbols delimiting the static data + BSS segments; their
// addresses bound the root range registered in MemoryInit().
version(Windows) {
    extern(C): // From DigitalMars' C runtime
    extern invariant void* _xi_a;  // &_xi_a just happens to be start of data segment
    extern invariant void* _edata; // &_edata is start of BSS segment
    extern invariant void* _end;   // &_end is past end of BSS
}
else version(Posix) {
    extern(C): // From glibc runtime
    extern invariant void* __data_start; // &__data_start = start of data segment
    extern invariant void* _end;         // &_end = past end of BSS
}
else static assert(0);
/**
Memory initialization method
*/
enum AllocInit {
    ZeroFill, /// Fill memory with zeros
    TypeInfo, /// Initialize memory from TypeInfo.init
    None      /// Don't initialize memory
}

/**
The garbage collector automatic collection strategy.
*/
enum CollectionStrategy {
    Aggressive, /// Favor collections to new pools
    Lazy        /// Favor new pools to collections
}
- /**
- Internal memory manager initialization
- */
/**
Internal memory manager initialization.

Installs the statically-allocated monitor on the Memory class, initializes
its mutex, and registers the static data + BSS segment as a root range so
the collector scans global variables.
*/
package void MemoryInit() { with(Memory) {
    Memory.classinfo.__monitor = &_memoryMonitor;
    version(Windows) {
        MutexInit(&_memoryMonitor.mutex);
        AddRange(&_xi_a, &_end);           // data + BSS segment roots
    }
    else version(Posix) {
        pthread_mutex_init(&_memoryMonitor.mutex, null);
        AddRange(&__data_start, &_end);    // data + BSS segment roots
    }
    else static assert(0);
} }
- /**
- Internal memory manager destruction
- */
/**
Internal memory manager destruction.

Drops all root ranges first so the final aggressive collection treats every
allocation as unreachable (running remaining finalizers), then releases the
bookkeeping arrays, the pools, and the monitor mutex.
*/
package void MemoryDestroy() { with(Memory) {
    _nRanges = 0;              // forget roots so everything is collectable
    RunCollector(true);        // final aggressive sweep, runs finalizers
    free(_ranges);
    foreach(i; 0 .. _nPools) free(_pools[i]);
    free(_pools);
    MutexDestroy(&_memoryMonitor.mutex);
} }
- /**
- TODO: find a way to use multiple Memory instances, so for example threads can
- be optionally created with their own memory manager to handle unshared data.
- This could also be used to implement short lived heaps where you can dump
- the entire pool at once without affecting the rest of the program.
- These Memory instances should not have pointers into other memory instances,
- but may have pointers into the shared Memory. The shared memory may only have
- pointers into itself.
- */
- final abstract class Memory {
- // ----------------------------------------------------------------------------
- // P u b l i c I n t e r f a c e
- // ----------------------------------------------------------------------------
/**
Allocates memory for an object described by the given class info. The
allocated memory is always initialized using the classinfo initializer. The
object's constructor is not called here!
Note:
It is recommended to use "new ClassName" whenever possible, which will end up
here with the proper classinfo, and then call the appropriate constructor
automatically.
Params:
ci = The classInfo for the object to instanciate.
Returns:
A reference to the allocated object is returned.
*/
static shared Object Alloc(in ClassInfo ci)
in {
    assert(ci && ci.init);
}
out(o) {
    assert(o);
}
body {
    debug(Memory) Trace("Memory.Alloc(ci=%p) %.*s", ci, ci.name);
    void* p = void;
    // COM objects are reference-counted, not garbage collected: allocate them
    // straight from the C heap, outside any pool.
    if(ci.flags & ClassInfo.COM) {
        debug(MemoryExtendedInfo) Trace("\tCOM Object");
        p = malloc(ci.init.length);
        if(!p) OutOfMemoryError();
    }
    else synchronized(Memory.classinfo) {
        Pool* pool = void;
        p = Alloc(ci.init.length, &pool);
        mixin(TracePool);
        uint bit = FindBit(p, pool);
        // The finalizer bitmap is allocated lazily, only once the pool holds
        // its first object needing finalization.
        if(!pool.hasFinalizer.nbits)
            pool.hasFinalizer.Alloc(pool.hasPointers.nbits);
        pool.hasFinalizer.Set(bit);
        if(!(ci.flags & ClassInfo.NoPointers)) {
            mixin(TraceHasPointers);
            pool.hasPointers.Set(bit);  // mark for conservative pointer scan
        }
    }
    // Blit the class initializer image; the constructor runs later, elsewhere.
    memcpy(p, ci.init.ptr, ci.init.length);
    return cast(Object)p;
}
/**
Allocates memory for an array described by the given TypeInfo.
Note:
It is recommended to Use "new mytype[mylength]" whenever possible, which
will end up here with all the proper parameters.
Params:
ti = Type of the array's elements.
length = Number of elements to allocate.
init = How to initialize memory.
size = If nonzero, overrides the typeinfo size.
Returns:
A pointer to the allocated array memory is returned.
*/
static shared void* Alloc(in TypeInfo ti, size_t length = 1,
    AllocInit init = AllocInit.ZeroFill, size_t size = 0)
in {
    assert(ti);
    assert(length);
    assert(size || ti.size);
}
out(p) {
    assert(p);
}
body {
    debug(Memory) Trace("Memory.Alloc(ti=%p, length=%u, init=%u, size=%u)",
        ti, length, init, size);
    void* p = void;
    // Total byte size: explicit element size overrides the typeinfo's.
    size = !size ? ti.size * length : size * length;
    synchronized(Memory.classinfo) {
        Pool* pool = void;
        p = Alloc(size, &pool);
        mixin(TracePool);
        if(ti.flags & TypeInfo.HasPointers) {
            mixin(TraceHasPointers);
            pool.hasPointers.Set(FindBit(p, pool));  // scanned by collector
        }
    }
    // Initialization happens outside the lock; the memory is not yet visible
    // to other code.
    MemoryInit(ti, p, size, init);
    return p;
}
/**
Allocates unitialized memory.
Note:
It is strongly recommended that you use "new" instead, the dlib
implementation will call the proper allocation routine with the proper
typeinfo.
Params:
size = Size of the allocation, in bytes.
hasPointers = Whether the memory contains pointers or not.
Returns:
A pointer to the allocated memory is returned.
*/
static shared void* Alloc(size_t size, bool hasPointers = true)
in {
    assert(size);
}
out(p) {
    assert(p);
}
body {
    debug(Memory) Trace("Memory.Alloc(size=%u, hasPointers=%b)", size, hasPointers & 1);
    // BUG!! When this method is synchronized:
    // Error: cannot goto forward into different try block level
    synchronized(Memory.classinfo) {
        Pool* pool = void;
        void* p = Alloc(size, &pool);
        mixin(TracePool);
        if(hasPointers) {
            mixin(TraceHasPointers);
            pool.hasPointers.Set(FindBit(p, pool));  // scanned by collector
        }
        return p;
    }
}
/**
Reallocates memory for an array.
Resize the given allocated memory, possibly relocating it. The length
parameter is used to specify the slice of the old memory to copy and/or
initialize.
Note:
It is recommended that you use the array properties and operators instead
of calling this method. The runtime will determine the proper parameters
to call this method.
Params:
old = Pointer to the memory that is to be reallocated.
ti = Type of the array's elements.
length = Current array length.
newLength = New number of array elements.
init = How to initialize new memory.
size = If nonzero, overrides the typeinfo's size.
Returns:
A pointer to the new memory is returned, the old pointer is no longer
valid and should not be used anymore.
*/
static shared void* Realloc(in void* old, in TypeInfo ti, size_t length = 1,
    size_t newLength = 1, AllocInit init = AllocInit.ZeroFill, size_t size = 0)
in {
    assert(!length || old);
    assert(ti);
    assert(newLength);
    assert(init != AllocInit.TypeInfo || ti.init);
    assert(size || ti.size);
}
out(p) {
    assert(p);
}
body {
    debug(Memory) Trace("Memory.Relloc(old=%p, ti=%p, length=%u, newLength=%u, "
        "init=%u, size=%u) %.*s", old, ti, length, newLength, init, size);
    // A null source degenerates to a plain allocation.
    if(!old) return Memory.Alloc(ti, newLength, init, size);
    void* p = void;
    // Byte sizes of the new and current slices; explicit size overrides typeinfo.
    size_t newSize = size ? size * newLength : ti.size * newLength;
    size = size ? size * length : ti.size * length;
    synchronized(Memory.classinfo) {
        Pool* pool = void;
        p = Realloc(old, newSize, size, &pool);
        mixin(TracePool);
        // Only a relocation needs the pointer bit set at the new address.
        if(p != old && ti.flags & TypeInfo.HasPointers) {
            mixin(TraceHasPointers);
            pool.hasPointers.Set(FindBit(p, pool));
        }
    }
    // Initialize only the grown tail, if any.
    if(newSize > size) MemoryInit(ti, p + size, newSize - size, init);
    return p;
}
/**
Reallocates memory.
Note:
If the memory is relocated, the new location is automatically marked as
having pointers, since there is no typeinfo available.
Params:
old = Pointer to the memory that is to be reallocated.
size = Size of the new allocation, in bytes.
Returns:
A pointer to the allocated is returned, if it is different than the one
given by the old parameter, then the old pointer is no longer safe for use.
*/
static shared void* Realloc(in void* old, size_t size, bool hasPointers = true)
in {
    assert(size);
}
out(p) {
    assert(p);
}
body {
    debug(Memory) Trace("Memory.Relloc(old=%p, size=%u, hasPointers=%b)",
        old, size, hasPointers & 1);
    // A null source degenerates to a plain allocation.
    if(!old) return Memory.Alloc(size);
    synchronized(Memory.classinfo) {
        Pool* pool = void;
        void* p = Realloc(old, size, 0, &pool);
        mixin(TracePool);
        // Only a relocation needs the pointer bit set at the new address.
        if(p != old && hasPointers) {
            mixin(TraceHasPointers);
            pool.hasPointers.Set(FindBit(p, pool));
        }
        return p;
    }
}
/**
Frees memory
Params:
p = Pointer to the memory that is to be freed.
*/
static synchronized void Free(in void* p)
in {
    assert(p);
}
body {
    debug(Memory) Trace("Memory.Free(p=%p)", p);
    // Delegate to the internal overload; null pool forces a pool lookup.
    Free(p, null);
}
/**
Find the allocated size the given pointer
Params:
p = Pointer to the allocated memory.
Returns:
The size of the allocated memory block.
*/
static synchronized size_t SizeOf(in void* p) {
    debug(Memory) Trace("Memory.SizeOf(p=%p)", p);
    if(!p) return 0;
    // One-entry cache: repeated queries on the same pointer are common.
    if(p is _cache) return _cacheSize;
    size_t size = FindAllocSize(p);
    // Reject pointers not aligned to the start of their block.
    // NOTE(review): for multi-page allocations size is not a power of two,
    // so (size - 1) is not a clean alignment mask — confirm intent.
    if(cast(size_t)p & (size - 1) & (PageSize - 1)) {
        size = 0;
    }
    else {
        _cache = p;
        _cacheSize = size;
    }
    return size;
}
/**
Adds a range index to external memory. This range will be scanned by the
garbage collector; anything pointing to a valid address within allocated
memory marks that allocation as in-use.
Params:
bottom = The start address of the memory range.
top = The end address of the memory range.
*/
static synchronized void AddRange(in void* bottom, in void* top)
in {
    assert(bottom);
    assert(top);
}
body {
    debug(Memory) Trace("Memory.AddRange(bottom=%p, top=%p)", bottom, top);
    // Grow the range table geometrically (x2, starting at 16 entries).
    if(_nRanges == _rangesLength) {
        _rangesLength = _rangesLength ? _rangesLength * 2 : 16;
        _ranges = cast(Range*)realloc(_ranges, _rangesLength * Range.sizeof);
        if(!_ranges) OutOfMemoryError();
    }
    _ranges[_nRanges].bottom = bottom;
    _ranges[_nRanges].top = top;
    _nRanges++;
}
/**
Removes an index to external memory.
Params:
bottom = The start address of the memory range.

Note: silently does nothing if no registered range starts at `bottom`.
*/
static synchronized void RemoveRange(in void* bottom)
in {
    assert(bottom);
}
body {
    debug(Memory) Trace("Memory.RemoveRange(bottom=%p)", bottom);
    foreach(i; 0 .. _nRanges) {
        if(_ranges[i].bottom == bottom) {
            // Compact the table over the removed entry; order is preserved.
            _nRanges--;
            memmove(_ranges + i, _ranges + i + 1, (_nRanges - i) * Range.sizeof);
            return;
        }
    }
}
/**
Change the garbage collector's strategy.
Params:
strategy = The new strategy to use.
Returns:
The previous strategy is returned.
*/
static synchronized CollectionStrategy SetCollectionStrategy(CollectionStrategy strategy) {
    debug(Memory) Trace("Memory.SetCollectionStrategy(strategy=%u)", strategy);
    CollectionStrategy old = _collectionStrategy;
    _collectionStrategy = strategy;
    return old;
}
/**
Runs the garbage collector
Params:
complete = Set to true to run the collector in aggressive mode;
it is much slower, but can free much more memory.
Returns:
The total bytes freed.
*/
static synchronized size_t RunCollector(bool complete = false) {
    debug(Memory) Trace("Memory.RunCollector(complete=%b)", complete & 1);
    return CollectGarbage(complete);
}
version(MemoryProfile) {
    /// Bytes currently allocated from the pools (profiling builds only).
    static uint used() {
        return _used;
    }
    /// Total bytes managed by the pools (profiling builds only).
    static uint total() {
        return _total;
    }
}
// ----------------------------------------------------------------------------
// I n t e r n a l
// ----------------------------------------------------------------------------
private:

enum {
    PtrShift = 4,               // log2 of the smallest block size (16 bytes)
    CommitSize = PageSize * 16, // granularity of page commits within a pool
    PoolSize = PageSize * 256,  // default pool size (1 MiB with 4 KiB pages)
}

static if(PageSize == 0x1000) enum PageShift = 12; // log2(PageSize)
else static assert(0);

// Index into BlockIndexSize, or one of the page-level markers below.
// NOTE(review): `typedef` is the old-D strong alias — do not modernize without
// checking the compiler version this targets.
typedef ubyte BlockIndex;
enum : BlockIndex {
    B_16, B_32, B_64, B_128, B_256, B_512, B_1024, B_2048, B_4096,
    B_PAGE,        // first page of a multi-page (or full-page) allocation
    B_PAGEPLUS,    // continuation page of a B_PAGE allocation
    B_FREE,        // committed but unallocated page
    B_UNCOMMITTED, // page not yet committed from the OS
    B_MAX = B_PAGE
}
immutable uint[B_MAX] BlockIndexSize =
    [0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000];

// Free blocks are casted to this structure and pushed on their appropriate
// freelist index
struct FreeList {
    FreeList* next;
    Pool* pool; // Cache the block's pool for faster allocations
}
static assert(FreeList.sizeof <= 16); // must fit inside the smallest block

// A root range registered through AddRange(), scanned by the collector.
struct Range {
    const(void)* bottom;
    const(void)* top;
}

static __gshared:

CollectionStrategy _collectionStrategy;

Range* _ranges;            // root ranges, grown geometrically
uint _nRanges;
uint _rangesLength;

const(void)* _cache;       // one-entry SizeOf() cache: last queried pointer
size_t _cacheSize;

FreeList*[B_MAX] _freeList; // per-block-size freelists

Pool** _pools;             // pools sorted by base address (binary-searchable)
uint _nPools;
uint _poolsLength;
const(void)* _minAddr;     // lowest address managed by any pool
const(void)* _maxAddr;     // highest address managed by any pool

version(MemoryProfile) {
    uint _used;
    uint _total;
}
/**
Internal memory allocation
Params:
size = The allocation size.
pool = Set to the pool owning the returned allocation.
Returns:
A pointer to the allocated memory is returned, it is never initialized.

Caller must hold the Memory monitor. Small sizes (< PageSize) come from the
per-size freelists; larger sizes get whole pages. When memory runs out the
collection strategy decides whether to collect first or allocate a new pool
first.
*/
static void* Alloc(size_t size, Pool** pPool)
in {
    assert(size);
    assert(pPool);
}
out(p) {
    assert(p);
}
body {
    debug(MemoryInternal) Trace("Memory.Alloc(size=%u, pPool=%p)", size, pPool);
    debug(MemoryBreakOnAlloc) asm { int 3; }
    void* p = void;
    BlockIndex blockIndex = FindBlockIndex(size);
    /*
    Block allocation
    */
    if(blockIndex < B_PAGE) {
        // Freelist empty: carve a fresh page into blocks of this size.
        if(!_freeList[blockIndex]) {
            Pool* pool = void;
            uint blockSize = BlockIndexSize[blockIndex];
            // Allocates one page from `pool` and threads all its blocks onto
            // the freelist. Returns false if the pool has no free page.
            bool AllocPage() {
                uint n = pool.AllocPages(1);
                if(n == uint.max) return false;
                pool.pageTable[n] = blockIndex;
                void* p = pool.baseAddr + n * PageSize;
                void* top = p + PageSize;
                FreeList** block = &_freeList[blockIndex];
                for(; p < top; p += blockSize) {
                    (cast(FreeList*)p).next = *block;
                    *block = cast(FreeList*)p;
                    (*block).pool = pool;
                }
                return true;
            }
            // First try every existing pool.
            foreach(i; 0 .. _nPools) {
                pool = _pools[i];
                if(AllocPage()) goto BlockAlloc;
            }
            // Out of pages: collect and/or grow according to strategy.
            final switch(_collectionStrategy) {
            case CollectionStrategy.Aggressive:
                if(CollectGarbage(true) < blockSize || !_freeList[blockIndex])
                    if((pool = AllocPool()) == null || !AllocPage())
                        OutOfMemoryError();
                break;
            case CollectionStrategy.Lazy:
                if((pool = AllocPool()) == null || !AllocPage())
                    if(CollectGarbage(true) < blockSize || !_freeList[blockIndex])
                        OutOfMemoryError();
                break;
            }
        }
    BlockAlloc:
        // Pop the head of the freelist.
        p = _freeList[blockIndex];
        *pPool = _freeList[blockIndex].pool;
        _freeList[blockIndex] = _freeList[blockIndex].next;
        version(MemoryProfile) _used += BlockIndexSize[blockIndex];
    }
    /*
    Page allocation
    */
    else {
        Pool* pool = void;
        uint numPages = (size + PageSize - 1) >> PageShift; // ceil(size / PageSize)
        uint page = void;
        // Scans every pool for a run of numPages contiguous free pages.
        bool AllocPagesScan() {
            foreach(i; 0 .. _nPools) {
                pool = _pools[i];
                page = pool.AllocPages(numPages);
                if(page != uint.max) return true;
            }
            return false;
        }
        if(!AllocPagesScan()) {
            // Out of pages: collect and/or grow according to strategy.
            final switch(_collectionStrategy) {
            case CollectionStrategy.Aggressive:
                if(CollectGarbage(true) < numPages * PageSize || !AllocPagesScan())
                    if((pool = AllocPool(numPages)) == null || (page = pool.AllocPages(numPages)) == uint.max)
                        OutOfMemoryError();
                break;
            case CollectionStrategy.Lazy:
                if((pool = AllocPool(numPages)) == null || (page = pool.AllocPages(numPages)) == uint.max)
                    if(CollectGarbage(true) < numPages * PageSize || !AllocPagesScan())
                        OutOfMemoryError();
                break;
            }
        }
        // First page marks the allocation head, the rest are continuations.
        pool.pageTable[page] = B_PAGE;
        pool.MarkPages(page + 1, numPages - 1, B_PAGEPLUS);
        p = pool.baseAddr + page * PageSize;
        *pPool = pool;
        version(MemoryProfile) _used += PageSize * numPages;
    }
    return p;
}
/**
Internal memory reallocation
Params:
old = Pointer to the current allocation.
size = The requested allocation size.
curSize = The known used allocation size, defaults to current size if 0.
pool = Returns the pool of the reallocated memory.
Returns:
A pointer to the new allocation is returned.

Caller must hold the Memory monitor. In-place resizes return `old`
unchanged; otherwise the contents are copied to a new allocation and the
old one is freed.

FIX(review): the in-place page-extension and page-shrink branches fell
through to `return p;` without ever assigning `p`, returning an
uninitialized pointer in release builds (the `out` contract only catches
it in debug). Both branches now set `p = cast(void*)old;`.
*/
static void* Realloc(in void* old, size_t size, size_t curSize, Pool** pPool)
in {
    assert(old);
    assert(pPool);
}
out(p) {
    assert(p);
}
body {
    debug(MemoryInternal) Trace("Memory.Relloc(old=%p, size=%u, curSize=%u, pPool=%p)",
        old, size, curSize, pPool);
    debug(MemoryBreakOnAlloc) asm { int 3; }
    void* p = void;
    Pool* pool = FindPool(old);
    // Trying to realloc memory that isn't ours, this can happen when, for
    // example, the source of a contatenation is inside the static memory
    // segment.
    if(!pool) {
        p = Memory.Alloc(size, pPool);
        memcpy(p, old, curSize);
        return p;
    }
    size_t sizeof = FindAllocSize(old, pool);
    size_t pageAlign = (sizeof + PageSize-1) & ~(PageSize-1);
    if(!curSize) curSize = sizeof;
    /*
    Block re-allocations:
    block => block
    block => page
    page => block
    */
    if(size <= PageSize || pageAlign <= PageSize) {
        // No block index change: the current block can hold the new size.
        if((sizeof == 16 || size > (sizeof >> 1)) && size <= sizeof && pageAlign <= PageSize) {
            *pPool = pool;
            return cast(void*)old;
        }
        p = Alloc(size, pPool);
        memcpy(p, old, curSize);
        Free(old, pool);
    }
    /*
    Page re-allocations:
    page => page
    */
    else {
        // No page count change: the current page run can hold the new size.
        if(size > (pageAlign - PageSize) && size <= pageAlign) {
            *pPool = pool;
            return cast(void*)old;
        }
        uint page = FindPage(old, pool);
        uint numPages = pageAlign >> PageShift;
        // Adding pages
        if(size > curSize) {
            // NOTE(review): looks like a ceiling division that should add
            // PageSize - 1, not PageSize - 2 — confirm before changing.
            uint newPages = (size - curSize + PageSize - 2) >> PageShift;
            // Try and extend pages in place
            uint i = void, pageIndex = void;
            // NOTE(review): the loop compares the current total page count
            // against the number of *additional* pages, and the MarkPages
            // span below mixes a page index with a page count — suspected
            // off-by-design bugs inherited from upstream; left as-is.
            for(i = numPages, pageIndex = page + i; i < newPages; i++, pageIndex++) {
                if(pageIndex == pool.ncommitted)
                    if(pool.CommitPages(newPages - i) == uint.max) break;
                if(pool.pageTable[pageIndex] != B_FREE) break;
            }
            if(i == newPages) {
                // Extended in place: the data stays where it is.
                pool.MarkPages(page, newPages - page, B_PAGEPLUS);
                *pPool = pool;
                p = cast(void*)old; // FIX: was left uninitialized
                version(MemoryProfile) _used += PageSize * newPages;
            }
            else {
                // Cannot extend: relocate, then release the old page run.
                p = Alloc(size, pPool);
                memcpy(p, old, curSize);
                pool.MarkPages(page, numPages, B_FREE);
                version(MemoryProfile) _used -= PageSize * numPages;
            }
        }
        // Freeing pages
        else {
            uint freePages = (curSize - size + PageSize - 2) >> PageShift;
            pool.MarkPages(page + numPages - freePages, freePages, B_FREE);
            *pPool = pool;
            p = cast(void*)old; // FIX: was left uninitialized
            version(MemoryProfile) _used -= PageSize * freePages;
        }
    }
    // A relocation invalidates the SizeOf() cache entry for the old address.
    if(_cache == old && p != old) {
        _cache = null;
        _cacheSize = 0;
    }
    return p;
}
/**
Internal memory release.
Params:
p = Pointer to the memory that is to be freed.
pool = Pointer to the pool containing the memory, can be null.

Caller must hold the Memory monitor. Runs the finalizer if one is pending,
then returns the block to its freelist or the pages to the pool.
*/
static void Free(in void* p, Pool* pool)
in {
    assert(p);
}
body {
    debug(MemoryInternal) Trace("Memory.Free(p=%p, pool=%p)", p, pool);
    debug(MemoryBreakOnAlloc) asm { int 3; }
    if(!pool) pool = FindPool(p);
    if(!pool) throw new Error("Memory.Free(): Bad pointer.");
    uint page = FindPage(p, pool);
    uint bit = FindBit(p, pool);
    pool.hasPointers.Clear(bit);
    // Finalize before releasing: TestClear also prevents a double-finalize.
    if(pool.hasFinalizer.nbits && pool.hasFinalizer.TestClear(bit)) {
        debug(MemoryExtendedInfo) Trace("\tCall finalizer");
        version(DigitalMars) _d_callfinalizer(cast(Object)cast(void*)p);
        else static assert(0);
    }
    BlockIndex blockIndex = pool.pageTable[page];
    // Block allocation, back on the freelist
    if(blockIndex < B_PAGE) {
        FreeList* block = cast(FreeList*)p;
        block.next = _freeList[blockIndex];
        block.pool = pool;
        _freeList[blockIndex] = block;
        version(MemoryProfile) _used -= BlockIndexSize[blockIndex];
    }
    // Page allocation, free pages
    else {
        // Count the B_PAGE head plus all following continuation pages.
        int numPages = 1;
        while(++page != pool.ncommitted && pool.pageTable[page] == B_PAGEPLUS)
            numPages++;
        pool.MarkPages(page - numPages, numPages, B_FREE);
        version(MemoryProfile) _used -= PageSize * numPages;
    }
}
/**
Initialize array memory
Params:
ti = Type of the array's elements
p = Pointer to the allocated memory to initialize.
size = Length of the memory to initialize.
init = How to initialize.

AllocInit.None leaves the memory untouched. TypeInfo initialization
replicates ti.init over the whole range, with fast paths for 1-byte and
word-sized initializers.
*/
static void MemoryInit(in TypeInfo ti, void* p, size_t size, AllocInit init)
in {
    assert(ti);
    assert(p);
    assert(size);
}
body {
    if(init == AllocInit.ZeroFill) {
        memset(p, 0, size);
    }
    else if(init == AllocInit.TypeInfo) {
        size_t length = ti.init.length;
        const void* ptr = ti.init.ptr;
        // A null init pointer means "all zeros" for this type.
        if(!ptr)
            memset(p, 0, size);
        else if(length == 1)
            memset(p, *cast(ubyte*)ptr, size);
        else if(length == uint.sizeof)
            // Word-sized initializer: replicate it word by word.
            foreach(i; 0 .. size / uint.sizeof)
                (cast(size_t*)p)[i] = *cast(size_t*)ptr;
        else
            // General case: tile the initializer image across the range.
            for(size_t i = 0; i < size; i += length)
                memcpy(p + i, ptr, length);
    }
}
/**
Finds the block index for a given allocation size.
Params:
size = The allocation size, in bytes (must be nonzero).
Returns:
Index of the smallest block class that will contain the allocation, or
B_PAGE when the size needs page-level allocation.
*/
static BlockIndex FindBlockIndex(size_t size)
in {
    assert(size);
}
out(i) {
    assert(i >= 0 && i <= B_PAGE);
}
body {
    // One-entry memoization: allocation sizes repeat heavily, so remember
    // the last mapping. Only safe because callers hold the Memory monitor.
    static __gshared size_t sizeCache;
    static __gshared BlockIndex cache;
    if(size == sizeCache) return cache;
    sizeCache = size;
    // Straight ladder over the block classes, smallest first.
    BlockIndex index = B_PAGE;
    if(size <= 16)        index = B_16;
    else if(size <= 32)   index = B_32;
    else if(size <= 64)   index = B_64;
    else if(size <= 128)  index = B_128;
    else if(size <= 256)  index = B_256;
    else if(size <= 512)  index = B_512;
    else if(size <= 1024) index = B_1024;
    else if(size <= 2048) index = B_2048;
    else if(size <= 4096) index = B_4096;
    return cache = index;
}
/**
Find the memory pool containing pointer p
Params:
p = Pointer to allocated memory.
Returns:
Pointer to the pool containing the memory.
Returns null if not found.
*/
static Pool* FindPool(in void* p) {
    // Fast range reject, then trivial single-pool case, then binary search
    // over the address-sorted pool table.
    return p >= _minAddr && p < _maxAddr ?
        (_nPools == 1 ? _pools[0] : BinarySearch(p, 0, _nPools)) :
        null;
}
/**
Specialized binary search for FindPool()
TODO!!! why on earth did I use a recursive search instead of an iterative one here?

NOTE(review): `mid - 1` underflows `uint` when mid == 0; reachable only if p
falls in a gap below the first pool, which the _minAddr pre-check in
FindPool() appears to exclude — confirm before refactoring.
*/
Pool* BinarySearch(in void* p, uint low, uint high) {
    if(high < low) return null;
    uint mid = low + ((high - low) / 2); // overflow-safe midpoint
    Pool* pool = _pools[mid];
    if(p >= pool.baseAddr && p < pool.topAddr) return pool;
    return pool.baseAddr > p ? BinarySearch(p, low, mid - 1) : BinarySearch(p, mid + 1, high);
}
/**
Find the memory page containing pointer p inside the given pool
Params:
p = Pointer to the allocation we want the page of.
pool = Pool containing the pointer.
Return:
The page number is returned.
*/
static uint FindPage(in void* p, in Pool* pool) {
    // Byte offset into the pool divided by the page size.
    return cast(uint)((p - pool.baseAddr) >> PageShift);
}
/**
Find the bit index of pointer p inside the given pool

One bit per smallest block (16 bytes); used to index the pool's mark,
hasPointers and hasFinalizer bitmaps.
*/
static uint FindBit(in void* p, in Pool* pool) {
    return cast(uint)((p - pool.baseAddr) >> PtrShift);
}
/**
Find the block size of an allocation

Params:
p = Pointer into the allocation.
pool = Its pool if already known, otherwise looked up (throws on a pointer
       the manager does not own).
Returns:
The full capacity in bytes of the block or page run holding p.
*/
static size_t FindAllocSize(in void* p, Pool* pool = null) {
    if(!pool) pool = FindPool(p);
    if(!pool) throw new Error("FindAllocSize: Bad Pointer.");
    uint page = FindPage(p, pool);
    BlockIndex blockIndex = pool.pageTable[page];
    // Block size
    if(blockIndex < B_PAGE)
        return BlockIndexSize[blockIndex];
    // Page size: count continuation pages until the run ends.
    else foreach(i; page + 1 .. pool.ncommitted)
        if(pool.pageTable[i] != B_PAGEPLUS)
            return (i - page) * PageSize;
    // Run extends to the end of the committed region.
    return (pool.ncommitted - page) * PageSize;
}
/**
Allocate a new memory pool

Params:
pages = Minimum number of pages the pool must hold (rounded up to the
        commit granularity, then grown by the sizing heuristics below).
Returns:
The new pool, inserted into _pools sorted by base address, or null on
failure.
*/
static Pool* AllocPool(uint pages = 1) {
    debug(MemoryInternal) Trace("Memory.AllocPool(pages=%u)", pages);
    debug(MemoryBreakOnAllocPool) asm { int 3; }
    // Round up to the commit granularity.
    pages = (pages + (CommitSize / PageSize) - 1) & ~(CommitSize / PageSize - 1);
    if(pages < PoolSize / PageSize) {
        // Never create a pool smaller than the default size.
        pages = PoolSize / PageSize;
    }
    else {
        // Large request: over-allocate by 50% to amortize future growth.
        uint n = pages + (pages >> 1);
        if(n < size_t.max / PageSize) pages = n;
    }
    if(_nPools) {
        // Scale new pools with the number of existing ones (capped at 8x).
        uint n = (_nPools > 8 ? 8 : _nPools) * PoolSize / PageSize;
        if(pages < n) pages = n;
    }
    debug(MemoryExtendedInfo) Trace("\tCreating with %u pages", pages);
    Pool* pool = cast(Pool*)calloc(1, Pool.sizeof);
    if(!pool) return null;
    pool.Alloc(pages);
    if(!pool.baseAddr) goto Error;
    // Grow the pool table geometrically (x2, starting at 16 entries).
    if(_nPools == _poolsLength) {
        _poolsLength = _poolsLength ? _poolsLength * 2 : 16;
        _pools = cast(Pool**)realloc(_pools, _poolsLength * (Pool*).sizeof);
        if(!_pools) goto Error;
    }
    // Insert sorted by address so FindPool() can binary-search.
    uint i;
    for(; i != _nPools; i++) if(pool.topAddr <= _pools[i].baseAddr) break;
    memmove(_pools + i + 1, _pools + i, (_nPools - i) * (Pool*).sizeof);
    _pools[i] = pool;
    _nPools++;
    _minAddr = _pools[0].baseAddr;
    _maxAddr = _pools[_nPools - 1].topAddr;
    return pool;
Error:
    debug(MemoryExtendedInfo) Trace("\tFailed!");
    pool.Free();
    free(pool);
    return null;
}
- /+ TODO!!! threading support on hold until 'shared' gets decent semantics
- if(!IsRuntimeShutdown && RuntimeThread.isMultiThreaded) {
- // We require exclusive execution while we scan threads,
- // once the stacks are all marked, we can resume the threads and finish collecting
- RuntimeThread.PauseAll();
- scope(exit) RuntimeThread.ResumeAll();
- version(Windows) CONTEXT context;
- foreach(t; RuntimeThread) {
- version(Windows) {
- context.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
- if(!.GetThreadContext(t.handle, &context)) SystemException();
- MarkRangePointers(cast(void*)context.Esp, t.stackBottom);
- MarkRangePointers(&context.Edi, &context.Eip);
- }
- else version(Posix) {
- assert(0);
- // The registers are already stored on the stack
- //if(t.isSelf()) t.stackTop = Thread.getESP();
- //version (STACKGROWSDOWN) mark(t.stackTop, t.stackBottom);
- //else mark(t.stackBottom, t.stackTop);
- }
- else static assert(0);
- }
- }
- // Only running a single thread, easy
- else {
- RuntimeThread current = RuntimeThread.current;
- version(StackGrowsDown) MarkRangePointers(current.stackTop, current.stackBottom);
- else MarkRangePointers(current.stackBottom, current.stackTop);
- }
- +/
/**
Wrapper used to ensure the registers are also part of the root set.

pushad spills all general-purpose registers onto the stack, then the stack
top is captured so the conservative scan covers them; popad restores the
registers afterwards. NOTE(review): if DoCollectGarbage throws, popad is
skipped — presumably acceptable since OutOfMemoryError is fatal; confirm.
*/
static size_t CollectGarbage(bool recursive = false) {
    debug(MemoryInternal) Trace("CollectGarbage(recursive=%b)", recursive & 1);
    debug(MemoryBreakOnCollection) asm { int 3; }
    const(void)* stackTop = void;
    version(X86) asm {
        pushad;                    // spill registers into the scanned stack area
        mov stackTop[EBP], ESP;    // capture stack top including the spill
    }
    else static assert(0);
    assert(stackTop);
    size_t free = recursive ?
        DoCollectGarbage!true(stackTop) : DoCollectGarbage!false(stackTop);
    debug(MemoryExtendedInfo) Trace("\t%u bytes free", free);
    version(MemoryProfile) _used -= _total - free;
    version(X86) asm {
        popad;                     // restore spilled registers
    }
    else static assert(0);
    return free;
}
/**
Scans the known roots, usually the static data segment and the threads' stacks,
for reachable objects in the working set, and releases the unreachable ones.
Params:
recursive = Whether to run the collector until no more memory can be freed.
stackTop = The top of the calling thread's stack.
initialFree = Only used if recursive is true to determine whether to recurse or not.
Returns:
The total amount of free memory in the working set is returned.

Phases: zero all mark bitmaps; mark from the registered ranges and the
stack; sweep unmarked allocations (finalizing as needed); then rebuild the
per-size freelists — inline during the sweep when non-recursive, or in a
dedicated final pass when recursive.

FIX(review): in the recursive-mode final rebuild pass, `stride` was computed
from a stale `blockSize` left over from earlier loops; `blockSize` is now
set from BlockIndexSize[blockIndex] before use.
*/
static size_t DoCollectGarbage(bool recursive)(in void* stackTop, size_t initialFree = 0) {
    debug(MemoryInternal) Trace("Memory.DoCollectGarbage!%.*s(stackTop=%p, initialFree=%u)",
        recursive ? "true" : "false", stackTop, initialFree);
    const(void)* p = void, top = void;
    BlockIndex blockIndex = void;
    uint page = void, blockSize = void, bit = void, stride = void, maxBit = void;
    uint freePages = void;
    bool hasFinalizers = void;
    FreeList* block = void;
    size_t free;
    // The freelists and the SizeOf() cache are rebuilt from scratch.
    _freeList[] = null;
    _cache = null;
    _cacheSize = 0;
    enum InitPage = q{
        blockIndex = pool.pageTable[page];
        bit = page * (PageSize >> PtrShift);
    };
    enum InitBlock = q{
        blockSize = BlockIndexSize[blockIndex];
        stride = blockSize >> PtrShift;
        p = pool.baseAddr + page * PageSize;
        top = p + PageSize;
    };
    // Mark phase: clear marks, then scan roots and reachable allocations.
    foreach(pool; _pools[0 .. _nPools]) pool.mark.Zero();
    // Scan the roots and working set for reachable objects.
    foreach(range; _ranges[0 .. _nRanges])
        MarkRangePointers(range.bottom, range.top);
    MarkRangePointers(stackTop, STACKBOTTOM); // TODO!!! temporary hack until 'shared' works
    foreach(pool; _pools[0 .. _nPools]) {
        for(page = 0; page != pool.ncommitted; page++) {
            mixin(InitPage);
            if(blockIndex < B_PAGE) {
                mixin(InitBlock);
                // Scan only blocks flagged as containing pointers.
                for(; p != top; p += blockSize, bit += stride)
                    if(pool.hasPointers.Test(bit))
                        MarkRangePointers(p, p + blockSize);
            }
            else if(blockIndex == B_PAGE) {
                if(!pool.hasPointers.Test(bit)) continue;
                p = pool.baseAddr + page * PageSize;
                top = p + PageSize;
                // Extend over the continuation pages of this allocation.
                while(page + 1 != pool.ncommitted && pool.pageTable[page + 1] == B_PAGEPLUS) {
                    page++;
                    top += PageSize;
                }
                MarkRangePointers(p, top);
            }
        }
    }
    // Pushes every unmarked block of the current page onto its freelist.
    enum BuildFreeList = q{
        blockSize = BlockIndexSize[blockIndex];
        bit = page * (PageSize >> PtrShift);
        p = pool.baseAddr + page * PageSize;
        top = p + PageSize;
        for(; p != top; p += blockSize, bit += stride) {
            if(pool.mark.Test(bit)) continue;
            block = cast(FreeList*)p;
            block.next = _freeList[blockIndex];
            block.pool = pool;
            _freeList[blockIndex] = block;
        }
    };
    // Removes a fully-free pool from the sorted pool table.
    // NOTE(review): the calloc'd Pool struct itself is not free()d here, and
    // _minAddr/_maxAddr are not recomputed — presumably latent leaks; confirm.
    enum FreePool = q{
        uint i;
        for(; i != _nPools; i++) if(pool.topAddr <= _pools[i].topAddr) break;
        memmove(_pools + i, _pools + i + 1, (_nPools - i - 1) * (Pool*).sizeof);
        pool.Free();
        _nPools--;
    };
    // Sweep the working set and finalize every unreachable objects. When
    // recursive is false the freelist is also rebuilt here.
    foreach(pool; _pools[0 .. _nPools]) {
        static if(!recursive) freePages = 0;
        for(page = 0; page != pool.ncommitted; page++) {
            mixin(InitPage);
            hasFinalizers = cast(bool)pool.hasFinalizer.nbits;
            if(blockIndex < B_PAGE) {
                mixin(InitBlock);
                static if(!recursive) maxBit = 0;
                for(; p != top; p += blockSize, bit += stride) {
                    if(pool.mark.Test(bit)) {
                        static if(!recursive) if(!maxBit) maxBit = bit;
                        continue;
                    }
                    pool.hasPointers.Clear(bit);
                    if(hasFinalizers && pool.hasFinalizer.TestClear(bit))
                        _d_callfinalizer(cast(Object)p);
                    free += blockSize;
                }
                static if(!recursive) {
                    // NOTE(review): maxBit holds the first *marked* bit, so this
                    // equality looks unsatisfiable and fully-dead pages are never
                    // reclassified B_FREE here — suspected upstream bug; left as-is.
                    if(maxBit == bit + (PageSize >> PtrShift)) {
                        pool.pageTable[page] = B_FREE;
                        freePages++;
                    }
                    else {
                        mixin(BuildFreeList);
                    }
                    continue;
                }
            }
            else if(blockIndex == B_PAGE) {
                if(pool.mark.Test(bit)) continue;
                p = pool.baseAddr + page * PageSize;
                pool.hasPointers.Clear(bit);
                if(hasFinalizers && pool.hasFinalizer.TestClear(bit))
                    _d_callfinalizer(cast(Object)p);
                // Release the head page and all its continuations.
                do {
                    pool.pageTable[page] = B_FREE;
                    free += PageSize;
                    freePages++;
                } while(++page != pool.ncommitted && pool.pageTable[page] == B_PAGEPLUS);
                static if(!recursive) continue;
            }
            static if(!recursive) if(blockIndex == B_FREE) freePages++;
        }
        static if(!recursive) if(freePages == pool.ncommitted) {
            mixin(FreePool);
        }
    }
    // Run the collector recursively until it does not free anymore memory, the
    // freelist is rebuilt on the last iteration.
    static if(recursive) {
        if(free != initialFree) return DoCollectGarbage!true(stackTop, free);
        foreach(pool; _pools[0 .. _nPools]) {
            freePages = 0;
            for(page = 0; page != pool.ncommitted; page++) {
                blockIndex = pool.pageTable[page];
                if(blockIndex >= B_PAGE) {
                    freePages++;
                    continue;
                }
                bit = page * (PageSize >> PtrShift);
                maxBit = bit + (PageSize >> PtrShift);
                blockSize = BlockIndexSize[blockIndex]; // FIX: was stale from earlier loops
                stride = blockSize >> PtrShift;
                // Page with no surviving block at all becomes B_FREE...
                for(; bit != maxBit; bit += stride)
                    if(pool.mark.Test(bit)) break;
                if(bit == maxBit) {
                    pool.pageTable[page] = B_FREE;
                    freePages++;
                    continue;
                }
                // ...otherwise its dead blocks go back on the freelist.
                mixin(BuildFreeList);
            }
            if(freePages == pool.ncommitted) {
                mixin(FreePool);
            }
        }
    }
    return free;
}
/**
    Mark all pointers to valid allocations within given memory range.
    Marked allocations are not freed by the current collection.
*/
static void MarkRangePointers(in void* bottom, in void* top) {
    auto cursor = cast(void**)bottom;
    auto sentinel = cast(void**)top;
    for(; cursor < sentinel; cursor++) {
        void* candidate = *cursor;
        // Cheap global range test before the per-pool lookup.
        if(candidate < _minAddr || candidate > _maxAddr) continue;
        Pool* owner = FindPool(candidate);
        if(owner is null) continue;
        uint page = FindPage(candidate, owner);
        BlockIndex blockIndex = owner.pageTable[page];
        uint bit = void;
        if(blockIndex < B_PAGE) {
            // Small block: round the pointer down to its block boundary
            // (block sizes are powers of two) to locate the mark bit.
            bit = FindBit(cast(void*)(cast(size_t)candidate & ~(BlockIndexSize[blockIndex] - 1)), owner);
        }
        else if(blockIndex == B_PAGE) {
            // Head page of a large allocation: one bit per pointer-sized slot.
            bit = page * (PageSize >> PtrShift);
        }
        else if(blockIndex == B_PAGEPLUS) {
            // Interior page of a large allocation: walk back to its B_PAGE head.
            while(owner.pageTable[--page] == B_PAGEPLUS) {}
            bit = page * (PageSize >> PtrShift);
        }
        else {
            continue; // free or uncommitted page — not a live allocation
        }
        owner.mark.Set(bit);
    }
}
// TODO: temporary hack until threading gets back
// Returns the bottom of the current thread's stack (its highest address,
// since StackGrowsDown); used as the upper scan limit for the stack range.
// NOTE(review): FS:[4] is the StackBase field of the Win32 x86 TIB, so despite
// the version(X86) guard this is only meaningful on Windows x86 — confirm
// before relying on it under version(Posix).
static void* STACKBOTTOM() {
    version(X86) asm {
        naked;          // no prologue/epilogue: result is produced directly in EAX
        mov EAX, FS:4;  // TIB.StackBase (offset 4)
        ret;
    }
    else static assert(0);
}
/**
    Internal memory pool
*/
struct Pool {
    void* baseAddr;        /// The base address of the pool's memory
    void* topAddr;         /// The top address of the pool's memory
    Bits mark;             /// Marked allocations during collection, these are still in use
    Bits hasFinalizer;     /// Allocations which need to be finalized before they are freed
    Bits hasPointers;      /// Allocations not containing memory pointers, not scanned
    uint npages;           /// Total number of memory pages within this pool
    uint ncommitted;       /// Number of memory pages currently committed
    BlockIndex* pageTable; /// Lookup table for the used block index of every page

    /**
        Allocate a new pool.
        Reserves (but does not commit) npages of address space and allocates the
        bookkeeping structures; pages are committed on demand by CommitPages.
        Params:
            npages = The number of pages to allocate in the pool.
        Returns:
            $(B true) on success, $(B false) otherwise.
    */
    bool Alloc(uint npages) {
        debug(MemoryPool) Trace("Memory.Pool[%p].Alloc(npages=%u)", &this, npages);
        size_t poolSize = npages * PageSize;
        assert(poolSize >= PoolSize);
        version(Windows) {
            // Reserve address space only; committing happens page-run at a time.
            baseAddr = VirtualAlloc(null, poolSize, MEM_RESERVE, PAGE_READWRITE);
            if(!baseAddr) OutOfMemoryError();
        }
        else version(Posix) {
            baseAddr = mmap(null, poolSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
            if(baseAddr == MAP_FAILED) OutOfMemoryError();
        }
        else static assert(0);
        assert((cast(uint)baseAddr & (PageSize - 1)) == 0); // must be page-aligned
        if(!baseAddr) return false;
        topAddr = baseAddr + poolSize;
        // One bit per 16-byte granule of the pool.
        mark.Alloc(poolSize / 16);
        hasPointers.Alloc(poolSize / 16);
        // NOTE(review): hasFinalizer is not allocated here; the sweep checks
        // hasFinalizer.nbits before using it, so it is presumably allocated
        // lazily on the first finalizable allocation — confirm.
        pageTable = cast(BlockIndex*)malloc(npages);
        if(!pageTable) OutOfMemoryError();
        memset(pageTable, B_UNCOMMITTED, npages);
        this.npages = npages;
        ncommitted = 0;
        version(MemoryProfile) _total += poolSize;
        return true;
    }

    /**
        Free the pool, releasing its memory range and bookkeeping structures.
    */
    void Free() {
        debug(MemoryPool) Trace("Memory.Pool[%p].Free()", &this);
        version(MemoryProfile) _total -= PageSize * npages;
        if(baseAddr) {
            if(npages) {
                if(ncommitted) {
                    version(Windows) {
                        // Decommit only the committed prefix of the reservation.
                        bool decommitted = cast(bool)VirtualFree(baseAddr, ncommitted * PageSize, MEM_DECOMMIT);
                        assert(decommitted);
                    }
                    // Posix: mmap'd memory has no separate decommit step.
                    // (The original asserted an uninitialized flag here on Posix.)
                    ncommitted = 0;
                }
                version(Windows) {
                    // MEM_RELEASE requires a size of 0.
                    bool released = cast(bool)VirtualFree(baseAddr, 0, MEM_RELEASE);
                }
                else version(Posix) {
                    // munmap needs the full mapping length (a zero length fails
                    // with EINVAL) and returns 0 on success — the original passed
                    // 0 and treated a nonzero (failure) return as success.
                    bool released = munmap(baseAddr, npages * PageSize) == 0;
                }
                else static assert(0);
                assert(released);
                npages = 0;
            }
            baseAddr = null;
            topAddr = null;
        }
        if(pageTable) free(pageTable);
        mark.Free();
        hasFinalizer.Free();
        hasPointers.Free();
    }

    /**
        Allocate memory from available pool pages.
        Params:
            npages = The number of pages for the allocation.
        Returns:
            The index of the first page is returned, or uint.max if the pool does
            not contain enough free pages for the allocation.
    */
    uint AllocPages(uint npages = 1) {
        debug(MemoryPool) Trace("Memory.Pool[%p].AllocPages(npages=%u)", &this, npages);
        // Scan the committed pages for a run of npages consecutive free pages.
        for(uint i, j; i != ncommitted; i++) {
            if(pageTable[i] != B_FREE) {
                j = 0; // run broken, restart the count
                continue;
            }
            if(++j == npages) return i - j + 1; // index of the run's first page
        }
        // No suitable run among committed pages; try committing more.
        return CommitPages(npages);
    }

    /**
        Commit memory pages from the pool.
        Params:
            npages = The number of pages to commit.
        Returns:
            The index of the first newly committed page is returned, or uint.max if
            the pool does not have enough uncommitted pages left.
    */
    uint CommitPages(uint npages) {
        debug(MemoryPool) Trace("Memory.Pool[%p].CommitPages(npages=%u)", &this, npages);
        if(ncommitted + npages <= this.npages) {
            // Round the request up to a whole multiple of CommitSize worth of pages.
            uint commit = (npages + (CommitSize / PageSize) - 1) & ~(CommitSize / PageSize - 1);
            if(ncommitted + commit > this.npages) return uint.max;
            version(Windows)
                bool result = cast(bool)VirtualAlloc(baseAddr + ncommitted * PageSize,
                    commit * PageSize, MEM_COMMIT, PAGE_READWRITE);
            else version(Posix)
                enum result = true; // mmap'd pages are already accessible
            else
                static assert(0);
            if(result) {
                memset(pageTable + ncommitted, B_FREE, commit);
                uint i = ncommitted;
                ncommitted += commit;
                // Merge with any trailing run of already-free pages so the
                // caller gets the earliest start of a large-enough run.
                while(i && pageTable[i - 1] == B_FREE) i--;
                return i;
            }
        }
        return uint.max;
    }

    /**
        Mark a set of pages with the given block index.
        Params:
            page = The first page to mark
            npages = The number of pages to mark
            blockIndex = The block index to mark pages with
    */
    void MarkPages(uint page, uint npages, uint blockIndex) {
        memset(&pageTable[page], blockIndex, npages);
    }

    /**
        Comparison operator to sort the pool table by base address.
    */
    int opCmp(in Pool* pool) const {
        if(baseAddr < pool.baseAddr) return -1;
        else return cast(int)(baseAddr > pool.baseAddr);
    }
} // Pool
/**
    Internal bit array
*/
struct Bits {
    static if(size_t.sizeof == 4) enum {
        BITS_PER_WORD = 32,
        BITS_SHIFT = 5,
        BITS_MASK = 31,
        EXTRA_WORDS = 2
    }
    else static if(size_t.sizeof == 8) enum {
        BITS_PER_WORD = 64,
        BITS_SHIFT = 6,
        BITS_MASK = 63,
        EXTRA_WORDS = 4
    }
    else static assert(0);

    size_t* _data; /// The bits data
    uint nwords;   /// Number of data words
    uint nbits;    /// Number of data bits

    invariant() {
        assert(!_data || nwords * size_t.sizeof * 8 >= nbits);
    }

    /**
        Get the pointer to the data bits.
        Skips the first allocated word (EXTRA_WORDS pads the allocation around
        the payload — presumably sentinel/guard words; confirm with the users).
    */
    size_t* data()
    in {
        assert(_data);
    }
    body {
        return _data + 1;
    }

    /**
        Allocate (zero-initialized) storage for nbits bits.
    */
    void Alloc(uint nbits)
    in {
        assert(!_data);
    }
    body {
        this.nbits = nbits;
        nwords = (nbits + BITS_MASK) >> BITS_SHIFT;
        _data = cast(size_t*)calloc(nwords + EXTRA_WORDS, size_t.sizeof);
        if(!_data) OutOfMemoryError();
    }

    /**
        Free bits
    */
    void Free() {
        if(_data) {
            free(_data);
            _data = null;
        }
    }

    /**
        Zero bits
    */
    void Zero() {
        memset(data, 0, nwords * size_t.sizeof);
    }

    /**
        Copy bits from another array
    */
    void Copy(Bits* f)
    in {
        assert(nwords == f.nwords);
    }
    body {
        memcpy(data, f.data, nwords * size_t.sizeof);
    }

    /**
        Set bit
    */
    void Set(uint i)
    in {
        assert(i < nbits);
    }
    body {
        // The mask must be size_t-wide: a bare 1 is a 32-bit int, and shifting
        // it by up to BITS_MASK (63 on 64-bit targets) is invalid/wrong.
        data[i >> BITS_SHIFT] |= (cast(size_t)1 << (i & BITS_MASK));
    }

    /**
        Clear bit
    */
    void Clear(uint i)
    in {
        assert(i < nbits);
    }
    body {
        data[i >> BITS_SHIFT] &= ~(cast(size_t)1 << (i & BITS_MASK));
    }

    /**
        Test bit.
        Returns:
            1 if the bit is set, 0 otherwise.
    */
    uint Test(uint i)
    in {
        assert(i < nbits);
    }
    body {
        // Shift the word down instead of masking in place so bits 32..63 are
        // not lost when the size_t result narrows to the uint return type.
        return cast(uint)((data[i >> BITS_SHIFT] >> (i & BITS_MASK)) & 1);
    }

    /**
        Test & Clear bit.
        Returns:
            Nonzero if the bit was set before clearing, 0 otherwise.
    */
    uint TestClear(uint i)
    in {
        assert(i < nbits);
    }
    body {
        version(DigitalMars) {
            // Compiler intrinsic: atomically-ish test and reset, returns old bit.
            return btr(data, i);
        }
        else version(X86) {
            // NOTE(review): hand-written 32-bit x86 path; assumes 'this' in EAX
            // and the argument on the stack, and a 4-byte word size — verify
            // against the target compiler's ABI before enabling.
            asm {
                naked;
                mov EAX, _data[EAX];
                mov ECX, i-4[ESP];
                btr 4[EAX], ECX;
                sbb EAX, EAX;
                ret 4;
            }
        }
        else {
            // Portable fallback; operates on size_t words so it is also correct
            // on 64-bit targets (the original used uint words and a uint mask).
            size_t* p = &data[i >> BITS_SHIFT];
            size_t mask = cast(size_t)1 << (i & BITS_MASK);
            uint result = (*p & mask) ? 1 : 0;
            *p &= ~mask;
            return result;
        }
    }
} // Bits
- } // Memory
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement