- $ diff -Naur rpmalloc.c rpmalloc.cpp
- --- rpmalloc.c 2019-05-24 16:50:36.934851263 -0500
- +++ rpmalloc.cpp 2019-05-21 15:59:57.677746534 -0500
- @@ -1,9 +1,9 @@
- -/* rpmalloc.c - Memory allocator - Public Domain - 2016 Mattias Jansson
- +/* rpmalloc.c - Memory allocator - Public Domain - 2016 Mattias Jansson / Rampant Pixels
- *
- * This library provides a cross-platform lock free thread caching malloc implementation in C11.
- * The latest source code is always available at
- *
- - * https://github.com/mjansson/rpmalloc
- + * https://github.com/rampantpixels/rpmalloc
- *
- * This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
- *
- @@ -26,20 +26,32 @@
- #endif
- #ifndef ENABLE_VALIDATE_ARGS
- //! Enable validation of args to public entry points
- +#if defined(__HAIKU__) && __GNUC__ > 2
- +#define ENABLE_VALIDATE_ARGS 1
- +#else
- #define ENABLE_VALIDATE_ARGS 0
- #endif
- +#endif
- #ifndef ENABLE_STATISTICS
- //! Enable statistics collection
- #define ENABLE_STATISTICS 0
- #endif
- #ifndef ENABLE_ASSERTS
- //! Enable asserts
- +#ifdef __HAIKU__
- +#define ENABLE_ASSERTS 1
- +#else
- #define ENABLE_ASSERTS 0
- #endif
- +#endif
- #ifndef ENABLE_PRELOAD
- //! Support preloading
- +#ifdef __HAIKU__
- +#define ENABLE_PRELOAD 1
- +#else
- #define ENABLE_PRELOAD 0
- #endif
- +#endif
- #ifndef DISABLE_UNMAP
- //! Disable unmapping memory pages
- #define DISABLE_UNMAP 0
- @@ -60,9 +72,13 @@
- #endif
- #if !ENABLE_UNLIMITED_THREAD_CACHE
- //! Multiplier for thread cache (cache limit will be span release count multiplied by this value)
- +#ifdef __HAIKU__
- +#define THREAD_CACHE_MULTIPLIER 4
- +#else
- #define THREAD_CACHE_MULTIPLIER 16
- #endif
- #endif
- +#endif
- #if ENABLE_GLOBAL_CACHE && ENABLE_THREAD_CACHE
- #ifndef ENABLE_UNLIMITED_GLOBAL_CACHE
- @@ -71,8 +87,12 @@
- #endif
- #if !ENABLE_UNLIMITED_GLOBAL_CACHE
- //! Multiplier for global cache (cache limit will be span release count multiplied by this value)
- +#ifdef __HAIKU__
- +#define GLOBAL_CACHE_MULTIPLIER 32
- +#else
- #define GLOBAL_CACHE_MULTIPLIER 64
- #endif
- +#endif
- #else
- # undef ENABLE_GLOBAL_CACHE
- # define ENABLE_GLOBAL_CACHE 0
- @@ -96,6 +116,13 @@
- # define _Static_assert static_assert
- #else
- # define FORCEINLINE inline __attribute__((__always_inline__))
- +# if defined(__cplusplus)
- +#if __GNUC__ == 2
- +#define _Static_assert(...)
- +#else
- +# define _Static_assert static_assert
- +#endif
- +# endif
- #endif
- #if PLATFORM_WINDOWS
- # if ENABLE_VALIDATE_ARGS
- @@ -111,16 +138,14 @@
- # endif
- # if defined(__HAIKU__)
- # include <OS.h>
- -# include <pthread.h>
- +# include <TLS.h>
- # endif
- #endif
- -#ifndef ARCH_64BIT
- -# if defined(__LLP64__) || defined(__LP64__) || defined(_WIN64)
- -# define ARCH_64BIT 1
- -# else
- -# define ARCH_64BIT 0
- -# endif
- +#if defined(__LLP64__) || defined(__LP64__)
- +# define ARCH_64BIT 1
- +#else
- +# define ARCH_64BIT 0
- #endif
- #include <stdint.h>
- @@ -159,6 +184,32 @@
- static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return (_InterlockedCompareExchange((volatile long*)dst, (long)val, (long)ref) == (long)ref) ? 1 : 0; }
- #endif
- +#elif defined(__HAIKU__)
- +
- +#include <OS.h>
- +
- +typedef int32 atomic32_t;
- +typedef int64 atomic64_t;
- +typedef intptr_t atomicptr_t;
- +
- +#define atomic_thread_fence_acquire()
- +#define atomic_thread_fence_release()
- +
- +static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return atomic_get(src); }
- +static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { atomic_set(dst, val); }
- +static FORCEINLINE int32_t atomic_incr32(atomic32_t* val) { return atomic_add(val, 1) + 1; }
- +static FORCEINLINE int32_t atomic_add32(atomic32_t* val, int32_t add) { return atomic_add(val, add) + add; }
- +
- +#if ARCH_64BIT
- +static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)atomic_get64(src); }
- +static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { atomic_set64(dst, (atomicptr_t)val); }
- +static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return atomic_test_and_set64(dst, (int64)val, (int64)ref) == (int64)ref; }
- +#else
- +static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)atomic_get(src); }
- +static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { atomic_set(dst, (atomicptr_t)val); }
- +static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return atomic_test_and_set(dst, (int32)val, (int32)ref) == (int32)ref; }
- +#endif
- +
- #else
- #include <stdatomic.h>
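The hunk above maps rpmalloc's atomic shims onto the kernel atomics from Haiku's <OS.h>: atomic_add()/atomic_add64() return the value *before* the addition, hence the "+ 1" / "+ add" corrections, and atomic_test_and_set() returns the old value, so the CAS succeeds when that old value equals the expected one. The explicit acquire/release fences compile to nothing, presumably relying on the atomic_* calls themselves to order memory. A small stand-alone sketch of that API (illustrative only, not part of the patch):

/* Illustrative sketch, not part of the patch: the Haiku atomics used by the
   wrappers above. Assumes Haiku's <OS.h>; as in the hunk, atomic_add() and
   atomic_test_and_set() return the previous value. */
#include <OS.h>
#include <stdio.h>

int main(void) {
	int32 counter = 0;

	atomic_set(&counter, 5);                 /* plain atomic store */
	int32 before = atomic_add(&counter, 3);  /* returns the old value: 5 */
	int32 now = atomic_get(&counter);        /* 8 */

	/* compare-and-swap: write 42 only if the current value is still 8 */
	int32 old = atomic_test_and_set(&counter, 42, 8);

	printf("before=%ld now=%ld cas-old=%ld final=%ld\n",
		(long)before, (long)now, (long)old, (long)atomic_get(&counter));
	return 0;
}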
- @@ -180,6 +231,11 @@
- #endif
- +#ifdef __HAIKU__
- +namespace BPrivate {
- +namespace rpmalloc {
- +#endif
- +
- /// Preconfigured limits and sizes
- //! Granularity of a small allocation block
- #define SMALL_GRANULARITY 32
- @@ -391,9 +447,9 @@
- static atomicptr_t _memory_orphan_heaps;
- //! Running orphan counter to avoid ABA issues in linked list
- static atomic32_t _memory_orphan_counter;
- -#if ENABLE_STATISTICS
- //! Active heap count
- static atomic32_t _memory_active_heaps;
- +#if ENABLE_STATISTICS
- //! Total number of currently mapped memory pages
- static atomic32_t _mapped_pages;
- //! Total number of currently lost spans
- @@ -407,8 +463,10 @@
- #endif
- //! Current thread heap
- -#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
- +#if defined(__APPLE__) && ENABLE_PRELOAD
- static pthread_key_t _memory_thread_heap;
- +#elif defined(__HAIKU__) && ENABLE_PRELOAD
- +static int32 _memory_thread_heap;
- #else
- # ifdef _MSC_VER
- # define _Thread_local __declspec(thread)
- @@ -425,8 +483,10 @@
- //! Get the current thread heap
- static FORCEINLINE heap_t*
- get_thread_heap(void) {
- -#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
- - return pthread_getspecific(_memory_thread_heap);
- +#if defined(__APPLE__) && ENABLE_PRELOAD
- + return (heap_t*)pthread_getspecific(_memory_thread_heap);
- +#elif defined(__HAIKU__) && ENABLE_PRELOAD
- + return (heap_t*)tls_get(_memory_thread_heap);
- #else
- return _memory_thread_heap;
- #endif
- @@ -435,8 +495,10 @@
- //! Set the current thread heap
- static void
- set_thread_heap(heap_t* heap) {
- -#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
- +#if defined(__APPLE__) && ENABLE_PRELOAD
- pthread_setspecific(_memory_thread_heap, heap);
- +#elif defined(__HAIKU__) && ENABLE_PRELOAD
- + tls_set(_memory_thread_heap, heap);
- #else
- _memory_thread_heap = heap;
- #endif
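With ENABLE_PRELOAD set, the per-thread heap pointer now lives in a Haiku TLS slot from <TLS.h> (tls_allocate()/tls_get()/tls_set()) rather than a pthread key, presumably because the allocator is meant to sit inside libroot, where the pthread key machinery is not usable that early. A minimal sketch of those slots, independent of the patch (the spawn_thread() scaffolding is only here to show the per-thread behaviour):

/* Illustrative sketch, not part of the patch: Haiku TLS slots as used by
   get_thread_heap()/set_thread_heap() above. Assumes <TLS.h>'s
   tls_allocate()/tls_set()/tls_get(). */
#include <OS.h>
#include <TLS.h>
#include <stdio.h>

static int32 sHeapSlot;

static int32
worker(void* data) {
	tls_set(sHeapSlot, data);                 /* store this thread's value */
	printf("thread %ld sees %p\n", (long)find_thread(NULL), tls_get(sHeapSlot));
	return 0;
}

int main(void) {
	sHeapSlot = tls_allocate();               /* one shared slot index, per-thread storage */

	int a = 1, b = 2;
	thread_id t1 = spawn_thread(worker, "worker a", B_NORMAL_PRIORITY, &a);
	thread_id t2 = spawn_thread(worker, "worker b", B_NORMAL_PRIORITY, &b);
	resume_thread(t1);
	resume_thread(t2);

	status_t result;
	wait_for_thread(t1, &result);
	wait_for_thread(t2, &result);
	return 0;
}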
- @@ -458,7 +520,7 @@
- static heap_t*
- _memory_heap_lookup(int32_t id) {
- uint32_t list_idx = id % HEAP_ARRAY_SIZE;
- - heap_t* heap = atomic_load_ptr(&_memory_heaps[list_idx]);
- + heap_t* heap = (heap_t*)atomic_load_ptr(&_memory_heaps[list_idx]);
- while (heap && (heap->id != id))
- heap = heap->next_heap;
- return heap;
- @@ -503,7 +565,7 @@
- _memory_map_spans(heap_t* heap, size_t span_count) {
- if (span_count <= heap->spans_reserved) {
- span_t* span = heap->span_reserve;
- - heap->span_reserve = pointer_offset(span, span_count * _memory_span_size);
- + heap->span_reserve = (span_t*)pointer_offset(span, span_count * _memory_span_size);
- heap->spans_reserved -= span_count;
- if (span == heap->span_reserve_master) {
- assert(span->flags & SPAN_FLAG_MASTER);
- @@ -525,7 +587,7 @@
- if ((_memory_page_size > _memory_span_size) && ((request_spans * _memory_span_size) % _memory_page_size))
- request_spans += _memory_span_map_count - (request_spans % _memory_span_map_count);
- size_t align_offset = 0;
- - span_t* span = _memory_map(request_spans * _memory_span_size, &align_offset);
- + span_t* span = (span_t*)_memory_map(request_spans * _memory_span_size, &align_offset);
- if (!span)
- return span;
- span->align_offset = (uint32_t)align_offset;
- @@ -551,7 +613,7 @@
- _memory_heap_cache_insert(heap, prev_span);
- }
- heap->span_reserve_master = span;
- - heap->span_reserve = pointer_offset(span, span_count * _memory_span_size);
- + heap->span_reserve = (span_t*)pointer_offset(span, span_count * _memory_span_size);
- heap->spans_reserved = request_spans - span_count;
- }
- return span;
- @@ -565,7 +627,7 @@
- assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN));
- int is_master = !!(span->flags & SPAN_FLAG_MASTER);
- - span_t* master = is_master ? span : (pointer_offset(span, -(int32_t)(span->total_spans_or_distance * _memory_span_size)));
- + span_t* master = is_master ? span : (span_t*)(pointer_offset(span, -(int32_t)(span->total_spans_or_distance * _memory_span_size)));
- assert(is_master || (span->flags & SPAN_FLAG_SUBSPAN));
- assert(master->flags & SPAN_FLAG_MASTER);
- @@ -623,7 +685,7 @@
- distance = span->total_spans_or_distance;
- //Setup remainder as a subspan
- - span_t* subspan = pointer_offset(span, use_count * _memory_span_size);
- + span_t* subspan = (span_t*)pointer_offset(span, use_count * _memory_span_size);
- subspan->flags = SPAN_FLAG_SUBSPAN;
- subspan->total_spans_or_distance = (uint32_t)(distance + use_count);
- subspan->span_count = (uint32_t)(current_count - use_count);
- @@ -732,7 +794,7 @@
- void* current_cache, *new_cache;
- do {
- current_cache = atomic_load_ptr(&cache->cache);
- - span->prev_span = (void*)((uintptr_t)current_cache & _memory_span_mask);
- + span->prev_span = (span_t*)((uintptr_t)current_cache & _memory_span_mask);
- new_cache = (void*)((uintptr_t)span | ((uintptr_t)atomic_incr32(&cache->counter) & ~_memory_span_mask));
- } while (!atomic_cas_ptr(&cache->cache, new_cache, current_cache));
- }
- @@ -745,7 +807,7 @@
- void* global_span = atomic_load_ptr(&cache->cache);
- span_ptr = (uintptr_t)global_span & _memory_span_mask;
- if (span_ptr) {
- - span_t* span = (void*)span_ptr;
- + span_t* span = (span_t*)span_ptr;
- //By accessing the span ptr before it is swapped out of list we assume that a contending thread
- //does not manage to traverse the span to being unmapped before we access it
- void* new_cache = (void*)((uintptr_t)span->prev_span | ((uintptr_t)atomic_incr32(&cache->counter) & ~_memory_span_mask));
- @@ -762,9 +824,9 @@
- static void
- _memory_cache_finalize(global_cache_t* cache) {
- void* current_cache = atomic_load_ptr(&cache->cache);
- - span_t* span = (void*)((uintptr_t)current_cache & _memory_span_mask);
- + span_t* span = (span_t*)((uintptr_t)current_cache & _memory_span_mask);
- while (span) {
- - span_t* skip_span = (void*)((uintptr_t)span->prev_span & _memory_span_mask);
- + span_t* skip_span = (span_t*)((uintptr_t)span->prev_span & _memory_span_mask);
- atomic_add32(&cache->size, -(int32_t)span->data.list.size);
- _memory_unmap_span_list(span);
- span = skip_span;
- @@ -860,7 +922,7 @@
- if (!heap->spans_reserved) {
- heap->spans_reserved = got_count - span_count;
- heap->span_reserve = subspan;
- - heap->span_reserve_master = pointer_offset(subspan, -(int32_t)(subspan->total_spans_or_distance * _memory_span_size));
- + heap->span_reserve_master = (span_t*)pointer_offset(subspan, -(int32_t)(subspan->total_spans_or_distance * _memory_span_size));
- }
- else {
- _memory_heap_cache_insert(heap, subspan);
- @@ -903,7 +965,7 @@
- //Happy path, we have a span with at least one free block
- span_t* span = heap->active_span[class_idx];
- count_t offset = class_size * active_block->free_list;
- - uint32_t* block = pointer_offset(span, SPAN_HEADER_SIZE + offset);
- + uint32_t* block = (uint32_t*)pointer_offset(span, SPAN_HEADER_SIZE + offset);
- assert(span && (atomic_load32(&span->heap_id) == heap->id));
- if (active_block->free_count == 1) {
- @@ -1020,7 +1082,7 @@
- atomic_thread_fence_acquire();
- do {
- raw_heap = atomic_load_ptr(&_memory_orphan_heaps);
- - heap = (void*)((uintptr_t)raw_heap & ~(uintptr_t)0xFF);
- + heap = (heap_t*)((uintptr_t)raw_heap & ~(uintptr_t)0xFF);
- if (!heap)
- break;
- next_heap = heap->next_orphan;
- @@ -1032,7 +1094,7 @@
- if (!heap) {
- //Map in pages for a new heap
- size_t align_offset = 0;
- - heap = _memory_map((1 + (sizeof(heap_t) >> _memory_page_size_shift)) * _memory_page_size, &align_offset);
- + heap = (heap_t*)_memory_map((1 + (sizeof(heap_t) >> _memory_page_size_shift)) * _memory_page_size, &align_offset);
- if (!heap)
- return heap;
- memset(heap, 0, sizeof(heap_t));
- @@ -1048,7 +1110,7 @@
- //Link in heap in heap ID map
- size_t list_idx = heap->id % HEAP_ARRAY_SIZE;
- do {
- - next_heap = atomic_load_ptr(&_memory_heaps[list_idx]);
- + next_heap = (heap_t*)atomic_load_ptr(&_memory_heaps[list_idx]);
- heap->next_heap = next_heap;
- } while (!atomic_cas_ptr(&_memory_heaps[list_idx], heap, next_heap));
- }
- @@ -1097,7 +1159,7 @@
- void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
- count_t block_offset = (count_t)pointer_diff(p, blocks_start);
- count_t block_idx = block_offset / (count_t)size_class->size;
- - uint32_t* block = pointer_offset(blocks_start, block_idx * size_class->size);
- + uint32_t* block = (uint32_t*)pointer_offset(blocks_start, block_idx * size_class->size);
- *block = block_data->free_list;
- if (block_data->free_list > block_data->first_autolink)
- block_data->first_autolink = block_data->free_list;
- @@ -1121,7 +1183,7 @@
- }
- else { //SPAN_FLAG_SUBSPAN
- uint32_t distance = span->total_spans_or_distance;
- - span_t* master = pointer_offset(span, -(int32_t)(distance * _memory_span_size));
- + span_t* master = (span_t*)pointer_offset(span, -(int32_t)(distance * _memory_span_size));
- heap->span_reserve_master = master;
- assert(master->flags & SPAN_FLAG_MASTER);
- assert(atomic_load32(&master->remaining_spans) >= (int32_t)span->span_count);
- @@ -1143,7 +1205,7 @@
- return;
- do {
- void* next = *(void**)p;
- - span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
- + span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
- _memory_deallocate_to_heap(heap, span, p);
- p = next;
- } while (p);
- @@ -1177,7 +1239,7 @@
- if (size & (_memory_page_size - 1))
- ++num_pages;
- size_t align_offset = 0;
- - span_t* span = _memory_map(num_pages * _memory_page_size, &align_offset);
- + span_t* span = (span_t*)_memory_map(num_pages * _memory_page_size, &align_offset);
- if (!span)
- return span;
- atomic_store32(&span->heap_id, 0);
- @@ -1195,7 +1257,7 @@
- return;
- //Grab the span (always at start of span, using span alignment)
- - span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
- + span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
- int32_t heap_id = atomic_load32(&span->heap_id);
- if (heap_id) {
- heap_t* heap = get_thread_heap();
- @@ -1223,7 +1285,7 @@
- _memory_reallocate(void* p, size_t size, size_t oldsize, unsigned int flags) {
- if (p) {
- //Grab the span using guaranteed span alignment
- - span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
- + span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
- int32_t heap_id = atomic_load32(&span->heap_id);
- if (heap_id) {
- if (span->size_class < SIZE_CLASS_COUNT) {
- @@ -1287,14 +1349,14 @@
- static size_t
- _memory_usable_size(void* p) {
- //Grab the span using guaranteed span alignment
- - span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
- + span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
- int32_t heap_id = atomic_load32(&span->heap_id);
- if (heap_id) {
- //Small/medium block
- if (span->size_class < SIZE_CLASS_COUNT) {
- size_class_t* size_class = _memory_size_class + span->size_class;
- void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
- - return size_class->size - ((size_t)pointer_diff(p, blocks_start) % size_class->size);
- + return size_class->size - (pointer_diff(p, blocks_start) % size_class->size);
- }
- //Large block
- @@ -1335,10 +1397,6 @@
- #else
- # include <sys/mman.h>
- # include <sched.h>
- -# ifdef __FreeBSD__
- -# include <sys/sysctl.h>
- -# define MAP_HUGETLB MAP_ALIGNED_SUPER
- -# endif
- # ifndef MAP_UNINITIALIZED
- # define MAP_UNINITIALIZED 0
- # endif
- @@ -1418,15 +1476,6 @@
- _memory_page_size = huge_page_size;
- _memory_map_granularity = huge_page_size;
- }
- -#elif defined(__FreeBSD__)
- - int rc;
- - size_t sz = sizeof(rc);
- -
- - if (sysctlbyname("vm.pmap.pg_ps_enabled", &rc, &sz, NULL, 0) == 0 && rc == 1) {
- - _memory_huge_pages = 1;
- - _memory_page_size = 2 * 1024 * 1024;
- - _memory_map_granularity = _memory_page_size;
- - }
- #elif defined(__APPLE__)
- _memory_huge_pages = 1;
- _memory_page_size = 2 * 1024 * 1024;
- @@ -1480,15 +1529,17 @@
- _memory_span_release_count = (_memory_span_map_count > 4 ? ((_memory_span_map_count < 64) ? _memory_span_map_count : 64) : 4);
- _memory_span_release_count_large = (_memory_span_release_count > 4 ? (_memory_span_release_count / 2) : 2);
- -#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
- +#if defined(__APPLE__) && ENABLE_PRELOAD
- if (pthread_key_create(&_memory_thread_heap, 0))
- return -1;
- +#elif defined(__HAIKU__) && ENABLE_PRELOAD
- + _memory_thread_heap = tls_allocate();
- #endif
- atomic_store32(&_memory_heap_id, 0);
- atomic_store32(&_memory_orphan_counter, 0);
- -#if ENABLE_STATISTICS
- atomic_store32(&_memory_active_heaps, 0);
- +#if ENABLE_STATISTICS
- atomic_store32(&_reserved_spans, 0);
- atomic_store32(&_mapped_pages, 0);
- atomic_store32(&_mapped_total, 0);
- @@ -1529,15 +1580,12 @@
- atomic_thread_fence_acquire();
- rpmalloc_thread_finalize();
- -
- -#if ENABLE_STATISTICS
- //If you hit this assert, you still have active threads or forgot to finalize some thread(s)
- assert(atomic_load32(&_memory_active_heaps) == 0);
- -#endif
- //Free all thread caches
- for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
- - heap_t* heap = atomic_load_ptr(&_memory_heaps[list_idx]);
- + heap_t* heap = (heap_t*)atomic_load_ptr(&_memory_heaps[list_idx]);
- while (heap) {
- _memory_deallocate_deferred(heap);
- @@ -1576,7 +1624,7 @@
- assert(!atomic_load32(&_mapped_pages_os));
- #endif
- -#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
- +#if defined(__APPLE__) && ENABLE_PRELOAD
- pthread_key_delete(_memory_thread_heap);
- #endif
- }
- @@ -1585,15 +1633,13 @@
- void
- rpmalloc_thread_initialize(void) {
- if (!get_thread_heap()) {
- + atomic_incr32(&_memory_active_heaps);
- heap_t* heap = _memory_allocate_heap();
- - if (heap) {
- #if ENABLE_STATISTICS
- - atomic_incr32(&_memory_active_heaps);
- - heap->thread_to_global = 0;
- - heap->global_to_thread = 0;
- + heap->thread_to_global = 0;
- + heap->global_to_thread = 0;
- #endif
- - set_thread_heap(heap);
- - }
- + set_thread_heap(heap);
- }
- }
- @@ -1631,18 +1677,15 @@
- uintptr_t orphan_counter;
- heap_t* last_heap;
- do {
- - last_heap = atomic_load_ptr(&_memory_orphan_heaps);
- - heap->next_orphan = (void*)((uintptr_t)last_heap & ~(uintptr_t)0xFF);
- + last_heap = (heap_t*)atomic_load_ptr(&_memory_orphan_heaps);
- + heap->next_orphan = (heap_t*)((uintptr_t)last_heap & ~(uintptr_t)0xFF);
- orphan_counter = (uintptr_t)atomic_incr32(&_memory_orphan_counter);
- raw_heap = (void*)((uintptr_t)heap | (orphan_counter & (uintptr_t)0xFF));
- }
- while (!atomic_cas_ptr(&_memory_orphan_heaps, raw_heap, last_heap));
- set_thread_heap(0);
- -
- -#if ENABLE_STATISTICS
- atomic_add32(&_memory_active_heaps, -1);
- -#endif
- }
- int
- @@ -1671,13 +1714,21 @@
- #else
- # if defined(__APPLE__)
- void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, (_memory_huge_pages ? VM_FLAGS_SUPERPAGE_SIZE_2MB : -1), 0);
- +# elif defined(__HAIKU__)
- + void* ptr;
- + area_id area = create_area("heap area", &ptr, B_ANY_ADDRESS,
- + size + padding, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
- + if (area < 0)
- + ptr = MAP_FAILED;
- # elif defined(MAP_HUGETLB)
- void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, (_memory_huge_pages ? MAP_HUGETLB : 0) | MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
- # else
- void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
- # endif
- if ((ptr == MAP_FAILED) || !ptr) {
- +#ifndef __HAIKU__
- assert("Failed to map virtual memory block" == 0);
- +#endif
- return 0;
- }
- #endif
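On Haiku the backing memory comes from create_area() instead of mmap(), and the "Failed to map virtual memory block" assert is compiled out because a failure is reported through a negative area_id and should simply propagate as a NULL return. A stand-alone sketch of the map/unmap pair (illustrative; the area name and the area_for()/delete_area() cleanup are assumptions for this example, not code taken from the diff; note that create_area() expects a size that is a multiple of B_PAGE_SIZE):

/* Illustrative sketch, not part of the patch: mapping and releasing a block
   through Haiku areas, mirroring the create_area() call added above. */
#include <OS.h>
#include <stdio.h>
#include <string.h>

int main(void) {
	void* ptr = NULL;
	size_t size = 16 * B_PAGE_SIZE;           /* create_area() wants page multiples */

	area_id area = create_area("rpmalloc test area", &ptr, B_ANY_ADDRESS,
		size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (area < 0) {
		fprintf(stderr, "create_area failed: %s\n", strerror(area));
		return 1;
	}
	printf("mapped %zu bytes at %p (area %ld)\n", size, ptr, (long)area);

	/* the allocator only keeps the pointer, so teardown has to look the area up */
	delete_area(area_for(ptr));
	return 0;
}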
- @@ -1722,12 +1773,14 @@
- }
- }
- else {
- +#ifndef __HAIKU__
- #if defined(POSIX_MADV_FREE)
- if (posix_madvise(address, size, POSIX_MADV_FREE))
- #endif
- if (posix_madvise(address, size, POSIX_MADV_DONTNEED)) {
- assert("Failed to madvise virtual memory block as free" == 0);
- }
- +#endif
- }
- #endif
- #endif
- @@ -1886,11 +1939,9 @@
- align_offset = 0;
- mapped_size = num_pages * _memory_page_size;
- - span = _memory_map(mapped_size, &align_offset);
- - if (!span) {
- - errno = ENOMEM;
- + span = (span_t*)_memory_map(mapped_size, &align_offset);
- + if (span == NULL)
- return 0;
- - }
- ptr = pointer_offset(span, SPAN_HEADER_SIZE);
- if ((uintptr_t)ptr & align_mask)
- @@ -1949,7 +2000,7 @@
- void* p = atomic_load_ptr(&heap->defer_deallocate);
- while (p) {
- void* next = *(void**)p;
- - span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
- + span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
- stats->deferred += _memory_size_class[span->size_class].size;
- p = next;
- }
- @@ -1987,3 +2038,8 @@
- }
- #endif
- }
- +
- +#ifdef __HAIKU__
- +} // namespace rpmalloc
- +} // namespace BPrivate
- +#endif
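Because the whole translation unit is wrapped in BPrivate::rpmalloc for the C++ build, callers reach the allocator through qualified names. A hypothetical caller-side sketch (the declarations mirror rpmalloc's public entry points; their linkage and placement in a hand-written header like this are assumptions of the example):

// Illustrative sketch, not part of the patch: calling the allocator through
// the BPrivate::rpmalloc namespace added above.
#include <cstddef>

namespace BPrivate {
namespace rpmalloc {
	int rpmalloc_initialize(void);
	void rpmalloc_thread_initialize(void);
	void* rpmalloc(size_t size);
	void rpfree(void* ptr);
}
}

void*
example_alloc(size_t size)
{
	// cheap to repeat: as the rpmalloc_thread_initialize() hunk above shows,
	// a heap is only built when the calling thread does not have one yet
	BPrivate::rpmalloc::rpmalloc_thread_initialize();
	return BPrivate::rpmalloc::rpmalloc(size);
}

void
example_free(void* ptr)
{
	BPrivate::rpmalloc::rpfree(ptr);
}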