$ diff -Naur rpmalloc.c rpmalloc.cpp
--- rpmalloc.c 2019-05-24 16:50:36.934851263 -0500
+++ rpmalloc.cpp 2019-05-21 15:59:57.677746534 -0500
@@ -1,9 +1,9 @@
-/* rpmalloc.c - Memory allocator - Public Domain - 2016 Mattias Jansson
+/* rpmalloc.c - Memory allocator - Public Domain - 2016 Mattias Jansson / Rampant Pixels
*
* This library provides a cross-platform lock free thread caching malloc implementation in C11.
* The latest source code is always available at
*
- * https://github.com/mjansson/rpmalloc
+ * https://github.com/rampantpixels/rpmalloc
*
* This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
*
@@ -26,20 +26,32 @@
#endif
#ifndef ENABLE_VALIDATE_ARGS
//! Enable validation of args to public entry points
+#if defined(__HAIKU__) && __GNUC__ > 2
+#define ENABLE_VALIDATE_ARGS 1
+#else
#define ENABLE_VALIDATE_ARGS 0
#endif
+#endif
#ifndef ENABLE_STATISTICS
//! Enable statistics collection
#define ENABLE_STATISTICS 0
#endif
#ifndef ENABLE_ASSERTS
//! Enable asserts
+#ifdef __HAIKU__
+#define ENABLE_ASSERTS 1
+#else
#define ENABLE_ASSERTS 0
#endif
+#endif
#ifndef ENABLE_PRELOAD
//! Support preloading
+#ifdef __HAIKU__
+#define ENABLE_PRELOAD 1
+#else
#define ENABLE_PRELOAD 0
#endif
+#endif
#ifndef DISABLE_UNMAP
//! Disable unmapping memory pages
#define DISABLE_UNMAP 0
@@ -60,9 +72,13 @@
#endif
#if !ENABLE_UNLIMITED_THREAD_CACHE
//! Multiplier for thread cache (cache limit will be span release count multiplied by this value)
+#ifdef __HAIKU__
+#define THREAD_CACHE_MULTIPLIER 4
+#else
#define THREAD_CACHE_MULTIPLIER 16
#endif
#endif
+#endif

#if ENABLE_GLOBAL_CACHE && ENABLE_THREAD_CACHE
#ifndef ENABLE_UNLIMITED_GLOBAL_CACHE
@@ -71,8 +87,12 @@
#endif
#if !ENABLE_UNLIMITED_GLOBAL_CACHE
//! Multiplier for global cache (cache limit will be span release count multiplied by this value)
+#ifdef __HAIKU__
+#define GLOBAL_CACHE_MULTIPLIER 32
+#else
#define GLOBAL_CACHE_MULTIPLIER 64
#endif
+#endif
#else
# undef ENABLE_GLOBAL_CACHE
# define ENABLE_GLOBAL_CACHE 0
@@ -96,6 +116,13 @@
# define _Static_assert static_assert
#else
# define FORCEINLINE inline __attribute__((__always_inline__))
+# if defined(__cplusplus)
+#if __GNUC__ == 2
+#define _Static_assert(...)
+#else
+# define _Static_assert static_assert
+#endif
+# endif
#endif
#if PLATFORM_WINDOWS
# if ENABLE_VALIDATE_ARGS
@@ -111,16 +138,14 @@
# endif
# if defined(__HAIKU__)
# include <OS.h>
-# include <pthread.h>
+# include <TLS.h>
# endif
#endif

-#ifndef ARCH_64BIT
-# if defined(__LLP64__) || defined(__LP64__) || defined(_WIN64)
-# define ARCH_64BIT 1
-# else
-# define ARCH_64BIT 0
-# endif
+#if defined(__LLP64__) || defined(__LP64__)
+# define ARCH_64BIT 1
+#else
+# define ARCH_64BIT 0
#endif

#include <stdint.h>
@@ -159,6 +184,32 @@
static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return (_InterlockedCompareExchange((volatile long*)dst, (long)val, (long)ref) == (long)ref) ? 1 : 0; }
#endif

+#elif defined(__HAIKU__)
+
+#include <OS.h>
+
+typedef int32 atomic32_t;
+typedef int64 atomic64_t;
+typedef intptr_t atomicptr_t;
+
+#define atomic_thread_fence_acquire()
+#define atomic_thread_fence_release()
+
+static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return atomic_get(src); }
+static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { atomic_set(dst, val); }
+static FORCEINLINE int32_t atomic_incr32(atomic32_t* val) { return atomic_add(val, 1) + 1; }
+static FORCEINLINE int32_t atomic_add32(atomic32_t* val, int32_t add) { return atomic_add(val, add) + add; }
+
+#if ARCH_64BIT
+static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)atomic_get64(src); }
+static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { atomic_set64(dst, (atomicptr_t)val); }
+static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return atomic_test_and_set64(dst, (int64)val, (int64)ref) == (int64)ref; }
+#else
+static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)atomic_get(src); }
+static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { atomic_set(dst, (atomicptr_t)val); }
+static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return atomic_test_and_set(dst, (int32)val, (int32)ref) == (int32)ref; }
+#endif
+
#else

#include <stdatomic.h>
@@ -180,6 +231,11 @@

#endif

+#ifdef __HAIKU__
+namespace BPrivate {
+namespace rpmalloc {
+#endif
+
/// Preconfigured limits and sizes
//! Granularity of a small allocation block
#define SMALL_GRANULARITY 32
@@ -391,9 +447,9 @@
static atomicptr_t _memory_orphan_heaps;
//! Running orphan counter to avoid ABA issues in linked list
static atomic32_t _memory_orphan_counter;
-#if ENABLE_STATISTICS
//! Active heap count
static atomic32_t _memory_active_heaps;
+#if ENABLE_STATISTICS
//! Total number of currently mapped memory pages
static atomic32_t _mapped_pages;
//! Total number of currently lost spans
@@ -407,8 +463,10 @@
#endif

//! Current thread heap
-#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
+#if defined(__APPLE__) && ENABLE_PRELOAD
static pthread_key_t _memory_thread_heap;
+#elif defined(__HAIKU__) && ENABLE_PRELOAD
+static int32 _memory_thread_heap;
#else
# ifdef _MSC_VER
# define _Thread_local __declspec(thread)
@@ -425,8 +483,10 @@
//! Get the current thread heap
static FORCEINLINE heap_t*
get_thread_heap(void) {
-#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
- return pthread_getspecific(_memory_thread_heap);
+#if defined(__APPLE__) && ENABLE_PRELOAD
+ return (heap_t*)pthread_getspecific(_memory_thread_heap);
+#elif defined(__HAIKU__) && ENABLE_PRELOAD
+ return (heap_t*)tls_get(_memory_thread_heap);
#else
return _memory_thread_heap;
#endif
@@ -435,8 +495,10 @@
//! Set the current thread heap
static void
set_thread_heap(heap_t* heap) {
-#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
+#if defined(__APPLE__) && ENABLE_PRELOAD
pthread_setspecific(_memory_thread_heap, heap);
+#elif defined(__HAIKU__) && ENABLE_PRELOAD
+ tls_set(_memory_thread_heap, heap);
#else
_memory_thread_heap = heap;
#endif
@@ -458,7 +520,7 @@
static heap_t*
_memory_heap_lookup(int32_t id) {
uint32_t list_idx = id % HEAP_ARRAY_SIZE;
- heap_t* heap = atomic_load_ptr(&_memory_heaps[list_idx]);
+ heap_t* heap = (heap_t*)atomic_load_ptr(&_memory_heaps[list_idx]);
while (heap && (heap->id != id))
heap = heap->next_heap;
return heap;
@@ -503,7 +565,7 @@
_memory_map_spans(heap_t* heap, size_t span_count) {
if (span_count <= heap->spans_reserved) {
span_t* span = heap->span_reserve;
- heap->span_reserve = pointer_offset(span, span_count * _memory_span_size);
+ heap->span_reserve = (span_t*)pointer_offset(span, span_count * _memory_span_size);
heap->spans_reserved -= span_count;
if (span == heap->span_reserve_master) {
assert(span->flags & SPAN_FLAG_MASTER);
@@ -525,7 +587,7 @@
if ((_memory_page_size > _memory_span_size) && ((request_spans * _memory_span_size) % _memory_page_size))
request_spans += _memory_span_map_count - (request_spans % _memory_span_map_count);
size_t align_offset = 0;
- span_t* span = _memory_map(request_spans * _memory_span_size, &align_offset);
+ span_t* span = (span_t*)_memory_map(request_spans * _memory_span_size, &align_offset);
if (!span)
return span;
span->align_offset = (uint32_t)align_offset;
@@ -551,7 +613,7 @@
_memory_heap_cache_insert(heap, prev_span);
}
heap->span_reserve_master = span;
- heap->span_reserve = pointer_offset(span, span_count * _memory_span_size);
+ heap->span_reserve = (span_t*)pointer_offset(span, span_count * _memory_span_size);
heap->spans_reserved = request_spans - span_count;
}
return span;
@@ -565,7 +627,7 @@
assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN));

int is_master = !!(span->flags & SPAN_FLAG_MASTER);
- span_t* master = is_master ? span : (pointer_offset(span, -(int32_t)(span->total_spans_or_distance * _memory_span_size)));
+ span_t* master = is_master ? span : (span_t*)(pointer_offset(span, -(int32_t)(span->total_spans_or_distance * _memory_span_size)));

assert(is_master || (span->flags & SPAN_FLAG_SUBSPAN));
assert(master->flags & SPAN_FLAG_MASTER);
@@ -623,7 +685,7 @@
distance = span->total_spans_or_distance;

//Setup remainder as a subspan
- span_t* subspan = pointer_offset(span, use_count * _memory_span_size);
+ span_t* subspan = (span_t*)pointer_offset(span, use_count * _memory_span_size);
subspan->flags = SPAN_FLAG_SUBSPAN;
subspan->total_spans_or_distance = (uint32_t)(distance + use_count);
subspan->span_count = (uint32_t)(current_count - use_count);
@@ -732,7 +794,7 @@
void* current_cache, *new_cache;
do {
current_cache = atomic_load_ptr(&cache->cache);
- span->prev_span = (void*)((uintptr_t)current_cache & _memory_span_mask);
+ span->prev_span = (span_t*)((uintptr_t)current_cache & _memory_span_mask);
new_cache = (void*)((uintptr_t)span | ((uintptr_t)atomic_incr32(&cache->counter) & ~_memory_span_mask));
} while (!atomic_cas_ptr(&cache->cache, new_cache, current_cache));
}
@@ -745,7 +807,7 @@
void* global_span = atomic_load_ptr(&cache->cache);
span_ptr = (uintptr_t)global_span & _memory_span_mask;
if (span_ptr) {
- span_t* span = (void*)span_ptr;
+ span_t* span = (span_t*)span_ptr;
//By accessing the span ptr before it is swapped out of list we assume that a contending thread
//does not manage to traverse the span to being unmapped before we access it
void* new_cache = (void*)((uintptr_t)span->prev_span | ((uintptr_t)atomic_incr32(&cache->counter) & ~_memory_span_mask));
@@ -762,9 +824,9 @@
static void
_memory_cache_finalize(global_cache_t* cache) {
void* current_cache = atomic_load_ptr(&cache->cache);
- span_t* span = (void*)((uintptr_t)current_cache & _memory_span_mask);
+ span_t* span = (span_t*)((uintptr_t)current_cache & _memory_span_mask);
while (span) {
- span_t* skip_span = (void*)((uintptr_t)span->prev_span & _memory_span_mask);
+ span_t* skip_span = (span_t*)((uintptr_t)span->prev_span & _memory_span_mask);
atomic_add32(&cache->size, -(int32_t)span->data.list.size);
_memory_unmap_span_list(span);
span = skip_span;
@@ -860,7 +922,7 @@
if (!heap->spans_reserved) {
heap->spans_reserved = got_count - span_count;
heap->span_reserve = subspan;
- heap->span_reserve_master = pointer_offset(subspan, -(int32_t)(subspan->total_spans_or_distance * _memory_span_size));
+ heap->span_reserve_master = (span_t*)pointer_offset(subspan, -(int32_t)(subspan->total_spans_or_distance * _memory_span_size));
}
else {
_memory_heap_cache_insert(heap, subspan);
@@ -903,7 +965,7 @@
//Happy path, we have a span with at least one free block
span_t* span = heap->active_span[class_idx];
count_t offset = class_size * active_block->free_list;
- uint32_t* block = pointer_offset(span, SPAN_HEADER_SIZE + offset);
+ uint32_t* block = (uint32_t*)pointer_offset(span, SPAN_HEADER_SIZE + offset);
assert(span && (atomic_load32(&span->heap_id) == heap->id));

if (active_block->free_count == 1) {
@@ -1020,7 +1082,7 @@
atomic_thread_fence_acquire();
do {
raw_heap = atomic_load_ptr(&_memory_orphan_heaps);
- heap = (void*)((uintptr_t)raw_heap & ~(uintptr_t)0xFF);
+ heap = (heap_t*)((uintptr_t)raw_heap & ~(uintptr_t)0xFF);
if (!heap)
break;
next_heap = heap->next_orphan;
@@ -1032,7 +1094,7 @@
if (!heap) {
//Map in pages for a new heap
size_t align_offset = 0;
- heap = _memory_map((1 + (sizeof(heap_t) >> _memory_page_size_shift)) * _memory_page_size, &align_offset);
+ heap = (heap_t*)_memory_map((1 + (sizeof(heap_t) >> _memory_page_size_shift)) * _memory_page_size, &align_offset);
if (!heap)
return heap;
memset(heap, 0, sizeof(heap_t));
@@ -1048,7 +1110,7 @@
//Link in heap in heap ID map
size_t list_idx = heap->id % HEAP_ARRAY_SIZE;
do {
- next_heap = atomic_load_ptr(&_memory_heaps[list_idx]);
+ next_heap = (heap_t*)atomic_load_ptr(&_memory_heaps[list_idx]);
heap->next_heap = next_heap;
} while (!atomic_cas_ptr(&_memory_heaps[list_idx], heap, next_heap));
}
@@ -1097,7 +1159,7 @@
void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
count_t block_offset = (count_t)pointer_diff(p, blocks_start);
count_t block_idx = block_offset / (count_t)size_class->size;
- uint32_t* block = pointer_offset(blocks_start, block_idx * size_class->size);
+ uint32_t* block = (uint32_t*)pointer_offset(blocks_start, block_idx * size_class->size);
*block = block_data->free_list;
if (block_data->free_list > block_data->first_autolink)
block_data->first_autolink = block_data->free_list;
@@ -1121,7 +1183,7 @@
}
else { //SPAN_FLAG_SUBSPAN
uint32_t distance = span->total_spans_or_distance;
- span_t* master = pointer_offset(span, -(int32_t)(distance * _memory_span_size));
+ span_t* master = (span_t*)pointer_offset(span, -(int32_t)(distance * _memory_span_size));
heap->span_reserve_master = master;
assert(master->flags & SPAN_FLAG_MASTER);
assert(atomic_load32(&master->remaining_spans) >= (int32_t)span->span_count);
@@ -1143,7 +1205,7 @@
return;
do {
void* next = *(void**)p;
- span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
+ span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
_memory_deallocate_to_heap(heap, span, p);
p = next;
} while (p);
@@ -1177,7 +1239,7 @@
if (size & (_memory_page_size - 1))
++num_pages;
size_t align_offset = 0;
- span_t* span = _memory_map(num_pages * _memory_page_size, &align_offset);
+ span_t* span = (span_t*)_memory_map(num_pages * _memory_page_size, &align_offset);
if (!span)
return span;
atomic_store32(&span->heap_id, 0);
@@ -1195,7 +1257,7 @@
return;

//Grab the span (always at start of span, using span alignment)
- span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
+ span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
int32_t heap_id = atomic_load32(&span->heap_id);
if (heap_id) {
heap_t* heap = get_thread_heap();
@@ -1223,7 +1285,7 @@
_memory_reallocate(void* p, size_t size, size_t oldsize, unsigned int flags) {
if (p) {
//Grab the span using guaranteed span alignment
- span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
+ span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
int32_t heap_id = atomic_load32(&span->heap_id);
if (heap_id) {
if (span->size_class < SIZE_CLASS_COUNT) {
@@ -1287,14 +1349,14 @@
static size_t
_memory_usable_size(void* p) {
//Grab the span using guaranteed span alignment
- span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
+ span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
int32_t heap_id = atomic_load32(&span->heap_id);
if (heap_id) {
//Small/medium block
if (span->size_class < SIZE_CLASS_COUNT) {
size_class_t* size_class = _memory_size_class + span->size_class;
void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
- return size_class->size - ((size_t)pointer_diff(p, blocks_start) % size_class->size);
+ return size_class->size - (pointer_diff(p, blocks_start) % size_class->size);
}

//Large block
@@ -1335,10 +1397,6 @@
#else
# include <sys/mman.h>
# include <sched.h>
-# ifdef __FreeBSD__
-# include <sys/sysctl.h>
-# define MAP_HUGETLB MAP_ALIGNED_SUPER
-# endif
# ifndef MAP_UNINITIALIZED
# define MAP_UNINITIALIZED 0
# endif
@@ -1418,15 +1476,6 @@
_memory_page_size = huge_page_size;
_memory_map_granularity = huge_page_size;
}
-#elif defined(__FreeBSD__)
- int rc;
- size_t sz = sizeof(rc);
-
- if (sysctlbyname("vm.pmap.pg_ps_enabled", &rc, &sz, NULL, 0) == 0 && rc == 1) {
- _memory_huge_pages = 1;
- _memory_page_size = 2 * 1024 * 1024;
- _memory_map_granularity = _memory_page_size;
- }
#elif defined(__APPLE__)
_memory_huge_pages = 1;
_memory_page_size = 2 * 1024 * 1024;
@@ -1480,15 +1529,17 @@
_memory_span_release_count = (_memory_span_map_count > 4 ? ((_memory_span_map_count < 64) ? _memory_span_map_count : 64) : 4);
_memory_span_release_count_large = (_memory_span_release_count > 4 ? (_memory_span_release_count / 2) : 2);

-#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
+#if defined(__APPLE__) && ENABLE_PRELOAD
if (pthread_key_create(&_memory_thread_heap, 0))
return -1;
+#elif defined(__HAIKU__) && ENABLE_PRELOAD
+ _memory_thread_heap = tls_allocate();
#endif

atomic_store32(&_memory_heap_id, 0);
atomic_store32(&_memory_orphan_counter, 0);
-#if ENABLE_STATISTICS
atomic_store32(&_memory_active_heaps, 0);
+#if ENABLE_STATISTICS
atomic_store32(&_reserved_spans, 0);
atomic_store32(&_mapped_pages, 0);
atomic_store32(&_mapped_total, 0);
@@ -1529,15 +1580,12 @@
atomic_thread_fence_acquire();

rpmalloc_thread_finalize();
-
-#if ENABLE_STATISTICS
//If you hit this assert, you still have active threads or forgot to finalize some thread(s)
assert(atomic_load32(&_memory_active_heaps) == 0);
-#endif

//Free all thread caches
for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
- heap_t* heap = atomic_load_ptr(&_memory_heaps[list_idx]);
+ heap_t* heap = (heap_t*)atomic_load_ptr(&_memory_heaps[list_idx]);
while (heap) {
_memory_deallocate_deferred(heap);

@@ -1576,7 +1624,7 @@
assert(!atomic_load32(&_mapped_pages_os));
#endif

-#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
+#if defined(__APPLE__) && ENABLE_PRELOAD
pthread_key_delete(_memory_thread_heap);
#endif
}
@@ -1585,15 +1633,13 @@
void
rpmalloc_thread_initialize(void) {
if (!get_thread_heap()) {
+ atomic_incr32(&_memory_active_heaps);
heap_t* heap = _memory_allocate_heap();
- if (heap) {
#if ENABLE_STATISTICS
- atomic_incr32(&_memory_active_heaps);
- heap->thread_to_global = 0;
- heap->global_to_thread = 0;
+ heap->thread_to_global = 0;
+ heap->global_to_thread = 0;
#endif
- set_thread_heap(heap);
- }
+ set_thread_heap(heap);
}
}

@@ -1631,18 +1677,15 @@
uintptr_t orphan_counter;
heap_t* last_heap;
do {
- last_heap = atomic_load_ptr(&_memory_orphan_heaps);
- heap->next_orphan = (void*)((uintptr_t)last_heap & ~(uintptr_t)0xFF);
+ last_heap = (heap_t*)atomic_load_ptr(&_memory_orphan_heaps);
+ heap->next_orphan = (heap_t*)((uintptr_t)last_heap & ~(uintptr_t)0xFF);
orphan_counter = (uintptr_t)atomic_incr32(&_memory_orphan_counter);
raw_heap = (void*)((uintptr_t)heap | (orphan_counter & (uintptr_t)0xFF));
}
while (!atomic_cas_ptr(&_memory_orphan_heaps, raw_heap, last_heap));

set_thread_heap(0);
-
-#if ENABLE_STATISTICS
atomic_add32(&_memory_active_heaps, -1);
-#endif
}

int
@@ -1671,13 +1714,21 @@
#else
# if defined(__APPLE__)
void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, (_memory_huge_pages ? VM_FLAGS_SUPERPAGE_SIZE_2MB : -1), 0);
+# elif defined(__HAIKU__)
+ void* ptr;
+ area_id area = create_area("heap area", &ptr, B_ANY_ADDRESS,
+ size + padding, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
+ if (area < 0)
+ ptr = MAP_FAILED;
# elif defined(MAP_HUGETLB)
void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, (_memory_huge_pages ? MAP_HUGETLB : 0) | MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
# else
void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
# endif
if ((ptr == MAP_FAILED) || !ptr) {
+#ifndef __HAIKU__
assert("Failed to map virtual memory block" == 0);
+#endif
return 0;
}
#endif
@@ -1722,12 +1773,14 @@
}
}
else {
+#ifndef __HAIKU__
#if defined(POSIX_MADV_FREE)
if (posix_madvise(address, size, POSIX_MADV_FREE))
#endif
if (posix_madvise(address, size, POSIX_MADV_DONTNEED)) {
assert("Failed to madvise virtual memory block as free" == 0);
}
+#endif
}
#endif
#endif
@@ -1886,11 +1939,9 @@
align_offset = 0;
mapped_size = num_pages * _memory_page_size;

- span = _memory_map(mapped_size, &align_offset);
- if (!span) {
- errno = ENOMEM;
+ span = (span_t*)_memory_map(mapped_size, &align_offset);
+ if (span == NULL)
return 0;
- }
ptr = pointer_offset(span, SPAN_HEADER_SIZE);

if ((uintptr_t)ptr & align_mask)
@@ -1949,7 +2000,7 @@
void* p = atomic_load_ptr(&heap->defer_deallocate);
while (p) {
void* next = *(void**)p;
- span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
+ span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
stats->deferred += _memory_size_class[span->size_class].size;
p = next;
}
@@ -1987,3 +2038,8 @@
}
#endif
}
+
+#ifdef __HAIKU__
+} // namespace rpmalloc
+} // namespace BPrivate
+#endif
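
A note on the Haiku atomics shim added in the @@ -159,6 +184,32 @@ hunk: the kernel atomics pulled in via <OS.h> (atomic_get, atomic_add, atomic_test_and_set, and their 64-bit variants) return the previous value. That is why the shim's atomic_incr32()/atomic_add32() add the delta back before returning, and why atomic_cas_ptr() treats the exchange as successful only when the returned old value equals the expected reference. A minimal standalone check of that convention, compiled on Haiku; the counter and test values are arbitrary and not taken from the patch:

/* Haiku kernel atomics return the *previous* value, which is why the
   patch adds the increment back and compares the CAS return value
   against the expected old value. */
#include <OS.h>
#include <stdio.h>

int main(void) {
	int32 counter = 5;

	int32 before = atomic_add(&counter, 1);   /* returns 5, counter is now 6 */
	printf("before=%d after=%d\n", (int)before, (int)atomic_get(&counter));

	/* Swap in 10 only if counter still equals 6; success iff the returned
	   old value matches the expected one. */
	int32 old = atomic_test_and_set(&counter, 10, 6);
	printf("cas %s, counter=%d\n", (old == 6) ? "succeeded" : "failed",
		(int)atomic_get(&counter));
	return 0;
}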
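
The patch also swaps the pthread_key_t thread-local storage used on __APPLE__ for Haiku's TLS API from <TLS.h> whenever ENABLE_PRELOAD is set (tls_allocate/tls_set/tls_get in place of pthread_key_create/pthread_setspecific/pthread_getspecific). A minimal sketch of that API in isolation; the slot variable and the placeholder heap type below are illustrative, not names from the patch:

/* Sketch of the <TLS.h> pattern the patch switches to on Haiku. */
#include <TLS.h>
#include <stdio.h>

typedef struct { int id; } heap_t;   /* stand-in for rpmalloc's heap_t */

static int32 heap_slot;              /* slot index returned by tls_allocate() */

int main(void) {
	heap_slot = tls_allocate();      /* once per process, like pthread_key_create() */

	heap_t heap = { 42 };
	tls_set(heap_slot, &heap);       /* like pthread_setspecific() */

	heap_t* current = (heap_t*)tls_get(heap_slot);  /* like pthread_getspecific() */
	printf("heap id: %d\n", current->id);
	return 0;
}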
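
Finally, the mapping path in the @@ -1671,13 +1714,21 @@ hunk replaces anonymous mmap() with a Haiku area. The sketch below shows the same create_area() call pattern outside the allocator. The unmap side is not visible in this diff, so the delete_area(area_for(ptr)) pairing here is an assumption about a typical Haiku counterpart, not something taken from the patch; the defensive rounding reflects that create_area() expects a size that is a multiple of B_PAGE_SIZE.

/* Sketch of the create_area()-based mapping added for Haiku. The unmap
   helper is an assumed counterpart; the patch only shows the map side. */
#include <OS.h>
#include <stdio.h>
#include <string.h>

static void* area_map(size_t size) {
	void* ptr = NULL;
	/* Round up defensively: create_area() wants a multiple of B_PAGE_SIZE. */
	size = (size + B_PAGE_SIZE - 1) & ~(size_t)(B_PAGE_SIZE - 1);
	area_id area = create_area("heap area", &ptr, B_ANY_ADDRESS,
		size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	return (area < 0) ? NULL : ptr;
}

static void area_unmap(void* ptr) {
	area_id area = area_for(ptr);   /* look the area up by an address inside it */
	if (area >= 0)
		delete_area(area);
}

int main(void) {
	char* block = (char*)area_map(128 * 1024);
	if (!block)
		return 1;
	memset(block, 0, 128 * 1024);
	printf("mapped 128 KiB at %p\n", (void*)block);
	area_unmap(block);
	return 0;
}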