Guest User

Untitled

a guest
Feb 16th, 2019
92
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 12.76 KB | None | 0 0
  1. diff --git a/byterun/compact.c b/byterun/compact.c
  2. index bf80301..fa83e2d 100644
  3. --- a/byterun/compact.c
  4. +++ b/byterun/compact.c
  5. @@ -58,7 +58,7 @@ static void invert_pointer_at (word *p)
  6.  
  7. /* Use Ecolor (q) == 0 instead of Is_block (q) because q could be an
  8. inverted pointer for an infix header (with Ecolor == 2). */
  9. - if (Ecolor (q) == 0 && (Classify_addr (q) & In_heap)){
  10. + if (Ecolor (q) == 0 && Is_in_heap(q)){
  11. switch (Ecolor (Hd_val (q))){
  12. case 0:
  13. case 3: /* Pointer or header: insert in inverted list. */
  14. @@ -440,10 +440,14 @@ void caml_compact_heap (void)
  15. recognized as free by the recompaction. */
  16. caml_make_free_blocks ((value *) chunk,
  17. Wsize_bsize (Chunk_size (chunk)), 0, Caml_blue);
  18. +
  19. +#ifndef CAML_CONTIGUOUS_HEAP_LOG
  20. if (caml_page_table_add (In_heap, chunk, chunk + Chunk_size (chunk)) != 0){
  21. caml_free_for_heap (chunk);
  22. return;
  23. }
  24. +#endif
  25. +
  26. Chunk_next (chunk) = caml_heap_start;
  27. caml_heap_start = chunk;
  28. ++ caml_stat_heap_chunks;
  29. diff --git a/byterun/config.h b/byterun/config.h
  30. index 24f4e59..ef66d82 100644
  31. --- a/byterun/config.h
  32. +++ b/byterun/config.h
  33. @@ -17,6 +17,7 @@
  34. /* <include ../config/m.h> */
  35. /* <include ../config/s.h> */
  36. /* <private> */
  37. +#define CAML_CONTIGUOUS_HEAP_LOG 40
  38. #include "../config/m.h"
  39. #include "../config/s.h"
  40. /* </private> */
  41. @@ -143,12 +144,14 @@ typedef struct { uint32 l, h; } uint64, int64;
  42.  
  43. /* Default size increment when growing the heap. (words)
  44. Must be a multiple of [Page_size / sizeof (value)].
  45. + This should be a power of two minus the size of a page to avoid
  46. + wasting virtual address space.
  47. (Approx 512 Kb for a 32-bit platform, 1 Mb for a 64-bit platform.) */
  48. -#define Heap_chunk_def (31 * Page_size)
  49. +#define Heap_chunk_def (255 * Page_size / sizeof (value))
  50.  
  51. /* Default initial size of the major heap (words);
  52. same constraints as for Heap_chunk_def. */
  53. -#define Init_heap_def (31 * Page_size)
  54. +#define Init_heap_def (255 * Page_size / sizeof(value))
  55.  
  56.  
  57. /* Default speed setting for the major GC. The heap will grow until
  58. diff --git a/byterun/gc_ctrl.c b/byterun/gc_ctrl.c
  59. index 07cfc26..9c5ac63 100644
  60. --- a/byterun/gc_ctrl.c
  61. +++ b/byterun/gc_ctrl.c
  62. @@ -481,6 +481,10 @@ void caml_init_gc (uintnat minor_size, uintnat major_size,
  63. {
  64. uintnat major_heap_size = Bsize_wsize (norm_heapincr (major_size));
  65.  
  66. +#ifdef CAML_CONTIGUOUS_HEAP_LOG
  67. + if (caml_block_init())
  68. + caml_fatal_error ("OCaml runtime error: cannot initialize memory manager\n");
  69. +#endif
  70. if (caml_page_table_initialize(Bsize_wsize(minor_size) + major_heap_size)){
  71. caml_fatal_error ("OCaml runtime error: cannot initialize page table\n");
  72. }
  73. diff --git a/byterun/major_gc.c b/byterun/major_gc.c
  74. index 14a248f..71a48e6 100644
  75. --- a/byterun/major_gc.c
  76. +++ b/byterun/major_gc.c
  77. @@ -487,11 +487,13 @@ void caml_init_major_heap (asize_t heap_size)
  78. Chunk_next (caml_heap_start) = NULL;
  79. caml_stat_heap_chunks = 1;
  80.  
  81. +#ifndef CAML_CONTIGUOUS_HEAP_LOG
  82. if (caml_page_table_add(In_heap, caml_heap_start,
  83. caml_heap_start + caml_stat_heap_size) != 0) {
  84. caml_fatal_error ("Fatal error: not enough memory "
  85. "for the initial page table.\n");
  86. }
  87. +#endif
  88.  
  89. caml_fl_init_merge ();
  90. caml_make_free_blocks ((value *) caml_heap_start,
  91. diff --git a/byterun/memory.c b/byterun/memory.c
  92. index e18bde4..d199053 100644
  93. --- a/byterun/memory.c
  94. +++ b/byterun/memory.c
  95. @@ -216,6 +216,195 @@ int caml_page_table_remove(int kind, void * start, void * end)
  96. return 0;
  97. }
  98.  
  99. +#ifdef CAML_CONTIGUOUS_HEAP_LOG
  100. +
  101. +#include <sys/mman.h>
  102. +
  103. +char* caml_contiguous_heap = NULL;
  104. +
  105. +struct block_descr {
  106. + struct block_descr *left, *right;
  107. + unsigned sizes;
  108. +};
  109. +static struct block_descr *root_descr = NULL;
  110. +
  111. +int caml_block_init()
  112. +{
  113. + Assert(CAML_CONTIGUOUS_HEAP_LOG-Page_log < 32);
  114. +
  115. + root_descr = (struct block_descr*)malloc(sizeof(struct block_descr));
  116. + if(root_descr == NULL)
  117. + return -1;
  118. + root_descr -> left = NULL;
  119. + root_descr -> right = NULL;
  120. + root_descr -> sizes = 1U<<(CAML_CONTIGUOUS_HEAP_LOG-Page_log);
  121. +
  122. + caml_contiguous_heap =
  123. + (char*)mmap(NULL, 1LL << CAML_CONTIGUOUS_HEAP_LOG, PROT_NONE,
  124. + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  125. + if(caml_contiguous_heap == MAP_FAILED) {
  126. + free(root_descr);
  127. + root_descr = NULL;
  128. + return -1;
  129. + }
  130. +
  131. + return 0;
  132. +}
  133. +
  134. +static struct block_descr*
  135. + block_descrs[CAML_CONTIGUOUS_HEAP_LOG - Page_log + 1];
  136. +
  137. +char* caml_block_alloc(size_t size)
  138. +{
  139. + int log_alloc, log_block, log_cur;
  140. + struct block_descr **bd_pt;
  141. + char* block;
  142. +
  143. + Assert(caml_contiguous_heap != NULL);
  144. +
  145. + if(size > (1ULL << CAML_CONTIGUOUS_HEAP_LOG))
  146. + return NULL;
  147. +
  148. + Assert((size & (Page_size-1)) == 0);
  149. +
  150. + for(log_alloc = Page_log; (1ULL << log_alloc) < size; log_alloc++);
  151. +
  152. + for(log_block = log_alloc;
  153. + log_block <= CAML_CONTIGUOUS_HEAP_LOG &&
  154. + ((1U << (log_block-Page_log)) & root_descr->sizes) == 0;
  155. + log_block++);
  156. + if(log_block > CAML_CONTIGUOUS_HEAP_LOG)
  157. + return NULL;
  158. +
  159. + block_descrs[0] = root_descr;
  160. + bd_pt = block_descrs;
  161. + log_cur = CAML_CONTIGUOUS_HEAP_LOG;
  162. + block = caml_contiguous_heap;
  163. + while(log_cur != log_block) {
  164. + Assert((*bd_pt)->left != NULL);
  165. + Assert((*bd_pt)->right != NULL);
  166. + if(((*bd_pt)->left->sizes & (1U << (log_block-Page_log))) != 0)
  167. + *(bd_pt+1) = (*bd_pt)->left;
  168. + else {
  169. + Assert((*bd_pt)->right != NULL);
  170. + Assert(((*bd_pt)->right->sizes & (1U << (log_block-Page_log))) != 0);
  171. + *(bd_pt+1) = (*bd_pt)->right;
  172. + block += (1ULL << (log_cur-1));
  173. + }
  174. + bd_pt++;
  175. + log_cur--;
  176. + }
  177. +
  178. + Assert((*bd_pt)->sizes == (1U << (log_block-Page_log)));
  179. + Assert((*bd_pt)->left == NULL);
  180. + Assert((*bd_pt)->right == NULL);
  181. +
  182. + if((char*)mmap(block, size, PROT_READ | PROT_WRITE,
  183. + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != block)
  184. + return NULL;
  185. +
  186. + while(log_cur > log_alloc) {
  187. + (*bd_pt)->left = (struct block_descr*)malloc(sizeof(struct block_descr));
  188. + (*bd_pt)->right = (struct block_descr*)malloc(sizeof(struct block_descr));
  189. + if((*bd_pt)->left == NULL || (*bd_pt)->right == NULL) {
  190. + while(log_cur <= log_block) {
  191. + free((*bd_pt)->left);
  192. + (*bd_pt)->left = NULL;
  193. + free((*bd_pt)->right);
  194. + (*bd_pt)->right = NULL;
  195. +
  196. + log_cur++;
  197. + bd_pt--;
  198. + }
  199. + if((char*)mmap(block, size, PROT_NONE,
  200. + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE,
  201. + -1, 0) != block)
  202. + caml_fatal_error("Fatal error: could not deallocate memory\n");
  203. + return NULL;
  204. + }
  205. +
  206. + (*bd_pt)->right->left = NULL;
  207. + (*bd_pt)->right->right = NULL;
  208. + (*bd_pt)->right->sizes = 1U<<(log_cur-1-Page_log);
  209. +
  210. + *(bd_pt+1) = (*bd_pt)->left;
  211. + bd_pt++;
  212. + log_cur--;
  213. + }
  214. +
  215. + (*bd_pt)->left = NULL;
  216. + (*bd_pt)->right = NULL;
  217. + (*bd_pt)->sizes = 0;
  218. +
  219. + while(bd_pt > block_descrs) {
  220. + bd_pt--;
  221. + (*bd_pt)->sizes = (*bd_pt)->left->sizes | (*bd_pt)->right->sizes;
  222. + }
  223. +
  224. + return block;
  225. +}
  226. +
  227. +void caml_block_free(char *block, asize_t size)
  228. +{
  229. + asize_t offset;
  230. + int log_alloc, log_cur;
  231. + struct block_descr **bd_pt;
  232. +
  233. + Assert(caml_contiguous_heap != NULL);
  234. +
  235. + offset = block-caml_contiguous_heap;
  236. + Assert((size & ((1ULL<<Page_log)-1)) == 0);
  237. + Assert((offset & ((1ULL<<Page_log)-1)) == 0);
  238. + Assert(offset < (1ULL<<CAML_CONTIGUOUS_HEAP_LOG));
  239. +
  240. + for(log_alloc = Page_log; (1ULL << log_alloc) < size; log_alloc++);
  241. +
  242. + if((char*)mmap(block, size, PROT_NONE,
  243. + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE,
  244. + -1, 0) != block)
  245. + caml_fatal_error("Fatal error: could not deallocate memory\n");
  246. +
  247. + block_descrs[0] = root_descr;
  248. + bd_pt = block_descrs;
  249. + log_cur = CAML_CONTIGUOUS_HEAP_LOG;
  250. + while(log_cur > log_alloc) {
  251. + Assert((*bd_pt)->left != NULL);
  252. + Assert((*bd_pt)->right != NULL);
  253. + if((offset & (1ULL<<(log_cur-1))) != 0)
  254. + *(bd_pt+1) = (*bd_pt)->right;
  255. + else
  256. + *(bd_pt+1) = (*bd_pt)->left;
  257. + bd_pt++;
  258. + log_cur--;
  259. + }
  260. +
  261. + Assert((*bd_pt)->left == NULL);
  262. + Assert((*bd_pt)->right == NULL);
  263. + Assert((*bd_pt)->sizes == 0);
  264. +
  265. + (*bd_pt)->sizes = 1U<<(log_cur-Page_log);
  266. + while(bd_pt > block_descrs) {
  267. + bd_pt--;
  268. + log_cur++;
  269. + if((*bd_pt)->left->sizes == (1U<<(log_cur-1-Page_log)) &&
  270. + (*bd_pt)->right->sizes == (1U<<(log_cur-1-Page_log))) {
  271. + free((*bd_pt)->left);
  272. + (*bd_pt)->left = NULL;
  273. + free((*bd_pt)->right);
  274. + (*bd_pt)->right = NULL;
  275. + (*bd_pt)->sizes = 1U<<(log_cur-Page_log);
  276. + } else {
  277. + bd_pt++;
  278. + while(bd_pt > block_descrs) {
  279. + bd_pt--;
  280. + (*bd_pt)->sizes = (*bd_pt)->left->sizes | (*bd_pt)->right->sizes;
  281. + }
  282. + }
  283. + }
  284. +}
  285. +
  286. +#endif
  287. +
  288. /* Allocate a block of the requested size, to be passed to
  289. [caml_add_to_heap] later.
  290. [request] must be a multiple of [Page_size].
  291. @@ -228,10 +417,18 @@ char *caml_alloc_for_heap (asize_t request)
  292. char *mem;
  293. void *block;
  294. Assert (request % Page_size == 0);
  295. +#ifdef CAML_CONTIGUOUS_HEAP_LOG
  296. + /* TODO : We allocate a full new page for the chunk header.
  297. + That would be good to avoid this waste. */
  298. + block = caml_block_alloc(request+Page_size);
  299. + if (block == NULL) return NULL;
  300. + mem = block + Page_size;
  301. +#else
  302. mem = caml_aligned_malloc (request + sizeof (heap_chunk_head),
  303. sizeof (heap_chunk_head), &block);
  304. if (mem == NULL) return NULL;
  305. mem += sizeof (heap_chunk_head);
  306. +#endif
  307. Chunk_size (mem) = request;
  308. Chunk_block (mem) = block;
  309. return mem;
  310. @@ -242,7 +439,11 @@ char *caml_alloc_for_heap (asize_t request)
  311. */
  312. void caml_free_for_heap (char *mem)
  313. {
  314. +#ifdef CAML_CONTIGUOUS_HEAP_LOG
  315. + caml_block_free(Chunk_block (mem), Chunk_size(mem)+Page_size);
  316. +#else
  317. free (Chunk_block (mem));
  318. +#endif
  319. }
  320.  
  321. /* Take a chunk of memory as argument, which must be the result of a
  322. @@ -266,9 +467,11 @@ int caml_add_to_heap (char *m)
  323. caml_gc_message (0x04, "Growing heap to %luk bytes\n",
  324. (caml_stat_heap_size + Chunk_size (m)) / 1024);
  325.  
  326. +#ifndef CAML_CONTIGUOUS_HEAP_LOG
  327. /* Register block in page table */
  328. if (caml_page_table_add(In_heap, m, m + Chunk_size(m)) != 0)
  329. return -1;
  330. +#endif
  331.  
  332. /* Chain this heap chunk. */
  333. {
  334. @@ -381,8 +584,10 @@ void caml_shrink_heap (char *chunk)
  335. while (*cp != chunk) cp = &(Chunk_next (*cp));
  336. *cp = Chunk_next (chunk);
  337.  
  338. +#ifndef CAML_CONTIGUOUS_HEAP_LOG
  339. /* Remove the pages of [chunk] from the page table. */
  340. caml_page_table_remove(In_heap, chunk, chunk + Chunk_size (chunk));
  341. +#endif
  342.  
  343. /* Free the [malloc] block that contains [chunk]. */
  344. caml_free_for_heap (chunk);
  345. diff --git a/byterun/memory.h b/byterun/memory.h
  346. index 0761070..eab0aec 100644
  347. --- a/byterun/memory.h
  348. +++ b/byterun/memory.h
  349. @@ -53,8 +53,12 @@ color_t caml_allocation_color (void *hp);
  350. /* <private> */
  351.  
  352. #define Not_in_heap 0
  353. +
  354. +#ifndef CAML_CONTIGUOUS_HEAP_LOG
  355. #define In_heap 1
  356. #define In_young 2
  357. +#endif
  358. +
  359. #define In_static_data 4
  360. #define In_code_area 8
  361.  
  362. @@ -81,15 +85,37 @@ CAMLextern unsigned char * caml_page_table[Pagetable1_size];
  363.  
  364. #endif
  365.  
  366. +#ifdef CAML_CONTIGUOUS_HEAP_LOG
  367. +
  368. +extern char* caml_contiguous_heap;
  369. +#define Is_in_heap(a) \
  370. + ((uintnat)((char*)(a) - caml_contiguous_heap) < \
  371. + (1ULL<<CAML_CONTIGUOUS_HEAP_LOG))
  372. +#define Is_in_heap_or_young(a) \
  373. + (Is_in_heap(a) || \
  374. + ((char*)(a) >= caml_young_start && (char*)(a) < caml_young_end))
  375. +#define Is_in_value_area(a) \
  376. + (Is_in_heap_or_young(a) || (Classify_addr(a) & In_static_data))
  377. +
  378. +#else
  379. +
  380. #define Is_in_value_area(a) \
  381. (Classify_addr(a) & (In_heap | In_young | In_static_data))
  382. #define Is_in_heap(a) (Classify_addr(a) & In_heap)
  383. #define Is_in_heap_or_young(a) (Classify_addr(a) & (In_heap | In_young))
  384.  
  385. +#endif
  386. +
  387. int caml_page_table_add(int kind, void * start, void * end);
  388. int caml_page_table_remove(int kind, void * start, void * end);
  389. int caml_page_table_initialize(mlsize_t bytesize);
  390.  
  391. +#ifdef CAML_CONTIGUOUS_HEAP_LOG
  392. +
  393. +int caml_block_init(void);
  394. +
  395. +#endif
  396. +
  397. #ifdef DEBUG
  398. #define DEBUG_clear(result, wosize) do{ \
  399. uintnat caml__DEBUG_i; \
  400. diff --git a/byterun/minor_gc.c b/byterun/minor_gc.c
  401. index 3e0dd4e..dcc8088 100644
  402. --- a/byterun/minor_gc.c
  403. +++ b/byterun/minor_gc.c
  404. @@ -83,6 +83,8 @@ void caml_set_minor_heap_size (asize_t size)
  405. Assert (caml_young_ptr == caml_young_end);
  406. new_heap = caml_aligned_malloc(size, 0, &new_heap_base);
  407. if (new_heap == NULL) caml_raise_out_of_memory();
  408. +
  409. +#ifndef CAML_CONTIGUOUS_HEAP_LOG
  410. if (caml_page_table_add(In_young, new_heap, new_heap + size) != 0)
  411. caml_raise_out_of_memory();
  412.  
  413. @@ -90,6 +92,8 @@ void caml_set_minor_heap_size (asize_t size)
  414. caml_page_table_remove(In_young, caml_young_start, caml_young_end);
  415. free (caml_young_base);
  416. }
  417. +#endif
  418. +
  419. caml_young_base = new_heap_base;
  420. caml_young_start = new_heap;
  421. caml_young_end = new_heap + size;
Add Comment
Please, Sign In to add comment