Index: gc.c
===================================================================
--- gc.c (revision 19984)
+++ gc.c (working copy)
@@ -281,7 +281,7 @@
int limit;
};

-#define HEAP_MIN_SLOTS 10000
+#define HEAP_MIN_SLOTS 100000
#define FREE_MIN 4096

struct gc_list {
@@ -395,11 +395,45 @@
/*#define HEAP_SIZE 0x800 */

#define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE))
+#define GC_NOTIFY 1

extern st_table *rb_class_tbl;

int ruby_disable_gc_stress = 0;

+
+
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+
+#define TO_ADD_TO_FREE_LIST_SIZE 4000000
+#define HEAPS_TO_FREE_SIZE 3000
+enum CHILD_STATE { idle, child_in_gc, gc_just_finished, gc_wait_for_child_next_pass};
+typedef struct
+{
+ enum CHILD_STATE child_state;
+ RVALUE* to_add_to_free_list[TO_ADD_TO_FREE_LIST_SIZE];
+ struct heaps_slot* heaps_to_free[HEAPS_TO_FREE_SIZE];
+
+} shared_stuff_t, *shared_stuff_p;
+
+shared_stuff_p shared_area = 0;
+int child_gc_pid = 0;
+
+/* TODO wait for the child pid on exit */
+
+
+
+
+
+
+
+
+
+
+
static void run_final(rb_objspace_t *objspace, VALUE obj);
static int garbage_collect(rb_objspace_t *objspace);

@@ -546,6 +580,7 @@
if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
(malloc_increase+size) > malloc_limit) {
garbage_collect(objspace);
+ malloc_increase = 0;
}
mem = malloc(size);
if (!mem) {
@@ -889,9 +924,11 @@
VALUE obj;

if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
- if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
- during_gc = 0;
- rb_memerror();
+ if (!heaps_increment(objspace)) {
+ if(!garbage_collect(objspace)) {
+ during_gc = 0;
+ rb_memerror();
+ }
}
}

@@ -1621,95 +1658,59 @@
static void
gc_sweep(rb_objspace_t *objspace)
{
- RVALUE *p, *pend, *final_list;
- size_t freed = 0;
- size_t i;
- size_t live = 0, free_min = 0, do_heap_free = 0;
+ RVALUE *p, *pend;
+ size_t i; /* TODO were there more here we needed to worry about? */
+ size_t live = 0;
+ int where_at_in_to_add_to_free_list = 0;
+ int heaps_to_free_where_at = 0;
+ int total_added_to_to_free_list;

- do_heap_free = (heaps_used * HEAP_OBJ_LIMIT) * 0.65;
- free_min = (heaps_used * HEAP_OBJ_LIMIT) * 0.2;
-
- if (free_min < FREE_MIN) {
- do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
- free_min = FREE_MIN;
- }
-
- freelist = 0;
- final_list = deferred_final_list;
- deferred_final_list = 0;
for (i = 0; i < heaps_used; i++) {
- int free_num = 0, final_num = 0;
- RVALUE *free = freelist;
- RVALUE *final = final_list;
- int deferred;
+ int free_num = 0;

p = heaps[i].slot; pend = p + heaps[i].limit;
+
+ total_added_to_to_free_list = 0;
while (p < pend) {
if (!(p->as.basic.flags & FL_MARK)) {
- if (p->as.basic.flags &&
- ((deferred = obj_free(objspace, (VALUE)p)) ||
- ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) {
- if (!deferred) {
- p->as.free.flags = T_ZOMBIE;
- RDATA(p)->dfree = 0;
+ if (p->as.basic.flags) {
+ free_num++; /* for now only count those that we honestly free this time */
+ total_added_to_to_free_list++;
+ if(where_at_in_to_add_to_free_list < TO_ADD_TO_FREE_LIST_SIZE - 2)
+ {
+ shared_area->to_add_to_free_list[where_at_in_to_add_to_free_list++] = p;
}
- p->as.free.flags |= FL_MARK;
- p->as.free.next = final_list;
- final_list = p;
- final_num++;
+
}
- else {
- add_freelist(objspace, p);
- free_num++;
- }
}
- else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
+ else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
/* objects to be finalized */
/* do nothing remain marked */
}
else {
- RBASIC(p)->flags &= ~FL_MARK;
- live++;
+ //live++;
}
p++;
}
- if (final_num + free_num == heaps[i].limit && freed > do_heap_free) {
- RVALUE *pp;
+
+ if (free_num == heaps[i].limit) {
+ if(heaps_to_free_where_at < (HEAPS_TO_FREE_SIZE - 2))
+ {
+ /* TODO do not list them on the to be freed list or they will be added to the freelist when collected */
+ //printf("marking heap to be totally freed %d\n", heaps[i].membase); /* this has the right number*/
+ shared_area->heaps_to_free[heaps_to_free_where_at++] = heaps[i].membase; /* TODO do we still add these to the to free list though? */
+ }
+ }
+ } /* foreach heap */

- for (pp = final_list; pp != final; pp = pp->as.free.next) {
- RDATA(pp)->dmark = (void *)&heaps[i];
- pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
- }
- heaps[i].limit = final_num;
+ if(where_at_in_to_add_to_free_list == TO_ADD_TO_FREE_LIST_SIZE - 2)
+ if (GC_NOTIFY) printf("PROBABLY WASTED free list size wanted %d, size is %d\n", where_at_in_to_add_to_free_list, TO_ADD_TO_FREE_LIST_SIZE);

- freelist = free; /* cancel this page from freelist */
- }
- else {
- freed += free_num;
- }
- }
- GC_PROF_SET_MALLOC_INFO;
- if (malloc_increase > malloc_limit) {
- malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed);
- if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT;
- }
- malloc_increase = 0;
- if (freed < free_min) {
- set_heaps_increment(objspace);
- heaps_increment(objspace);
- }
- during_gc = 0;
-
- /* clear finalization list */
- if (final_list) {
- GC_PROF_SET_HEAP_INFO;
- deferred_final_list = final_list;
- RUBY_VM_SET_FINALIZER_INTERRUPT(GET_THREAD());
- }
- else{
- free_unused_heaps(objspace);
- GC_PROF_SET_HEAP_INFO;
- }
+ /* add some null terminators */
+ shared_area->to_add_to_free_list[where_at_in_to_add_to_free_list] = 0;
+ shared_area->heaps_to_free[heaps_to_free_where_at] = 0;
+
+ return;
}

void
@@ -1855,7 +1856,6 @@
return 0;
}

-#define GC_NOTIFY 0

void rb_vm_mark(void *ptr);

@@ -1902,9 +1902,178 @@

void rb_gc_mark_encodings(void);

+
+static void
+gc_free_from_child()
+{
+ RVALUE *p;
+ int where_at_in_to_add_to_free_list = 0;
+ int deferred;
+ rb_objspace_t *objspace = &rb_objspace;
+ p = shared_area->to_add_to_free_list[0];
+ while(p != 0) {
+ if( (deferred = obj_free(objspace, (VALUE)p)) ||
+ ((FL_TEST(p, FL_FINALIZE)) && need_call_final))
+ {
+ if (!deferred) {
+ p->as.free.flags = T_ZOMBIE;
+ RDATA(p)->dfree = 0;
+ }
+ p->as.free.flags |= FL_MARK;
+ p->as.free.next = deferred_final_list;
+ deferred_final_list = p;
+ }else
+ add_freelist(objspace, (RVALUE *)p);
+
+ p = shared_area->to_add_to_free_list[++where_at_in_to_add_to_free_list];
+ }
+
+ /* free heaps -- TODO
+ if(shared_area->heaps_to_free[0])
+ {
+ struct heaps_slot *a = shared_area->heaps_to_free[0];
+ int here = 0;
+ struct heaps_slot *p2 = shared_area->heaps_to_free[here];
+ while(p2)
+ {
+ p2 = shared_area->heaps_to_free[++here];
+ //p2->limit = 0;
+ }
+ //free_unused_heaps(objspace);
+ } */
+
+}
+
+static void init_shared_memory() {
+#ifndef _DARWIN_C_SOURCE
+ int fd;
+#endif
+ if( child_gc_pid) // todo take out
+ {
+ printf("ERROR CHILD IS %d -- which means it was initialized twice! unexpected\n", child_gc_pid);
+ _exit(EXIT_FAILURE);
+ }
+
+#ifdef _DARWIN_C_SOURCE
+ shared_area=(shared_stuff_p)mmap(0,
+ sizeof(shared_stuff_t),
+ PROT_READ|PROT_WRITE,MAP_SHARED|MAP_ANON,
+ -1,
+ 0
+ );
+#else
+ fd=open("/dev/zero",
+ O_RDWR
+ );
+
+ if(fd==-1)
+ {
+ printf("unable to get file handle for attempted shared memory--exiting!\n");
+ _exit(EXIT_FAILURE);
+ }
+
+ shared_area=(shared_stuff_p)mmap(0,
+ sizeof(shared_stuff_t),
+ PROT_READ|PROT_WRITE,MAP_SHARED,
+ fd,
+ 0
+ );
+#endif
+
+ if(shared_area==(shared_stuff_p)-1)
+ {
+ printf("shared memory allocation failure! %s\n", strerror(errno));
+ _exit(EXIT_FAILURE);
+ }
+ shared_area->child_state = idle;
+}
+
static int
garbage_collect(rb_objspace_t *objspace)
{
+ pid_t w;
+ int status;
+
+ if(shared_area==0){
+ init_shared_memory();
+ }
+
+ int saved_child_state = shared_area->child_state;
+ if (dont_gc || during_gc || (saved_child_state == child_in_gc)) {
+
+ // if we are dont_gc or during_gc and have no freelist, add heaps; otherwise wait for the child
+ if(!freelist && (dont_gc || during_gc))
+ {
+ set_heaps_increment(objspace);
+ heaps_increment(objspace);
+ return Qtrue;
+ } else
+ {
+ // we should wait - cause waitpid to be called below
+ saved_child_state = gc_wait_for_child_next_pass;
+ }
+ }
+
+
+ if(saved_child_state == gc_just_finished || saved_child_state == gc_wait_for_child_next_pass)
+ {
+ during_gc++;
+ if(GC_NOTIFY)
+ if(saved_child_state == gc_just_finished)
+ printf("receiving WELL a child collecting thread\n");
+ else
+ printf("receiving POORLY a child collecting thread \n");
+ // reap child process
+ w = waitpid(child_gc_pid, &status, 0);
+ child_gc_pid = 0;
+ if (w == -1) { perror("waitpid"); exit(EXIT_FAILURE); }
+ gc_free_from_child();
+ if (!freelist) {
+ // our collecting from the child was unsuccessful
+ if (!heaps_increment(objspace)) {
+ set_heaps_increment(objspace);
+ heaps_increment(objspace);
+ }
+
+ }
+ shared_area->child_state = idle;
+ during_gc = 0;
+ return Qtrue;
+ }
+
+
+ if(shared_area->child_state == child_in_gc)
+ {
+ printf("attempted to start two children on accident: bug\n");
+ raise(SIGTERM);
+ }
+
+ /* now the case where there was no child running - start the child */
+ if (GC_NOTIFY) fflush(stdout);
+ if(child_gc_pid)
+ {
+ printf("err -- looks like child exists already\n");
+ raise(SIGTERM);
+ }
+ shared_area->child_state = child_in_gc;
+ int childs_pid = fork();
+ if(childs_pid != 0)
+ {
+ // parent:
+ child_gc_pid = childs_pid;
+ if (!freelist) {
+ if (!heaps_increment(objspace)) {
+ /* At this point we can either add more [and possibly grow forever] or
+ we can call GC again and wait on our newly created child process */
+ return garbage_collect(objspace);
+ }
+
+ }
+ return Qtrue;
+
+ }
+
+ // child:
struct gc_list *list;
rb_thread_t *th = GET_THREAD();
INIT_GC_PROF_PARAMS;
@@ -1915,15 +2084,6 @@
return Qfalse;
}

- if (dont_gc || during_gc) {
- if (!freelist) {
- if (!heaps_increment(objspace)) {
- set_heaps_increment(objspace);
- heaps_increment(objspace);
- }
- }
- return Qtrue;
- }
during_gc++;
objspace->count++;

@@ -1976,7 +2136,10 @@

GC_PROF_TIMER_STOP;
if (GC_NOTIFY) printf("end garbage_collect()\n");
- return Qtrue;
+
+ shared_area->child_state = gc_just_finished;
+ _exit(EXIT_SUCCESS);
+ return Qtrue; // we never get here
}

int
@@ -2376,6 +2539,8 @@
rb_gc(void)
{
rb_objspace_t *objspace = &rb_objspace;
+ if(shared_area && (shared_area->child_state == child_in_gc))
+ shared_area->child_state = gc_wait_for_child_next_pass; // allow two calls to GC.start to force a GC
garbage_collect(objspace);
gc_finalize_deferred(objspace);
}