Chris M. Thomasson (cristom), Feb 26th, 2013
/*
Word-Based Portable Proxy Garbage Collector
Copyright (C) 2013 Christopher Michael Thomasson

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/


//#define RL_DEBUGBREAK_ON_ASSERT
//#define RL_GC
//#define RL_MSVC_OUTPUT
//#define RL_FORCE_SEQ_CST
#include <relacy/relacy_std.hpp>
#include <cstdio>
#include <cstddef>


#if ! defined (NDEBUG)
#   define DBG_PRINTF(e) std::printf e
#else
#   define DBG_PRINTF(e) ((void)0)
#endif


#define mb_relaxed std::memory_order_relaxed
#define mb_acquire std::memory_order_acquire
#define mb_release std::memory_order_release
#define mb_acq_rel std::memory_order_acq_rel
#define mb_seq_cst std::memory_order_seq_cst


#define MB_FENCE(mb_mp) std::atomic_thread_fence(mb_mp)
#define mb_fence(mb_mp) MB_FENCE(mb_mp)


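// The proxy state is packed into a single word: bit 0 of m_count flags a
// collection in progress, the low 16 bits carry a version, and the high 16
// bits accumulate 0x00020000 (2 in the high half) per outstanding acquire().
// prv_collect_collector_ref later strips that high half and transfers it onto
// the retiring collector's own per-collector count. (Reading of the bit
// layout inferred from the constants used below.)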
typedef unsigned int ver_uint_type;


struct node
{
    std::atomic<node*> m_next;
    VAR_T(node*) m_defer_next;


    node()
    :   m_next(NULL),
        m_defer_next(NULL)
    {

    }
};


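// Overview of the proxy collector, as I read the code below: readers bracket
// each traversal with acquire()/release(), which pins the collector object
// that is current at that moment. Writers hand unlinked nodes to collect(),
// which installs a fresh collector and queues the old one, along with its
// deferred nodes, behind any readers still referencing it. Once the last such
// reference drains, prv_dtor_collector deletes the collector and every node
// on its m_defer list.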
class proxy_gc
{
    static void prv_destroy(node* n)
    {
        while (n)
        {
            node* next = VAR(n->m_defer_next);
            delete n;
            n = next;
        }
    }


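    // Per-collector count semantics (inferred from the arithmetic below): a
    // collector holds 2 per reference (each transferred acquire, plus the
    // link held by its predecessor in the collector chain) and gains 1 once
    // it has been retired. Observing a pre-decrement value of 3 therefore
    // means "last reference dropped on a retired collector", which triggers
    // destruction.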
public:
    struct collector
    {
        std::atomic<ver_uint_type> m_count;
        std::atomic<collector*> m_next;
        VAR_T(node*) m_defer;

        collector()
        :   m_count(0x00000000U),
            m_next(NULL),
            m_defer(NULL)
        {

        }


        collector(ver_uint_type count)
        :   m_count(count),
            m_next(NULL),
            m_defer(NULL)
        {

        }


        ~collector()
        {
            node* defer = VAR(m_defer);
            prv_destroy(defer);
        }
    };


private:
    std::atomic<ver_uint_type> m_count;
    std::atomic<collector*> m_collector;
    std::mutex m_mutex;


public:
    proxy_gc()
    :   m_count(0x00000000U),
        m_collector(new collector())
    {

    }

    ~proxy_gc()
    {
        prv_collect_collector_ref(NULL, NULL);
    }


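    // Read-side entry: spin while a collection is in flight (bit 0 set), then
    // add 0x00020000 with a CAS so the reference count bump and the observed
    // m_collector pointer belong to the same version of the proxy count.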
private:
    collector*
    prv_acquire_collector_ref()
    {
        ver_uint_type count = m_count.load(mb_relaxed);
        collector* c = NULL;
        rl::linear_backoff backoff;

        for (;;)
        {
            if (count & 0x00000001U)
            {
                backoff.yield($);
                count = m_count.load(mb_relaxed);
                continue;
            }

            mb_fence(mb_acquire);

            c = m_collector.load(mb_relaxed);

            if (m_count.compare_exchange_weak(
                    count,
                    count + 0x00020000U,
                    mb_relaxed))
            {
                break;
            }
        }

        mb_fence(mb_acquire);

        return c;
    }


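    // Read-side exit: drop 2 from the collector handed out by the matching
    // acquire; a previous value of 3 means this was the final reference to an
    // already-retired collector, so it is torn down here.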
    void
    prv_release_collector_release(collector* c)
    {
        mb_fence(mb_release);

        ver_uint_type count = c->m_count.fetch_sub(0x00000002U, mb_relaxed);

        if (count == 0x00000003U)
        {
            prv_dtor_collector(c);
        }
    }


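    // Collection, as I read it: under the mutex, set the "in progress" bit,
    // swap in the new collector and link the old one behind it, then clear
    // the high (reference) half of the proxy count while advancing the low
    // version bits. The cleared high half, 2 per acquire made during the old
    // epoch, is transferred onto the old collector's count together with a +1
    // "retired" mark; if that count has already drained, the old collector is
    // destroyed immediately.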
    void
    prv_collect_collector_ref(collector* nc, node* defer)
    {
        if (nc)
        {
            VAR(nc->m_defer) = defer;
        }

        m_mutex.lock($);

        ver_uint_type count1 = m_count.fetch_add(0x00000001U, mb_relaxed);

        collector* oc = m_collector.load(mb_relaxed);
        m_collector.store(nc, mb_relaxed);

        oc->m_next.store(nc, mb_relaxed);

        ver_uint_type count2 = count1 + 0x00000001U;
        ver_uint_type xchg = (count2 + 0x00000001U) & 0x0000FFFFU;

        mb_fence(mb_release);

        if (! m_count.compare_exchange_strong(
                count2,
                xchg,
                mb_relaxed))
        {
            RL_ASSERT(false);
        }

        m_mutex.unlock($);

        ver_uint_type count = count1 & 0xFFFF0000U;
        count >>= 0x00000010U;

        ver_uint_type ocount =
            oc->m_count.fetch_add(count + 0x00000001U, mb_relaxed);

        if (ocount == -count)
        {
            prv_dtor_collector(oc);
        }
    }


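    // Final teardown: starting from the collector whose count just drained,
    // walk the m_next chain and keep claiming successors whose counts also
    // drain, splicing them onto a private list; then delete every claimed
    // collector, which frees its deferred nodes via ~collector/prv_destroy.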
    void
    prv_dtor_collector(collector* c)
    {
        mb_fence(mb_acquire);

        collector* head = c;
        collector* tail = c;
        collector* next = c->m_next.load(mb_relaxed);
        c->m_next.store(NULL, mb_relaxed);

        while (next)
        {
            ver_uint_type next_count = next->m_count.fetch_sub(0x00000002U, mb_relaxed);

            if (next_count != 0x00000003U)
            {
                break;
            }

            mb_fence(mb_acquire);

            collector* next2 = next->m_next.load(mb_relaxed);

            DBG_PRINTF(("prv_dtor_collector: collected! - %u - %p - %u\n",
                next_count,
                c,
                next_count));

            next->m_next.store(NULL, mb_relaxed);
            tail->m_next.store(next, mb_relaxed);

            tail = next;
            next = next2;
        }

        while (head)
        {
            collector* next = head->m_next.load(mb_relaxed);

            delete head;

            head = next;
        }
    }


public:
    collector*
    acquire()
    {
        collector* c = prv_acquire_collector_ref();

        return c;
    }


    void
    release(collector* c)
    {
        prv_release_collector_release(c);
    }


    void
    collect(node* defer = NULL)
    {
        collector* nc = new collector(0x00000002U);

        prv_collect_collector_ref(nc, defer);
    }
};
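// Usage protocol: wrap each read-side traversal of shared nodes in
// acquire()/release(); after unlinking a node, pass it to collect() so its
// deletion is deferred past any reader that might still see it. A collector
// created here starts at a count of 2, which appears to balance the chain
// link its predecessor will hold to it.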


// your basic lock-free stack...
// well, minus ABA counter and DWCAS of course! ;^)
class stack
{
    std::atomic<node*> m_head;


public:
    stack()
    :   m_head(NULL)
    {

    }


public:
    void push(node* n)
    {
        node* head = m_head.load(std::memory_order_relaxed);

        do
        {
            n->m_next.store(head, std::memory_order_relaxed);

            mb_fence(mb_release);
        }

        while (! m_head.compare_exchange_weak(
                    head,
                    n,
                    mb_relaxed));
    }


    node* flush()
    {
        node* n = m_head.exchange(NULL, mb_relaxed);

        if (n) mb_fence(mb_acquire);

        return n;
    }


    node* get_head()
    {
        node* n = m_head.load(mb_relaxed);

        if (n) mb_fence(mb_acquire);

        return n;
    }


    node* pop()
    {
        node* head = m_head.load(mb_relaxed);
        node* xchg;

        do
        {
            if (! head) return NULL;

            mb_fence(mb_acquire);

            xchg = head->m_next.load(std::memory_order_relaxed);
        }

        while (! m_head.compare_exchange_weak(
                    head,
                    xchg,
                    mb_relaxed));

        return head;
    }
};


#define ITERS 7
#define DEFER 6
#define WRITERS 2
#define READERS 4
#define REAPERS 1
#define THREADS (WRITERS + READERS + REAPERS)


typedef proxy_gc proxy_gc_type;


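// Relacy test harness: threads 0..READERS-1 are readers that walk the stack
// under acquire()/release(); the next WRITERS threads push nodes and
// occasionally pop one and hand it to collect(); the last thread is a
// "reaper" that keeps retiring collector epochs while the writers run.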
struct proxy_test
:   rl::test_suite<proxy_test, THREADS>
{
    proxy_gc_type g_proxy_gc;
    stack g_stack;
    unsigned int g_writers;


    void before()
    {
        g_writers = WRITERS;
    }


    void thread(unsigned int tidx)
    {
        rl::backoff backoff;

        if (tidx < READERS)
        {
            DBG_PRINTF(("reader thread entry(%d)\n", tidx));

            while (g_writers)
            {
                backoff.yield($);

                proxy_gc_type::collector* c = g_proxy_gc.acquire();

                node* n = g_stack.get_head();

                while (n)
                {
                    node* next = n->m_next.load(mb_relaxed);

                    backoff.yield($);

                    DBG_PRINTF(("reader thread(%d) - READ (%p:%p)\n", tidx, c, n));

                    n = next;
                }

                g_proxy_gc.release(c);
            }

            DBG_PRINTF(("reader thread exit(%d)\n", tidx));
        }

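        // Writers: push ITERS nodes, occasionally popping one under proxy
        // protection and passing it to collect() so its deletion is deferred
        // past concurrent readers; then drain the stack the same way before
        // signalling completion through g_writers.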
        else if (tidx < WRITERS + READERS)
        {
            DBG_PRINTF(("writer thread entry(%d)\n", tidx));

            node* n;

            for (unsigned int i = 0; i < ITERS; ++i)
            {
                n = new node();

                g_stack.push(n);

                DBG_PRINTF(("writer thread(%d) - WRITE PUSH (%p)\n", tidx, n));

                if (! (i % (rl::rand(4) + 1)))
                {
                    proxy_gc_type::collector* c = g_proxy_gc.acquire();
                    backoff.yield($);
                    n = g_stack.pop();
                    g_proxy_gc.release(c);

                    DBG_PRINTF(("writer thread(%d) - WRITE POP (%p:%p)\n", tidx, n, c));
                    g_proxy_gc.collect(n);
                }
            }

            for (unsigned int i = 0; i < ITERS; ++i)
            {
                proxy_gc_type::collector* c = g_proxy_gc.acquire();
                n = g_stack.pop();
                backoff.yield($);
                g_proxy_gc.release(c);
                g_proxy_gc.collect(n);
            }

            --g_writers;

            DBG_PRINTF(("writer thread exit(%d)\n", tidx));
        }

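        // Reaper: keeps flushing collector epochs with an empty collect()
        // while writers are active, so deferred nodes get reclaimed even when
        // no writer triggers a collection for a while.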
        else
        {
            DBG_PRINTF(("reaper thread entry(%d)\n", tidx));

            while (g_writers)
            {
                g_proxy_gc.collect();
                backoff.yield($);
            }

            DBG_PRINTF(("reaper thread exit(%d)\n", tidx));
        }
    }
};


int main()
{
    {
        rl::test_params p;

        p.iteration_count = 30000000;
        p.execution_depth_limit = 10000;
        // p.context_bound = 3;
        // p.search_type = rl::sched_bound;
        // p.search_type = rl::fair_full_search_scheduler_type;
        //p.search_type = rl::fair_context_bound_scheduler_type;
        rl::simulate<proxy_test>(p);
    }

    std::puts("\n\n____________________________\nTesting Complete!");
    std::getchar();

    return 0;
}