/*
Word-Based Portable Proxy Garbage Collector
Copyright (C) 2010 Christopher Michael Thomasson

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/




//#define RL_DEBUGBREAK_ON_ASSERT
//#define RL_MSVC_OUTPUT
//#define RL_FORCE_SEQ_CST
#include <relacy/relacy_std.hpp>
#include <cstdio>
#include <cstddef>




#if ! defined (NDEBUG)
#   define DBG_PRINTF(e) std::printf e
#else
#   define DBG_PRINTF(e) ((void)0)
#endif
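
// Note: DBG_PRINTF takes a single parenthesized argument list so that a
// C++03 macro can forward a variable number of arguments; hence the double
// parentheses at a call site, e.g. DBG_PRINTF(("deferred: %u\n", count));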



struct node
{
    std::atomic<node*> m_next;   // lock-free stack link.
    VAR_T(node*) m_defer_next;   // deferred-reclamation list link.
};




template<size_t T_defer_limit>
class proxy
{
    // destroy a list of nodes linked through m_defer_next.
    static void prv_destroy(node* n)
    {
        while (n)
        {
            node* next = VAR(n->m_defer_next);
            delete n;
            n = next;
        }
    }



public:
    class collector
    {
        friend class proxy;



    private:
        std::atomic<node*> m_defer;              // head of this collector's deferred node list.
        std::atomic<unsigned int> m_defer_count; // number of deferred nodes.
        std::atomic<unsigned int> m_count;       // reference count: 0x20 per reference, 0x10 = quiesce flag.



    public:
        collector()
        :   m_defer(NULL),
            m_defer_count(0),
            m_count(0)
        {

        }


        ~collector()
        {
            prv_destroy(m_defer.load(std::memory_order_relaxed));
        }
    };



private:
    std::atomic<unsigned int> m_current; // current collector index (low 4 bits) + master count (multiples of 0x20).
    std::atomic<bool> m_quiesce;         // set while a quiescence cycle is in progress.
    VAR_T(node*) m_defer;                // nodes from the previous cycle, destroyed when the next completes.
    collector m_collectors[2];



public:
    proxy()
    :   m_current(0),
        m_quiesce(false),
        m_defer(NULL)
    {

    }

    ~proxy()
    {
        prv_destroy(VAR(m_defer));
    }



private:
    void prv_quiesce_begin()
    {
        // Try to begin the quiescence process.
        if (! m_quiesce.exchange(true, std::memory_order_acquire))
        {
            // advance the current collector and grab the old one.
            unsigned int old = m_current.load(std::memory_order_relaxed) & 0xFU;
            old = m_current.exchange((old + 1) & 1, std::memory_order_acq_rel);
            collector& c = m_collectors[old & 0xFU];

            // decode reference count.
            unsigned int refs = old & 0xFFFFFFF0U;

            // verify reference count and previous collector index.
            RL_ASSERT(! (refs & 0x10U) && (old & 0xFU) == (&c - m_collectors));

            // increment and generate an odd reference count.
            if (c.m_count.fetch_add(refs + 0x10U, std::memory_order_release) == -refs)
            {
                // odd reference count and drop-to-zero condition detected!
                prv_quiesce_complete(c);
            }
        }
    }
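
    // Worked example of the count encoding (added commentary; the numbers
    // simply follow the code above): suppose two readers hold collector 0,
    // so m_current == 0x40 (two 0x20 references, index 0).
    // prv_quiesce_begin() swaps m_current over to index 1 and transfers
    // the master count: refs == 0x40, so c.m_count += 0x50 (0x40 plus the
    // 0x10 "quiesce" bit). Each release() then subtracts 0x20:
    // 0x50 -> 0x30, and the final release() observes 0x30, which triggers
    // prv_quiesce_complete(); its assert checks that only 0x10 remains.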

    void prv_quiesce_complete(collector& c)
    {
        // the collector `c' is now in a quiescent state! :^)
        std::atomic_thread_fence(std::memory_order_acquire);

        // maintain the back link and obtain "fresh" objects from
        // this collection.
        node* n = VAR(m_defer);
        VAR(m_defer) = c.m_defer.load(std::memory_order_relaxed);
        c.m_defer.store(NULL, std::memory_order_relaxed);

        // verify and reset the reference count.
        RL_ASSERT(c.m_count.load(std::memory_order_relaxed) == 0x10U);
        c.m_count.store(0, std::memory_order_relaxed);
        c.m_defer_count.store(0, std::memory_order_relaxed);

        // release the quiesce lock.
        m_quiesce.store(false, std::memory_order_release);

        // destroy nodes.
        prv_destroy(n);
    }



public:
    collector& acquire()
    {
        // increment the master count _and_ obtain current collector.
        unsigned int current =
            m_current.fetch_add(0x20U, std::memory_order_acquire);

        // decode the collector index.
        return m_collectors[current & 0xFU];
    }


    void release(collector& c)
    {
        // decrement the collector.
        unsigned int count =
            c.m_count.fetch_sub(0x20U, std::memory_order_release);

        // check for the completion of the quiescence process.
        if ((count & 0xFFFFFFF0U) == 0x30U)
        {
            // odd reference count and drop-to-zero condition detected!
            prv_quiesce_complete(c);
        }
    }

    collector& sync(collector& c)
    {
        // check whether `c' is in the middle of a quiescence process.
        if (c.m_count.load(std::memory_order_relaxed) & 0x10U)
        {
            // drop `c' and get the next collector.
            release(c);

            return acquire();
        }

        return c;
    }


    void collect()
    {
        prv_quiesce_begin();
    }


    void collect(collector& c, node* n)
    {
        if (! n) return;

        // link node into the defer list.
        node* prev = c.m_defer.exchange(n, std::memory_order_relaxed);
        VAR(n->m_defer_next) = prev;

        // bump the defer count and begin quiescence process if over
        // the limit.
        unsigned int count =
            c.m_defer_count.fetch_add(1, std::memory_order_relaxed) + 1;

        if (count >= (T_defer_limit / 2))
        {
            prv_quiesce_begin();
        }
    }
};
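

// A minimal usage sketch (added commentary, not part of the original file;
// `p', `s' and the traversal are hypothetical). A reader pins a collector
// around its whole traversal, while a writer defers reclamation through
// the proxy instead of deleting directly:
//
//   proxy<64> p;
//   stack s;
//
//   // reader:
//   proxy<64>::collector& c = p.acquire();
//   for (node* n = s.get_head(); n; )
//       n = n->m_next.load(std::memory_order_relaxed);
//   p.release(c);
//
//   // writer:
//   proxy<64>::collector& w = p.acquire();
//   p.collect(w, s.pop());   // deleted only after a full quiescence cycle.
//   p.release(w);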



// your basic lock-free stack...
// well, minus ABA counter and DWCAS of course! ;^)
class stack
{
    std::atomic<node*> m_head;



public:
    stack()
    :   m_head(NULL)
    {

    }



public:
    void push(node* n)
    {
        node* head = m_head.load(std::memory_order_relaxed);

        do
        {
            n->m_next.store(head, std::memory_order_relaxed);
        }

        while (! m_head.compare_exchange_weak(
                    head,
                    n,
                    std::memory_order_release));
    }


    node* flush()
    {
        return m_head.exchange(NULL, std::memory_order_acquire);
    }


    node* get_head()
    {
        return m_head.load(std::memory_order_acquire);
    }


    node* pop()
    {
        node* head = m_head.load(std::memory_order_acquire);
        node* xchg;

        do
        {
            if (! head) return NULL;

            xchg = head->m_next.load(std::memory_order_relaxed);
        }

        while (! m_head.compare_exchange_weak(
                    head,
                    xchg,
                    std::memory_order_acquire));

        return head;
    }
};
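
// Note (added commentary): pop() dereferences `head' to read m_next, so a
// concurrent `delete head' would be a use-after-free, and reuse of a freed
// node's address would make the CAS ABA-prone. Both hazards are avoided
// here by reclaiming nodes only through the proxy, after every pinned
// reader has moved on.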



#define ITERS 15
#define DEFER 6
#define WRITERS 2
#define READERS 3
#define REAPERS 1
#define THREADS (WRITERS + READERS + REAPERS)
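
// thread index layout in proxy_test::thread(): indices [0, READERS) run
// the reader loop, [READERS, READERS + WRITERS) run the writer loop, and
// the remaining REAPERS indices periodically force collection cycles.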

struct proxy_test
:   rl::test_suite<proxy_test, THREADS>
{
    typedef proxy<DEFER> proxy_type;


    proxy_type g_proxy;
    stack g_stack;
    unsigned g_writers; // for proper test termination only.


    void before()
    {
        g_writers = WRITERS;
    }


    void thread(unsigned int tidx)
    {
        rl::backoff b;

        if (tidx < READERS)
        {
            proxy_type::collector* c = &g_proxy.acquire();

            // readers.
            while (g_writers)
            {
                node* n = g_stack.get_head();

                while (n)
                {
                    node* next = n->m_next.load(std::memory_order_relaxed);
                    n = next;
                }

                c = &g_proxy.sync(*c);

                b.yield($);
            }

            g_proxy.release(*c);
        }

        else if (tidx < WRITERS + READERS)
        {
            // writers.
            unsigned count = 0;

            for (unsigned int i = 0; i < ITERS; ++i)
            {
                g_stack.push(new node);

                if (! (i % 2))
                {
                    proxy_type::collector& c = g_proxy.acquire();
                    g_proxy.collect(c, g_stack.pop());
                    g_proxy.release(c);
                    b.yield($);
                    ++count; // track pops so the drain loop pops the rest.
                }
            }

            // drain: pop and defer the remaining ITERS - count nodes.
            for (unsigned int i = count; i < ITERS; ++i)
            {
                proxy_type::collector& c = g_proxy.acquire();
                g_proxy.collect(c, g_stack.pop());
                g_proxy.release(c);
            }

            --g_writers;
        }

        else if (tidx < WRITERS + READERS + REAPERS)
        {
            // reapers.
            while (g_writers)
            {
                g_proxy.collect();
                b.yield($);
            }
        }
    }
};



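// Relacy drives proxy_test through many simulated executions. With the
// default random scheduler, iteration_count bounds how many interleavings
// are explored and execution_depth_limit aborts pathologically long runs;
// the commented-out search_type lines below select bounded or full
// exhaustive search instead.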
int main()
{
    {
        rl::test_params p;

        p.iteration_count = 10000000;
        p.execution_depth_limit = 10000;
        //p.search_type = rl::sched_bound;
        //p.search_type = rl::fair_full_search_scheduler_type;
        //p.search_type = rl::fair_context_bound_scheduler_type;
        rl::simulate<proxy_test>(p);
    }

    return 0;
}