Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- /*
- Word-Based Portable Proxy Garbage Collector
- Copyright (C) 2013 Christopher Michael Thomasson
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
// Relacy configuration switches (all left disabled by default).
//#define RL_DEBUGBREAK_ON_ASSERT
//#define RL_GC
//#define RL_MSVC_OUTPUT
//#define RL_FORCE_SEQ_CST

#include <relacy/relacy_std.hpp>
#include <cstdio>
#include <cstddef>

// Debug printf, compiled out in NDEBUG builds.
// Call with double parens: DBG_PRINTF(("fmt", args...)).
#if ! defined (NDEBUG)
#   define DBG_PRINTF(e) std::printf e
#else
#   define DBG_PRINTF(e) ((void)0)
#endif

// Short-hand aliases for the std:: memory orders and thread fences.
#define mb_relaxed std::memory_order_relaxed
#define mb_acquire std::memory_order_acquire
#define mb_release std::memory_order_release
#define mb_acq_rel std::memory_order_acq_rel
#define mb_seq_cst std::memory_order_seq_cst
#define MB_FENCE(mb_mp) std::atomic_thread_fence(mb_mp)
#define mb_fence(mb_mp) MB_FENCE(mb_mp)
// Word type holding the proxy's packed version/reference counts.
typedef unsigned int ver_uint_type;

// Intrusive node used by both the lock-free stack and the proxy
// collector's deferred-deletion chains.
struct node
{
    std::atomic<node*> m_next;      // lock-free stack link
    VAR_T(node*) m_defer_next;      // deferred-deletion chain link (Relacy-checked plain var)

    node()
    :   m_next(NULL),
        m_defer_next(NULL)
    {
    }
};
// Word-based proxy garbage collector.  Readers take a reference on the
// current "collector" epoch; writers retire nodes into a new epoch via
// collect().  Once every reader that could still see a retired node has
// released its reference, that collector (and its deferred node chain)
// is deleted.
//
// As used by the code below, the global m_count word is packed so that
// bit 0 is a busy flag held while a collection swap is in progress, the
// remaining low 16 bits act as a version, and each outstanding reader
// reference adds 0x00020000 to the high 16 bits.
class proxy_gc
{
    // Delete an entire chain of deferred nodes linked via m_defer_next.
    static void prv_destroy(node* n)
    {
        while (n)
        {
            node* next = VAR(n->m_defer_next);
            delete n;
            n = next;
        }
    }

public:
    // One collection epoch.  m_count here is a local reference word:
    // each reader reference is worth 2, and the low bit is set once the
    // epoch has been retired; a value of exactly 1 therefore means
    // "retired with no outstanding references".
    struct collector
    {
        std::atomic<ver_uint_type> m_count;
        std::atomic<collector*> m_next;     // next (newer) collector epoch
        VAR_T(node*) m_defer;               // nodes retired into this epoch

        collector()
        :   m_count(0x00000000U),
            m_next(NULL),
            m_defer(NULL)
        {
        }

        collector(ver_uint_type count)
        :   m_count(count),
            m_next(NULL),
            m_defer(NULL)
        {
        }

        ~collector()
        {
            // Destroying a collector frees every node deferred into it.
            node* defer = VAR(m_defer);
            prv_destroy(defer);
        }
    };

private:
    std::atomic<ver_uint_type> m_count;     // packed busy/version/ref word (see class comment)
    std::atomic<collector*> m_collector;    // current epoch
    std::mutex m_mutex;                     // serializes collection swaps

public:
    proxy_gc()
    :   m_count(0x00000000U),
        m_collector(new collector())
    {
    }

    ~proxy_gc()
    {
        // Final swap to a NULL collector; this retires and frees the
        // last epoch and leaves m_collector NULL.
        prv_collect_collector_ref(NULL, NULL);
    }

private:
    // Take a reader reference on the current collector.
    collector*
    prv_acquire_collector_ref()
    {
        ver_uint_type count = m_count.load(mb_relaxed);
        collector* c = NULL;
        rl::linear_backoff backoff;
        for (;;)
        {
            // Bit 0 set means a collection swap is in flight; spin.
            if (count & 0x00000001U)
            {
                backoff.yield($);
                count = m_count.load(mb_relaxed);
                continue;
            }
            mb_fence(mb_acquire);
            c = m_collector.load(mb_relaxed);
            // Add one reference (0x00020000) iff the word is unchanged;
            // success also re-validates the pointer read just above.
            if (m_count.compare_exchange_weak(
                    count,
                    count + 0x00020000U,
                    mb_relaxed))
            {
                break;
            }
        }
        mb_fence(mb_acquire);
        return c;
    }

    // Drop a reader reference (worth 2 in the collector's local count).
    void
    prv_release_collector_release(collector* c)
    {
        mb_fence(mb_release);
        ver_uint_type count = c->m_count.fetch_sub(0x00000002U, mb_relaxed);
        // Previous value 3 == retired flag (1) + this final reference
        // (2): we were the last user of an already-retired epoch.
        if (count == 0x00000003U)
        {
            prv_dtor_collector(c);
        }
    }

    // Swap in collector nc (carrying the deferred chain `defer`) and
    // retire the previous epoch, transferring its outstanding reader
    // references from the global word into its local count.
    void
    prv_collect_collector_ref(collector* nc, node* defer)
    {
        if (nc)
        {
            VAR(nc->m_defer) = defer;
        }
        m_mutex.lock($);
        // Set the busy bit so new acquires spin during the swap.
        ver_uint_type count1 = m_count.fetch_add(0x00000001U, mb_relaxed);
        collector* oc = m_collector.load(mb_relaxed);
        m_collector.store(nc, mb_relaxed);
        oc->m_next.store(nc, mb_relaxed);
        ver_uint_type count2 = count1 + 0x00000001U;
        // New word: clear the high-half reference count and the busy
        // bit, stepping the low-half version forward.
        ver_uint_type xchg = (count2 + 0x00000001U) & 0x0000FFFFU;
        mb_fence(mb_release);
        // Must succeed: the busy bit blocks acquires from changing the
        // word and the mutex excludes concurrent collectors.
        if (! m_count.compare_exchange_strong(
                count2,
                xchg,
                mb_relaxed))
        {
            RL_ASSERT(false);
        }
        m_mutex.unlock($);
        // Move the snapshot of outstanding references (2 per reader,
        // from the high half) into the old collector, plus 1 to set its
        // retired flag.
        ver_uint_type count = count1 & 0xFFFF0000U;
        count >>= 0x00000010U;
        ver_uint_type ocount =
            oc->m_count.fetch_add(count + 0x00000001U, mb_relaxed);
        // ocount == -count (unsigned wraparound intended) makes the new
        // local count exactly 1: retired with zero live references.
        if (ocount == -count)
        {
            prv_dtor_collector(oc);
        }
    }

    // Destroy a dead collector, cascading into successor epochs whose
    // only remaining reference was the link we are now dropping.
    void
    prv_dtor_collector(collector* c)
    {
        mb_fence(mb_acquire);
        collector* head = c;
        collector* tail = c;
        collector* next = c->m_next.load(mb_relaxed);
        c->m_next.store(NULL, mb_relaxed);
        while (next)
        {
            // Drop the reference the dying predecessor held on `next`.
            ver_uint_type next_count = next->m_count.fetch_sub(0x00000002U, mb_relaxed);
            if (next_count != 0x00000003U)
            {
                break;
            }
            mb_fence(mb_acquire);
            collector* next2 = next->m_next.load(mb_relaxed);
            DBG_PRINTF(("prv_dtor_collector: collected! - %u - %p - %u\n",
                next_count,
                c,
                next_count));
            // `next` is dead too: splice it onto the local kill list
            // (reusing m_next as the list link) and keep walking.
            next->m_next.store(NULL, mb_relaxed);
            tail->m_next.store(next, mb_relaxed);
            tail = next;
            next = next2;
        }
        // Delete the whole kill list; each ~collector frees its nodes.
        while (head)
        {
            collector* next = head->m_next.load(mb_relaxed);
            delete head;
            head = next;
        }
    }

public:
    // Acquire a reference to the current collection epoch.
    collector*
    acquire()
    {
        collector* c = prv_acquire_collector_ref();
        return c;
    }

    // Release a reference previously returned by acquire().
    void
    release(collector* c)
    {
        prv_release_collector_release(c);
    }

    // Retire `defer` (a chain linked via m_defer_next; may be NULL) by
    // starting a new collection epoch.
    void
    collect(node* defer = NULL)
    {
        // Initial local count 2 appears to be the link reference a
        // predecessor epoch holds on this one (see prv_dtor_collector's
        // fetch_sub(2) on each successor).
        collector* nc = new collector(0x00000002U);
        prv_collect_collector_ref(nc, defer);
    }
};
// your basic lock-free stack...
// well, minus ABA counter and DWCAS of course! ;^)
- class stack
- {
- std::atomic<node*> m_head;
- public:
- stack()
- : m_head(NULL)
- {
- }
- public:
- void push(node* n)
- {
- node* head = m_head.load(std::memory_order_relaxed);
- do
- {
- n->m_next.store(head, std::memory_order_relaxed);
- mb_fence(mb_release);
- }
- while (! m_head.compare_exchange_weak(
- head,
- n,
- mb_relaxed));
- }
- node* flush()
- {
- node* n = m_head.exchange(NULL, mb_relaxed);
- if (n) mb_fence(mb_acquire);
- return n;
- }
- node* get_head()
- {
- node* n = m_head.load(mb_relaxed);
- if (n) mb_fence(mb_acquire);
- return n;
- }
- node* pop()
- {
- node* head = m_head.load(mb_relaxed);
- node* xchg;
- do
- {
- if (! head) return NULL;
- mb_fence(mb_acquire);
- xchg = head->m_next.load(std::memory_order_relaxed);
- }
- while (! m_head.compare_exchange_weak(
- head,
- xchg,
- mb_relaxed));
- return head;
- }
- };
// Simulation parameters: per-writer iteration count and thread mix.
#define ITERS 7
// NOTE(review): DEFER is not referenced anywhere in this file --
// confirm whether it is leftover from an earlier revision.
#define DEFER 6
#define WRITERS 2
#define READERS 4
#define REAPERS 1
#define THREADS (WRITERS + READERS + REAPERS)

typedef proxy_gc proxy_gc_type;
// Relacy test suite exercising proxy_gc against a shared lock-free
// stack: READERS threads iterate the stack under a collector reference,
// WRITERS threads push/pop nodes and retire popped nodes through
// collect(), and REAPERS threads force empty collection cycles.
struct proxy_test
:   rl::test_suite<proxy_test, THREADS>
{
    proxy_gc_type g_proxy_gc;
    stack g_stack;
    // Number of writer threads still active; readers and reapers loop
    // until it reaches zero.
    // NOTE(review): shared across threads as a plain unsigned int (not
    // VAR_T/atomic) -- confirm it is intended as an unchecked flag.
    unsigned int g_writers;

    void before()
    {
        g_writers = WRITERS;
    }

    void thread(unsigned int tidx)
    {
        rl::backoff backoff;
        if (tidx < READERS)
        {
            // Reader: repeatedly walk the stack while holding a
            // collector reference, which keeps every node it can see
            // alive even if a writer pops and retires it concurrently.
            DBG_PRINTF(("reader thread entry(%d)\n", tidx));
            while (g_writers)
            {
                backoff.yield($);
                proxy_gc_type::collector* c = g_proxy_gc.acquire();
                node* n = g_stack.get_head();
                while (n)
                {
                    node* next = n->m_next.load(mb_relaxed);
                    backoff.yield($);
                    DBG_PRINTF(("reader thread(%d) - READ (%p:%p)\n", tidx, c, n));
                    n = next;
                }
                g_proxy_gc.release(c);
            }
            DBG_PRINTF(("reader thread exit(%d)\n", tidx));
        }
        else if (tidx < WRITERS + READERS)
        {
            // Writer: push ITERS nodes, randomly popping and retiring
            // some along the way, then drain/retire ITERS more times.
            DBG_PRINTF(("writer thread entry(%d)\n", tidx));
            node* n;
            for (unsigned int i = 0; i < ITERS; ++i)
            {
                n = new node();
                g_stack.push(n);
                DBG_PRINTF(("writer thread(%d) - WRITE PUSH (%p)\n", tidx, n));
                if (! (i % (rl::rand(4) + 1)))
                {
                    proxy_gc_type::collector* c = g_proxy_gc.acquire();
                    backoff.yield($);
                    n = g_stack.pop();
                    g_proxy_gc.release(c);
                    DBG_PRINTF(("writer thread(%d) - WRITE POP (%p:%p)\n", tidx, n, c));
                    // Retire the popped node (possibly NULL) into a
                    // fresh collection epoch.
                    g_proxy_gc.collect(n);
                }
            }
            for (unsigned int i = 0; i < ITERS; ++i)
            {
                proxy_gc_type::collector* c = g_proxy_gc.acquire();
                n = g_stack.pop();
                backoff.yield($);
                g_proxy_gc.release(c);
                g_proxy_gc.collect(n);
            }
            --g_writers;
            DBG_PRINTF(("writer thread exit(%d)\n", tidx));
        }
        else
        {
            // Reaper: forces collection cycles with no deferred nodes
            // while any writer is still running.
            DBG_PRINTF(("reaper thread entry(%d)\n", tidx));
            while (g_writers)
            {
                g_proxy_gc.collect();
                backoff.yield($);
            }
            DBG_PRINTF(("reaper thread exit(%d)\n", tidx));
        }
    }
};
- int main()
- {
- {
- rl::test_params p;
- p.iteration_count = 30000000;
- p.execution_depth_limit = 10000;
- // p.context_bound = 3;
- // p.search_type = rl::sched_bound;
- // p.search_type = rl::fair_full_search_scheduler_type;
- //p.search_type = rl::fair_context_bound_scheduler_type;
- rl::simulate<proxy_test>(p);
- }
- std::puts("\n\n____________________________\nTesting Complete!");
- std::getchar();
- return 0;
- }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement