// Experimental Read-Write Mutex Test
// More "realistic" test, in Relacy...
// By: Chris M. Thomasson
//_______________________________________________


//#define RL_DEBUGBREAK_ON_ASSERT
//#define RL_MSVC_OUTPUT
//#define RL_FORCE_SEQ_CST
//#define RL_GC


#include <relacy/relacy_std.hpp>
#include <climits> // LONG_MAX


// Simple macro based redirection of the verbose std membars.
#define CT_MB_ACQ std::memory_order_acquire
#define CT_MB_REL std::memory_order_release
#define CT_MB_RLX std::memory_order_relaxed
#define CT_MB_ACQ_REL std::memory_order_acq_rel
#define CT_MB_SEQ_CST std::memory_order_seq_cst
#define CT_MB(mp_0) std::atomic_thread_fence(mp_0)


#define READERS 4
#define WRITERS 2
#define THREADS (READERS + WRITERS)
#define ITERS 3


////////////////


// bare bones mutex/condvar based semaphore
struct ct_slow_semaphore
{
    VAR_T(unsigned long) m_state;
    std::mutex m_mutex;
    std::condition_variable m_cond;

    ct_slow_semaphore(unsigned long state) : m_state(state) {}

    void inc()
    {
        m_mutex.lock($);
        ++VAR(m_state);
        m_mutex.unlock($);
        m_cond.notify_one($);
    }

    void add(unsigned long addend)
    {
        m_mutex.lock($);
        VAR(m_state) += addend;
        m_mutex.unlock($);
        m_cond.notify_all($);
    }

    void dec()
    {
        m_mutex.lock($);
        while (VAR(m_state) == 0) m_cond.wait(m_mutex, $);
        --VAR(m_state);
        m_mutex.unlock($);
    }
};


// bin-sema
struct ct_auto_reset_event
{
    bool m_state;
    std::mutex m_mutex;
    std::condition_variable m_cond;

    ct_auto_reset_event() : m_state(false) {}

    void signal()
    {
        m_mutex.lock($);
        m_state = true;
        m_cond.notify_one($);
        m_mutex.unlock($);
    }

    void wait()
    {
        m_mutex.lock($);
        while (m_state == false) m_cond.wait(m_mutex, $);
        m_state = false; // auto-reset
        m_mutex.unlock($);
    }
};


// just a layer over an auto-reset event
struct ct_fast_mutex
{
    std::atomic<unsigned int> m_state;
    ct_auto_reset_event m_waitset;

    ct_fast_mutex() : m_state(0) {}

    void lock()
    {
        if (m_state.exchange(1, std::memory_order_acquire))
        {
            while (m_state.exchange(2, std::memory_order_acquire))
            {
                m_waitset.wait();
            }
        }
    }

    void unlock()
    {
        if (m_state.exchange(0, std::memory_order_release) == 2)
        {
            m_waitset.signal();
        }
    }
};


// Chris M. Thomasson's Experimental Read/Write Mutex
// Yeah, it is pretty damn fat wrt the state, however
// it has some interesting properties...
// The state can be compressed a bit...
// btw, it has no loops...
// Take a look at the lock_shared and unlock_shared functions

#define RWMUTEX_COUNT_MAX LONG_MAX

struct ct_rwmutex
{
    // shared state
    std::atomic<long> m_wrstate;
    std::atomic<long> m_count;
    std::atomic<long> m_rdwake;

    ct_slow_semaphore m_rdwset;
    ct_slow_semaphore m_wrwset;
    ct_fast_mutex m_wrlock;

    ct_rwmutex() :
        m_wrstate(1),
        m_count(RWMUTEX_COUNT_MAX),
        m_rdwake(0),
        m_rdwset(0),
        m_wrwset(0)
    {
    }

    // READ, pretty slim...
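    // How the counters fit together, as a reading aid (inferred from the
    // code below, not part of the original commentary):
    //   - m_count starts at RWMUTEX_COUNT_MAX, meaning "unlocked".
    //   - each reader takes 1 from m_count on entry and gives it back on
    //     exit; a prior value below 1 means a writer is in, so the reader
    //     blocks on m_rdwset.
    //   - a writer subtracts the whole RWMUTEX_COUNT_MAX, so a prior value
    //     below RWMUTEX_COUNT_MAX tells it exactly how many readers are
    //     still inside (RWMUTEX_COUNT_MAX - count); m_rdwake counts those
    //     readers down as they leave, and the last one out signals m_wrwset.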
    void lock_shared()
    {
        if (m_count.fetch_add(-1, std::memory_order_acquire) < 1)
        {
            m_rdwset.dec();
        }
    }

    void unlock_shared()
    {
        if (m_count.fetch_add(1, std::memory_order_release) < 0)
        {
            if (m_rdwake.fetch_add(-1, std::memory_order_acq_rel) == 1)
            {
                m_wrwset.inc();
            }
        }
    }

    // WRITE, more hefty
    void lock()
    {
        m_wrlock.lock();

        long count = m_count.fetch_add(-RWMUTEX_COUNT_MAX, std::memory_order_acquire);

        if (count < RWMUTEX_COUNT_MAX)
        {
            long rdwake = m_rdwake.fetch_add(RWMUTEX_COUNT_MAX - count, std::memory_order_acquire);

            if (rdwake + RWMUTEX_COUNT_MAX - count)
            {
                m_wrwset.dec();
            }
        }
    }

    // write unlock
    void unlock()
    {
        long count = m_count.fetch_add(RWMUTEX_COUNT_MAX, std::memory_order_release);

        if (count < 0)
        {
            m_rdwset.add(-count);
        }

        m_wrlock.unlock();
    }
};


////////////////


struct ct_simple_stack
{
    struct node
    {
        VAR_T(node*) m_next;
        VAR_T(unsigned int) m_tid;

        node(unsigned int tid) : m_tid(tid) {}
    };

    VAR_T(node*) m_head;

    ct_simple_stack() : m_head(nullptr) {}

    void push(node* n)
    {
        VAR(n->m_next) = VAR(m_head);
        VAR(m_head) = n;
    }

    node* pop()
    {
        node* n = VAR(m_head);

        if (n)
        {
            VAR(m_head) = VAR(n->m_next);
        }

        return n;
    }

    node* flush()
    {
        node* n = VAR(m_head);
        VAR(m_head) = nullptr;
        return n;
    }
};


void ct_destroy_nodes(ct_simple_stack& s)
{
    ct_simple_stack::node* n = s.flush();

    while (n)
    {
        ct_simple_stack::node* next = VAR(n->m_next);
        delete n;
        n = next;
    }
}


// Relacy Stack Test...
struct ct_fast_mutex_test : rl::test_suite<ct_fast_mutex_test, THREADS>
{
    ct_rwmutex g_rwmutex;
    ct_simple_stack g_stack;

    void before()
    {
    }

    void after()
    {
        ct_destroy_nodes(g_stack);
    }

    // reader
    void reader(unsigned int tidx)
    {
        g_rwmutex.lock_shared();

        for (unsigned int i = 0; i < ITERS; ++i)
        {
            ct_simple_stack::node* h = VAR(g_stack.m_head);

            while (h)
            {
                ct_simple_stack::node* next = VAR(h->m_next);

                if (i % 512 == 0)
                {
                    rl::yield(1, $);
                }

                h = next;
            }
        }

        g_rwmutex.unlock_shared();
    }

    // writer
    void writer(unsigned int tidx)
    {
        g_rwmutex.lock();
        g_stack.push(new ct_simple_stack::node(tidx));
        g_stack.push(new ct_simple_stack::node(tidx));
        g_rwmutex.unlock();

        g_rwmutex.lock();
        ct_simple_stack::node* n = g_stack.pop();
        g_rwmutex.unlock();

        if (n)
        {
            // destroy
            delete n;
        }
    }

    void thread(unsigned int tidx)
    {
        if (tidx < READERS)
        {
            reader(tidx);
        }
        else
        {
            writer(tidx);
        }
    }
};


// Test away... Or fly? Humm...
int main()
{
    {
        rl::test_params p;

        p.iteration_count = 10000000;
        //p.execution_depth_limit = 33333;
        //p.search_type = rl::sched_bound;
        //p.search_type = rl::fair_full_search_scheduler_type;
        //p.search_type = rl::fair_context_bound_scheduler_type;

        rl::simulate<ct_fast_mutex_test>(p);
    }

    return 0;
}
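

// Not part of the original test: a minimal RAII sketch of how the
// ct_rwmutex API above could be wrapped, in the spirit of std::lock_guard
// and std::shared_lock. The guard names are hypothetical.
struct ct_write_guard
{
    ct_rwmutex& m_lock;
    ct_write_guard(ct_rwmutex& lock) : m_lock(lock) { m_lock.lock(); }
    ~ct_write_guard() { m_lock.unlock(); }
};

struct ct_read_guard
{
    ct_rwmutex& m_lock;
    ct_read_guard(ct_rwmutex& lock) : m_lock(lock) { m_lock.lock_shared(); }
    ~ct_read_guard() { m_lock.unlock_shared(); }
};

// Usage sketch: { ct_read_guard g(rwmutex); /* iterate the stack... */ }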