#pragma once

#include "relacy/relacy.hpp"
#include <cstdint>
#include <cstdio> // for the scanf call in main

template <typename T, size_t size = 32, size_t cache_line_size = 64> class LockFreeMPMCQueue
{
    public:
    explicit LockFreeMPMCQueue()
    {
        m_data = new rl::var<T>[size];
        for (size_t i = 0; i < size; ++i)
        {
            // Pre-fill every slot with an obvious sentinel value so a read of a
            // never-written slot stands out (only meaningful for integer T, as in the test below)
            m_data[i]($) = size_t(-1);
        }
        m_head_1($) = 0;
        m_head_2($) = 0;
        m_tail_1($) = 0;
        m_tail_2($) = 0;
    }

    ~LockFreeMPMCQueue() { delete[] m_data; }

    bool try_enqueue( const T& value )
    {
        const std::uint64_t head = m_head_2($).load( rl::mo_relaxed );
        std::uint64_t tail = m_tail_1($).load( rl::mo_relaxed );

        const std::uint64_t count = tail - head;

        // count can exceed size if, between the read of head and the read of tail,
        // both head and tail have been advanced
        if( count >= size )
        {
            return false;
        }

        if( !m_tail_1($).compare_exchange_strong( tail, tail + 1, rl::mo_relaxed ) )
        {
            return false;
        }

        m_data[tail % size]($) = value;

        while( m_tail_2($).load( rl::mo_relaxed ) != tail )
        {
            rl::yield(1, $);
        }

        // Release - reads/writes before can't be reordered with writes after.
        // Make sure the write of the value to m_data is
        // not reordered past the write to m_tail_2
        rl::atomic_thread_fence( rl::mo_release, $ );
        m_tail_2($).store( tail + 1, rl::mo_relaxed );

        return true;
    }

    bool try_dequeue( T& out )
    {
        const std::uint64_t tail = m_tail_2($).load( rl::mo_relaxed );
        std::uint64_t head = m_head_1($).load( rl::mo_relaxed );

        if( head >= tail )
        {
            return false;
        }

        if( !m_head_1($).compare_exchange_strong( head, head + 1, rl::mo_relaxed ) )
        {
            return false;
        }

        // Acquire - reads/writes after can't be reordered with reads before.
        // Make sure this read of m_data[head] is not reordered before the load
        // of m_tail_2
        rl::atomic_thread_fence( rl::mo_acquire, $ );
        out = m_data[head % size]($);

        while( m_head_2($).load( rl::mo_relaxed ) != head )
        {
            rl::yield(1, $);
        }

        // Release - reads/writes before can't be reordered with writes after.
        // Make sure the read of the value from m_data is
        // not reordered past the write to m_head_2
        rl::atomic_thread_fence( rl::mo_release, $ );
        m_head_2($).store( head + 1, rl::mo_relaxed );

        return true;
    }

    size_t capacity() const { return size; }

    private:
    rl::var<T>* m_data;

    // Make sure each index is on its own cache line
    char _pad1[cache_line_size - 8];
    rl::atomic<std::uint64_t> m_head_1;
    char _pad2[cache_line_size - 8];
    rl::atomic<std::uint64_t> m_head_2;
    char _pad3[cache_line_size - 8];
    rl::atomic<std::uint64_t> m_tail_1;
    char _pad4[cache_line_size - 8];
    rl::atomic<std::uint64_t> m_tail_2;
};
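
// A minimal sketch (not part of the original paste) of how the Relacy-instrumented
// queue above maps onto plain std::atomic: rl::atomic<T> becomes std::atomic<T>,
// rl::var<T> becomes a plain T, the ($) instrumentation disappears, rl::mo_* become
// std::memory_order_*, and rl::yield becomes std::this_thread::yield(). Only the
// enqueue path is shown, the cache-line padding is omitted, and the name
// LockFreeMPMCQueueStd is made up for illustration.
#include <atomic>
#include <thread>

template <typename T, size_t size = 32> class LockFreeMPMCQueueStd
{
    public:
    bool try_enqueue( const T& value )
    {
        const std::uint64_t head = m_head_2.load( std::memory_order_relaxed );
        std::uint64_t tail = m_tail_1.load( std::memory_order_relaxed );

        if( tail - head >= size )
        {
            return false; // queue appears full
        }

        // Reserve a slot by advancing the producers' reservation index
        if( !m_tail_1.compare_exchange_strong( tail, tail + 1, std::memory_order_relaxed ) )
        {
            return false;
        }

        m_data[tail % size] = value;

        // Wait until all earlier producers have published their slots
        while( m_tail_2.load( std::memory_order_relaxed ) != tail )
        {
            std::this_thread::yield();
        }

        // Release fence: the write to m_data must become visible before the
        // publication store to m_tail_2
        std::atomic_thread_fence( std::memory_order_release );
        m_tail_2.store( tail + 1, std::memory_order_relaxed );

        return true;
    }

    private:
    T m_data[size] = {};
    std::atomic<std::uint64_t> m_head_2{ 0 };
    std::atomic<std::uint64_t> m_tail_1{ 0 };
    std::atomic<std::uint64_t> m_tail_2{ 0 };
};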

const size_t num_values = 32;
const size_t num_threads = 2;
const size_t num_values_per_thread = num_values / num_threads;

// Two producer threads and two consumer threads move num_values items through the
// queue; after() then checks that every value was dequeued exactly once.
struct queue_test : rl::test_suite<queue_test, num_threads + num_threads>
{
    LockFreeMPMCQueue<size_t> queue;
    bool data[num_values];

    void before()
    {
        for (size_t i = 0; i < num_values; ++i)
        {
            data[i] = false;
        }
    }

    void after()
    {
        for (size_t i = 0; i < num_values; ++i)
        {
            RL_ASSERT(data[i] == true);
        }
    }

    void thread(unsigned index)
    {
        // writers
        if (index <= 1)
        {
            const size_t offset = index * num_values_per_thread;
            for (size_t i = 0; i < num_values_per_thread; ++i)
            {
                while (!queue.try_enqueue(offset + i)) { rl::yield(1, $); }
            }
        }
        // readers
        else
        {
            for (size_t i = 0; i < num_values_per_thread; ++i)
            {
                size_t value;
                while (!queue.try_dequeue(value)) { rl::yield(1, $); }
                // each value must come out at most once
                RL_ASSERT(data[value] == false);
                data[value] = true;
            }
        }
    }
};

int main( int argc, char* argv[] )
{
    rl::simulate<queue_test>();

    // Wait for a keypress so the console window stays open
    char c;
    scanf( "%c", &c );

    return 0;
}
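
// A hypothetical variant of main, assuming Relacy's rl::test_params and its
// iteration_count field (as used in the library's own samples), to run more random
// interleavings than the default simulate() call above. Kept disabled so the file
// still builds unchanged.
#if 0
int main( int argc, char* argv[] )
{
    rl::test_params params;
    params.iteration_count = 1000000; // explore more interleavings
    rl::simulate<queue_test>( params );
    return 0;
}
#endif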