Advertisement
Guest User

Chris M Thomasson

a guest
May 26th, 2009
946
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 9.34 KB | None | 0 0
/* Debugging Support
______________________________________________________________*/
#include <assert.h>




#if ! defined (NDEBUG)
#  include <stdio.h>
   /* DBG_PRINTF((...)): double-parenthesized printf wrapper so a
      variable argument list can pass through a C89 macro; usage:
      DBG_PRINTF(("x = %d\n", x)).  Compiles to nothing with NDEBUG. */
#  define DBG_PRINTF(mp_exp) printf mp_exp
#else
#  define DBG_PRINTF(mp_exp) ((void)0)
#endif
  14.  
  15.  
  16.  
  17.  
  18.  
  19.  
  20.  
  21.  
/* x86-32 Atomic API (Fetch-and-Add only!)
______________________________________________________________*/
#include <limits.h>


/* 32-bit signed/unsigned words operated on by the fetch-and-add below. */
typedef int atomic_x86_word32;
typedef unsigned int atomic_x86_uword32;
#define ATOMIC_X86_WORD32_MAX (INT_MAX)
/* Assumed L2 cache-line size; used to pad/align shared data. */
#define ATOMIC_X86_L2CACHE (128U)


/* Compile-time assertion (negative array size on failure): this code
   only builds where both word types AND pointers are exactly 32 bits
   wide, i.e. an x86-32 target. */
typedef char atomic_x86_static_assert[
 sizeof(atomic_x86_word32) == 32 / CHAR_BIT &&
 sizeof(atomic_x86_uword32) == 32 / CHAR_BIT &&
 sizeof(void*) == 32 / CHAR_BIT ? 1 : -1
];
  38.  
  39.  
/* atomic_x86_faa32: atomically add `addend` to *self and return the
   PREVIOUS value (x86 LOCK XADD).  This is the only atomic primitive
   the rwmutex below relies on. */
#if defined (_MSC_VER)
  /* naked: no compiler prologue/epilogue; cdecl arguments are read
     straight off the stack ([ESP+4] = self, [ESP+8] = addend) and the
     result is returned in EAX. */
  __declspec(naked)
  static atomic_x86_word32
  atomic_x86_faa32(
   atomic_x86_word32 volatile* self,
   atomic_x86_word32 const addend
  ) {
    _asm {
      MOV ECX, [ESP + 4]
      MOV EAX, [ESP + 8]
      LOCK XADD [ECX], EAX
      RET
    }
  }
#elif defined (__GNUC__)
  __attribute__((always_inline))
  static __inline__
  atomic_x86_word32
  atomic_x86_faa32(
   atomic_x86_word32 volatile* self,
   atomic_x86_word32 const addend
  ) {
    atomic_x86_word32 ret;
   
    /* "0"(addend) places the addend in the same register as `ret`;
       XADD exchanges it with the old *self while adding.  The "memory"
       clobber makes this a full compiler barrier. */
    __asm__ __volatile__(
      "LOCK XADDL %2, %1;\n"
      : "=&r" (ret),
        "=m"  (*self)
      : "0"   (addend)
      : "memory",
        "cc"
    );
   
    return ret;
  }
#else
#  error Compiler Not Supported!
#endif
  78.  
  79.  
/* Architecture-neutral names used by the rest of the file; swap these
   aliases to port the rwmutex to another fetch-and-add implementation. */
typedef atomic_x86_word32 atomic_word32;
#define ATOMIC_WORD32_MAX ATOMIC_X86_WORD32_MAX
#define ATOMIC_L2CACHE ATOMIC_X86_L2CACHE
#define atomic_faa32 atomic_x86_faa32
  84.  
  85.  
  86.  
  87.  
  88.  
  89.  
  90.  
  91.  
/* Alignment Support
______________________________________________________________*/
#include <stddef.h>


/* Integer type wide enough to round-trip a pointer on x86-32 (the
   static assert above enforces sizeof(void*) == 4). */
typedef atomic_x86_uword32 uintptr_type;


/* Round mp_ptr up to the next mp_align boundary.
   mp_align must be a power of two. */
#define ALIGN_UP(mp_ptr, mp_align) \
  ((void*)( \
    (((uintptr_type)(mp_ptr)) + ((mp_align) - 1U)) \
    & ~(((mp_align) - 1U)) \
  ))


/* Non-zero when mp_ptr is already aligned to mp_align. */
#define ALIGN_ASSERT(mp_ptr, mp_align) \
  (((void*)(mp_ptr)) == ALIGN_UP(mp_ptr, mp_align))


/* Buffer size needed so mp_size bytes fit at ANY starting alignment:
   the worst case wastes mp_align - 1 leading bytes. */
#define ALIGN_BUFSIZE(mp_size, mp_align) \
  ((size_t)(((mp_size) + (mp_align) - 1U)))
  114.  
  115.  
  116.  
  117.  
  118.  
  119.  
  120.  
/* Read/Write Mutex By Chris M. Thomasson
______________________________________________________________*/
/* NOTE(review): _XOPEN_SOURCE is defined here AFTER several standard
   headers (<assert.h>, <stdio.h>, <limits.h>) have already been
   included above; feature-test macros only take effect when defined
   before the FIRST standard header.  Consider moving this (or a
   compiler flag) to the very top of the translation unit. */
#define _XOPEN_SOURCE 600


#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>
#include <errno.h>




/* Define SEM_USE_POST_MULTIPLE on platforms providing the
   sem_post_multiple() extension (e.g. pthreads-win32). */
/* #define SEM_USE_POST_MULTIPLE */
/* Maximum number of concurrent readers / initial free read slots. */
#define RWMUTEX_COUNT_MAX ATOMIC_WORD32_MAX
  136.  
  137.  
  138.  
  139.  
  140. static void
  141. sem_wait_safe(
  142.  sem_t* const self
  143. ) {
  144.   while (sem_wait(self)) {
  145.     if (errno != EINTR) abort();
  146.   }
  147. }
  148.  
  149.  
  150. static void
  151. sem_post_multiple_safe(
  152.  sem_t* const self,
  153.  int count
  154. ) {
  155. #if defined (SEM_USE_POST_MULTIPLE)
  156.   sem_post_multiple(self, count);
  157. #else
  158.   while (count) {
  159.     sem_post(self);
  160.     --count;
  161.   }
  162. #endif
  163. }
  164.  
  165.  
/* Shared (process-shared-capable) portion of the rwmutex: two plain
   words manipulated only with atomic fetch-and-add.
   count  - free read slots; starts at RWMUTEX_COUNT_MAX, decremented
            per reader, drained by RWMUTEX_COUNT_MAX per writer.
   rdwake - count of active readers a pending writer must wait out. */
struct rwmutex_shm {
  atomic_word32 count;
  atomic_word32 rdwake;
};
  170.  
  171.  
/* Per-process portion of the rwmutex: wait-sets plus the writer mutex.
   shm    - the shared counters (see struct rwmutex_shm).
   rdwset - semaphore readers block on while a writer holds the lock.
   wrwset - semaphore a writer blocks on while active readers drain.
   wrmtx  - serializes writers (and read-to-write upgraders). */
struct rwmutex {
  struct rwmutex_shm* shm;
  sem_t rdwset;
  sem_t wrwset;
  pthread_mutex_t wrmtx;
};
  178.  
  179.  
  180. static void
  181. rwmutex_shm_init(
  182.  struct rwmutex_shm* const self
  183. ) {
  184.   self->count = RWMUTEX_COUNT_MAX;
  185.   self->rdwake = 0;
  186. }
  187.  
  188.  
  189. static int
  190. rwmutex_create(
  191.  struct rwmutex* const self,
  192.  struct rwmutex_shm* shm
  193. ) {
  194.   int state = sem_init(&self->rdwset, 0, 0);
  195.   if (! state) {
  196.     state = sem_init(&self->wrwset, 0, 0);
  197.     if (! state) {
  198.       state = pthread_mutex_init(&self->wrmtx, NULL);
  199.       if (! state) {
  200.         self->shm = shm;
  201.         return 0;
  202.       }
  203.       sem_destroy(&self->wrwset);
  204.     }
  205.     sem_destroy(&self->rdwset);
  206.   }
  207.   return state;
  208. }
  209.  
  210.  
/* Destroy the per-process parts of the rwmutex, in the reverse order
   of rwmutex_create().  The shared counters in self->shm are left
   untouched (they may outlive this process). */
static void
rwmutex_destroy(
 struct rwmutex* const self
) {
  pthread_mutex_destroy(&self->wrmtx);
  sem_destroy(&self->wrwset);
  sem_destroy(&self->rdwset);
}
  219.  
  220.  
/* Acquire shared (reader) access.
   Atomically takes one read slot; fetch-and-add returns the PREVIOUS
   value of count, so a result below 1 means a writer owns (or is
   acquiring) the lock and this reader must block on the reader
   wait-set until the writer releases it. */
static void
rwmutex_rdlock(
 struct rwmutex* const self
) {
  if (atomic_faa32(&self->shm->count, -1) < 1) {
    sem_wait_safe(&self->rdwset);
  }
}
  229.  
  230.  
/* Release shared (reader) access.
   Returns the read slot; a negative previous count means a writer is
   waiting for the active readers to drain.  Each departing reader
   decrements rdwake, and the one that takes it to zero (previous value
   == 1) wakes the writer. */
static void
rwmutex_rdunlock(
 struct rwmutex* const self
) {
  if (atomic_faa32(&self->shm->count, 1) < 0) {
    if (atomic_faa32(&self->shm->rdwake, -1) == 1) {
      sem_post(&self->wrwset);
    }
  }  
}
  241.  
  242.  
/* Try to upgrade a held read lock to a write lock without releasing it.
   Returns 0 on success; EBUSY if another writer/upgrader already owns
   wrmtx, in which case the caller still holds its read lock unchanged.
   On success the caller must later release with rwmutex_wrunlock(). */
static int
rwmutex_rdtowrlock(
 struct rwmutex* const self
) {
  if (! pthread_mutex_trylock(&self->wrmtx)) {
    /* Claim all read slots; the trailing "+ 1" folds in the slot this
       thread already holds as a reader, so it does not wait on itself. */
    atomic_word32 count =
      atomic_faa32(&self->shm->count, (-RWMUTEX_COUNT_MAX) + 1) + 1;
    if (count < RWMUTEX_COUNT_MAX) {
      /* Other readers are still active: publish how many must leave
         via rdwake, and sleep unless they have all already exited
         (the sum below is zero in that case). */
      if (atomic_faa32(&self->shm->rdwake, RWMUTEX_COUNT_MAX - count)
            + RWMUTEX_COUNT_MAX - count) {
        sem_wait_safe(&self->wrwset);
      }
    }
    return 0;
  }
  return EBUSY;
}
  260.  
  261.  
/* Acquire exclusive (writer) access.
   Serializes against other writers via wrmtx, then atomically drains
   all RWMUTEX_COUNT_MAX read slots.  A previous count below the
   maximum means readers were active: publish how many must still leave
   via rdwake, and sleep on the writer wait-set unless all of them have
   already exited (the fetch-and-add sum is zero in that case). */
static void
rwmutex_wrlock(
 struct rwmutex* const self
) {
  atomic_word32 count;
  pthread_mutex_lock(&self->wrmtx);
  count = atomic_faa32(&self->shm->count, -RWMUTEX_COUNT_MAX);
  if (count < RWMUTEX_COUNT_MAX) {
    if (atomic_faa32(&self->shm->rdwake, RWMUTEX_COUNT_MAX - count)
          + RWMUTEX_COUNT_MAX - count) {
      sem_wait_safe(&self->wrwset);
    }
  }
}
  276.  
  277.  
/* Release exclusive (writer) access.
   Restores all RWMUTEX_COUNT_MAX read slots; a negative previous count
   means -count readers queued up while the writer held the lock, so
   exactly that many are released from the reader wait-set before the
   writer mutex is dropped. */
static void
rwmutex_wrunlock(
 struct rwmutex* const self
) {
  atomic_word32 count =
    atomic_faa32(&self->shm->count, RWMUTEX_COUNT_MAX);
  if (count < 0) {
    sem_post_multiple_safe(&self->rdwset, -count);
  }
  pthread_mutex_unlock(&self->wrmtx);
}
  289.  
  290.  
  291.  
  292.  
  293.  
  294.  
  295.  
  296.  
/* Simple Test Application
______________________________________________________________*/
#include <stdio.h>
#include <sched.h>




/* Test tuning knobs: iteration counts, the reader/writer thread mix,
   and how often each role calls sched_yield() to shake out unlucky
   interleavings. */
#define ITERS 99999U
#define RUNS 10U
#define READERS 7U
#define WRITERS 3U
#define THREADS (READERS + WRITERS)
#define READER_TO_WRITER 12U
#define READER_LOCK_YIELD 10U
#define READER_UNLOCK_YIELD 10U
#define WRITER_LOCK_YIELD 25U
#define WRITER_UNLOCK_YIELD 4U
/* Each writer adds 2 to state per iteration; upgraded readers move 2
   from state to rdtowr, so the SUM state + rdtowr is the invariant. */
#define COMPLETE ((WRITERS * ITERS) * 2U)
#define IS_COMPLETE() \
  (g_shm->state + g_shm->rdtowr == COMPLETE)
  318.  
  319.  
  320.  
  321.  
/* Shared test state.
   shm    - rwmutex shared counters, padded to a full cache line.
   state  - incremented by 2 per iteration under the write lock.
   rdtowr - incremented by 2 by readers that upgrade to writers, which
            also decrement state by 2; state + rdtowr is the invariant
            checked by IS_COMPLETE(). */
struct shm {
  struct rwmutex_shm shm;
  char pad[ATOMIC_L2CACHE - sizeof(struct rwmutex_shm)];
  unsigned volatile state;
  unsigned volatile rdtowr;
};
  328.  
  329.  
  330. static unsigned char* g_raw_shm[
  331.  ALIGN_BUFSIZE(sizeof(struct shm), ATOMIC_L2CACHE)
  332. ];
  333.  
  334.  
/* Cache-aligned view into g_raw_shm (set at the top of main) and the
   single rwmutex exercised by all test threads. */
static struct shm* g_shm;
static struct rwmutex g_rwmutex;
  337.  
  338.  
  339.  
  340.  
  341. static void
  342. display_prompt(
  343.  char const* msg,
  344.  int fatal
  345. ) {
  346.   printf("\n\n\n____________________________________________\n"
  347.          "%s\n\nPress <ENTER> to exit...", msg);
  348.   fflush(stdout);
  349.   fflush(stdin);
  350.   fflush(stderr);
  351.   getchar();
  352.   if (fatal) abort();
  353. }
  354.  
  355.  
  356.  
  357.  
/* Reader thread body: repeatedly takes the read lock, validates the
   counter invariants, and every READER_TO_WRITER iterations attempts a
   read-to-write upgrade.  Loops until IS_COMPLETE() shows all writer
   work has been accounted for.  `state` (the pthread arg) is unused. */
static void*
reader(
 void* state
) {
  unsigned i;
  DBG_PRINTF(("reader running\n"));

  for (i = 0; ; ++i) {
    rwmutex_rdlock(&g_rwmutex);

    if (! (i % READER_TO_WRITER)) {
      if (! rwmutex_rdtowrlock(&g_rwmutex)) {

        /* Exclusive access held: move 2 counts from state to rdtowr,
           yielding mid-update so a broken lock would expose itself. */
        ++g_shm->rdtowr;
        if (! (i % WRITER_LOCK_YIELD)) sched_yield();
        --g_shm->state;
        if (! (i % WRITER_LOCK_YIELD)) sched_yield();
        ++g_shm->rdtowr;
        if (! (i % WRITER_LOCK_YIELD)) sched_yield();
        --g_shm->state;

        /* Both counters must be even outside the middle of an update. */
        if (g_shm->state % 2U || g_shm->rdtowr % 2U) {
          display_prompt("READER-TO-WRITER: COUNTER OUT-OF-SYNC!!!!", 1);
        }

        DBG_PRINTF(("reader-to-writer (%u of %u)\n",
          g_shm->state + g_shm->rdtowr, COMPLETE));
        /* Releasing the write lock drops our read access too, so the
           read lock is reacquired before the read-side checks below. */
        rwmutex_wrunlock(&g_rwmutex);
        if (! (i % WRITER_UNLOCK_YIELD)) sched_yield();
        rwmutex_rdlock(&g_rwmutex);
      }
    }

    if (! (i % READER_LOCK_YIELD)) sched_yield();

    DBG_PRINTF(("reader (%u of %u)\n",
      g_shm->state + g_shm->rdtowr, COMPLETE));

    /* Under the read lock, state must never be observed mid-update. */
    if (g_shm->state % 2U) {
      display_prompt("READER: COUNTER OUT-OF-SYNC!!!!", 1);
    }

    if (IS_COMPLETE()) {
      rwmutex_rdunlock(&g_rwmutex);
      break;
    }

    rwmutex_rdunlock(&g_rwmutex);

    if (! (i % READER_UNLOCK_YIELD)) sched_yield();
  }

  DBG_PRINTF(("reader completed\n"));
  return NULL;
}
  413.  
  414.  
/* Writer thread body: ITERS times, takes the write lock and increments
   state by 2 with a yield in the middle; observing an odd state after
   the second increment means mutual exclusion was violated.  `state`
   (the pthread arg) is unused. */
static void*
writer(
 void* state
) {
  unsigned i;
  DBG_PRINTF(("writer running\n"));

  for (i = 0; i < ITERS; ++i) {
    rwmutex_wrlock(&g_rwmutex);

    ++g_shm->state;

    /* Yield mid-update to invite interference if the lock is broken. */
    if (! (i % WRITER_LOCK_YIELD)) sched_yield();

    ++g_shm->state;

    if (g_shm->state % 2U) {
      display_prompt("WRITER: COUNTER OUT-OF-SYNC!!!!", 1);
    }

    DBG_PRINTF(("writer (%u of %u)\n",
      g_shm->state + g_shm->rdtowr, COMPLETE));

    rwmutex_wrunlock(&g_rwmutex);

    if (! (i % WRITER_UNLOCK_YIELD)) sched_yield();
  }

  DBG_PRINTF(("writer completed\n"));
  return NULL;
}
  446.  
  447.  
  448.  
  449.  
  450. int main(void) {
  451.   unsigned runs;
  452.  
  453.   g_shm = ALIGN_UP(g_raw_shm, ATOMIC_L2CACHE);
  454.   rwmutex_shm_init(&g_shm->shm);
  455.  
  456.   for (runs = 0; runs < RUNS; ++runs) {
  457.     unsigned i;
  458.     pthread_t tid[THREADS];
  459.  
  460.     g_shm->state = g_shm->rdtowr = 0;
  461.  
  462.     printf("TEST RUN %u of %u RUNNING...\n", runs + 1, RUNS);
  463.  
  464.     rwmutex_create(&g_rwmutex, &g_shm->shm);
  465.  
  466.     for (i = 0; i < THREADS; ++i) {
  467.       if (i < READERS) {
  468.         pthread_create(&tid[i], NULL, reader, NULL);
  469.       } else {
  470.         pthread_create(&tid[i], NULL, writer, NULL);
  471.       }
  472.     }
  473.  
  474.     for (i = 0; i < THREADS; ++i) {
  475.       pthread_join(tid[i], NULL);
  476.     }
  477.  
  478.     rwmutex_destroy(&g_rwmutex);
  479.  
  480.     if (! IS_COMPLETE()) {
  481.       display_prompt("COUNTER OUT-OF-SYNC!!!!", 1);
  482.     }
  483.  
  484.     printf("TEST RUN %u of %u COMPLETED!!!\n"
  485.            "------------------------------------\n",
  486.            runs + 1, RUNS);
  487.  
  488.     fflush(stdout);
  489.   }
  490.  
  491.   display_prompt("RWMUTEX PROGRAM COMPLETED", 0);
  492.  
  493.   return 0;
  494. }
  495.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement