Advertisement
Guest User

Chris M Thomasson

a guest
May 29th, 2009
325
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 9.46 KB | None | 0 0
/* Debugging Support
______________________________________________________________*/
#include <assert.h>




/* DBG_PRINTF compiles to a printf in debug builds and to a no-op when
   NDEBUG is defined.  The argument list must be doubly parenthesized,
   e.g. DBG_PRINTF(("reader running\n")), so the whole list expands as
   printf's argument list. */
#if ! defined (NDEBUG)
#  include <stdio.h>
#  define DBG_PRINTF(mp_exp) printf mp_exp
#else
#  define DBG_PRINTF(mp_exp) ((void)0)
#endif
  14.  
  15.  
  16.  
  17.  
  18.  
  19.  
  20.  
  21.  
/* x86-32 Atomic API (Fetch-and-Add only!)
______________________________________________________________*/
#include <limits.h>


/* 32-bit signed/unsigned words used by the atomic operations below. */
typedef int atomic_x86_word32;
typedef unsigned int atomic_x86_uword32;
#define ATOMIC_X86_WORD32_MAX (INT_MAX)
/* Padding granularity used to keep shared data on separate cache
   lines (presumably 128 to cover two 64-byte lines -- TODO confirm
   for the target part). */
#define ATOMIC_X86_L2CACHE (128U)


/* C89-compatible compile-time assertion: the array size is -1 (an
   error) unless both word types are exactly 32 bits AND pointers are
   32-bit, i.e. unless we are really building for x86-32. */
typedef char atomic_x86_static_assert[
 sizeof(atomic_x86_word32) == 32 / CHAR_BIT &&
 sizeof(atomic_x86_uword32) == 32 / CHAR_BIT &&
 sizeof(void*) == 32 / CHAR_BIT ? 1 : -1
];
  38.  
  39.  
#if defined (_MSC_VER)
  /* MSVC build: a naked function with the arguments read straight off
     the stack.  LOCK XADD atomically adds EAX into *self and leaves
     the PREVIOUS value of *self in EAX -- which is also the return
     register, so no epilogue is needed beyond RET. */
  __declspec(naked)
  static atomic_x86_word32
  atomic_x86_faa32(
   atomic_x86_word32 volatile* self,
   atomic_x86_word32 const addend
  ) {
    _asm {
      MOV ECX, [ESP + 4]
      MOV EAX, [ESP + 8]
      LOCK XADD [ECX], EAX
      RET
    }
  }
#elif defined (__GNUC__)
  /* GCC build: extended inline asm.  "0"(addend) loads the addend into
     the same register as output 0 (ret); LOCK XADDL writes the old
     value of *self back into that register.  The "memory" clobber
     makes this a full compiler barrier. */
  __attribute__((always_inline))
  static __inline__
  atomic_x86_word32
  atomic_x86_faa32(
   atomic_x86_word32 volatile* self,
   atomic_x86_word32 const addend
  ) {
    atomic_x86_word32 ret;

    __asm__ __volatile__(
      "LOCK XADDL %2, %1;\n"
      : "=&r" (ret),
        "=m"  (*self)
      : "0"   (addend)
      : "memory",
        "cc"
    );

    return ret;
  }
#else
#  error Compiler Not Supported!
#endif
  78.  
  79.  
/* Architecture-neutral aliases: the rwmutex below uses only these
   names, so porting means supplying another atomic_*_faa32. */
typedef atomic_x86_word32 atomic_word32;
#define ATOMIC_WORD32_MAX ATOMIC_X86_WORD32_MAX
#define ATOMIC_L2CACHE ATOMIC_X86_L2CACHE
#define atomic_faa32 atomic_x86_faa32
  84.  
  85.  
  86.  
  87.  
  88.  
  89.  
  90.  
  91.  
/* Alignment Support
______________________________________________________________*/
#include <stddef.h>


/* Integer type wide enough to round-trip a pointer on x86-32 (the
   static assert above pins pointers at 32 bits). */
typedef atomic_x86_uword32 uintptr_type;


/* Round `mp_ptr` up to the next `mp_align` boundary.
   NOTE: the mask trick requires `mp_align` to be a power of two. */
#define ALIGN_UP(mp_ptr, mp_align) \
  ((void*)( \
    (((uintptr_type)(mp_ptr)) + ((mp_align) - 1U)) \
    & ~(((mp_align) - 1U)) \
  ))


/* Non-zero when `mp_ptr` is already aligned to `mp_align`. */
#define ALIGN_ASSERT(mp_ptr, mp_align) \
  (((void*)(mp_ptr)) == ALIGN_UP(mp_ptr, mp_align))


/* Buffer size that still leaves `mp_size` usable bytes after the
   buffer's base address has been ALIGN_UP'd to `mp_align`. */
#define ALIGN_BUFSIZE(mp_size, mp_align) \
  ((size_t)(((mp_size) + (mp_align) - 1U)))
  114.  
  115.  
  116.  
  117.  
  118.  
  119.  
  120.  
/* Read/Write Mutex By Chris M. Thomasson
______________________________________________________________*/
/* NOTE(review): feature-test macros such as _XOPEN_SOURCE should be
   defined before ANY system header is included; <assert.h>, <stdio.h>
   and <limits.h> are already included above, so this definition may
   take effect too late on some platforms.  Consider moving it to the
   top of the file or to the compiler command line. */
#define _XOPEN_SOURCE 600


#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>
#include <errno.h>




/* Define to wake several semaphore waiters with a single call to the
   non-portable sem_post_multiple() extension. */
/* #define SEM_USE_POST_MULTIPLE */
/* Maximum concurrent readers; also the "fully unlocked" value of
   rwmutex_shm::count. */
#define RWMUTEX_COUNT_MAX ATOMIC_WORD32_MAX
  136.  
  137.  
  138.  
  139.  
  140. static void
  141. sem_wait_safe(
  142.  sem_t* const self
  143. ) {
  144.   while (sem_wait(self)) {
  145.     if (errno != EINTR) abort();
  146.   }
  147. }
  148.  
  149.  
  150. static void
  151. sem_post_multiple_safe(
  152.  sem_t* const self,
  153.  int count
  154. ) {
  155. #if defined (SEM_USE_POST_MULTIPLE)
  156.   sem_post_multiple(self, count);
  157. #else
  158.   while (count) {
  159.     sem_post(self);
  160.     --count;
  161.   }
  162. #endif
  163. }
  164.  
  165.  
/* Lock state suitable for shared memory: both fields are touched only
   with atomic fetch-and-add. */
struct rwmutex_shm {
  atomic_word32 count;  /* RWMUTEX_COUNT_MAX minus active readers; driven negative/low while a writer holds or acquires the lock */
  atomic_word32 rdwake; /* readers a waiting writer must still drain before it may proceed */
};


/* Per-process lock handle: the wait-sets the blockers sleep on, plus
   a plain mutex that serializes writers. */
struct rwmutex {
  struct rwmutex_shm* shm; /* shared counters (not owned by this struct) */
  sem_t rdwset;            /* readers blocked behind a writer sleep here */
  sem_t wrwset;            /* a writer waiting for readers to drain sleeps here */
  pthread_mutex_t wrmtx;   /* serializes writers and read->write upgrades */
};
  178.  
  179.  
  180. static void
  181. rwmutex_shm_init(
  182.  struct rwmutex_shm* const self
  183. ) {
  184.   self->count = RWMUTEX_COUNT_MAX;
  185.   self->rdwake = 0;
  186. }
  187.  
  188.  
  189. static int
  190. rwmutex_create(
  191.  struct rwmutex* const self,
  192.  struct rwmutex_shm* shm
  193. ) {
  194.   int state = sem_init(&self->rdwset, 0, 0);
  195.   if (! state) {
  196.     state = sem_init(&self->wrwset, 0, 0);
  197.     if (! state) {
  198.       state = pthread_mutex_init(&self->wrmtx, NULL);
  199.       if (! state) {
  200.         self->shm = shm;
  201.         return 0;
  202.       }
  203.       sem_destroy(&self->wrwset);
  204.     }
  205.     sem_destroy(&self->rdwset);
  206.   }
  207.   return state;
  208. }
  209.  
  210.  
  211. static void
  212. rwmutex_destroy(
  213.  struct rwmutex* const self
  214. ) {
  215.   pthread_mutex_destroy(&self->wrmtx);
  216.   sem_destroy(&self->wrwset);
  217.   sem_destroy(&self->rdwset);
  218. }
  219.  
  220.  
/* Acquire a shared (read) lock.
   Atomically take one slot from the reader budget.  If the previous
   value was below 1, a writer owns or is acquiring the lock, so sleep
   on the reader wait-set; rwmutex_wrunlock() posts one slot per
   blocked reader when the writer leaves. */
static void
rwmutex_rdlock(
 struct rwmutex* const self
) {
  if (atomic_faa32(&self->shm->count, -1) < 1) {
    sem_wait_safe(&self->rdwset);
  }
}
  229.  
  230.  
/* Release a shared (read) lock.
   Return the slot to the reader budget.  A negative previous value
   means a writer is waiting for the active readers to drain; the last
   such reader (the one that takes rdwake from 1 to 0) wakes it. */
static void
rwmutex_rdunlock(
 struct rwmutex* const self
) {
  if (atomic_faa32(&self->shm->count, 1) < 0) {
    if (atomic_faa32(&self->shm->rdwake, -1) == 1) {
      sem_post(&self->wrwset);
    }
  }
}
  241.  
  242.  
/* Try to upgrade a held read lock to a write lock.
   Returns 0 on success; EBUSY if another writer (or upgrader) already
   holds wrmtx, in which case the caller still holds its read lock.
   On success this mirrors rwmutex_wrlock(), except the subtraction is
   one less because this thread already owns one reader slot. */
static int
rwmutex_rdtowrlock(
 struct rwmutex* const self
) {
  if (! pthread_mutex_trylock(&self->wrmtx)) {
    /* The trailing +1 folds our own (now surrendered) read slot back
       in, so `count` is the budget with this thread excluded. */
    atomic_word32 count =
      atomic_faa32(&self->shm->count, (-RWMUTEX_COUNT_MAX) + 1) + 1;
    if (count < RWMUTEX_COUNT_MAX) {
      /* RWMUTEX_COUNT_MAX - count = other readers still active.  Add
         them to rdwake; if any remain after racing concurrent
         rdunlock() decrements, sleep until the last reader posts. */
      if (atomic_faa32(&self->shm->rdwake, RWMUTEX_COUNT_MAX - count)
            + RWMUTEX_COUNT_MAX - count) {
        sem_wait_safe(&self->wrwset);
      }
    }
    return 0;
  }
  return EBUSY;
}
  260.  
  261.  
/* Acquire an exclusive (write) lock.
   wrmtx serializes writers; then the whole reader budget is swallowed
   in one fetch-and-add, so new readers start blocking.  If any
   readers were active at that instant, register them in rdwake and
   sleep until the last one posts the writer wait-set. */
static void
rwmutex_wrlock(
 struct rwmutex* const self
) {
  atomic_word32 count;
  pthread_mutex_lock(&self->wrmtx);
  count = atomic_faa32(&self->shm->count, -RWMUTEX_COUNT_MAX);
  if (count < RWMUTEX_COUNT_MAX) {
    /* RWMUTEX_COUNT_MAX - count = readers active at the swap; only
       sleep if some of them have not already unlocked (the sum with
       rdwake's previous value is still non-zero). */
    if (atomic_faa32(&self->shm->rdwake, RWMUTEX_COUNT_MAX - count)
          + RWMUTEX_COUNT_MAX - count) {
      sem_wait_safe(&self->wrwset);
    }
  }
}
  276.  
  277.  
/* Release an exclusive (write) lock.
   Restore the reader budget; the previous value is negative by
   exactly the number of readers that queued up while the writer held
   the lock, so wake that many from the reader wait-set before letting
   the next writer through wrmtx. */
static void
rwmutex_wrunlock(
 struct rwmutex* const self
) {
  atomic_word32 count =
    atomic_faa32(&self->shm->count, RWMUTEX_COUNT_MAX);
  if (count < 0) {
    sem_post_multiple_safe(&self->rdwset, -count);
  }
  pthread_mutex_unlock(&self->wrmtx);
}
  289.  
  290.  
  291.  
  292.  
  293.  
  294.  
  295.  
  296.  
/* Simple Test Application
______________________________________________________________*/
#include <stdio.h>
#include <sched.h>
#include <time.h>




/* Define to benchmark the platform's native pthread_rwlock_t instead
   of the custom rwmutex above. */
/* #define USE_PTHREAD_NATIVE_RWLOCK */




/* Map the rwmutex_test_* names onto whichever implementation was
   selected, so the benchmark code below is implementation-agnostic.
   (The native rwlock's create ignores the shared-memory argument.) */
#if ! defined (USE_PTHREAD_NATIVE_RWLOCK)
   typedef struct rwmutex rwmutex_test_t;
#  define rwmutex_test_create rwmutex_create
#  define rwmutex_test_rdlock rwmutex_rdlock
#  define rwmutex_test_rdunlock rwmutex_rdunlock
#  define rwmutex_test_wrlock rwmutex_wrlock
#  define rwmutex_test_wrunlock rwmutex_wrunlock
#  define rwmutex_test_destroy rwmutex_destroy
#else
   typedef pthread_rwlock_t rwmutex_test_t;
#  define rwmutex_test_create(a, b) pthread_rwlock_init(a, NULL)
#  define rwmutex_test_rdlock pthread_rwlock_rdlock
#  define rwmutex_test_rdunlock pthread_rwlock_unlock
#  define rwmutex_test_wrlock pthread_rwlock_wrlock
#  define rwmutex_test_wrunlock pthread_rwlock_unlock
#  define rwmutex_test_destroy pthread_rwlock_destroy
#endif
  328.  
  329.  
  330.  
  331.  
#define RUNS 2U           /* benchmark repetitions */
#define ITERS 9999999U    /* list traversals attempted per reader thread */
#define READERS 12U       /* number of reader threads */
#define PRIME 256U        /* nodes pushed before each run */
#define VOLATILE volatile /* presumably here so it can be defined empty to compare -- TODO confirm */




/* Singly-linked list node; traversing the list under the read lock
   is the reader workload. */
struct node {
  struct node* VOLATILE next;
};


/* Shared benchmark state, padded so the lock counters and the list
   head sit on different cache lines. */
struct shm {
  struct rwmutex_shm rwshm;
  char pad1[ATOMIC_L2CACHE - sizeof(struct rwmutex_shm)];
  struct node* VOLATILE head;
  char pad2[ATOMIC_L2CACHE - sizeof(struct node*)];
};
  352.  
  353.  
  354.  
  355.  
/* Raw backing storage for the shared region, over-allocated so its
   base can be rounded up to an L2-cache-line boundary in main(). */
static unsigned char g_shm_raw[
  ALIGN_BUFSIZE(sizeof(struct shm), ATOMIC_L2CACHE)
];


static struct shm* g_shm;        /* aligned view into g_shm_raw; set in main() */
static rwmutex_test_t g_rwmutex; /* the lock under test */
  363.  
  364.  
  365.  
  366.  
  367. static void
  368. display_prompt(
  369.  char const* msg,
  370.  int fatal
  371. ) {
  372.   printf("\n\n\n____________________________________________\n"
  373.          "%s\n\nPress <ENTER> to exit...", msg);
  374.   fflush(stdout);
  375.   fflush(stdin);
  376.   fflush(stderr);
  377.   getchar();
  378.   if (fatal) abort();
  379. }
  380.  
  381.  
  382. static int
  383. slist_push(void) {
  384.   struct node* node = malloc(sizeof(*node));
  385.  
  386.   if (node) {
  387.     rwmutex_test_wrlock(&g_rwmutex);
  388.     node->next = g_shm->head;
  389.     g_shm->head = node;
  390.     rwmutex_test_wrunlock(&g_rwmutex);
  391.     return 1;
  392.   }
  393.  
  394.   return 0;
  395. }
  396.  
  397.  
  398. static int
  399. slist_pop(void) {
  400.   struct node* node;
  401.   rwmutex_test_wrlock(&g_rwmutex);
  402.   node = g_shm->head;
  403.   if (! node) {
  404.     rwmutex_test_wrunlock(&g_rwmutex);
  405.     return 0;
  406.   }
  407.   g_shm->head = node->next;
  408.   rwmutex_test_wrunlock(&g_rwmutex);
  409.   free(node);
  410.   return 1;
  411. }
  412.  
  413.  
  414. static void
  415. slist_prime(void) {
  416.   unsigned i;
  417.   for (i = 0; i < PRIME; ++i) slist_push();
  418. }
  419.  
  420.  
  421. static void
  422. slist_flush(void) {
  423.   while (slist_pop());
  424. }
  425.  
  426.  
  427. static int
  428. slist_iterate(void) {
  429.   struct node* node;
  430.  
  431.   rwmutex_test_rdlock(&g_rwmutex);
  432.  
  433.   node = g_shm->head;
  434.  
  435.   if (! node) {
  436.     rwmutex_test_rdunlock(&g_rwmutex);
  437.     return 0;
  438.   }
  439.  
  440.   while (node) {
  441.     node = node->next;
  442.   }
  443.  
  444.   rwmutex_test_rdunlock(&g_rwmutex);
  445.   return 1;
  446. }
  447.  
  448.  
  449.  
  450.  
  451. static void*
  452. reader(
  453.  void* state
  454. ) {
  455.   unsigned i;
  456.   DBG_PRINTF(("reader running\n"));
  457.  
  458.   for (i = 0; i < ITERS; ++i) {
  459.     if (! slist_iterate()) break;
  460.   }
  461.  
  462.   DBG_PRINTF(("reader completed\n"));
  463.   return (void*)i;
  464. }
  465.  
  466.  
  467.  
  468.  
  469. int main(void) {
  470.   unsigned runs;
  471.  
  472.   g_shm = ALIGN_UP(g_shm_raw, ATOMIC_L2CACHE);
  473.   rwmutex_shm_init(&g_shm->rwshm);
  474.   g_shm->head = NULL;
  475.  
  476.   for (runs = 0; runs < RUNS; ++runs) {
  477.     unsigned i;
  478.     unsigned long iters = ITERS * READERS, seconds = 0;
  479.     clock_t start, end;
  480.     pthread_t tid[READERS];
  481.  
  482.     rwmutex_test_create(&g_rwmutex, &g_shm->rwshm);
  483.  
  484.     slist_prime();
  485.  
  486.     printf("TEST RUN %u of %u RUNNING...\n", runs + 1, RUNS);
  487.  
  488.     start = clock() / CLOCKS_PER_SEC;
  489.  
  490.     for (i = 0; i < READERS; ++i) {
  491.       pthread_create(&tid[i], NULL, reader, NULL);
  492.     }
  493.  
  494.     for (i = 0; i < READERS; ++i) {
  495.       pthread_join(tid[i], NULL);
  496.       slist_pop();
  497.     }
  498.  
  499.     end = clock() / CLOCKS_PER_SEC;
  500.  
  501.     seconds = (unsigned long)(end - start);
  502.  
  503.     if (seconds) iters /= seconds;
  504.  
  505.     slist_flush();
  506.  
  507.     rwmutex_test_destroy(&g_rwmutex);
  508.  
  509.     printf("READERS ACHIEVED %lu ITERATIONS PER-THREAD-PER-SECOND\n\n",
  510.       iters / READERS);
  511.  
  512.     printf("TEST RUN %u of %u COMPLETED!!!\n"
  513.            "------------------------------------\n",
  514.            runs + 1, RUNS);
  515.  
  516.     fflush(stdout);
  517.   }
  518.  
  519.   display_prompt("RWMUTEX PROGRAM COMPLETED", 0);
  520.   return 0;
  521. }
  522.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement