Advertisement
homer512

C segfault handler

Oct 28th, 2014
185
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 9.26 KB | None | 0 0
  1. /**
  2.  * Demonstrates the use of a signal handler to create data lazily
  3.  *
  4.  *
  5.  * Copyright 2014 Florian Philipp
  6.  *
  7.  * Licensed under the Apache License, Version 2.0 (the "License");
  8.  * you may not use this file except in compliance with the License.
  9.  * You may obtain a copy of the License at
  10.  *
  11.  *  http://www.apache.org/licenses/LICENSE-2.0
  12.  *
  13.  * Unless required by applicable law or agreed to in writing, software
  14.  * distributed under the License is distributed on an "AS IS" BASIS,
  15.  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  16.  * See the License for the specific language governing permissions and
  17.  * limitations under the License.
  18.  */
  19.  
#define _GNU_SOURCE
/* using tdestroy */

#include <stddef.h>
/* using size_t */
#include <stdbool.h>
/* using bool */
#include <stdint.h>
/* using uintptr_t */
#include <stdio.h>
/* using printf, fprintf, perror, fputs */
#include <stdlib.h>
/* using exit */
#include <assert.h>
/* using assert */
#include <errno.h>
/* using errno */
#include <unistd.h>
/* using sysconf */
#include <sys/mman.h>
/* using mprotect, mmap, munmap */
#include <search.h>
/* using tsearch, tfind, tdestroy */
#include <signal.h>
/* using sigaction */
  43.  
  44.  
  45. /**
  46.  * Segmentation fault handler
  47.  *
  48.  * Uses a dedicated memory area that is filled with data on demand.
  49.  * A singleton in production code, but not necessarily in testing code
  50.  *
  51.  * TODO: Make thread-safe
  52.  *
  53.  * BUG: Not async-signal-safe
  54.  */
  55. struct _SegvHandler
  56. {
  57.   /**
  58.    * pointer to the start of the dedicated memory area or NULL
  59.    */
  60.   void* base_addr;
  61.   /**
  62.    * pointer past the end of the dedicated memory area or NULL
  63.    */
  64.   void* mapped_end;
  65.   /**
  66.    * search.h tree of addresses of pages with read permissions
  67.    */
  68.   void* mapped_ro;
  69.   /**
  70.    * search.h tree of addresses of pages with write permissions
  71.    */
  72.   void* mapped_rw;
  73.   int async_err;
  74. };
  75.  
  76.  
  77. /**
  78.  * Signal handler singleton
  79.  *
  80.  * Default initialization with 0 is valid for all attributes
  81.  */
  82. static struct _SegvHandler _segv_global_self;
  83.  
  84.  
  85. /**
  86.  * Comparison of memory addresses. Used with search.h
  87.  */
  88. static int _segv_page_cmp(const void* first, const void* second)
  89. {
  90.   if(first < second)
  91.     return -1;
  92.   else if(first == second)
  93.     return 0;
  94.   else
  95.     return 1;
  96. }
  97.  
  98. /**
  99.  * Sets async_err from errno unless an error is already set
  100.  */
  101. static void _segv_store_errno(struct _SegvHandler* self)
  102. {
  103.   if(! self->async_err)
  104.     self->async_err = errno;
  105. }
  106.  
  107. /**
  108.  * Returns bytes per memory page
  109.  */
  110. static size_t _segv_pagelen()
  111. {
  112.   return sysconf(_SC_PAGESIZE);
  113. }
  114.  
  115. /**
  116.  * Fills page with meaningful data
  117.  *
  118.  * TODO: Stub
  119.  */
  120. static void _segv_populate_page(void* page)
  121. {
  122.   unsigned* typed = page;
  123.   unsigned* end = page + _segv_pagelen() / sizeof(*typed);
  124.   for(; typed != end; ++typed)
  125.     *typed = 0xDEADBEEF;
  126. }
  127.  
  128. /**
  129.  * Flushes changed page to shared storage or whatever
  130.  *
  131.  * TODO: Stub
  132.  */
  133. static void _segv_commit_page(struct _SegvHandler* self, void* page)
  134. {
  135.   char* byte_addr = page;
  136.   char* byte_base = self->base_addr;
  137.   size_t offset = byte_addr - byte_base;
  138.   printf("Range [%zu, %zu) changed\n", offset, offset + _segv_pagelen());
  139. }
  140.  
  141. /**
  142.  * Returns page address of segfault
  143.  */
  144. static void* _segv_get_page(const siginfo_t* siginfo)
  145. {
  146.   size_t addr = (size_t) siginfo->si_addr;
  147.   addr &= ~(_segv_pagelen() - 1);
  148.   return (void*) addr;
  149. }
  150.  
  151. /**
  152.  * Invokes handler for segmentation faults that cannot be handled otherwise
  153.  *
  154.  * TODO: Replace with original segfault handler
  155.  */
  156. static void _segv_real_segfault(const siginfo_t* siginfo)
  157. {
  158.   fprintf(stderr, "SEGMENTATION FAULT %p\n", siginfo->si_addr);
  159.   exit(EXIT_FAILURE);
  160. }
  161.  
  162. /**
  163.  * Permits write access to page. Marks page as dirty
  164.  *
  165.  * Precondition: page is removed from self->mapped_ro
  166.  * Postcondition on error: Page is still write-protected
  167.  */
  168. static int _segv_map_rw(struct _SegvHandler* self, void* page)
  169. {
  170.   size_t pagelen = _segv_pagelen();
  171.   if(mprotect(page, pagelen, PROT_READ | PROT_WRITE))
  172.     goto err_rtrn;
  173.   if(tsearch(page, &self->mapped_rw, _segv_page_cmp) == NULL)
  174.     goto err_nomem;
  175.   return 0;
  176.  err_nomem:
  177.   mprotect(page, pagelen, PROT_READ);
  178.   errno = ENOMEM;
  179.  err_rtrn:
  180.   return -1;
  181. }
  182.  
  183. /**
  184.  * Populates page and permits read access
  185.  */
  186. static int _segv_map_ro(struct _SegvHandler* self, void* page)
  187. {
  188.   size_t pagelen = _segv_pagelen();
  189.   if(mprotect(page, pagelen, PROT_READ | PROT_WRITE))
  190.     goto err_rtrn;
  191.   _segv_populate_page(page);
  192.   if(mprotect(page, pagelen, PROT_READ))
  193.     goto err_rtrn;
  194.   if(! tsearch(page, &self->mapped_ro, _segv_page_cmp))
  195.     goto err_nomem;
  196.   return 0;
  197.  err_nomem:
  198.   mprotect(page, pagelen, PROT_NONE);
  199.   errno = ENOMEM;
  200.  err_rtrn:
  201.   return -1;
  202. }
  203.  
  204. /**
  205.  * Returns true if this segfault can be avoided
  206.  */
  207. static bool _segv_is_magic_segfault(const struct _SegvHandler* self,
  208.                     const siginfo_t* siginfo)
  209. {
  210.   void* addr = siginfo->si_addr;
  211.   if(siginfo->si_code != SEGV_ACCERR)
  212.     return false;
  213.   if(addr < self->base_addr || addr >= self->mapped_end)
  214.     return false;
  215.   void* page = _segv_get_page(siginfo);
  216.   if(tfind(page, &self->mapped_rw, _segv_page_cmp))
  217.     return false;
  218.   return true;
  219. }
  220.  
/**
 * Signal handler compatible with sigaction (installed with SA_SIGINFO)
 *
 * First fault on a managed page populates it and maps it read-only;
 * the next fault on the same page (necessarily a write, since the page
 * is readable) upgrades it to read-write, moving it from the mapped_ro
 * tree to mapped_rw. Faults outside the managed area are forwarded to
 * _segv_real_segfault. ucontext is unused.
 *
 * BUG (see file header): not async-signal-safe -- the tree operations
 * allocate, and perror/exit are not on the safe-function list.
 */
static void _segv_signal_handler(int signum, siginfo_t* siginfo,
                 void* ucontext)
{
  assert(signum == SIGSEGV);
  struct _SegvHandler* self = &_segv_global_self;
  void* page = _segv_get_page(siginfo);
  if(! _segv_is_magic_segfault(self, siginfo))
    _segv_real_segfault(siginfo);
  /* tdelete returns non-NULL iff the page was found (and removed) in the
   * read-only set: second fault, so grant write access */
  if(tdelete(page, &self->mapped_ro, _segv_page_cmp)) {
    if(_segv_map_rw(self, page))
      goto err;
  }
  else {
    /* first fault on this page: populate and grant read access */
    if(_segv_map_ro(self, page))
      goto err;
  }
  return;
 err:
  perror("segfault handler");
  exit(EXIT_FAILURE);
}
  245.  
  246. /**
  247.  * Installs the global signal handler
  248.  */
  249. static int _segv_install()
  250. {
  251.   struct sigaction action = {
  252.     .sa_sigaction = _segv_signal_handler,
  253.     .sa_mask = {{0}},
  254.     .sa_flags = SA_SIGINFO,
  255.   };
  256.   return sigaction(SIGSEGV, &action, NULL);
  257. }
  258.  
  259. /**
  260.  * Allocates the dedicated memory area
  261.  */
  262. static int _segv_init_mapping(struct _SegvHandler* self)
  263. {
  264.   int err;
  265.   if(self->base_addr)
  266.     goto err_double;
  267.   size_t mapping_len = _segv_pagelen() * 16;
  268.   void* mapped = mmap(NULL, mapping_len, PROT_READ | PROT_WRITE,
  269.               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  270.   if(mapped == MAP_FAILED)
  271.     goto err_rtrn;
  272.   if(mprotect(mapped, mapping_len, PROT_NONE))
  273.     goto err_unmap;
  274.   self->base_addr = mapped;
  275.   self->mapped_end = ((char*) mapped) + mapping_len;
  276.   return 0;
  277.  err_unmap:
  278.   err = errno;
  279.   munmap(mapped, mapping_len);
  280.   errno = err;
  281.   goto err_rtrn;
  282.  err_double:
  283.   fputs("segfault handler double initialization\n", stderr);
  284.   errno = EINVAL;
  285.  err_rtrn:
  286.   return -1;
  287. }
  288.  
  289. /**
  290.  * Commits a dirty pages and marks it as clean
  291.  *
  292.  * Callback for tdestroy
  293.  */
  294. static void _segv_commit_action(void* page)
  295. {
  296.   struct _SegvHandler* self = &_segv_global_self;
  297.   _segv_commit_page(self, page);
  298.   size_t pagelen = _segv_pagelen();
  299.   if(mprotect(page, pagelen, PROT_READ))
  300.     goto err_store;
  301.   if(! tsearch(page, &self->mapped_ro, _segv_page_cmp))
  302.     goto err_mem;
  303.   return;
  304.  err_mem:
  305.   mprotect(page, pagelen, PROT_NONE);
  306.   errno = ENOMEM;
  307.  err_store:
  308.   _segv_store_errno(self);
  309. }
  310.  
  311. /**
  312.  * Discards a dirty page. Will re-populate it on demand
  313.  *
  314.  * Callback for tdestroy
  315.  */
  316. static void _segv_discard_action(void* page)
  317. {
  318.   if(mprotect(page, _segv_pagelen(), PROT_NONE))
  319.     _segv_store_errno(&_segv_global_self);
  320. }
  321.  
  322. /**
  323.  * Removes all entries from the global mapped_rw set
  324.  *
  325.  * \param a callback called on every page
  326.  * \return async_err
  327.  */
  328. static int _segv_clear_rw(void (*action)(void*))
  329. {
  330.   struct _SegvHandler* self = &_segv_global_self;
  331.   self->async_err = 0;
  332.   tdestroy(self->mapped_rw, action);
  333.   self->mapped_rw = NULL;
  334.   if(self->async_err)
  335.     errno = self->async_err;
  336.   return self->async_err;
  337. }
  338.  
  339. /**
  340.  * Installs and initializes the segmentation fault handler
  341.  *
  342.  * \return 0 on success, -1 otherwise. Sets errno
  343.  */
  344. int sigsegv_install()
  345. {
  346.   if(_segv_init_mapping(&_segv_global_self))
  347.     return -1;
  348.   return _segv_install();
  349. }
  350.  
/**
 * Commits all changes
 *
 * Flushes every dirty (read-write) page via _segv_commit_action and
 * downgrades it to read-only again.
 *
 * \return 0 on success, nonzero otherwise. Sets errno
 */
int sigsegv_commit()
{
  return _segv_clear_rw(_segv_commit_action);
}
  360.  
/**
 * Discards all changes
 *
 * Revokes access to every dirty (read-write) page via
 * _segv_discard_action; the pages are re-populated lazily on next access.
 *
 * \return 0 on success, nonzero otherwise. Sets errno
 */
int sigsegv_discard()
{
  return _segv_clear_rw(_segv_discard_action);
}
  370.  
  371. /**
  372.  * Returns starting address of the dedicated memory area
  373.  */
  374. void* sigsegv_baseptr()
  375. {
  376.   const struct _SegvHandler* self = &_segv_global_self;
  377.   return self->base_addr;
  378. }
  379.  
  380.  
  381. /**
  382.  * Some simple testing code
  383.  *
  384.  * Observe it with strace
  385.  */
  386. int main()
  387. {
  388.   if(sigsegv_install())
  389.     goto err;
  390.   unsigned* base = sigsegv_baseptr();
  391.   printf("Accessing RO %p = 0x%x\n", base + 3, base[3]);
  392.   base[3] = 0;
  393.   puts("Committing");
  394.   if(sigsegv_commit())
  395.     goto err;
  396.   size_t otherpage = _segv_pagelen() / sizeof(unsigned) + 3;
  397.   puts("Making direct RW access");
  398.   base[otherpage] = 0;
  399.   puts("Discarding");
  400.   if(sigsegv_discard())
  401.     goto err;
  402.   printf("Accessing RO %p = 0x%x\n", base + otherpage, base[otherpage]);
  403.   return EXIT_SUCCESS;
  404.  err:
  405.   perror("sigsegv");
  406.   return EXIT_FAILURE;
  407. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement