SHARE
TWEET

Untitled

a guest May 19th, 2017 76 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. /*
  2.  * SLOB Allocator: Simple List Of Blocks
  3.  *
  4.  * Matt Mackall <mpm@selenic.com> 12/30/03
  5.  *
  6.  * NUMA support by Paul Mundt, 2007.
  7.  *
  8.  * How SLOB works:
  9.  *
  10.  * The core of SLOB is a traditional K&R style heap allocator, with
  11.  * support for returning aligned objects. The granularity of this
  12.  * allocator is as little as 2 bytes, however typically most architectures
  13.  * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
  14.  *
  15.  * The slob heap is a set of linked list of pages from alloc_pages(),
  16.  * and within each page, there is a singly-linked list of free blocks
  17.  * (slob_t). The heap is grown on demand. To reduce fragmentation,
  18.  * heap pages are segregated into three lists, with objects less than
  19.  * 256 bytes, objects less than 1024 bytes, and all other objects.
  20.  *
  21.  * Allocation from heap involves first searching for a page with
  22.  * sufficient free blocks (using a next-fit-like approach) followed by
  23.  * a first-fit scan of the page. Deallocation inserts objects back
  24.  * into the free list in address order, so this is effectively an
  25.  * address-ordered first fit.
  26.  *
  27.  * Above this is an implementation of kmalloc/kfree. Blocks returned
  28.  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
  29.  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  30.  * alloc_pages() directly, allocating compound pages so the page order
  31.  * does not have to be separately tracked.
  32.  * These objects are detected in kfree() because PageSlab()
  33.  * is false for them.
  34.  *
  35.  * SLAB is emulated on top of SLOB by simply calling constructors and
  36.  * destructors for every SLAB allocation. Objects are returned with the
  37.  * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
  38.  * case the low-level allocator will fragment blocks to create the proper
  39.  * alignment. Again, objects of page-size or greater are allocated by
  40.  * calling alloc_pages(). As SLAB objects know their size, no separate
  41.  * size bookkeeping is necessary and there is essentially no allocation
  42.  * space overhead, and compound pages aren't needed for multi-page
  43.  * allocations.
  44.  *
  45.  * NUMA support in SLOB is fairly simplistic, pushing most of the real
  46.  * logic down to the page allocator, and simply doing the node accounting
  47.  * on the upper levels. In the event that a node id is explicitly
  48.  * provided, __alloc_pages_node() with the specified node id is used
  49.  * instead. The common case (or when the node id isn't explicitly provided)
  50.  * will default to the current node, as per numa_node_id().
  51.  *
  52.  * Node aware pages are still inserted in to the global freelist, and
  53.  * these are scanned for by matching against the node id encoded in the
  54.  * page flags. As a result, block allocations that can be satisfied from
  55.  * the freelist will only be done so on pages residing on the same node,
  56.  * in order to prevent random node placement.
  57.  */
  58.  
  59. #include <linux/kernel.h>
  60. #include <linux/slab.h>
  61.  
  62. #include <linux/mm.h>
  63. #include <linux/swap.h> /* struct reclaim_state */
  64. #include <linux/cache.h>
  65. #include <linux/init.h>
  66. #include <linux/export.h>
  67. #include <linux/rcupdate.h>
  68. #include <linux/list.h>
  69. #include <linux/kmemleak.h>
  70.  
  71. #include <trace/events/kmem.h>
  72.  
  73. #include <linux/atomic.h>
  74.  
  75. #include "slab.h"
/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
/* Pick the narrowest signed type that can index every unit in a page. */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

/* One allocation unit; free-list metadata is stored in-band in these. */
struct slob_block {
    slobidx_t units;    /* +ve: block size; -ve: offset of next free block */
};
typedef struct slob_block slob_t;
  94.  
/*
 * All partially free slob pages go on these lists, segregated by the
 * size class of the request that first spilled onto them.
 */
#define SLOB_BREAK1 256     /* requests below this use the small list */
#define SLOB_BREAK2 1024    /* requests below this use the medium list */
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
  103.  
/*
 * slob_page_free: true for pages currently on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
    return PageSlobFree(sp);
}
  111.  
/* Put @sp on @list and flag it as partially free.  Called under slob_lock. */
static void set_slob_page_free(struct page *sp, struct list_head *list)
{
    list_add(&sp->lru, list);
    __SetPageSlobFree(sp);
}
  117.  
/* Remove @sp from its free list and clear the flag.  Called under slob_lock. */
static inline void clear_slob_page_free(struct page *sp)
{
    list_del(&sp->lru);
    __ClearPageSlobFree(sp);
}
  123.  
/* Size of one allocation unit, and a bytes->units round-up helper. */
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
    struct rcu_head head;
    int size;       /* cache object size, including this footer */
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);
  141.  
/*
 * Encode the given size and next info into a free slob block s.
 *
 * A one-unit block stores the negated offset of the next free block in
 * its single unit; larger blocks store their size in the first unit and
 * the next-offset in the second.  Offsets are relative to the page base.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
    slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
    slobidx_t offset = next - base;

    if (size > 1) {
        s[0].units = size;
        s[1].units = offset;
    } else
        s[0].units = -offset;
}
  156.  
  157. /*
  158.  * Return the size of a slob block.
  159.  */
  160. static slobidx_t slob_units(slob_t *s)
  161. {
  162.     if (s->units > 0)
  163.         return s->units;
  164.     return 1;
  165. }
  166.  
/*
 * Return the next free slob block pointer after this one.
 *
 * The next-offset lives negated in s[0] for one-unit blocks, otherwise
 * in s[1]; it is always relative to the start of the enclosing page.
 */
static slob_t *slob_next(slob_t *s)
{
    slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
    slobidx_t next;

    if (s[0].units < 0)
        next = -s[0].units;
    else
        next = s[1].units;
    return base+next;
}
  181.  
  182. /*
  183.  * Returns true if s is the last free block in its page.
  184.  */
  185. static int slob_last(slob_t *s)
  186. {
  187.     return !((unsigned long)slob_next(s) & ~PAGE_MASK);
  188. }
  189.  
/*
 * Grab 2^@order fresh pages from the page allocator, honouring an
 * explicit NUMA @node when one is given, and return their virtual
 * address (or NULL on failure).
 */
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
    void *page;

#ifdef CONFIG_NUMA
    if (node != NUMA_NO_NODE)
        page = __alloc_pages_node(node, gfp, order);
    else
#endif
        page = alloc_pages(gfp, order);

    if (!page)
        return NULL;

    return page_address(page);
}
  206.  
/*
 * Return 2^@order pages at virtual address @b to the page allocator,
 * crediting reclaim accounting when called from memory reclaim.
 */
static void slob_free_pages(void *b, int order)
{
    if (current->reclaim_state)
        current->reclaim_state->reclaimed_slab += 1 << order;
    free_pages((unsigned long)b, order);
}
  213.  
/*
 * Allocate a slob block within a given slob_page sp.
 *
 * First-fit walk of the page's free list.  @align of 0 means no
 * alignment constraint; otherwise the returned block starts on an
 * @align boundary, fragmenting the head of the chosen free block if
 * necessary.  Returns NULL when no block on this page is big enough.
 * Called under slob_lock.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
    slob_t *prev, *cur, *aligned = NULL;
    int delta = 0, units = SLOB_UNITS(size);

    for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
        slobidx_t avail = slob_units(cur);

        if (align) {
            aligned = (slob_t *)ALIGN((unsigned long)cur, align);
            delta = aligned - cur;    /* units lost to alignment padding */
        }
        if (avail >= units + delta) { /* room enough? */
            slob_t *next;

            if (delta) { /* need to fragment head to align? */
                next = slob_next(cur);
                set_slob(aligned, avail - delta, next);
                set_slob(cur, delta, aligned);
                prev = cur;
                cur = aligned;
                avail = slob_units(cur);
            }

            next = slob_next(cur);
            if (avail == units) { /* exact fit? unlink. */
                if (prev)
                    set_slob(prev, slob_units(prev), next);
                else
                    sp->freelist = next;
            } else { /* fragment */
                if (prev)
                    set_slob(prev, slob_units(prev), cur + units);
                else
                    sp->freelist = cur + units;
                set_slob(cur + units, avail - units, next);
            }

            sp->units -= units;
            if (!sp->units)
                clear_slob_page_free(sp);    /* page now fully allocated */
            return cur;
        }
        if (slob_last(cur))
            return NULL;
    }
}
  264.  
  265. //source: https://github.com/fusion2004/cop4610/blob/master/lab3/slob.c
  266. //checks the blocks in the current page for the quality of fit
  267. //returns an int >= 0 representing how many bytes left over after allocation
  268. //or -1 if page cannot support this block
  269. static int slob_page_best_fit_check(struct page *sp, size_t size, int align)
  270. {
  271.     slob_t *prev, *cur, *aligned = NULL;
  272.     int delta = 0, units = SLOB_UNITS(size);
  273.  
  274.     slob_t *best_cur = NULL;
  275.     slobidx_t best_fit = 0;
  276.  
  277.     for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
  278.         slobidx_t avail = slob_units(cur);
  279.  
  280.         if (align) {
  281.             aligned = (slob_t *)ALIGN((unsigned long)cur, align);
  282.             delta = aligned - cur;
  283.         }
  284.         if (avail >= units + delta && (best_cur == NULL || avail - (units + delta) < best_fit) ) { /* room enough? */
  285.             best_cur = cur;
  286.             best_fit = avail - (units + delta);
  287.             if(best_fit == 0)
  288.                 return 0;
  289.         }
  290.         if (slob_last(cur)) {
  291.             if (best_cur != NULL)
  292.                 return best_fit;
  293.            
  294.             return -1;
  295.         }
  296.     }
  297. }
  298.  
  299.  
/*
 * Rolling statistics over the last 100 allocations that forced a new
 * page, reported by sys_get_slob_amt_claimed()/sys_get_slob_amt_free().
 * Written under slob_lock in slob_alloc(); the syscall readers take no
 * lock.
 */
long amt_claimed [100];    /* requested size in bytes per new-page event */
long amt_free [100];       /* free space observed before the allocation */
int counter = 0;           /* next ring slot to overwrite */
  303.  
  304. /*
  305.  * slob_alloc: entry point into the slob allocator.
  306.  */
  307. static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
  308. {
  309.     struct page *sp;
  310.     struct list_head *prev;
  311.     struct list_head *slob_list;
  312.     struct page *best_fit_page;
  313.     slob_t *b = NULL;
  314.     unsigned long flags;
  315.     long temp_amt_free = 0;
  316.     int best_fit = -1; // keep track of best fit
  317.    
  318.  
  319.     if (size < SLOB_BREAK1)
  320.         slob_list = &free_slob_small;
  321.     else if (size < SLOB_BREAK2)
  322.         slob_list = &free_slob_medium;
  323.     else
  324.         slob_list = &free_slob_large;
  325.  
  326.     spin_lock_irqsave(&slob_lock, flags);
  327.     /* Iterate through each partially free page, try to find room
  328.     Keep track of how good the fit is. update best_fit_page when
  329.     a better fit is found. After loop, allocate to best_fit_page.
  330.     */
  331.     list_for_each_entry(sp, slob_list, lru) {
  332.         int current_fit = -1; // current fit for this page
  333.         temp_amt_free = temp_amt_free + sp->units;
  334. #ifdef CONFIG_NUMA
  335.         /*
  336.          * If there's a node specification, search for a partial
  337.          * page with a matching node id in the freelist.
  338.          */
  339.         if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
  340.             continue;
  341. #endif
  342.         /* Enough room on this page? */
  343.         if (sp->units < SLOB_UNITS(size))
  344.             continue;
  345.        
  346.  
  347.         //check current page list of blocks to see how the fit is
  348.         current_fit = slob_page_best_fit_check(sp, size, align);
  349.  
  350.         if (current_fit == 0)
  351.         {
  352.             best_fit_page = sp;
  353.             best_fit = current_fit;
  354.             break;
  355.         }
  356.         else if (current_fit > 0 && (best_fit == -1 || current_fit < best_fit))
  357.         {
  358.             best_fit_page = sp;
  359.             best_fit = current_fit;
  360.         }
  361.         continue;
  362.     }
  363.  
  364.     // if theres a good fit, allocate the block there, otherwise it goes in a new page
  365.     if (best_fit >=0)
  366.     {
  367.         /* Attempt to alloc */
  368.         b = slob_page_alloc(best_fit_page, size, align);
  369.     }
  370.  
  371.  
  372.  
  373.     spin_unlock_irqrestore(&slob_lock, flags);
  374.  
  375.     /* Not enough space: must allocate a new page */
  376.     if (!b) {
  377.         b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
  378.         if (!b)
  379.             return NULL;
  380.         sp = virt_to_page(b);
  381.         __SetPageSlab(sp);
  382.  
  383.         spin_lock_irqsave(&slob_lock, flags);
  384.    
  385.         amt_claimed[counter] = size;
  386.                 amt_free[counter] = (temp_amt_free * SLOB_UNIT) - SLOB_UNIT + 1;
  387.                 counter = (counter + 1) % 100;
  388.  
  389.         sp->units = SLOB_UNITS(PAGE_SIZE);
  390.         sp->freelist = b;
  391.         INIT_LIST_HEAD(&sp->lru);
  392.         set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
  393.         set_slob_page_free(sp, slob_list);
  394.         b = slob_page_alloc(sp, size, align);
  395.         BUG_ON(!b);
  396.         spin_unlock_irqrestore(&slob_lock, flags);
  397.     }
  398.     if (unlikely((gfp & __GFP_ZERO) && b))
  399.         memset(b, 0, size);
  400.     return b;
  401. }
  402.  
/*
 * slob_free: entry point into the slob allocator.
 *
 * Returns @block of @size bytes to its page's address-ordered free
 * list, coalescing with adjacent free blocks.  A page that becomes
 * entirely free is handed straight back to the page allocator.
 */
static void slob_free(void *block, int size)
{
    struct page *sp;
    slob_t *prev, *next, *b = (slob_t *)block;
    slobidx_t units;
    unsigned long flags;
    struct list_head *slob_list;

    if (unlikely(ZERO_OR_NULL_PTR(block)))
        return;
    BUG_ON(!size);

    sp = virt_to_page(block);
    units = SLOB_UNITS(size);

    spin_lock_irqsave(&slob_lock, flags);

    if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
        /* Go directly to page allocator. Do not pass slob allocator */
        if (slob_page_free(sp))
            clear_slob_page_free(sp);
        spin_unlock_irqrestore(&slob_lock, flags);
        __ClearPageSlab(sp);
        page_mapcount_reset(sp);
        slob_free_pages(b, 0);
        return;
    }

    if (!slob_page_free(sp)) {
        /* This slob page is about to become partially free. Easy! */
        sp->units = units;
        sp->freelist = b;
        /* Single free block whose next is the page-end terminator. */
        set_slob(b, units,
            (void *)((unsigned long)(b +
                    SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
        if (size < SLOB_BREAK1)
            slob_list = &free_slob_small;
        else if (size < SLOB_BREAK2)
            slob_list = &free_slob_medium;
        else
            slob_list = &free_slob_large;
        set_slob_page_free(sp, slob_list);
        goto out;
    }

    /*
     * Otherwise the page is already partially free, so find reinsertion
     * point.
     */
    sp->units += units;

    if (b < (slob_t *)sp->freelist) {
        /* New list head; merge with the old head if they touch. */
        if (b + units == sp->freelist) {
            units += slob_units(sp->freelist);
            sp->freelist = slob_next(sp->freelist);
        }
        set_slob(b, units, sp->freelist);
        sp->freelist = b;
    } else {
        /* Walk to the insertion point, preserving address order. */
        prev = sp->freelist;
        next = slob_next(prev);
        while (b > next) {
            prev = next;
            next = slob_next(prev);
        }

        /* Merge with the following block if adjacent. */
        if (!slob_last(prev) && b + units == next) {
            units += slob_units(next);
            set_slob(b, units, slob_next(next));
        } else
            set_slob(b, units, next);

        /* Merge with the preceding block if adjacent. */
        if (prev + slob_units(prev) == b) {
            units = slob_units(b) + slob_units(prev);
            set_slob(prev, units, slob_next(b));
        } else
            set_slob(prev, slob_units(prev), b);
    }
out:
    spin_unlock_irqrestore(&slob_lock, flags);
}
  487.  
/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

/*
 * Common kmalloc path.  Small requests come from the slob heap with the
 * payload size stashed in an @align-byte header before the returned
 * pointer; larger requests go straight to the page allocator, as
 * compound pages when order > 0 so kfree() can recover the order.
 */
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
    unsigned int *m;
    int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
    void *ret;

    gfp &= gfp_allowed_mask;

    lockdep_trace_alloc(gfp);

    if (size < PAGE_SIZE - align) {
        if (!size)
            return ZERO_SIZE_PTR;

        /* Allocate header + payload as one block. */
        m = slob_alloc(size + align, gfp, align, node);

        if (!m)
            return NULL;
        *m = size;    /* record payload size for kfree()/ksize() */
        ret = (void *)m + align;

        trace_kmalloc_node(caller, ret,
                   size, size + align, gfp, node);
    } else {
        unsigned int order = get_order(size);

        if (likely(order))
            gfp |= __GFP_COMP;
        ret = slob_new_pages(gfp, order, node);

        trace_kmalloc_node(caller, ret,
                   size, PAGE_SIZE << order, gfp, node);
    }

    kmemleak_alloc(ret, size, 1, gfp);
    return ret;
}
  530.  
/* kmalloc with no node preference. */
void *__kmalloc(size_t size, gfp_t gfp)
{
    return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);
  536.  
/* kmalloc variant that attributes the allocation to @caller in traces. */
void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
    return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}
  541.  
#ifdef CONFIG_NUMA
/* Node-aware kmalloc that attributes the allocation to @caller. */
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
                    int node, unsigned long caller)
{
    return __do_kmalloc_node(size, gfp, node, caller);
}
#endif
  549.  
/*
 * kfree: free memory obtained from kmalloc.  PageSlab distinguishes
 * slob-heap blocks (which carry a size header) from direct page
 * allocations.
 */
void kfree(const void *block)
{
    struct page *sp;

    trace_kfree(_RET_IP_, block);

    if (unlikely(ZERO_OR_NULL_PTR(block)))
        return;
    kmemleak_free(block);

    sp = virt_to_page(block);
    if (PageSlab(sp)) {
        /* Small kmalloc: size stored in the header before @block. */
        int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
        unsigned int *m = (unsigned int *)(block - align);
        slob_free(m, *m + align);
    } else
        __free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);
  569.  
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
/*
 * Report the usable size of a kmalloc allocation: the unit-rounded
 * stored size for heap blocks, or the full (compound) page span for
 * direct page allocations.
 */
size_t ksize(const void *block)
{
    struct page *sp;
    int align;
    unsigned int *m;

    BUG_ON(!block);
    if (unlikely(block == ZERO_SIZE_PTR))
        return 0;

    sp = virt_to_page(block);
    if (unlikely(!PageSlab(sp)))
        return PAGE_SIZE << compound_order(sp);

    /* Recover the size header written by __do_kmalloc_node(). */
    align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
    m = (unsigned int *)(block - align);
    return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);
  590.  
  591. int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
  592. {
  593.     if (flags & SLAB_DESTROY_BY_RCU) {
  594.         /* leave room for rcu footer at the end of object */
  595.         c->size += sizeof(struct slob_rcu);
  596.     }
  597.     c->flags = flags;
  598.     return 0;
  599. }
  600.  
/*
 * Core kmem_cache allocation: sub-page caches are served by the slob
 * heap, larger ones directly by the page allocator; the cache's
 * constructor (if any) then runs on the new object.
 */
static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
    void *b;

    flags &= gfp_allowed_mask;

    lockdep_trace_alloc(flags);

    if (c->size < PAGE_SIZE) {
        b = slob_alloc(c->size, flags, c->align, node);
        trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
                        SLOB_UNITS(c->size) * SLOB_UNIT,
                        flags, node);
    } else {
        b = slob_new_pages(flags, get_order(c->size), node);
        trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
                        PAGE_SIZE << get_order(c->size),
                        flags, node);
    }

    if (b && c->ctor)
        c->ctor(b);

    kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
    return b;
}
  627.  
/* Allocate one object from @cachep with no node preference. */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
    return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);
  633.  
#ifdef CONFIG_NUMA
/* Node-aware kmalloc frontend. */
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
    return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

/* Node-aware cache allocation frontend. */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
    return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
  647.  
  648. static void __kmem_cache_free(void *b, int size)
  649. {
  650.     if (size < PAGE_SIZE)
  651.         slob_free(b, size);
  652.     else
  653.         slob_free_pages(b, get_order(size));
  654. }
  655.  
/*
 * RCU callback: recover the object start from the tail-embedded
 * slob_rcu footer and free it after the grace period.
 */
static void kmem_rcu_free(struct rcu_head *head)
{
    struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
    void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

    __kmem_cache_free(b, slob_rcu->size);
}
  663.  
/*
 * Free object @b back to cache @c.  SLAB_DESTROY_BY_RCU caches defer
 * the actual free to an RCU grace period via the tail footer.
 */
void kmem_cache_free(struct kmem_cache *c, void *b)
{
    kmemleak_free_recursive(b, c->flags);
    if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
        struct slob_rcu *slob_rcu;
        slob_rcu = b + (c->size - sizeof(struct slob_rcu));
        slob_rcu->size = c->size;
        call_rcu(&slob_rcu->head, kmem_rcu_free);
    } else {
        __kmem_cache_free(b, c->size);
    }

    trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);
  679.  
/* Bulk free: fall back to the generic one-by-one helper. */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
    __kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
  685.  
/* Bulk alloc: fall back to the generic one-by-one helper. */
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                                void **p)
{
    return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
  692.  
/* SLOB keeps no per-cache object accounting, so shutdown always succeeds. */
int __kmem_cache_shutdown(struct kmem_cache *c)
{
    /* No way to check for remaining objects */
    return 0;
}

/* Nothing per-cache to release in SLOB. */
void __kmem_cache_release(struct kmem_cache *c)
{
}

/* SLOB has no partial-slab lists to shrink; report nothing reclaimed. */
int __kmem_cache_shrink(struct kmem_cache *d)
{
    return 0;
}
  707.  
/* Statically-initialized cache-of-caches used before kmem_cache_init(). */
struct kmem_cache kmem_cache_boot = {
    .name = "kmem_cache",
    .size = sizeof(struct kmem_cache),
    .flags = SLAB_PANIC,
    .align = ARCH_KMALLOC_MINALIGN,
};
  714.  
/* Early boot: install the static boot cache and mark the allocator usable. */
void __init kmem_cache_init(void)
{
    kmem_cache = &kmem_cache_boot;
    slab_state = UP;
}

/* Late boot: allocator fully operational. */
void __init kmem_cache_init_late(void)
{
    slab_state = FULL;
}
  725.  
  726. asmlinkage long sys_get_slob_amt_claimed(void)
  727. {
  728.         long total = 0;
  729.         int i = 0;
  730.  
  731.         for(i = 0; i < 100; i++)
  732.         {
  733.                 total = total + amt_claimed[i];
  734.         }
  735.  
  736.         return total/100;
  737. }
  738.  
  739. asmlinkage long sys_get_slob_amt_free(void)
  740. {
  741.         long total = 0;
  742.         int i = 0;
  743.  
  744.         for(i = 0; i < 100; i++)
  745.         {
  746.                 total = total + amt_free[i];
  747.         }
  748.  
  749.         return total/100;
  750.  
  751. }
RAW Paste Data
Want to get better at C?
Learn to code C in 2017
Pastebin PRO Summer Special!
Get 40% OFF on Pastebin PRO accounts!
Top