--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -145,7 +145,7 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,

         if (page_count(page) == 1) {
                 if (memcg_kmem_enabled())
-                        memcg_kmem_uncharge(page, 0);
+                        memcg_kmem_uncharge(page, 0, NULL);
                 __SetPageLocked(page);
                 return 0;
         }
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 69966c4..3334ff1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1098,9 +1098,9 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-                            struct mem_cgroup *memcg);
+                            struct kmem_cache *s);
 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void memcg_kmem_uncharge(struct page *page, int order);
+void memcg_kmem_uncharge(struct page *page, int order, struct kmem_cache *s);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1133,6 +1133,8 @@ unsigned int memcg1_stats[] = {
         NR_FILE_DIRTY,
         NR_WRITEBACK,
         MEMCG_SWAP,
+        NR_SLAB_RECLAIMABLE,
+        NR_SLAB_UNRECLAIMABLE,
 };

 static const char *const memcg1_stat_names[] = {
@@ -1144,6 +1146,8 @@ static const char *const memcg1_stat_names[] = {
         "dirty",
         "writeback",
         "swap",
+        "slab_reclaimable",
+        "slab_unreclaimable",
 };

 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -2321,11 +2325,17 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
  * Returns 0 on success, an error code on failure.
  */
 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-                            struct mem_cgroup *memcg)
+                            struct kmem_cache *s)
 {
         unsigned int nr_pages = 1 << order;
         struct page_counter *counter;
         int ret;
+        struct mem_cgroup *memcg;
+
+        if (s)
+                memcg = s->memcg_params.memcg;
+        else
+                memcg = get_mem_cgroup_from_mm(current->mm);

         ret = try_charge(memcg, gfp, nr_pages);
         if (ret)
@@ -2339,6 +2349,13 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,

         page->mem_cgroup = memcg;

+        if (s) {
+                if (s->flags & SLAB_RECLAIM_ACCOUNT)
+                        __this_cpu_add(memcg->stat->count[NR_SLAB_RECLAIMABLE], nr_pages);
+                else
+                        __this_cpu_add(memcg->stat->count[NR_SLAB_UNRECLAIMABLE], nr_pages);
+        }
+
         return 0;
 }

@@ -2360,7 +2377,7 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)

         memcg = get_mem_cgroup_from_mm(current->mm);
         if (!mem_cgroup_is_root(memcg)) {
-                ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+                ret = memcg_kmem_charge_memcg(page, gfp, order, NULL);
                 if (!ret)
                         __SetPageKmemcg(page);
         }
@@ -2372,7 +2389,7 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
  * @page: page to uncharge
  * @order: allocation order
  */
-void memcg_kmem_uncharge(struct page *page, int order)
+void memcg_kmem_uncharge(struct page *page, int order, struct kmem_cache *s)
 {
         struct mem_cgroup *memcg = page->mem_cgroup;
         unsigned int nr_pages = 1 << order;
@@ -2389,6 +2406,13 @@ void memcg_kmem_uncharge(struct page *page, int order)
         if (do_memsw_account())
                 page_counter_uncharge(&memcg->memsw, nr_pages);

+        if (s) {
+                if (s->flags & SLAB_RECLAIM_ACCOUNT)
+                        __this_cpu_sub(memcg->stat->count[NR_SLAB_RECLAIMABLE], nr_pages);
+                else
+                        __this_cpu_sub(memcg->stat->count[NR_SLAB_UNRECLAIMABLE], nr_pages);
+        }
+
         page->mem_cgroup = NULL;

         /* slab pages do not have PageKmemcg flag set */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 77e4d3c..81ad71f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1040,7 +1040,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         if (PageMappingFlags(page))
                 page->mapping = NULL;
         if (memcg_kmem_enabled() && PageKmemcg(page))
-                memcg_kmem_uncharge(page, order);
+                memcg_kmem_uncharge(page, order, NULL);
         if (check_free)
                 bad += free_pages_check(page);
         if (bad)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -279,7 +279,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
                 return 0;
         if (is_root_cache(s))
                 return 0;
-        return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
+        return memcg_kmem_charge_memcg(page, gfp, order, s);
 }

 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
@@ -287,7 +287,7 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 {
         if (!memcg_kmem_enabled())
                 return;
-        memcg_kmem_uncharge(page, order);
+        memcg_kmem_uncharge(page, order, s);
 }
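
The two entries added to memcg1_stats and memcg1_stat_names above should surface per-cgroup slab usage in the cgroup v1 memory.stat file. Below is a minimal userspace sketch, not part of the patch, for reading those lines back; it assumes the patch is applied, that the v1 memory controller is mounted at /sys/fs/cgroup/memory, and the cgroup name in the default path is only a placeholder.

/*
 * Minimal sketch (not part of the patch): print the slab_reclaimable and
 * slab_unreclaimable lines that the memcg1_stat_names change above should
 * add to a cgroup v1 memory.stat file. The default path below assumes the
 * v1 memory controller is mounted at /sys/fs/cgroup/memory and uses a
 * placeholder cgroup name; pass a real memory.stat path as argv[1].
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] :
                "/sys/fs/cgroup/memory/example/memory.stat";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }

        /* memory.stat is one "name value" pair per line. */
        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, "slab_reclaimable ", 17) ||
                    !strncmp(line, "slab_unreclaimable ", 19))
                        fputs(line, stdout);
        }

        fclose(f);
        return 0;
}

Build with something like cc -o slab_stat slab_stat.c and point it at the memory.stat of the cgroup under test; in the v1 interface the values are reported in bytes.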