ebruakagunduz

zero_page_patch

Jan 30th, 2015
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 817a875..fe1a16b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -804,30 +804,33 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
 		return VM_FAULT_OOM;
-	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
-			transparent_hugepage_use_zero_page()) {
+	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
 		spinlock_t *ptl;
 		pgtable_t pgtable;
 		struct page *zero_page;
-		bool set;
+		/* bool set; */
 		pgtable = pte_alloc_one(mm, haddr);
+		pr_info("read-fault, vm_start = %04lx\n", vma->vm_start);
 		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
-		zero_page = get_huge_zero_page();
+		/* zero_page = get_huge_zero_page(); */
+		zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		pr_info("zero-page allocated\n");
 		if (unlikely(!zero_page)) {
 			pte_free(mm, pgtable);
 			count_vm_event(THP_FAULT_FALLBACK);
 			return VM_FAULT_FALLBACK;
 		}
 		ptl = pmd_lock(mm, pmd);
-		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-				zero_page);
+		/*set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
+				zero_page); */
 		spin_unlock(ptl);
-		if (!set) {
+		/*if (!set) {
 			pte_free(mm, pgtable);
 			put_huge_zero_page();
-		}
-		return 0;
+		}*/
+		/* return 0; */
+		return VM_FAULT_FALLBACK;
 	}
 	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 			vma, haddr, numa_node_id(), 0);
diff --git a/mm/memory.c b/mm/memory.c
index 54f3a9b..5259d2f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -756,8 +756,13 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			goto check_pfn;
 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
-		if (!is_zero_pfn(pfn))
+		if (!is_zero_pfn(pfn)) {
+			pr_info("vm_start = %04lx, !is_zero_pfn, have_pte_special, pid = %d\n", vma->vm_start, current->pid);
 			print_bad_pte(vma, addr, pte, NULL);
+			pr_info("printed zero page\n");
+		} else {
+			pr_info("vm_start = %04lx, is_zero_pfn, have_pte_special, pid = %d\n", vma->vm_start, current->pid);
+		}
 		return NULL;
 	}
 
@@ -778,8 +783,10 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		}
 	}
 
-	if (is_zero_pfn(pfn))
+	if (is_zero_pfn(pfn)) {
+		pr_info("vm_start = %04lx, is_zero_pfn, don't have pte special, pid = %d\n", vma->vm_start, current->pid);
 		return NULL;
+	}
 check_pfn:
 	if (unlikely(pfn > highest_memmap_pfn)) {
 		print_bad_pte(vma, addr, pte, NULL);
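
The pr_info() lines above only fire when a read is the first access to an untouched anonymous mapping, so that do_huge_pmd_anonymous_page() is entered without FAULT_FLAG_WRITE. The user-space program below is not part of the patch; it is a minimal sketch, assuming x86_64 with 2 MB transparent huge pages and a file name (zeroread.c) picked here only for illustration, of one way to trigger that read-fault path and watch the messages appear in dmesg.

/* zeroread.c - hypothetical test program, not part of the patch above.
 * Maps 4 MB of anonymous memory, picks a 2 MB-aligned address inside it,
 * marks it MADV_HUGEPAGE, and makes the first access a read so the fault
 * arrives without FAULT_FLAG_WRITE.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL << 20)	/* assumes 2 MB transparent huge pages */

int main(void)
{
	size_t len = 2 * HPAGE_SIZE;
	char *map, *aligned;
	volatile char c;

	/* anonymous private mapping; nothing in it has been touched yet */
	map = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* round up so a whole 2 MB-aligned range fits inside the mapping */
	aligned = (char *)(((uintptr_t)map + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));

	/* hint that this range may be backed by transparent huge pages */
	if (madvise(aligned, HPAGE_SIZE, MADV_HUGEPAGE))
		perror("madvise");

	c = aligned[0];	/* first access is a read, not a write */
	(void)c;

	munmap(map, len);
	return 0;
}

Compile with gcc zeroread.c -o zeroread, make sure /sys/kernel/mm/transparent_hugepage/enabled is set to always or madvise, run the program, and check dmesg for the read-fault and zero-page lines.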