Guest User

Proposed fix patch for i386 mm/mremap (extend mutually PMD-aligned mremap ranges)

a guest
Jul 16th, 2020
118
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 2.75 KB | None | 0 0
  1. diff --git a/mm/mremap.c b/mm/mremap.c
  2. index 6b153dc05fe4..fa84b5992d89 100644
  3. --- a/mm/mremap.c
  4. +++ b/mm/mremap.c
  5. @@ -254,6 +254,77 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
  6.  
  7. return true;
  8. }
  9. +
  10. +#define ADDR_BEFORE_PREV(addr, vma) \
  11. + ((vma)->vm_prev && (addr) < (vma)->vm_prev->vm_end) /* addr lies below the end of the vma preceding @vma */
  12. +
  13. +static inline void try_to_align_start(unsigned long *len,
  14. + struct vm_area_struct *old, unsigned long *old_addr,
  15. + struct vm_area_struct *new, unsigned long *new_addr)
  16. +{
  17. + if (*old_addr > old->vm_start) /* not moving from the very start of the old vma */
  18. + return;
  19. +
  20. + if (ADDR_BEFORE_PREV(*old_addr & PMD_MASK, old)) /* rounding old_addr down would overlap the previous vma */
  21. + return;
  22. +
  23. + if (ADDR_BEFORE_PREV(*new_addr & PMD_MASK, new)) /* rounding new_addr down would overlap the previous vma */
  24. + return;
  25. +
  26. + /* Bingo! Extend the move downward to the PMD boundary. */
  27. + *len += *new_addr & ~PMD_MASK; /* same delta for old_addr: caller verified mutual alignment */
  28. + *old_addr &= PMD_MASK;
  29. + *new_addr &= PMD_MASK;
  30. +}
  31. +
  32. +/*
  33. + * When aligning the end, avoid ALIGN() (which can overflow
  34. + * if the user space is the full address space, and overshoot
  35. + * the vm_start of the next vma).
  36. + *
  37. + * Align the upper limit down instead, and check that it's not
  38. + * in the same PMD as the end.
  39. + */
  40. +#define ADDR_AFTER_NEXT(addr, vma) \
  41. + ((vma)->vm_next && (addr) > (PMD_MASK & (vma)->vm_next->vm_start)) /* addr reaches into the PMD holding the next vma's start */
  42. +
  43. +static inline void try_to_align_end(unsigned long *len,
  44. + struct vm_area_struct *old, unsigned long *old_addr,
  45. + struct vm_area_struct *new, unsigned long *new_addr)
  46. +{
  47. + if (*old_addr + *len < old->vm_end) /* move stops short of the old vma's end */
  48. + return;
  49. +
  50. + if (ADDR_AFTER_NEXT(*old_addr + *len, old)) /* aligning the old end up would run into the next vma */
  51. + return;
  52. +
  53. + if (ADDR_AFTER_NEXT(*new_addr + *len, new)) /* aligning the new end up would run into the next vma */
  54. + return;
  55. +
  56. + /* Mutual alignment means this is same for new/old addr */
  57. + *len = ALIGN(*new_addr + *len, PMD_SIZE) - *new_addr;
  58. +}
  59. +
  60. +/*
  61. + * The PMD move case is much more efficient, so if we have the
  62. + * mutually aligned case, try to see if we can extend the
  63. + * beginning and end to be aligned too.
  64. + *
  65. + * The pointer dereferences look bad, but with inlining, the
  66. + * compiler will sort it out.
  67. + */
  68. +static inline void try_to_align_range(unsigned long *len,
  69. + struct vm_area_struct *old, unsigned long *old_addr,
  70. + struct vm_area_struct *new, unsigned long *new_addr)
  71. +{
  72. + if ((*old_addr ^ *new_addr) & ~PMD_MASK) /* old and new must share the same offset within a PMD */
  73. + return;
  74. +
  75. + try_to_align_start(len, old, old_addr, new, new_addr);
  76. + try_to_align_end(len, old, old_addr, new, new_addr);
  77. +}
  78. +#else
  79. +#define try_to_align_range(len,old,olda,new,newa) do { } while (0) /* statement-shaped no-op when the PMD-move fast path is not built */
  80. #endif
  81.  
  82. unsigned long move_page_tables(struct vm_area_struct *vma,
  83. @@ -272,6 +343,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
  84. old_addr, old_end);
  85. mmu_notifier_invalidate_range_start(&range);
  86.  
  87. + try_to_align_range(&len, vma, &old_addr, new_vma, &new_addr);
  88. +
  89. for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
  90. cond_resched();
  91. next = (old_addr + PMD_SIZE) & PMD_MASK;
Add Comment
Please, Sign In to add comment