chmeee, a guest, Nov 12th, 2019

diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 1190e35d559..32c7fe41da3 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -121,7 +121,7 @@ uintptr_t moea64_get_unique_vsid(void);
 
 #define PV_LOCK_PER_DOM    (PA_LOCK_COUNT * 3)
 #define PV_LOCK_COUNT  (PV_LOCK_PER_DOM * MAXMEMDOM)
-static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
+static struct rwlock_padalign pv_lock[PV_LOCK_COUNT];
 
 /*
  * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
@@ -134,12 +134,16 @@ static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
 #else
 #define PV_LOCK_IDX(pa)    (pa_index(pa) % PV_LOCK_COUNT)
 #endif
-#define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)]))
-#define PV_LOCK(pa)        mtx_lock(PV_LOCKPTR(pa))
-#define PV_UNLOCK(pa)      mtx_unlock(PV_LOCKPTR(pa))
-#define PV_LOCKASSERT(pa)  mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
-#define PV_PAGE_LOCK(m)        PV_LOCK(VM_PAGE_TO_PHYS(m))
-#define PV_PAGE_UNLOCK(m)  PV_UNLOCK(VM_PAGE_TO_PHYS(m))
+#define PV_LOCKPTR(pa) ((struct rwlock *)(&pv_lock[PV_LOCK_IDX(pa)]))
+#define PV_RLOCK(pa)       rw_rlock(PV_LOCKPTR(pa))
+#define PV_RUNLOCK(pa)     rw_runlock(PV_LOCKPTR(pa))
+#define PV_WLOCK(pa)       rw_wlock(PV_LOCKPTR(pa))
+#define PV_WUNLOCK(pa)     rw_wunlock(PV_LOCKPTR(pa))
+#define PV_LOCKASSERT(pa)  rw_assert(PV_LOCKPTR(pa), RA_LOCKED)
+#define PV_PAGE_RLOCK(m)   PV_RLOCK(VM_PAGE_TO_PHYS(m))
+#define PV_PAGE_RUNLOCK(m) PV_RUNLOCK(VM_PAGE_TO_PHYS(m))
+#define PV_PAGE_WLOCK(m)   PV_WLOCK(VM_PAGE_TO_PHYS(m))
+#define PV_PAGE_WUNLOCK(m) PV_WUNLOCK(VM_PAGE_TO_PHYS(m))
 #define PV_PAGE_LOCKASSERT(m)  PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
 
 struct ofw_map {
@@ -375,7 +379,7 @@ static struct pvo_head *
 vm_page_to_pvoh(vm_page_t m)
 {
 
-   mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
+   rw_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), RA_LOCKED);
    return (&m->md.mdpg_pvoh);
 }
 
@@ -658,7 +662,7 @@ moea64_kenter_large(mmu_t mmup, vm_offset_t va, vm_paddr_t pa, uint64_t attr, in
        VM_PROT_EXECUTE;
    pvo->pvo_pte.pa = pa | pte_lo;
    error = moea64_pvo_enter(mmup, pvo, NULL, NULL);
-   if (error != ENOENT && error != 0)
+   if (error != 0)
        panic("Error %d inserting long page\n", error);
    return (0);
 }
@@ -905,7 +909,7 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
     */
    mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
    for (i = 0; i < PV_LOCK_COUNT; i++)
-       mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
+       rw_init(&pv_lock[i], "page pv");
 
    /*
     * Initialise the bootstrap pvo pool.
@@ -1443,7 +1447,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
        pvo->pvo_vaddr |= PVO_MANAGED;
    }
 
-   PV_PAGE_LOCK(m);
+   PV_PAGE_RLOCK(m);
    PMAP_LOCK(pmap);
    if (pvo->pvo_pmap == NULL)
        init_pvo_entry(pvo, pmap, va);
@@ -1467,7 +1471,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
            }
 
            /* Then just clean up and go home */
-           PV_PAGE_UNLOCK(m);
+           PV_PAGE_RUNLOCK(m);
            PMAP_UNLOCK(pmap);
            free_pvo_entry(pvo);
            goto out;
@@ -1480,7 +1484,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
        }
    }
    PMAP_UNLOCK(pmap);
-   PV_PAGE_UNLOCK(m);
+   PV_PAGE_RUNLOCK(m);
 
    /* Free any dead pages */
    if (error == EEXIST) {
@@ -1763,9 +1767,9 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
        return;
 
    powerpc_sync();
-   PV_PAGE_LOCK(m);
+   PV_PAGE_WLOCK(m);
    refchg = 0;
-   LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
+   SLIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
        pmap = pvo->pvo_pmap;
        PMAP_LOCK(pmap);
        if (!(pvo->pvo_vaddr & PVO_DEAD) &&
@@ -1784,7 +1788,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
    if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
        vm_page_dirty(m);
    vm_page_aflag_clear(m, PGA_WRITEABLE);
-   PV_PAGE_UNLOCK(m);
+   PV_PAGE_WUNLOCK(m);
 }
 
 /*
@@ -1826,8 +1830,8 @@ moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
 
    lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
 
-   PV_PAGE_LOCK(m);
-   LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
+   PV_PAGE_WLOCK(m);
+   SLIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
        pmap = pvo->pvo_pmap;
        PMAP_LOCK(pmap);
        if (!(pvo->pvo_vaddr & PVO_DEAD)) {
@@ -1853,7 +1857,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
        PMAP_UNLOCK(pmap);
    }
    m->md.mdpg_cache_attrs = ma;
-   PV_PAGE_UNLOCK(m);
+   PV_PAGE_WUNLOCK(m);
 }
 
 /*
@@ -1888,7 +1892,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
        free_pvo_entry(oldpvo);
    }
 
-   if (error != 0 && error != ENOENT)
+   if (error != 0)
        panic("moea64_kenter: failed to enter va %#zx pa %#jx: %d", va,
            (uintmax_t)pa, error);
 }
@@ -2081,8 +2085,8 @@ moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
        ("moea64_page_exists_quick: page %p is not managed", m));
    loops = 0;
    rv = FALSE;
-   PV_PAGE_LOCK(m);
-   LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
+   PV_PAGE_WLOCK(m);
+   SLIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
        if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
            rv = TRUE;
            break;
@@ -2090,7 +2094,7 @@ moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
        if (++loops >= 16)
            break;
    }
-   PV_PAGE_UNLOCK(m);
+   PV_PAGE_WUNLOCK(m);
    return (rv);
 }
 
@@ -2100,7 +2104,7 @@ moea64_page_init(mmu_t mmu __unused, vm_page_t m)
 
    m->md.mdpg_attrs = 0;
    m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
-   LIST_INIT(&m->md.mdpg_pvoh);
+   SLIST_INIT(&m->md.mdpg_pvoh);
 }
 
 /*
@@ -2116,11 +2120,11 @@ moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
    count = 0;
    if ((m->oflags & VPO_UNMANAGED) != 0)
        return (count);
-   PV_PAGE_LOCK(m);
-   LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
+   PV_PAGE_WLOCK(m);
+   SLIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
        if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
            count++;
-   PV_PAGE_UNLOCK(m);
+   PV_PAGE_WUNLOCK(m);
    return (count);
 }
 
@@ -2452,10 +2456,10 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
    int wasdead;
    pmap_t  pmap;
 
-   LIST_INIT(&freequeue);
+   SLIST_INIT(&freequeue);
 
-   PV_PAGE_LOCK(m);
-   LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
+   PV_PAGE_WLOCK(m);
+   SLIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
        pmap = pvo->pvo_pmap;
        PMAP_LOCK(pmap);
        wasdead = (pvo->pvo_vaddr & PVO_DEAD);
@@ -2463,16 +2467,16 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
            moea64_pvo_remove_from_pmap(mmu, pvo);
        moea64_pvo_remove_from_page_locked(mmu, pvo, m);
        if (!wasdead)
-           LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
+           SLIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
        PMAP_UNLOCK(pmap);
 
    }
    KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
    KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
-   PV_PAGE_UNLOCK(m);
+   PV_PAGE_WUNLOCK(m);
 
    /* Clean up UMA allocations */
-   LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
+   SLIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
        free_pvo_entry(pvo);
 }
 
@@ -2526,7 +2530,7 @@ static int
 moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head,
     struct pvo_entry **oldpvop)
 {
-   int first, err;
+   int err;
    struct pvo_entry *old_pvo;
 
    PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
@@ -2545,13 +2549,13 @@ moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head,
    }
 
    /*
-    * Remember if the list was empty and therefore will be the first
-    * item.
+    * "Atomic" insert head.  The page is locked to only allow concurrent
+    * inserts to head at this point, so we only need to replace the head
+    * pointer for now.
     */
    if (pvo_head != NULL) {
-       if (LIST_FIRST(pvo_head) == NULL)
-           first = 1;
-       LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
+       SLIST_NEXT(pvo, pvo_vlink) =
+           atomic_swap_ptr(&SLIST_FIRST(pvo_head), pvo);
    }
 
    if (pvo->pvo_vaddr & PVO_WIRED)
@@ -2581,7 +2585,7 @@ moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head,
            pvo->pvo_vaddr & PVO_LARGE);
 #endif
 
-   return (first ? ENOENT : 0);
+   return (0);
 }
 
 static void
@@ -2658,8 +2662,8 @@ moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo,
    PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
    if (pvo->pvo_vaddr & PVO_MANAGED) {
        if (m != NULL) {
-           LIST_REMOVE(pvo, pvo_vlink);
-           if (LIST_EMPTY(vm_page_to_pvoh(m)))
+           SLIST_REMOVE(&m->md.mdpg_pvoh, pvo, pvo_entry, pvo_vlink);
+           if (SLIST_EMPTY(vm_page_to_pvoh(m)))
                vm_page_aflag_clear(m,
                    PGA_WRITEABLE | PGA_EXECUTABLE);
        }
@@ -2677,9 +2681,9 @@ moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
    if (pvo->pvo_vaddr & PVO_MANAGED)
        pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
 
-   PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
+   PV_WLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
    moea64_pvo_remove_from_page_locked(mmu, pvo, pg);
-   PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
+   PV_WUNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
 }
 
 static struct pvo_entry *
@@ -2712,8 +2716,8 @@ moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
     */
    rv = FALSE;
    powerpc_sync();
-   PV_PAGE_LOCK(m);
-   LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
+   PV_PAGE_WLOCK(m);
+   SLIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
        ret = 0;
 
        /*
@@ -2735,7 +2739,7 @@ moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
            }
        }
    }
-   PV_PAGE_UNLOCK(m);
+   PV_PAGE_WUNLOCK(m);
 
    return (rv);
 }
@@ -2757,8 +2761,8 @@ moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
     * For each pvo entry, clear the pte's ptebit.
     */
    count = 0;
-   PV_PAGE_LOCK(m);
-   LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
+   PV_PAGE_WLOCK(m);
+   SLIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
        ret = 0;
 
        PMAP_LOCK(pvo->pvo_pmap);
@@ -2770,7 +2774,7 @@ moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
            count++;
    }
    atomic_clear_32(&m->md.mdpg_attrs, ptebit);
-   PV_PAGE_UNLOCK(m);
+   PV_PAGE_WUNLOCK(m);
 
    return (count);
 }
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index b7e28539b3d..cec139d77ff 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -89,7 +89,7 @@ typedef struct pmap *pmap_t;
 struct slbtnode;
 
 struct pvo_entry {
-   LIST_ENTRY(pvo_entry) pvo_vlink;    /* Link to common virt page */
+   SLIST_ENTRY(pvo_entry) pvo_vlink;   /* Link to common virt page */
 #ifndef __powerpc64__
    LIST_ENTRY(pvo_entry) pvo_olink;    /* Link to overflow entry */
 #endif
@@ -111,7 +111,7 @@ struct pvo_entry {
    vm_offset_t pvo_vaddr;      /* VA of entry */
    uint64_t    pvo_vpn;        /* Virtual page number */
 };
-LIST_HEAD(pvo_head, pvo_entry);
+SLIST_HEAD(pvo_head, pvo_entry);
 SLIST_HEAD(pvo_dlist, pvo_entry);
 RB_HEAD(pvo_tree, pvo_entry);
 int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
@@ -203,7 +203,7 @@ struct  md_page {
 
 #ifdef AIM
 #define    pmap_page_get_memattr(m)    ((m)->md.mdpg_cache_attrs)
-#define    pmap_page_is_mapped(m)  (!LIST_EMPTY(&(m)->md.mdpg_pvoh))
+#define    pmap_page_is_mapped(m)  (!SLIST_EMPTY(&(m)->md.mdpg_pvoh))
 #else
 #define    pmap_page_get_memattr(m)    VM_MEMATTR_DEFAULT
 #define    pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.pv_list))