tlbex-handler.S

/*
 * TLB exception handler for Inventel boards
 * Copyright 2006 Vivien Chappelier -- Inventel / Thomson
 *
 * Based on the TLB exception handling code for r4k.
 * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse
 *
 * Multi-cpu abstraction and reworking:
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 *
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */

#include <linux/config.h>

#include <asm/asm.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/pgtable-bits.h>

/*
 * TLB exception handlers
 *
 * On a CMT CPU, the TLB is shared between the two cores. Since hardware
 * exception serialization must be turned off to allow IPIs to reach the other
 * core during operations such as I-cache flushing, we need to use software
 * locking to ensure serialized access to the TLB and the corresponding CP0
 * registers.
 *
 * Besides locking, the implementation differs slightly from a standard SMP
 * one, as CP0_CONTEXT is shared between the cores. It therefore cannot be
 * used to store the processor number, which is obtained from the CP0 CMT
 * local register instead. Nor can it be used to find the faulting address.
 *
 * If the lock cannot be taken, we must return from the exception to allow
 * software interrupts (of higher priority than TLB exceptions) to be
 * serviced. The TLB exception will be retaken if really needed and we can
 * try again to obtain the lock.
 *
 * An entry may also be added on one core while the other core enters a TLB
 * handler, so we must ensure the exception is still valid by probing the TLB
 * to avoid the following race:
 *	TP0				TP1
 *  TLB exception
 *  acquire lock
 *  ...				access BadVAddr corresponding to entry X
 *  write to TLB entry X	enter TLB exception
 *  release lock		acquire lock
 *				...
 *  <refill:		BadVAddr may be present in the TLB now>
 *  <mod/load/store:	BadVAddr may have been removed from the TLB>
 */
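
/*
 * For clarity (not part of the original source): a rough C-level sketch of
 * the refill path described above, using hypothetical helpers (trylock,
 * pte_offset, tlbp, write_entrylo, tlbwr, unlock) that stand in for the
 * macros and instructions used below.
 *
 *	if (!trylock(&jtlb_lock))
 *		return;				   eret; retaken later if still needed
 *	ptep  = pte_offset(pgd_current[cpu], badvaddr);
 *	index = tlbp();				   probe while holding the lock
 *	write_entrylo(ptep[0] >> 6, ptep[1] >> 6);
 *	if (index < 0)
 *		tlbwr();			   COND_TLBWR: write only if still missing
 *	unlock(&jtlb_lock);
 *	return;					   eret
 *
 * The load/store/modify fastpaths make the opposite check and use tlbwi
 * (COND_TLBWI) only when the probe still finds the entry.
 */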

/* Debug */
#define LED_BLUE 6
#define LED_CPU 2
#define LED_REFILL 5

#define LED_ON(ptr, tmp, gpio)		\
	lui	ptr, 0xfffe;		\
	lw	tmp, 0x8c(ptr);		\
	ori	tmp, (1 << gpio);	\
	sw	tmp, 0x8c(ptr);

#define LED_OFF(ptr, tmp, gpio)		\
	lui	ptr, 0xfffe;		\
	lw	tmp, 0x8c(ptr);		\
	ori	tmp, (1 << gpio);	\
	xori	tmp, (1 << gpio);	\
	sw	tmp, 0x8c(ptr);

#define CPULED_ON(ptr, tmp)		\
	lui	ptr, 0xfffe;		\
	lw	tmp, 0x8c(ptr);		\
	mfc0	ptr, CP0_CMT_LOCAL;	\
	srl	ptr, 31;		\
	sll	ptr, LED_CPU;		\
	addi	ptr, (1 << LED_CPU);	\
	or	tmp, ptr, tmp;		\
	lui	ptr, 0xfffe;		\
	sw	tmp, 0x8c(ptr);

#define CPULED_OFF(ptr, tmp)		\
	lui	ptr, 0xfffe;		\
	lw	tmp, 0x8c(ptr);		\
	mfc0	ptr, CP0_CMT_LOCAL;	\
	srl	ptr, 31;		\
	sll	ptr, LED_CPU;		\
	addi	ptr, (1 << LED_CPU);	\
	or	tmp, ptr, tmp;		\
	xor	tmp, ptr, tmp;		\
	lui	ptr, 0xfffe;		\
	sw	tmp, 0x8c(ptr);

/*
 * Joint TLB spinlock
 *
 * The jtlb spinlock is used to access the TLB atomically. All TLB functions
 * must acquire the lock before changing EntryLo0, EntryLo1 and Index.
 * WARNING: this code assumes the ->lock offset is 0 in spinlock_t.
 */
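
/*
 * Illustrative only (an assumption, not taken from this file): the C side is
 * expected to provide the lock word, e.g. something like
 *
 *	spinlock_t jtlb_lock = SPIN_LOCK_UNLOCKED;
 *
 * TLB_TRYLOCK/TLB_UNLOCK below access %lo(jtlb_lock)(ptr) directly as a bare
 * word, which is why the ->lock member must sit at offset 0 in spinlock_t.
 */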

#ifdef CONFIG_CMT
	/* try acquiring the TLB lock */
#define TLB_TRYLOCK(ptr, tmp, label)		\
	lui	ptr, %hi(jtlb_lock);		\
	ll	tmp, %lo(jtlb_lock)(ptr);	\
	bnez	tmp, label;			\
	 li	tmp, 1;				\
	sc	tmp, %lo(jtlb_lock)(ptr);	\
	beqz	tmp, label;			\
	 sync;
	/* release the TLB lock */
#define TLB_UNLOCK(tmp)				\
	lui	tmp, %hi(jtlb_lock);		\
	sync;					\
	sw	$0, %lo(jtlb_lock)(tmp);
	/* macros to conditionally write a TLB entry */
#define CMT_TLBP		tlbp;
#define CMT_LOAD_INDEX(tmp)	mfc0	tmp, CP0_INDEX;
#define COND_TLBWR(tmp)				\
	bltzl	tmp, 666f;			\
	 tlbwr;					\
	666:;
#define COND_TLBWI(tmp)				\
	bgezl	tmp, 666f;			\
	 tlbwi;					\
	666:;
#else
#define TLB_TRYLOCK(ptr, tmp, label)
#define TLB_UNLOCK(tmp)
#define CMT_TLBP
#define CMT_LOAD_INDEX(tmp)
#define COND_TLBWR(tmp)		tlbwr;
#define COND_TLBWI(tmp)		tlbwi;
#endif
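
/*
 * Note (added for clarity): COND_TLBWR and COND_TLBWI use branch-likely
 * instructions, so the write in the delay slot only executes when the branch
 * is taken. After a tlbp, Index is negative on a miss, so COND_TLBWR only
 * runs tlbwr when the entry is still missing, and COND_TLBWI only runs tlbwi
 * when the entry is still present. Without CONFIG_CMT both collapse to plain
 * unconditional tlbwr/tlbwi.
 */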

#define PTE_SIZE	4
#define PTEP_INDX_MSK	0xff8
#define PTE_INDX_MSK	0xffc
#define PTE_INDX_SHIFT	10

#ifdef CONFIG_SMP
#define PTE_LW	ll
#define PTE_SW	sc
#else
#define PTE_LW	lw
#define PTE_SW	sw
#endif
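
/*
 * Note (added for clarity): under CONFIG_SMP the PTE is loaded with ll and
 * stored back with sc, so the status-bit updates performed by the fastpath
 * handlers are atomic with respect to the other core.
 */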

/* Check if the PTE is present; if not, jump to LABEL.
 * PTR points to the page table where this PTE is located; when the macro is
 * done executing, PTE will be restored with its original value.
 */
#define PTE_PRESENT(pte, ptr, label) \
	andi	pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
	xori	pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
	bnez	pte, label; \
	 PTE_LW	pte, (ptr);

/* Check if the PTE can be written to; if not, branch to LABEL.
 * Regardless, restore PTE with the value from PTR when done.
 */
#define PTE_WRITABLE(pte, ptr, label) \
	andi	pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
	xori	pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
	bnez	pte, label; \
	 PTE_LW	pte, (ptr);

/*
 * Check if the PTE can be modified; if not, branch to LABEL. Regardless,
 * restore PTE with the value from PTR when done.
 */
#define PTE_MODIFIABLE(pte, ptr, label) \
	andi	pte, pte, _PAGE_WRITE; \
	beqz	pte, label; \
	 PTE_LW	pte, (ptr);

/* This places the even/odd pte pair in the page
 * table at PTR into ENTRYLO0 and ENTRYLO1 using
 * TMP as a scratch register.
 */
#define PTE_RELOAD(ptr, tmp) \
	ori	ptr, ptr, PTE_SIZE; \
	xori	ptr, ptr, PTE_SIZE; \
	lw	tmp, 0(ptr); \
	lw	ptr, PTE_SIZE(ptr); \
	srl	tmp, tmp, 6; \
	mtc0	tmp, CP0_ENTRYLO0; \
	srl	ptr, ptr, 6; \
	CMT_LOAD_INDEX(tmp); \
	mtc0	ptr, CP0_ENTRYLO1;
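
/*
 * Note (added for clarity): the ori/xori pair above rounds PTR down to the
 * even PTE of the even/odd pair, the two loads fetch both PTEs, and the
 * right shift by 6 converts each PTE to EntryLo format. CMT_LOAD_INDEX is
 * interleaved here so the tlbp result is available for COND_TLBWI/COND_TLBWR.
 */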

/* Make PTE valid, store result in PTR. */
#define PTE_MAKEVALID(pte, ptr) \
	ori	pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \
	PTE_SW	pte, (ptr);

/* Make PTE writable, update software status bits as well,
 * then store at PTR.
 */
#define PTE_MAKEWRITE(pte, ptr) \
	ori	pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \
			   _PAGE_VALID | _PAGE_DIRTY); \
	PTE_SW	pte, (ptr);

#ifdef CONFIG_CMT
#define GET_PGD_HI(ptr, tmp)			\
	mfc0	ptr, CP0_CMT_LOCAL;		\
	lui	tmp, %hi(pgd_current);		\
	srl	ptr, 31;			\
	sll	ptr, 2;				\
	addu	ptr, tmp, ptr;
#else
#define GET_PGD_HI(ptr, tmp)			\
	lui	ptr, %hi(pgd_current)
#endif

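/*
 * Note (added for clarity): GET_PTEP loads the current pgd pointer from
 * pgd_current (per-core under CONFIG_CMT), indexes it with the top bits of
 * BadVAddr to find the page table, then indexes the page table with the
 * middle bits of BadVAddr (masked by MASK) so that PTR ends up pointing at
 * the faulting PTE, or, with PTEP_INDX_MSK, at its even/odd PTE pair.
 */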
#define GET_PTEP(ptr, tmp, mask)		\
	GET_PGD_HI(ptr, tmp);			\
	mfc0	tmp, CP0_BADVADDR;		\
	lw	ptr, %lo(pgd_current)(ptr);	\
	srl	tmp, tmp, _PGDIR_SHIFT;		\
	sll	tmp, tmp, 2;			\
	addu	ptr, ptr, tmp;			\
	mfc0	tmp, CP0_BADVADDR;		\
	lw	ptr, (ptr);			\
	srl	tmp, tmp, PTE_INDX_SHIFT;	\
	and	tmp, tmp, mask;			\
	addu	ptr, ptr, tmp;

#ifdef CONFIG_SMP
#define PTE_CHANGED_HAZARD(pte, label)		\
	beqz	pte, label;			\
	 nop;
#else
#define PTE_CHANGED_HAZARD(pte, label)
#endif
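
/*
 * Note (added for clarity): when PTE_SW is sc, a failed store leaves 0 in
 * the PTE register, so PTE_CHANGED_HAZARD branches back to reload the PTE
 * and retry the update; on UP builds it expands to nothing.
 */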

/*
 * TLB refill handler (max 64 insn)
 */
	.set	noreorder
	.set	noat
	LEAF(invtl_tlb_refill)
	.set	mips32
	TLB_TRYLOCK(k1, k0, 1f)			# 7
	GET_PTEP(k1, k0, PTEP_INDX_MSK)		# 15
	lw	k0, 0(k1)			# 1  get even pte
	lw	k1, 4(k1)			# 1  get odd pte
	CMT_TLBP
	srl	k0, k0, 6			# 1  convert to entrylo0
	mtc0	k0, CP0_ENTRYLO0		# 1  load it
	srl	k1, k1, 6			# 1  convert to entrylo1
	CMT_LOAD_INDEX(k0)
	mtc0	k1, CP0_ENTRYLO1		# 1  load it
	COND_TLBWR(k0)				# write random tlb entry
	TLB_UNLOCK(k0)				# 3
1:
	eret					# 1  return from trap
	END(invtl_tlb_refill)
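
/*
 * Presumably (not shown in this file) these handlers are copied over the
 * architected exception vectors at boot, which is why the refill handler
 * must fit in 64 instructions and the fastpath handlers below in 128.
 */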

/*
 * TLB load fastpath handler (max 128 insn)
 */
	.set	noreorder
	.set	noat
	LEAF(invtl_tlb_load)
	.set	mips32
	TLB_TRYLOCK(k1, k0, 1f)
	GET_PTEP(k1, k0, PTE_INDX_MSK)
2:
	PTE_LW	k0, (k1)
	tlbp
	PTE_PRESENT(k0, k1, nopage_tlbl)
	PTE_MAKEVALID(k0, k1)
	PTE_CHANGED_HAZARD(k0, 2b)
	PTE_RELOAD(k1, k0)
	COND_TLBWI(k0)				# write indexed tlb entry
	TLB_UNLOCK(k0)
1:
	eret					# return from trap
nopage_tlbl:
	TLB_UNLOCK(k0)
	j	tlb_do_page_fault_0
	 nop
	END(invtl_tlb_load)
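
/*
 * Note (added for clarity): the nopage_* slowpaths drop the TLB lock before
 * tailing into the common C page fault handlers; loads go to
 * tlb_do_page_fault_0, stores and modifies to tlb_do_page_fault_1.
 */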

/*
 * TLB store fastpath handler
 */
	.set	noreorder
	.set	noat
	LEAF(invtl_tlb_store)
	.set	mips32
	TLB_TRYLOCK(k1, k0, 1f)
	GET_PTEP(k1, k0, PTE_INDX_MSK)
2:
	PTE_LW	k0, (k1)
	tlbp
	PTE_WRITABLE(k0, k1, nopage_tlbs)
	PTE_MAKEWRITE(k0, k1)
	PTE_CHANGED_HAZARD(k0, 2b)
	PTE_RELOAD(k1, k0)
	COND_TLBWI(k0)				# write indexed tlb entry
	TLB_UNLOCK(k0)
1:
	eret					# return from trap
nopage_tlbs:
	TLB_UNLOCK(k0)
	j	tlb_do_page_fault_1
	 nop
invtl_tlb_store_end:
	END(invtl_tlb_store)

/*
 * TLB modify fastpath handler
 */
	.set	noreorder
	.set	noat
	LEAF(invtl_tlb_modify)
	.set	mips32
	TLB_TRYLOCK(k1, k0, 1f)
	GET_PTEP(k1, k0, PTE_INDX_MSK)
2:
	PTE_LW	k0, (k1)
	tlbp
	PTE_MODIFIABLE(k0, k1, nopage_tlbm)
	PTE_MAKEWRITE(k0, k1)
	PTE_CHANGED_HAZARD(k0, 2b)
	PTE_RELOAD(k1, k0)
	COND_TLBWI(k0)				# write indexed tlb entry
	TLB_UNLOCK(k0)
1:
	eret					# return from trap
nopage_tlbm:
	TLB_UNLOCK(k0)
	j	tlb_do_page_fault_1
	 nop
	END(invtl_tlb_modify)