/*
* TLB exception handler for Inventel boards
* Copyright 2006 Vivien Chappelier -- Inventel / Thomson
*
* Based on the TLB exception handling code for r4k.
* Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse
*
* Multi-cpu abstraction and reworking:
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/config.h>
#include <asm/asm.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/pgtable-bits.h>
/*
* TLB exception handlers
*
* On a CMT CPU, the TLB is shared between the two cores. Since hardware
exception serialization must be turned off to allow IPIs to reach the other
* core during operations such as I-cache flushing, we need to use software
* locking to ensure serialized access to the TLB and the corresponding CP0
* registers.
*
* Besides locking, the implementation is slightly different than on a standard
* SMP, as the CP0_CONTEXT is shared between the cores. Therefore it cannot be
* used to store the processor number, which is obtained from the CP0 CMT local
* register instead. It cannot be used to find the faulting address either.
*
* If the lock cannot be taken, we must return from exception to allow
* software interrupts (of higher priority than TLB exceptions) to be serviced.
* The TLB exception will be retaken if really needed and we can try again to
* obtain the lock.
*
* An entry may also be added on one core while the other core enters a TLB
handler, so we must ensure the exception is still valid by probing the TLB
* to avoid the following race:
* TP0 TP1
* TLB exception
* acquire lock
* ... access Badvaddr corresponding to entry X
* write to tlb entry X enter TLB exception
* release lock acquire lock
* ...
* <refill: Badvaddr may be present in the TLB now>
* <mod/load/store: Badvaddr may have been removed from the TLB>
*/
/* Debug */
/*
 * Debug LED helpers.  These read-modify-write what appears to be the
 * board's GPIO output register at 0xfffe008c (lui 0xfffe + offset 0x8c)
 * -- NOTE(review): confirm the register address against the board docs.
 * Every macro clobbers both ptr and tmp.
 */
#define LED_BLUE 6
#define LED_CPU 2
#define LED_REFILL 5
/* Turn on the LED on GPIO line "gpio" (set the bit). */
#define LED_ON(ptr, tmp, gpio) \
lui ptr, 0xfffe; \
lw tmp, 0x8c(ptr); \
ori tmp, (1 << gpio); \
sw tmp, 0x8c(ptr);
/* Turn off the LED on GPIO line "gpio": the ori+xori pair clears the
 * bit regardless of its previous state. */
#define LED_OFF(ptr, tmp, gpio) \
lui ptr, 0xfffe; \
lw tmp, 0x8c(ptr); \
ori tmp, (1 << gpio); \
xori tmp, (1 << gpio); \
sw tmp, 0x8c(ptr);
/* Turn on this core's LED.  The core id is bit 31 of CP0_CMT_LOCAL,
 * so TP0 lights GPIO bit LED_CPU and TP1 lights GPIO bit LED_CPU + 1. */
#define CPULED_ON(ptr, tmp) \
lui ptr, 0xfffe; \
lw tmp, 0x8c(ptr); \
mfc0 ptr, CP0_CMT_LOCAL; \
srl ptr, 31; \
sll ptr, LED_CPU; \
addi ptr, (1 << LED_CPU); \
or tmp, ptr, tmp; \
lui ptr, 0xfffe; \
sw tmp, 0x8c(ptr);
/* Turn off this core's LED (same bit selection as CPULED_ON; the
 * or+xor pair clears the bit). */
#define CPULED_OFF(ptr, tmp) \
lui ptr, 0xfffe; \
lw tmp, 0x8c(ptr); \
mfc0 ptr, CP0_CMT_LOCAL; \
srl ptr, 31; \
sll ptr, LED_CPU; \
addi ptr, (1 << LED_CPU); \
or tmp, ptr, tmp; \
xor tmp, ptr, tmp; \
lui ptr, 0xfffe; \
sw tmp, 0x8c(ptr);
/*
 * Joint TLB spinlock
 *
 * The jtlb spinlock is used to access the TLB atomically. All tlb functions
 * must acquire the lock before changing EntryLo0, EntryLo1 and Index.
 * WARNING: this code assumes the ->lock offset is 0 in spinlock_t.
 */
#ifdef CONFIG_CMT
/* try acquiring the TLB lock */
/* Try to take jtlb_lock with ll/sc.  If the lock is already held, or
 * the sc fails, branch to "label" (the handlers eret there and let the
 * exception be retaken -- see the header comment).  The delay-slot
 * "li"/"sync" are harmless when the branch is taken; the final sync
 * orders the acquisition before the TLB/CP0 accesses that follow. */
#define TLB_TRYLOCK(ptr, tmp, label) \
lui ptr, %hi(jtlb_lock); \
ll tmp, %lo(jtlb_lock)(ptr); \
bnez tmp, label; \
li tmp, 1; \
sc tmp, %lo(jtlb_lock)(ptr); \
beqz tmp, label; \
sync;
/* release the TLB lock */
/* The sync makes all TLB/CP0 updates visible before the lock word is
 * cleared with a plain store. */
#define TLB_UNLOCK(tmp) \
lui tmp, %hi(jtlb_lock); \
sync; \
sw $0, %lo(jtlb_lock)(tmp);
/* macros to conditionally write a TLB entry */
/* Probe the TLB for BadVaddr; the result is latched in CP0_INDEX. */
#define CMT_TLBP tlbp;
/* Fetch the probe result; bit 31 set (negative) means "no match". */
#define CMT_LOAD_INDEX(tmp) mfc0 tmp, CP0_INDEX;
/* Write a random TLB entry only if the probe missed (tmp < 0).  The
 * branch-likely nullifies the delay-slot tlbwr on a probe hit, so we
 * never insert a duplicate entry for BadVaddr. */
#define COND_TLBWR(tmp) \
bltzl tmp, 666f; \
tlbwr; \
666:;
/* Write the indexed TLB entry only if the probe hit (tmp >= 0): the
 * other core may have evicted the entry between our tlbp and here. */
#define COND_TLBWI(tmp) \
bgezl tmp, 666f; \
tlbwi; \
666:;
#else
/* Non-CMT: the TLB is private, no locking or re-probing needed. */
#define TLB_TRYLOCK(ptr, tmp, label)
#define TLB_UNLOCK(tmp)
#define CMT_TLBP
#define CMT_LOAD_INDEX(tmp)
#define COND_TLBWR(tmp) tlbwr;
#define COND_TLBWI(tmp) tlbwi;
#endif
/* A software pte is 32 bits wide. */
#define PTE_SIZE 4
/* Byte-offset masks into the page table: PTEP_INDX_MSK selects an
 * even/odd pte pair (the refill handler loads both halves of a TLB
 * entry), PTE_INDX_MSK selects a single pte. */
#define PTEP_INDX_MSK 0xff8
#define PTE_INDX_MSK 0xffc
/* Shift turning a faulting vaddr into a pte byte offset; equals
 * PAGE_SHIFT - 2, i.e. assumes 4K pages -- TODO confirm. */
#define PTE_INDX_SHIFT 10
#ifdef CONFIG_SMP
/* On SMP, ptes are read/written with ll/sc so a concurrent update is
 * detected (see PTE_CHANGED_HAZARD). */
#define PTE_LW ll
#define PTE_SW sc
#else
#define PTE_LW lw
#define PTE_SW sw
#endif
/* Check if PTE is present, if not then jump to LABEL.
 * PTR points to the page table where this PTE is located,
 * when the macro is done executing PTE will be restored
 * with its original value.
 * Note: the PTE_LW reload sits in the branch delay slot, so pte is
 * restored on both the taken and the fall-through paths.
 */
#define PTE_PRESENT(pte, ptr, label) \
andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
bnez pte, label; \
PTE_LW pte, (ptr);
/* Check if PTE can be written to, if not branch to LABEL.
 * Regardless restore PTE with value from PTR when done
 * (delay-slot reload, as in PTE_PRESENT).
 */
#define PTE_WRITABLE(pte, ptr, label) \
andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
bnez pte, label; \
PTE_LW pte, (ptr);
/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done (delay-slot reload).
 */
#define PTE_MODIFIABLE(pte, ptr, label) \
andi pte, pte, _PAGE_WRITE; \
beqz pte, label; \
PTE_LW pte, (ptr);
/* This places the even/odd pte pair in the page
 * table at PTR into ENTRYLO0 and ENTRYLO1 using
 * TMP as a scratch register.
 * The ori+xori pair clears the low pair bit so ptr points at the even
 * pte; srl by 6 converts a software pte into EntryLo format.  The tlbp
 * result is loaded into tmp (CMT only) for the following COND_TLBWI.
 */
#define PTE_RELOAD(ptr, tmp) \
ori ptr, ptr, PTE_SIZE; \
xori ptr, ptr, PTE_SIZE; \
lw tmp, 0(ptr); \
lw ptr, PTE_SIZE(ptr); \
srl tmp, tmp, 6; \
mtc0 tmp, CP0_ENTRYLO0; \
srl ptr, ptr, 6; \
CMT_LOAD_INDEX(tmp); \
mtc0 ptr, CP0_ENTRYLO1;
/* Make PTE valid, store result in PTR.
 * With CONFIG_SMP the PTE_SW is an sc: afterwards pte holds the sc
 * success flag, which PTE_CHANGED_HAZARD checks. */
#define PTE_MAKEVALID(pte, ptr) \
ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \
PTE_SW pte, (ptr);
/* Make PTE writable, update software status bits as well,
 * then store at PTR (sc on SMP, as above).
 */
#define PTE_MAKEWRITE(pte, ptr) \
ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \
_PAGE_VALID | _PAGE_DIRTY); \
PTE_SW pte, (ptr);
#ifdef CONFIG_CMT
/* Load &pgd_current[cpu] (high part) into ptr.  On CMT, pgd_current is
 * indexed by the core id read from bit 31 of CP0_CMT_LOCAL (CP0_CONTEXT
 * is shared and cannot hold the cpu number -- see header comment).
 * Clobbers ptr and tmp. */
#define GET_PGD_HI(ptr, tmp) \
mfc0 ptr, CP0_CMT_LOCAL; \
lui tmp, %hi(pgd_current); \
srl ptr, 31; \
sll ptr, 2; \
addu ptr, tmp, ptr;
#else
/* Single core: only one pgd_current entry; tmp is unused. */
#define GET_PGD_HI(ptr, tmp) \
lui ptr, %hi(pgd_current)
#endif
/* Leave in ptr the address of the pte for the faulting address:
 * walk pgd_current[cpu] -> pgd entry (BadVaddr >> _PGDIR_SHIFT) ->
 * page table base, then add the pte byte offset
 * ((BadVaddr >> PTE_INDX_SHIFT) & mask).  "mask" is PTEP_INDX_MSK for
 * the refill handler (even/odd pair) or PTE_INDX_MSK for a single pte.
 * Clobbers ptr and tmp.
 * Note: the mask step previously hard-coded k0 ("and k0, k0, mask"),
 * which only worked because every caller passes k0 as tmp; it now uses
 * the tmp parameter, which is identical for all current call sites. */
#define GET_PTEP(ptr, tmp, mask) \
GET_PGD_HI(ptr, tmp); \
mfc0 tmp, CP0_BADVADDR; \
lw ptr, %lo(pgd_current)(ptr); \
srl tmp, tmp, _PGDIR_SHIFT; \
sll tmp, tmp, 2; \
addu ptr, ptr, tmp; \
mfc0 tmp, CP0_BADVADDR; \
lw ptr, (ptr); \
srl tmp, tmp, PTE_INDX_SHIFT; \
and tmp, tmp, mask; \
addu ptr, ptr, tmp;
#ifdef CONFIG_SMP
/* pte holds the sc result of the preceding PTE_SW: zero means another
 * CPU changed the pte under us, so branch back to "label" to retry the
 * ll/modify/sc sequence. */
#define PTE_CHANGED_HAZARD(pte, label) \
beqz pte, label; \
nop;
#else
#define PTE_CHANGED_HAZARD(pte, label)
#endif
/*
 * TLB refill handler (max 64 insn)
 *
 * Entered on a TLB miss; only k0/k1 are usable.  With CONFIG_CMT we
 * must take jtlb_lock first -- if it is contended we simply eret (1f)
 * and let the exception be retaken.  COND_TLBWR writes the entry only
 * if the probe (CMT_TLBP) shows BadVaddr is still absent from the TLB,
 * closing the cross-core race described in the header comment.
 */
.set noreorder
.set noat
LEAF(invtl_tlb_refill)
.set mips32
TLB_TRYLOCK(k1, k0, 1f) # 7
GET_PTEP(k1, k0, PTEP_INDX_MSK) # 15 k1 = &even pte of the pair
lw k0, 0(k1) # 1 get even pte
lw k1, 4(k1) # 1 get odd pte
CMT_TLBP
srl k0, k0, 6 # 1 convert to entrylo0
mtc0 k0, CP0_ENTRYLO0 # 1 load it
srl k1, k1, 6 # 1 convert to entrylo1
CMT_LOAD_INDEX(k0)
mtc0 k1, CP0_ENTRYLO1 # 1 load it
COND_TLBWR(k0) # write random tlb entry
TLB_UNLOCK(k0) # 3
1:
eret # 1 return from trap
END(invtl_tlb_refill)
/*
 * TLB load fastpath handler (max 128 insn)
 *
 * Marks the pte for BadVaddr valid/accessed and rewrites the matching
 * TLB entry.  1f is the contended-lock exit (eret, exception retaken);
 * 2b is the SMP ll/sc retry point.  COND_TLBWI skips the tlbwi when
 * the tlbp shows the entry was evicted by the other core meanwhile.
 * A non-present pte falls through to the generic fault path.
 */
.set noreorder
.set noat
LEAF(invtl_tlb_load)
.set mips32
TLB_TRYLOCK(k1, k0, 1f)
GET_PTEP(k1, k0, PTE_INDX_MSK)
2:
PTE_LW k0, (k1) # k0 = pte (ll on SMP)
tlbp # probe for BadVaddr, result in CP0_INDEX
PTE_PRESENT(k0, k1, nopage_tlbl)
PTE_MAKEVALID(k0, k1)
PTE_CHANGED_HAZARD(k0, 2b) # sc failed: pte changed, retry
PTE_RELOAD(k1, k0)
COND_TLBWI(k0) # write indexed tlb entry
TLB_UNLOCK(k0)
1:
eret # return from trap
nopage_tlbl:
TLB_UNLOCK(k0) # drop lock before the slow path
j tlb_do_page_fault_0 # generic read fault
nop
END(invtl_tlb_load)
/*
 * TLB store fastpath handler
 *
 * Same structure as invtl_tlb_load, but requires the pte to be both
 * present and writable, and additionally sets the modified/dirty bits.
 * A pte that fails the check goes to the generic write-fault path.
 */
.set noreorder
.set noat
LEAF(invtl_tlb_store)
.set mips32
TLB_TRYLOCK(k1, k0, 1f)
GET_PTEP(k1, k0, PTE_INDX_MSK)
2:
PTE_LW k0, (k1) # k0 = pte (ll on SMP)
tlbp # probe for BadVaddr, result in CP0_INDEX
PTE_WRITABLE(k0, k1, nopage_tlbs)
PTE_MAKEWRITE(k0, k1)
PTE_CHANGED_HAZARD(k0, 2b) # sc failed: pte changed, retry
PTE_RELOAD(k1, k0)
COND_TLBWI(k0) # write indexed tlb entry
TLB_UNLOCK(k0)
1:
eret # return from trap
nopage_tlbs:
TLB_UNLOCK(k0) # drop lock before the slow path
j tlb_do_page_fault_1 # generic write fault
nop
invtl_tlb_store_end:
END(invtl_tlb_store)
/*
 * TLB modify fastpath handler
 *
 * Taken on a write to a valid but not-yet-dirty page.  Requires only
 * _PAGE_WRITE (the pte is already present/valid); sets the dirty bits
 * and rewrites the TLB entry, same locking/race handling as the load
 * and store handlers above.
 */
.set noreorder
.set noat
LEAF(invtl_tlb_modify)
.set mips32
TLB_TRYLOCK(k1, k0, 1f)
GET_PTEP(k1, k0, PTE_INDX_MSK)
2:
PTE_LW k0, (k1) # k0 = pte (ll on SMP)
tlbp # probe for BadVaddr, result in CP0_INDEX
PTE_MODIFIABLE(k0, k1, nopage_tlbm)
PTE_MAKEWRITE(k0, k1)
PTE_CHANGED_HAZARD(k0, 2b) # sc failed: pte changed, retry
PTE_RELOAD(k1, k0)
COND_TLBWI(k0) # write indexed tlb entry
TLB_UNLOCK(k0)
1:
eret # return from trap
nopage_tlbm:
TLB_UNLOCK(k0) # drop lock before the slow path
j tlb_do_page_fault_1 # generic write fault
nop
END(invtl_tlb_modify)