powerpc/e6500: TLB miss handler with hardware tablewalk support
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index b4113bf863538adbf6d0bffdaa3aabe090270064..75f5d2777f6101c272edab70d13cf9f56ef8d32c 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -239,6 +239,177 @@ itlb_miss_fault_bolted:
        beq     tlb_miss_common_bolted
        b       itlb_miss_kernel_bolted
 
+/*
+ * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
+ *
+ * Linear mapping is bolted: no virtual page table or nested TLB misses
+ * Indirect entries in TLB1, hardware loads resulting direct entries
+ *    into TLB0
+ * No HES or NV hint on TLB1, so we need to do software round-robin
+ * No tlbsrx., so we need a spinlock, and we have to deal
+ *    with MAS-damage caused by tlbsx
+ * 4K pages only
+ */
+
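+/*
+ * Entry points: both handlers save state with tlb_prolog_bolted, load
+ * the per-core tlb_core_data pointer, and record in bit 0 of r16
+ * whether the miss was an instruction fetch; user addresses go
+ * straight to tlb_miss_common_e6500, kernel ones via
+ * tlb_miss_kernel_e6500.
+ */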
+       START_EXCEPTION(instruction_tlb_miss_e6500)
+       tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0
+
+       ld      r11,PACA_TCD_PTR(r13)
+       srdi.   r15,r16,60              /* get region */
+       ori     r16,r16,1               /* mark as instruction miss */
+
+       TLB_MISS_STATS_SAVE_INFO_BOLTED
+       bne     tlb_miss_kernel_e6500   /* user/kernel test */
+
+       b       tlb_miss_common_e6500
+
+       START_EXCEPTION(data_tlb_miss_e6500)
+       tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
+
+       ld      r11,PACA_TCD_PTR(r13)
+       srdi.   r15,r16,60              /* get region */
+       rldicr  r16,r16,0,62            /* clear bit 0: data miss */
+
+       TLB_MISS_STATS_SAVE_INFO_BOLTED
+       bne     tlb_miss_kernel_e6500   /* user vs kernel check */
+
+/*
+ * This is the guts of the TLB miss handler for e6500 and derivatives.
+ * We are entered with:
+ *
+ * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
+ * r15 = crap (free to use)
+ * r14 = page table base
+ * r13 = PACA
+ * r11 = tlb_per_core ptr
+ * r10 = crap (free to use)
+ */
+tlb_miss_common_e6500:
+       /*
+        * Search if we already have an indirect entry for that virtual
+        * address, and if we do, bail out.
+        *
+        * MAS6:IND should be already set based on MAS4
+        */
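+       /*
+        * Take the per-core lock (tcd->lock) with lbarx/stbcx.; the
+        * contended spin loop is placed out of line in .subsection 1 so
+        * the uncontended path stays straight-line.
+        */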
+       addi    r10,r11,TCD_LOCK
+1:     lbarx   r15,0,r10
+       cmpdi   r15,0
+       bne     2f
+       li      r15,1
+       stbcx.  r15,0,r10
+       bne     1b
+       .subsection 1
+2:     lbz     r15,0(r10)
+       cmpdi   r15,0
+       bne     2b
+       b       1b
+       .previous
+
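+       /*
+        * Save MAS2 before tlbsx clobbers it, then search the TLB.  If a
+        * valid indirect entry already exists (e.g. the other thread
+        * installed it while we waited for the lock), just unlock and
+        * return.
+        */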
+       mfspr   r15,SPRN_MAS2
+
+       tlbsx   0,r16
+       mfspr   r10,SPRN_MAS1
+       andis.  r10,r10,MAS1_VALID@h
+       bne     tlb_miss_done_e6500
+
+       /* Undo MAS-damage from the tlbsx */
+       mfspr   r10,SPRN_MAS1
+       oris    r10,r10,MAS1_VALID@h
+       mtspr   SPRN_MAS1,r10
+       mtspr   SPRN_MAS2,r15
+
+       /* Now, we need to walk the page tables. First check if we are in
+        * range.
+        */
+       rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+       bne-    tlb_miss_fault_e6500
+
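+       /*
+        * Walk PGD -> PUD -> PMD.  Valid upper-level entries are kernel
+        * pointers with the top bit set, so the signed bge tests below
+        * reject both empty slots and hugepage leaf entries.
+        */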
+       rldicl  r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
+       cmpldi  cr0,r14,0
+       clrrdi  r15,r15,3
+       beq-    tlb_miss_fault_e6500 /* No PGDIR, bail */
+       ldx     r14,r14,r15             /* grab pgd entry */
+
+       rldicl  r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
+       clrrdi  r15,r15,3
+       cmpdi   cr0,r14,0
+       bge     tlb_miss_fault_e6500    /* Bad pgd entry or hugepage; bail */
+       ldx     r14,r14,r15             /* grab pud entry */
+
+       rldicl  r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
+       clrrdi  r15,r15,3
+       cmpdi   cr0,r14,0
+       bge     tlb_miss_fault_e6500
+       ldx     r14,r14,r15             /* Grab pmd entry */
+
+       mfspr   r10,SPRN_MAS0
+       cmpdi   cr0,r14,0
+       bge     tlb_miss_fault_e6500
+
+       /* Now we build the MAS for a 2M indirect page:
+        *
+        * MAS 0   :    ESEL needs to be filled by software round-robin
+        * MAS 1   :    Fully set up
+        *               - PID already updated by caller if necessary
+        *               - TSIZE for now is base ind page size always
+        *               - TID already cleared if necessary
+        * MAS 2   :    Default is not 2M-aligned, needs to be redone
+        * MAS 3+7 :    Needs to be done
+        */
+
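+       /* MAS 3+7: PTE page taken from the pmd entry, tagged with the
+        * 4K sub-page size of the direct entries the hardware will load.
+        */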
+       ori     r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
+       mtspr   SPRN_MAS7_MAS3,r14
+
+       clrrdi  r15,r16,21              /* make EA 2M-aligned */
+       mtspr   SPRN_MAS2,r15
+
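+       /*
+        * MAS 0: pick the victim way by software round-robin over
+        * [esel_first, esel_max): use esel_next for this write, then
+        * advance it, wrapping back to esel_first when it hits esel_max.
+        */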
+       lbz     r15,TCD_ESEL_NEXT(r11)
+       lbz     r16,TCD_ESEL_MAX(r11)
+       lbz     r14,TCD_ESEL_FIRST(r11)
+       rlwimi  r10,r15,16,0x00ff0000   /* insert esel_next into MAS0 */
+       addi    r15,r15,1               /* increment esel_next */
+       mtspr   SPRN_MAS0,r10
+       cmpw    r15,r16
+       iseleq  r15,r14,r15             /* if next == last use first */
+       stb     r15,TCD_ESEL_NEXT(r11)
+
+       tlbwe
+
+tlb_miss_done_e6500:
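+       /*
+        * Drop the per-core lock; the isync ensures any preceding tlbwe
+        * has completed before the lock byte is cleared.
+        */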
+       .macro  tlb_unlock_e6500
+       li      r15,0
+       isync
+       stb     r15,TCD_LOCK(r11)
+       .endm
+
+       tlb_unlock_e6500
+       TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
+       tlb_epilog_bolted
+       rfi
+
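+/*
+ * Kernel misses: only the vmalloc region (EA >> 60 == 8) is handled by
+ * the tablewalk path, using the kernel PGD with the TID cleared in
+ * MAS1; anything else falls through to the fault path below.
+ */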
+tlb_miss_kernel_e6500:
+       mfspr   r10,SPRN_MAS1
+       ld      r14,PACA_KERNELPGD(r13)
+       cmpldi  cr0,r15,8               /* Check for vmalloc region */
+       rlwinm  r10,r10,0,16,1          /* Clear TID */
+       mtspr   SPRN_MAS1,r10
+       beq+    tlb_miss_common_e6500
+
+tlb_miss_fault_e6500:
+       tlb_unlock_e6500
+       /* We need to check if it was an instruction miss */
+       andi.   r16,r16,1
+       bne     itlb_miss_fault_e6500
+dtlb_miss_fault_e6500:
+       TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+       tlb_epilog_bolted
+       b       exc_data_storage_book3e
+itlb_miss_fault_e6500:
+       TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+       tlb_epilog_bolted
+       b       exc_instruction_storage_book3e
+
+
 /**********************************************************************
  *                                                                    *
  * TLB miss handling for Book3E with TLB reservation and HES support  *