/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e. SW loaded TLBs or Book3E compliant processors;
 * this does -not- include 603, however, which shares the implementation with
 * hash based processors)
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/hugetlb.h>

#include "mmu_decl.h"
/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K]	= { .shift = 12, .enc = BOOK3E_PAGESZ_4K,   },
	[MMU_PAGE_2M]	= { .shift = 21, .enc = BOOK3E_PAGESZ_2M,   },
	[MMU_PAGE_4M]	= { .shift = 22, .enc = BOOK3E_PAGESZ_4M,   },
	[MMU_PAGE_16M]	= { .shift = 24, .enc = BOOK3E_PAGESZ_16M,  },
	[MMU_PAGE_64M]	= { .shift = 26, .enc = BOOK3E_PAGESZ_64M,  },
	[MMU_PAGE_256M]	= { .shift = 28, .enc = BOOK3E_PAGESZ_256M, },
	[MMU_PAGE_1G]	= { .shift = 30, .enc = BOOK3E_PAGESZ_1GB,  },
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K]	= { .shift = 12, .enc = BOOK3E_PAGESZ_4K,   },
	[MMU_PAGE_16K]	= { .shift = 14, .enc = BOOK3E_PAGESZ_16K,  },
	[MMU_PAGE_64K]	= { .shift = 16, .enc = BOOK3E_PAGESZ_64K,  },
	[MMU_PAGE_1M]	= { .shift = 20, .enc = BOOK3E_PAGESZ_1M,   },
	[MMU_PAGE_16M]	= { .shift = 24, .enc = BOOK3E_PAGESZ_16M,  },
	[MMU_PAGE_256M]	= { .shift = 28, .enc = BOOK3E_PAGESZ_256M, },
	[MMU_PAGE_1G]	= { .shift = 30, .enc = BOOK3E_PAGESZ_1GB,  },
};
#endif /* CONFIG_PPC_FSL_BOOK3E */
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */
/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk? Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
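/*
 * Typical usage (sketch): generic mm code calls flush_tlb_page(vma, addr)
 * after changing or removing a single user PTE, flush_tlb_mm(mm) when a
 * whole address space is torn down, and flush_tlb_kernel_range() for
 * kernel mappings such as vmalloc/vunmap areas.
 */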
/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}
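/*
 * Argument block handed to the flush IPIs below; the fields mirror the
 * parameters of _tlbil_va()/_tlbil_pid() so that the remote CPUs can
 * replay the same invalidation locally.
 */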
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}
static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}
/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */
/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
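	/*
	 * The NULL argument makes do_flush_tlb_mm_ipi() fall back to
	 * _tlbil_pid(0) on the other CPUs, i.e. a flush of PID 0, which
	 * covers kernel (global) entries; the same is done locally below.
	 */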
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync but
 * for now, we keep it that way
 */
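/*
 * A possible shape for that optimization (sketch only, not what the code
 * below does): when the range spans fewer pages than some threshold,
 * invalidate it page by page with __flush_tlb_page() so that stacked
 * tlbivax operations can be used; otherwise fall back to flush_tlb_mm().
 */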
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}
/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;
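		/*
		 * Compute where, inside the virtual linear page table, the
		 * PTEs covering 'address' live: the page number scaled by
		 * the PTE size (8 bytes with 4K pages, hence PAGE_SHIFT - 3
		 * below), rounded down to the start of that PTE page and
		 * tagged with the region id, so the single TLB entry the
		 * miss handler may have created for it can be flushed.
		 */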
#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;
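			/*
			 * e.g. a 4MB page: shift 22 -> (22 - 10) >> 1 = 6,
			 * and 4^6 KB = 4MB, which matches the 4^n KB units
			 * of the TLBnCFG MIN/MAXSIZE fields compared below.
			 */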
			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}
	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif
	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;
	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}
out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}
static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */
	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
	case PPC_HTW_E6500:
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
	}

	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}
/*
 * Early initialization of the MMU TLB code
 */
static void __early_init_mmu(int boot_cpu)
{
	unsigned int mas4;

	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 */
	mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	if (boot_cpu) {
		/* Look for supported page sizes */
		setup_page_sizes();

		/* Look for HW tablewalk support */
		setup_mmu_htw();
	}

	/* Set MAS4 based on page table setting */
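	/*
	 * The default WIMGE below is 0x4, i.e. only the M (memory coherence
	 * required) bit set, so TLB entries created from these MAS4 defaults
	 * are cacheable, coherent and non-guarded.
	 */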
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;

	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
		break;

	case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);
	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);

		/* limit memory so we don't have linear faults */
		memblock_enforce_memory_limit(linear_map_top);

		if (book3e_htw_mode == PPC_HTW_NONE) {
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();

	memblock_set_current_limit(linear_map_top);
}
void __init early_init_mmu(void)
{
	__early_init_mmu(1);
}

void early_init_mmu_secondary(void)
{
	__early_init_mmu(0);
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, we adjust the RMA size to match the
	 * first bolted TLB entry size. We still limit max to 1G even if
	 * the TLB could cover more. This is due to what the early init
	 * code is setup to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
					first_memblock_base);
		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */