/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);
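
/*
 * Single-page flush helpers implemented in arch/parisc/kernel/pacache.S.
 * They take the physical address alongside the user virtual address, so
 * the flush can be done through a temporary alias mapping congruent with
 * the user mapping.
 */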
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
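
/* The purge_tlb_start()/purge_tlb_end() helpers used throughout this
 * file are expected to take pa_tlb_lock around the actual purges. */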
struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
        on_each_cpu(flush_data_cache_local, NULL, 1);
}

void
flush_instruction_cache(void)
{
        on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif
void
flush_cache_all_local(void)
{
        flush_instruction_cache_local(NULL);
        flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);
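
/* Called by the generic MM code when a translation is installed for a
 * page.  If flush_dcache_page() deferred a flush by setting
 * PG_dcache_dirty (see below), perform that flush now, through the
 * kernel mapping. */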
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        struct page *page = pte_page(*ptep);

        if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
            test_bit(PG_dcache_dirty, &page->flags)) {
                flush_kernel_dcache_page(page);
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page(page);
}
void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size/1024);
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
        );
#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size == 0) {
                seq_printf(m, "BTLB\t\t: not supported\n");
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}
void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");
137 printk("ic_size %lx dc_size %lx it_size %lx\n",
142 printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
144 cache_info.dc_stride,
148 printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
149 *(unsigned long *) (&cache_info.dc_conf),
150 cache_info.dc_conf.cc_alias,
151 cache_info.dc_conf.cc_block,
152 cache_info.dc_conf.cc_line,
153 cache_info.dc_conf.cc_shift);
154 printk(" wt %d sh %d cst %d hv %d\n",
155 cache_info.dc_conf.cc_wt,
156 cache_info.dc_conf.cc_sh,
157 cache_info.dc_conf.cc_cst,
158 cache_info.dc_conf.cc_hv);
160 printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
162 cache_info.ic_stride,
166 printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
167 *(unsigned long *) (&cache_info.ic_conf),
168 cache_info.ic_conf.cc_alias,
169 cache_info.ic_conf.cc_block,
170 cache_info.ic_conf.cc_line,
171 cache_info.ic_conf.cc_shift);
172 printk(" wt %d sh %d cst %d hv %d\n",
173 cache_info.ic_conf.cc_wt,
174 cache_info.ic_conf.cc_sh,
175 cache_info.ic_conf.cc_cst,
176 cache_info.ic_conf.cc_hv);
178 printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
179 cache_info.dt_conf.tc_sh,
180 cache_info.dt_conf.tc_page,
181 cache_info.dt_conf.tc_cst,
182 cache_info.dt_conf.tc_aid,
183 cache_info.dt_conf.tc_pad1);
185 printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
186 cache_info.it_conf.tc_sh,
187 cache_info.it_conf.tc_page,
188 cache_info.it_conf.tc_cst,
189 cache_info.it_conf.tc_aid,
190 cache_info.it_conf.tc_pad1);
        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                        "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }
202 /* "New and Improved" version from Jim Hull
203 * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
204 * The following CAFL_STRIDE is an optimized version, see
205 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
206 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
208 #define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
209 dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
210 icache_stride = CAFL_STRIDE(cache_info.ic_conf);
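
        /* Worked example with made-up PDC values: cc_line = 4,
         * cc_block = 1 and cc_shift = 2 give a stride of
         * 4 << (3 + 1 + 2) = 256 bytes between the lines touched
         * by the flush loops. */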
#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif
        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}
void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits;

        switch (boot_cpu_data.cpu_type) {
        case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
                BUG();
                return;
        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;
        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;
        case pcxl2: /* pcxl2 doesn't support space register hashing */
                return;
        default: /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
}
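
/* Make user mappings of a page-cache page coherent with what the
 * kernel wrote through its own mapping.  If nobody has the page
 * mapped yet, just record PG_dcache_dirty and let update_mmu_cache()
 * flush lazily on the first fault. */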
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page(page);

        if (!mapping)
                return;

        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        /* We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent */

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;

                /* The TLB is the engine of coherence on parisc: The
                 * CPU is entitled to speculate any page with a TLB
                 * mapping, so here we kill the mapping then flush the
                 * page along a special flush-only alias mapping.
                 * This guarantees that the page is no longer in the
                 * cache for any process and nor may it be
                 * speculatively read in (until the user or kernel
                 * specifically accesses it, of course) */

                flush_tlb_page(mpnt, addr);
                if (old_addr == 0 || (old_addr & (SHMLBA - 1))
                                        != (addr & (SHMLBA - 1))) {
                        __flush_cache_page(mpnt, addr, page_to_phys(page));
                        if (old_addr)
                                printk(KERN_ERR
                                    "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
                                    old_addr, addr, mpnt->vm_file ?
                                    (char *)mpnt->vm_file->f_path.dentry->d_name.name
                                    : "(null)");
                        old_addr = addr;
                }
        }
        flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
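
/* Derive the break-even point between a ranged flush and a whole-cache
 * flush by timing both at boot.  As a made-up example: if the whole
 * data cache flushes in 100000 cycles while flushing a 2 MB kernel
 * text range costs 400000 cycles, the threshold becomes
 * 2 MB * 100000 / 400000 = 512 KB, rounded to an L1 line multiple. */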
void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        /* Racy, but if we see an intermediate value, it's ok too... */
        parisc_cache_flush_threshold = size * alltime / rangetime;

        parisc_cache_flush_threshold =
                (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1)
                & ~(L1_CACHE_BYTES - 1);
        if (!parisc_cache_flush_threshold)
                parisc_cache_flush_threshold = FLUSH_THRESHOLD;

        if (parisc_cache_flush_threshold > cache_info.dc_size)
                parisc_cache_flush_threshold = cache_info.dc_size;

        printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
                parisc_cache_flush_threshold, num_online_cpus());
}
extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
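
/* Flush the kernel mapping of one page and purge its kernel TLB entry,
 * so no stale line can later be hit through the kernel alias. */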
void flush_kernel_dcache_page_addr(void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb_kernel(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
        clear_page_asm(vto);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
        struct page *pg)
{
        /* Copy using kernel mapping.  No coherency is needed
           (all in kmap/kunmap) on machines that don't support
           non-equivalent aliasing.  However, the `from' page
           needs to be flushed before it can be accessed through
           the kernel mapping. */
        preempt_disable();
        flush_dcache_page_asm(__pa(vfrom), vaddr);
        preempt_enable();
        copy_page_asm(vto, vfrom);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);
#ifdef CONFIG_PA8X00
void kunmap_parisc(void *addr)
{
        if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif
void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
        unsigned long flags;

        /* Note: purge_tlb_entries can be called at startup with
           no context.  */

        /* Disable preemption while we play with %sr1.  */
        preempt_disable();
        mtsp(mm->context, 1);
        purge_tlb_start(flags);
        pdtlb(addr);
        pitlb(addr);
        purge_tlb_end(flags);
        preempt_enable();
}
EXPORT_SYMBOL(purge_tlb_entries);
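
/* Purge a user address range under %sr1.  From 512 pages (2MB) on, a
 * full flush_tlb_all() is assumed to be cheaper than purging page by
 * page. */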
void __flush_tlb_range(unsigned long sid, unsigned long start,
                       unsigned long end)
{
        unsigned long npages;

        npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
                flush_tlb_all();
        else {
                unsigned long flags;

                mtsp(sid, 1);
                purge_tlb_start(flags);
                if (split_tlb) {
                        while (npages--) {
                                pdtlb(start);
                                pitlb(start);
                                start += PAGE_SIZE;
                        }
                } else {
                        while (npages--) {
                                pdtlb(start);
                                start += PAGE_SIZE;
                        }
                }
                purge_tlb_end(flags);
        }
}
static void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}

void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;
        return usize;
}
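
/* Walk the page table for addr and return its pte, or NULL if some
 * level of the walk is not present. */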
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
        pte_t *ptep = NULL;

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                ptep = pte_offset_map(pmd, addr);
                }
        }
        return ptep;
}
void flush_cache_mm(struct mm_struct *mm)
{
        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc.  So, avoid it if the mm isn't too big.  */
        if (mm_total_size(mm) < parisc_cache_flush_threshold) {
                struct vm_area_struct *vma;

                if (mm->context == mfsp(3)) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                flush_user_dcache_range_asm(vma->vm_start,
                                        vma->vm_end);
                                if (vma->vm_flags & VM_EXEC)
                                        flush_user_icache_range_asm(
                                                vma->vm_start, vma->vm_end);
                        }
                } else {
                        pgd_t *pgd = mm->pgd;

                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                unsigned long addr;

                                for (addr = vma->vm_start; addr < vma->vm_end;
                                     addr += PAGE_SIZE) {
                                        pte_t *ptep = get_ptep(pgd, addr);
                                        if (ptep != NULL) {
                                                pte_t pte = *ptep;
                                                __flush_cache_page(vma, addr,
                                                    page_to_phys(pte_page(pte)));
                                        }
                                }
                        }
                }
                return;
        }

#ifdef CONFIG_SMP
        flush_cache_all();
#else
        flush_cache_all_local();
#endif
}
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_dcache_range_asm(start, end);
        else
                flush_data_cache();
}
void
flush_user_icache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_icache_range_asm(start, end);
        else
                flush_instruction_cache();
}
void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        BUG_ON(!vma->vm_mm->context);

        if ((end - start) < parisc_cache_flush_threshold) {
                if (vma->vm_mm->context == mfsp(3)) {
                        flush_user_dcache_range_asm(start, end);
                        if (vma->vm_flags & VM_EXEC)
                                flush_user_icache_range_asm(start, end);
                } else {
                        pgd_t *pgd = vma->vm_mm->pgd;
                        unsigned long addr;

                        for (addr = start & PAGE_MASK; addr < end;
                             addr += PAGE_SIZE) {
                                pte_t *ptep = get_ptep(pgd, addr);
                                if (ptep != NULL) {
                                        pte_t pte = *ptep;
                                        flush_cache_page(vma,
                                                addr, pte_pfn(pte));
                                }
                        }
                }
                return;
        }

#ifdef CONFIG_SMP
        flush_cache_all();
#else
        flush_cache_all_local();
#endif
}
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        flush_tlb_page(vma, vmaddr);
        __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}
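
/* The highpage helpers below work through the TMPALIAS region: the
 * kernel-side alias is purged and the page is then cleared or copied
 * via an alias congruent with the user address, as the comments in
 * each function explain. */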
#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *vto;
        unsigned long flags;

        /* Clear using TMPALIAS region.  The page doesn't need to
           be flushed but the kernel mapping needs to be purged.  */

        vto = kmap_atomic(page, KM_USER0);

        /* The PA-RISC 2.0 Architecture book states on page F-6:
           "Before a write-capable translation is enabled, *all*
           non-equivalently-aliased translations must be removed
           from the page table and purged from the TLB.  (Note
           that the caches are not required to be flushed at this
           time.)  Before any non-equivalent aliased translation
           is re-enabled, the virtual address range for the writeable
           page (the entire page) must be flushed from the cache,
           and the write-capable translation removed from the page
           table and purged from the TLB."  */

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        purge_tlb_end(flags);
        preempt_disable();
        clear_user_page_asm(vto, vaddr);
        preempt_enable();

        pagefault_enable();     /* kunmap_atomic(addr, KM_USER0); */
}
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;
        unsigned long flags;

        /* Copy using TMPALIAS region.  This has the advantage
           that the `from' page doesn't need to be flushed.  However,
           the `to' page must be flushed in copy_user_page_asm since
           it can be used to bring in executable code.  */

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        pdtlb_kernel(vfrom);
        purge_tlb_end(flags);
        preempt_disable();
        copy_user_page_asm(vto, vfrom, vaddr);
        flush_dcache_page_asm(__pa(vto), vaddr);
        preempt_enable();

        pagefault_enable();     /* kunmap_atomic(addr, KM_USER1); */
        pagefault_enable();     /* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */