/*
 * ARC700 VIPT Cache Management
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *  -flush_cache_dup_mm (fork)
 *  -likewise for flush_cache_mm (exit/execve)
 *  -likewise for flush_cache_range, flush_cache_page (munmap, exit, COW-break)
 *
 *  -Now that MMU can support larger pg sz (16K), the determination of
 *   aliasing should not be based on assumption of 8k pg
 *
 *  -optimised version of flush_icache_range( ) for making I/D coherent
 *   when vaddr is available (agnostic of num of aliases)
 *
 *  -Added documentation about I-cache aliasing on ARC700 and the way it
 *   was handled up until MMU V2.
 *  -Spotted a three year old bug when killing the 4 aliases, which needs
 *   bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
 *   instead of paddr | {0x00, 0x01, 0x10, 0x11}
 *   (Rajesh you owe me one now)
 *
 *  -Off-by-one error when computing num_of_lines to flush
 *   This broke signal handling with bionic which uses synthetic sigret stub
 *
 *  -GCC can't generate ZOL for core cache flush loops.
 *   Convert them into iteration-based loops as opposed to while (start < end) style
 *
 *  -In I-cache flush routine we used to check for aliasing for every line INV.
 *   Instead now we setup routines per cache geometry and invoke them
 *   via function pointers.
 *
 *  -Cache Line flush routines used to flush an extra line beyond end addr
 *   because check was while (end >= start) instead of (end > start)
 *   =Some call sites had to work around by doing -1, -4 etc to end param
 *   =Some callers didn't care. This was especially bad in case of INV routines
 *    which would discard valid data (cause of the horrible ext2 bug
 *
 * vineetg: June 11th 2008: Fixed flush_icache_range( )
 *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
 *   to be flushed, which it was not doing.
 *  -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
 *   however ARC cache maintenance OPs require PHY addr. Thus need to do
 *   vmalloc-to-phy conversion.
 *  -Also added optimisation there, that for range > PAGE SIZE we flush the
 *   entire cache in one shot rather than line by line. E.g. a module
 *   with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
 *   while cache is only 16 or 32k.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>
char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	unsigned int c = smp_processor_id();

#define PR_CACHE(p, enb, str)						\
{									\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
			TO_KB((p)->sz), (p)->assoc, (p)->line_len,	\
			enb ? "" : "DISABLED (kernel-build)");		\
}

	PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");
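	/*
	 * Example output, purely illustrative (a hypothetical 32K, 2-way,
	 * 64-byte line I-cache): "I-Cache : (32K) VIPT, 2way set-asc, 64b Line"
	 */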
/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
void __cpuinit read_decode_cache_bcr(void)
{
	struct bcr_cache ibcr, dbcr;
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (ibcr.config == 0x3)
		p_ic->assoc = 2;	/* 2-way set associative */

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz = 0x200 << ibcr.sz;
	p_ic->ver = ibcr.ver;

	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (dbcr.config == 0x2)
		p_dc->assoc = 4;	/* 4-way set associative */

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz = 0x200 << dbcr.sz;
	p_dc->ver = dbcr.ver;
}
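/*
 * Worked example of the BCR decode above (illustrative field values, not a
 * statement about real hardware): a sz field of 6 yields 0x200 << 6 = 32K,
 * and a D$ line_len field of 1 yields 16 << 1 = 32 byte lines.
 */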
/*
 * 1. Validate the Cache Geometry (compile time config matches hardware)
 * 2. If I-cache suffers from aliasing, setup work arounds (different flush routines)
 *    (aliasing D-cache configurations are not supported YET)
 * 3. Enable the Caches, setup default flush mode for D-Cache
 * 4. Calculate the SHMLBA used by user space
 */
void __cpuinit arc_cache_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
	struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
	int way_pg_ratio = way_pg_ratio;

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
#ifdef CONFIG_ARC_HAS_ICACHE
	/* 1. Confirm some of I-cache params which Linux assumes */
	if ((ic->assoc != ARC_ICACHE_WAYS) ||
	    (ic->line_len != ARC_ICACHE_LINE_LEN)) {
		panic("Cache H/W doesn't match kernel Config");
	}

#if (CONFIG_ARC_MMU_VER > 2)
	panic("Cache ver doesn't match MMU ver\n");
	/* For ISS - suggest the toggles to use */
	pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
	/* Enable/disable I-Cache */
	temp = read_aux_reg(ARC_REG_IC_CTRL);

#ifdef CONFIG_ARC_HAS_ICACHE
	temp &= ~IC_CTRL_CACHE_DISABLE;
#else
	temp |= IC_CTRL_CACHE_DISABLE;
#endif

	write_aux_reg(ARC_REG_IC_CTRL, temp);
#ifdef CONFIG_ARC_HAS_DCACHE
	if ((dc->assoc != ARC_DCACHE_WAYS) ||
	    (dc->line_len != ARC_DCACHE_LINE_LEN)) {
		panic("Cache H/W doesn't match kernel Config");
	}

	/* check for D-Cache aliasing */
	if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
		panic("D$ aliasing not handled right now\n");
#endif
	/* Set the default Invalidate Mode to "simply discard dirty lines"
	 * as this is more frequent than flush before invalidate
	 * Of course we toggle this default behaviour when desired
	 */
	temp = read_aux_reg(ARC_REG_DC_CTRL);
	temp &= ~DC_CTRL_INV_MODE_FLUSH;

#ifdef CONFIG_ARC_HAS_DCACHE
	/* Enable D-Cache: Clear Bit 0 */
	write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
#else
	write_aux_reg(ARC_REG_DC_FLSH, 0x1);
	/* Disable D cache */
	write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
#endif
#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3	/* flush and invalidate: both bits set */
#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void wait_for_flush(void)
{
	while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
		;
}

/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 */
static inline void __dc_entire_op(const int cacheop)
{
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;
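	/* writing 1 to the chosen aux reg (IVDC or FLSH) kicks off the entire-cache operation */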
	write_aux_reg(aux, 0x1);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}
/*
 * Per Line Operation on D-Cache
 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 * Its sole purpose is to help gcc generate ZOL
 * (aliasing VIPT dcache flushing needs both vaddr and paddr)
 */
static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
				  unsigned long sz, const int aux_reg)
{
	int num_lines;

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~DCACHE_LINE_MASK;
		paddr &= DCACHE_LINE_MASK;
		vaddr &= DCACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
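	/* bits 17:13 of vaddr stuffed as bits 4:0 of paddr (see I-cache aliasing notes below) */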
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/*
		 * Just as for I$, in MMU v3, D$ ops also require
		 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
		 */
		write_aux_reg(ARC_REG_DC_PTAG, paddr);

		write_aux_reg(aux_reg, vaddr);
		vaddr += ARC_DCACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddrs bits */
		write_aux_reg(aux_reg, paddr);
#endif
		paddr += ARC_DCACHE_LINE_LEN;
	}
}
/* For kernel mappings cache op index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
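/* e.g. the dma_cache_*() helpers below act on kernel addresses and hence use __dc_line_op_k() */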
/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	unsigned long flags, tmp = tmp;
	int aux;

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * Default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv / flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDL;
	else
		aux = ARC_REG_DC_FLDL;

	__dc_line_loop(paddr, vaddr, sz, aux);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}
#else

#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */
#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-Cache Aliasing in ARC700 VIPT caches
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * MMU v1/v2 (Fixed Page Size 8k)
 *
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this "clumsy"
 * interface for additional info was that no new reg was needed in the CDU
 * programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible (see worked example below):
 *  -for 2 alias possibility, only bit 13 needed (32K cache)
 *  -for 4 alias possibility, bits 14:13 needed (64K cache)
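 *
 * Worked example (illustrative, assuming a 2-way set associative I-cache and
 * the 8k page size above): a 32K cache has 16K per way, i.e. 16K/8K = 2
 * possible aliases, needing 1 extra bit (13); a 64K cache has 32K per way,
 * i.e. 4 possible aliases, needing bits 14:13.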
 *
 * MMU v3
 *
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	unsigned long flags;
	int num_lines;

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * However page sized flushes can be compile time optimised.
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~ICACHE_LINE_MASK;
		paddr &= ICACHE_LINE_MASK;
		vaddr &= ICACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
#if (CONFIG_ARC_MMU_VER <= 2)
	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	local_irq_save(flags);
	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* tag comes from phy addr */
		write_aux_reg(ARC_REG_IC_PTAG, paddr);

		/* index bits come from vaddr */
		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		vaddr += ARC_ICACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddrs bits */
		write_aux_reg(ARC_REG_IC_IVIL, paddr);
#endif
		paddr += ARC_ICACHE_LINE_LEN;
	}
	local_irq_restore(flags);
}
#else

#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */
/***********************************************************
 * Exported APIs
 */
void flush_dcache_page(struct page *page)
{
	/* Make a note that dcache is not yet flushed for this page */
	set_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
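/*
 * DMA cache maintenance helpers below. As a general rule of thumb (not
 * something this file mandates): wback (clean) a buffer before a device reads
 * it, inv it before the CPU reads data a device has written, and wback_inv
 * for bidirectional buffers.
 */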
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);
/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz, off, sz;
	unsigned long phy, pfn;

	/* printk("Kernel Cache Coherency: %lx to %lx\n", kstart, kend); */

	/* This is not the right API for user virtual address */
	if (kstart < TASK_SIZE) {
		BUG_ON("Flush icache range for user virtual addr space");
		return;
	}

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}
	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}
	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user or kernel vaddr (vmalloc)
 * However in one instance, flush_icache_range() by kprobe (for a breakpt in
 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 * use a paddr to index the cache (despite VIPT). This is fine since a
 * built-in kernel page will not have any virtual mappings (not even kernel).
 * kprobe on loadable module is different as it will have kvaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	unsigned long flags;

	local_irq_save(flags);
	__ic_line_inv_vaddr(paddr, vaddr, len);
	__dc_line_op(paddr, vaddr, len, OP_FLUSH);
	local_irq_restore(flags);
}
/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}
void flush_icache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	write_aux_reg(ARC_REG_IC_IVIC, 1);

	/* lr will not complete till the icache inv operation is over */
	read_aux_reg(ARC_REG_IC_CTRL);
	local_irq_restore(flags);
}
noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	flush_icache_all();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}
/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */