1 /*
2  * srmmu.c:  SRMMU specific routines for memory management.
3  *
4  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
5  * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
6  * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
7  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8  * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
9  */
10
11 #include <linux/seq_file.h>
12 #include <linux/spinlock.h>
13 #include <linux/bootmem.h>
14 #include <linux/pagemap.h>
15 #include <linux/vmalloc.h>
16 #include <linux/kdebug.h>
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/log2.h>
20 #include <linux/gfp.h>
21 #include <linux/fs.h>
22 #include <linux/mm.h>
23
24 #include <asm/mmu_context.h>
25 #include <asm/cacheflush.h>
26 #include <asm/tlbflush.h>
27 #include <asm/io-unit.h>
28 #include <asm/pgalloc.h>
29 #include <asm/pgtable.h>
30 #include <asm/bitext.h>
31 #include <asm/vaddrs.h>
32 #include <asm/cache.h>
33 #include <asm/traps.h>
34 #include <asm/oplib.h>
35 #include <asm/mbus.h>
36 #include <asm/page.h>
37 #include <asm/asi.h>
38 #include <asm/msi.h>
39 #include <asm/smp.h>
40 #include <asm/io.h>
41
42 /* Now the cpu specific definitions. */
43 #include <asm/turbosparc.h>
44 #include <asm/tsunami.h>
45 #include <asm/viking.h>
46 #include <asm/swift.h>
47 #include <asm/leon.h>
48 #include <asm/mxcc.h>
49 #include <asm/ross.h>
50
51 #include "srmmu.h"
52
53 enum mbus_module srmmu_modtype;
54 static unsigned int hwbug_bitmask;
55 int vac_cache_size;
56 int vac_line_size;
57
58 extern struct resource sparc_iomap;
59
60 extern unsigned long last_valid_pfn;
61
62 static pgd_t *srmmu_swapper_pg_dir;
63
64 const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
65
66 #ifdef CONFIG_SMP
67 const struct sparc32_cachetlb_ops *local_ops;
68
69 #define FLUSH_BEGIN(mm)
70 #define FLUSH_END
71 #else
72 #define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
73 #define FLUSH_END       }
74 #endif
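/*
 * FLUSH_BEGIN/FLUSH_END bracket the per-mm flush routines further down:
 * on UP they skip the flush when the mm has never been assigned an MMU
 * context (mm->context == NO_CONTEXT), while on SMP they expand to
 * nothing so the flush is always performed.
 */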
75
76 int flush_page_for_dma_global = 1;
77
78 char *srmmu_name;
79
80 ctxd_t *srmmu_ctx_table_phys;
81 static ctxd_t *srmmu_context_table;
82
83 int viking_mxcc_present;
84 static DEFINE_SPINLOCK(srmmu_context_spinlock);
85
86 static int is_hypersparc;
87
88 static int srmmu_cache_pagetables;
89
90 /* these will be initialized in srmmu_nocache_calcsize() */
91 static unsigned long srmmu_nocache_size;
92 static unsigned long srmmu_nocache_end;
93
94 /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
95 #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
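/*
 * Worked example, assuming the usual 4K pages (PAGE_SHIFT == 12):
 * SRMMU_NOCACHE_BITMAP_SHIFT = 12 - 4 = 8, so one bitmap bit covers
 * 1 << 8 = 256 bytes of nocache, i.e. 256 / sizeof(pte_t) = 64 four-byte
 * PTEs -- exactly one hardware SRMMU page table.
 */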
96
97 /* The context table is a nocache user with the biggest alignment needs. */
98 #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
99
100 void *srmmu_nocache_pool;
101 void *srmmu_nocache_bitmap;
102 static struct bit_map srmmu_nocache_map;
103
104 static inline int srmmu_pmd_none(pmd_t pmd)
105 { return !(pmd_val(pmd) & 0xFFFFFFF); }
106
107 /* XXX should we hyper_flush_whole_icache here - Anton */
108 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
109 { set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
110
111 void pmd_set(pmd_t *pmdp, pte_t *ptep)
112 {
113         unsigned long ptp;      /* Physical address, shifted right by 4 */
114         int i;
115
116         ptp = __nocache_pa((unsigned long) ptep) >> 4;
117         for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
118                 set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
119                 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
120         }
121 }
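/*
 * Illustration, assuming the usual sparc32 values: a soft PTE page holds
 * PTRS_PER_PTE = 1024 entries while the hardware wants
 * SRMMU_REAL_PTRS_PER_PTE = 64 entries per table, so the loop above fills
 * all 1024/64 = 16 pmdv[] slots with PTDs pointing at consecutive
 * 64-entry chunks of the same PTE page, stepping the (paddr >> 4) value
 * by 64 * sizeof(pte_t) >> 4 = 16 each time.
 */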
122
123 void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
124 {
125         unsigned long ptp;      /* Physical address, shifted right by 4 */
126         int i;
127
128         ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);      /* watch for overflow */
129         for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
130                 set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
131                 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
132         }
133 }
134
135 /* Find an entry in the third-level page table.. */
136 pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
137 {
138         void *pte;
139
140         pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
141         return (pte_t *) pte +
142             ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
143 }
144
145 /*
146  * size: bytes to allocate in the nocache area.
147  * align: required alignment, in bytes.
148  * Returns the virtual address of the allocated area.
149  */
150 static void *__srmmu_get_nocache(int size, int align)
151 {
152         int offset;
153         unsigned long addr;
154
155         if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
156                 printk(KERN_ERR "Size 0x%x too small for nocache request\n",
157                        size);
158                 size = SRMMU_NOCACHE_BITMAP_SHIFT;
159         }
160         if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
161                 printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
162                        size);
163                 size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
164         }
165         BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
166
167         offset = bit_map_string_get(&srmmu_nocache_map,
168                                     size >> SRMMU_NOCACHE_BITMAP_SHIFT,
169                                     align >> SRMMU_NOCACHE_BITMAP_SHIFT);
170         if (offset == -1) {
171                 printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
172                        size, (int) srmmu_nocache_size,
173                        srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
174                 return NULL;
175         }
176
177         addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
178         return (void *)addr;
179 }
180
181 void *srmmu_get_nocache(int size, int align)
182 {
183         void *tmp;
184
185         tmp = __srmmu_get_nocache(size, align);
186
187         if (tmp)
188                 memset(tmp, 0, size);
189
190         return tmp;
191 }
192
193 void srmmu_free_nocache(void *addr, int size)
194 {
195         unsigned long vaddr;
196         int offset;
197
198         vaddr = (unsigned long)addr;
199         if (vaddr < SRMMU_NOCACHE_VADDR) {
200                 printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
201                     vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
202                 BUG();
203         }
204         if (vaddr + size > srmmu_nocache_end) {
205                 printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
206                     vaddr, srmmu_nocache_end);
207                 BUG();
208         }
209         if (!is_power_of_2(size)) {
210                 printk("Size 0x%x is not a power of 2\n", size);
211                 BUG();
212         }
213         if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
214                 printk("Size 0x%x is too small\n", size);
215                 BUG();
216         }
217         if (vaddr & (size - 1)) {
218                 printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
219                 BUG();
220         }
221
222         offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
223         size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
224
225         bit_map_clear(&srmmu_nocache_map, offset, size);
226 }
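/*
 * Usage sketch (illustrative, mirroring get_pgd_fast() below): callers
 * allocate with align == size, which keeps the address size-aligned so
 * the checks in srmmu_free_nocache() are satisfied on release:
 *
 *	void *p = srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 *	if (p)
 *		srmmu_free_nocache(p, PTE_SIZE);
 */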
227
228 static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
229                                                  unsigned long end);
230
231 /* Return how much physical memory we have.  */
232 static unsigned long __init probe_memory(void)
233 {
234         unsigned long total = 0;
235         int i;
236
237         for (i = 0; sp_banks[i].num_bytes; i++)
238                 total += sp_banks[i].num_bytes;
239
240         return total;
241 }
242
243 /*
244  * Reserve nocache dynamically proportionally to the amount of
245  * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
246  */
247 static void __init srmmu_nocache_calcsize(void)
248 {
249         unsigned long sysmemavail = probe_memory() / 1024;
250         int srmmu_nocache_npages;
251
252         srmmu_nocache_npages =
253                 sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
254
255         /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
256         // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
257         if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
258                 srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;
259
260         /* anything above 1280 blows up */
261         if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
262                 srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;
263
264         srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
265         srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
266 }
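/*
 * Rough numbers, assuming SRMMU_NOCACHE_ALCRATIO is 64 (see srmmu.h): a
 * 64 MB machine gives sysmemavail = 65536, so srmmu_nocache_npages =
 * 65536 / 64 / 1024 * 256 = 256 pages, i.e. 1 MB of nocache, which is
 * then clamped between SRMMU_MIN_NOCACHE_PAGES and
 * SRMMU_MAX_NOCACHE_PAGES as above.
 */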
267
268 static void __init srmmu_nocache_init(void)
269 {
270         unsigned int bitmap_bits;
271         pgd_t *pgd;
272         pmd_t *pmd;
273         pte_t *pte;
274         unsigned long paddr, vaddr;
275         unsigned long pteval;
276
277         bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
278
279         srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
280                 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
281         memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
282
283         srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
284         bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
285
286         srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
287         memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
288         init_mm.pgd = srmmu_swapper_pg_dir;
289
290         srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
291
292         paddr = __pa((unsigned long)srmmu_nocache_pool);
293         vaddr = SRMMU_NOCACHE_VADDR;
294
295         while (vaddr < srmmu_nocache_end) {
296                 pgd = pgd_offset_k(vaddr);
297                 pmd = pmd_offset(__nocache_fix(pgd), vaddr);
298                 pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
299
300                 pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
301
302                 if (srmmu_cache_pagetables)
303                         pteval |= SRMMU_CACHE;
304
305                 set_pte(__nocache_fix(pte), __pte(pteval));
306
307                 vaddr += PAGE_SIZE;
308                 paddr += PAGE_SIZE;
309         }
310
311         flush_cache_all();
312         flush_tlb_all();
313 }
314
315 pgd_t *get_pgd_fast(void)
316 {
317         pgd_t *pgd = NULL;
318
319         pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
320         if (pgd) {
321                 pgd_t *init = pgd_offset_k(0);
322                 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
323                 memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
324                                                 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
325         }
326
327         return pgd;
328 }
329
330 /*
331  * Hardware needs alignment to 256 only, but we align to whole page size
332  * to reduce fragmentation problems due to the buddy principle.
333  * XXX Provide actual fragmentation statistics in /proc.
334  *
335  * Alignments up to the page size are the same for physical and virtual
336  * addresses of the nocache area.
337  */
338 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
339 {
340         unsigned long pte;
341         struct page *page;
342
343         if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
344                 return NULL;
345         page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
346         pgtable_page_ctor(page);
347         return page;
348 }
349
350 void pte_free(struct mm_struct *mm, pgtable_t pte)
351 {
352         unsigned long p;
353
354         pgtable_page_dtor(pte);
355         p = (unsigned long)page_address(pte);   /* Cached address (for test) */
356         if (p == 0)
357                 BUG();
358         p = page_to_pfn(pte) << PAGE_SHIFT;     /* Physical address */
359
360         /* Free the non-cached virtual address. */
361         srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
362 }
363
364 /* context handling - a dynamically sized pool is used */
365 #define NO_CONTEXT      -1
366
367 struct ctx_list {
368         struct ctx_list *next;
369         struct ctx_list *prev;
370         unsigned int ctx_number;
371         struct mm_struct *ctx_mm;
372 };
373
374 static struct ctx_list *ctx_list_pool;
375 static struct ctx_list ctx_free;
376 static struct ctx_list ctx_used;
377
378 /* At boot time we determine the number of contexts */
379 static int num_contexts;
380
381 static inline void remove_from_ctx_list(struct ctx_list *entry)
382 {
383         entry->next->prev = entry->prev;
384         entry->prev->next = entry->next;
385 }
386
387 static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
388 {
389         entry->next = head;
390         (entry->prev = head->prev)->next = entry;
391         head->prev = entry;
392 }
393 #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
394 #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
395
396
397 static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
398 {
399         struct ctx_list *ctxp;
400
401         ctxp = ctx_free.next;
402         if (ctxp != &ctx_free) {
403                 remove_from_ctx_list(ctxp);
404                 add_to_used_ctxlist(ctxp);
405                 mm->context = ctxp->ctx_number;
406                 ctxp->ctx_mm = mm;
407                 return;
408         }
409         ctxp = ctx_used.next;
410         if (ctxp->ctx_mm == old_mm)
411                 ctxp = ctxp->next;
412         if (ctxp == &ctx_used)
413                 panic("out of mmu contexts");
414         flush_cache_mm(ctxp->ctx_mm);
415         flush_tlb_mm(ctxp->ctx_mm);
416         remove_from_ctx_list(ctxp);
417         add_to_used_ctxlist(ctxp);
418         ctxp->ctx_mm->context = NO_CONTEXT;
419         ctxp->ctx_mm = mm;
420         mm->context = ctxp->ctx_number;
421 }
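/*
 * Replacement policy in short: hand out a free context if one exists;
 * otherwise steal the one at the head of the used list (skipping old_mm
 * so the caller's current context survives), flush the victim's cache
 * and TLB entries, and mark the victim mm NO_CONTEXT so it re-allocates
 * a context on its next switch_mm().
 */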
422
423 static inline void free_context(int context)
424 {
425         struct ctx_list *ctx_old;
426
427         ctx_old = ctx_list_pool + context;
428         remove_from_ctx_list(ctx_old);
429         add_to_free_ctxlist(ctx_old);
430 }
431
432 static void __init sparc_context_init(int numctx)
433 {
434         int ctx;
435         unsigned long size;
436
437         size = numctx * sizeof(struct ctx_list);
438         ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
439
440         for (ctx = 0; ctx < numctx; ctx++) {
441                 struct ctx_list *clist;
442
443                 clist = (ctx_list_pool + ctx);
444                 clist->ctx_number = ctx;
445                 clist->ctx_mm = NULL;
446         }
447         ctx_free.next = ctx_free.prev = &ctx_free;
448         ctx_used.next = ctx_used.prev = &ctx_used;
449         for (ctx = 0; ctx < numctx; ctx++)
450                 add_to_free_ctxlist(ctx_list_pool + ctx);
451 }
452
453 void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
454                struct task_struct *tsk)
455 {
456         if (mm->context == NO_CONTEXT) {
457                 spin_lock(&srmmu_context_spinlock);
458                 alloc_context(old_mm, mm);
459                 spin_unlock(&srmmu_context_spinlock);
460                 srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
461         }
462
463         if (sparc_cpu_model == sparc_leon)
464                 leon_switch_mm();
465
466         if (is_hypersparc)
467                 hyper_flush_whole_icache();
468
469         srmmu_set_context(mm->context);
470 }
471
472 /* Low level IO area allocation on the SRMMU. */
473 static inline void srmmu_mapioaddr(unsigned long physaddr,
474                                    unsigned long virt_addr, int bus_type)
475 {
476         pgd_t *pgdp;
477         pmd_t *pmdp;
478         pte_t *ptep;
479         unsigned long tmp;
480
481         physaddr &= PAGE_MASK;
482         pgdp = pgd_offset_k(virt_addr);
483         pmdp = pmd_offset(pgdp, virt_addr);
484         ptep = pte_offset_kernel(pmdp, virt_addr);
485         tmp = (physaddr >> 4) | SRMMU_ET_PTE;
486
487         /* I need to test whether this is consistent over all
488          * sun4m's.  The bus_type represents the upper 4 bits of
489          * 36-bit physical address on the I/O space lines...
490          */
491         tmp |= (bus_type << 28);
492         tmp |= SRMMU_PRIV;
493         __flush_page_to_ram(virt_addr);
494         set_pte(ptep, __pte(tmp));
495 }
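/*
 * PTE arithmetic, for illustration: with physaddr 0x10000000 and bus
 * type 0xf, tmp becomes (0x10000000 >> 4) | (0xf << 28) | SRMMU_ET_PTE |
 * SRMMU_PRIV, i.e. the PTE's page-number field carries bits 35:12 of the
 * 36-bit physical address, with the bus type supplying the top four bits.
 */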
496
497 void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
498                       unsigned long xva, unsigned int len)
499 {
500         while (len != 0) {
501                 len -= PAGE_SIZE;
502                 srmmu_mapioaddr(xpa, xva, bus);
503                 xva += PAGE_SIZE;
504                 xpa += PAGE_SIZE;
505         }
506         flush_tlb_all();
507 }
508
509 static inline void srmmu_unmapioaddr(unsigned long virt_addr)
510 {
511         pgd_t *pgdp;
512         pmd_t *pmdp;
513         pte_t *ptep;
514
515         pgdp = pgd_offset_k(virt_addr);
516         pmdp = pmd_offset(pgdp, virt_addr);
517         ptep = pte_offset_kernel(pmdp, virt_addr);
518
519         /* No need to flush uncacheable page. */
520         __pte_clear(ptep);
521 }
522
523 void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
524 {
525         while (len != 0) {
526                 len -= PAGE_SIZE;
527                 srmmu_unmapioaddr(virt_addr);
528                 virt_addr += PAGE_SIZE;
529         }
530         flush_tlb_all();
531 }
532
533 /* tsunami.S */
534 extern void tsunami_flush_cache_all(void);
535 extern void tsunami_flush_cache_mm(struct mm_struct *mm);
536 extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
537 extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
538 extern void tsunami_flush_page_to_ram(unsigned long page);
539 extern void tsunami_flush_page_for_dma(unsigned long page);
540 extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
541 extern void tsunami_flush_tlb_all(void);
542 extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
543 extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
544 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
545 extern void tsunami_setup_blockops(void);
546
547 /* swift.S */
548 extern void swift_flush_cache_all(void);
549 extern void swift_flush_cache_mm(struct mm_struct *mm);
550 extern void swift_flush_cache_range(struct vm_area_struct *vma,
551                                     unsigned long start, unsigned long end);
552 extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
553 extern void swift_flush_page_to_ram(unsigned long page);
554 extern void swift_flush_page_for_dma(unsigned long page);
555 extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
556 extern void swift_flush_tlb_all(void);
557 extern void swift_flush_tlb_mm(struct mm_struct *mm);
558 extern void swift_flush_tlb_range(struct vm_area_struct *vma,
559                                   unsigned long start, unsigned long end);
560 extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
561
562 #if 0  /* P3: deadwood to debug precise flushes on Swift. */
563 void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
564 {
565         int cctx, ctx1;
566
567         page &= PAGE_MASK;
568         if ((ctx1 = vma->vm_mm->context) != -1) {
569                 cctx = srmmu_get_context();
570 /* Is context # ever different from current context? P3 */
571                 if (cctx != ctx1) {
572                         printk("flush ctx %02x curr %02x\n", ctx1, cctx);
573                         srmmu_set_context(ctx1);
574                         swift_flush_page(page);
575                         __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
576                                         "r" (page), "i" (ASI_M_FLUSH_PROBE));
577                         srmmu_set_context(cctx);
578                 } else {
579                          /* Rm. prot. bits from virt. c. */
580                         /* swift_flush_cache_all(); */
581                         /* swift_flush_cache_page(vma, page); */
582                         swift_flush_page(page);
583
584                         __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
585                                 "r" (page), "i" (ASI_M_FLUSH_PROBE));
586                         /* same as above: srmmu_flush_tlb_page() */
587                 }
588         }
589 }
590 #endif
591
592 /*
593  * The following are all MBUS based SRMMU modules, and therefore could
594  * be found in a multiprocessor configuration.  On the whole, these
595  * chips seem to be much more touchy about DVMA and page tables
596  * with respect to cache coherency.
597  */
598
599 /* viking.S */
600 extern void viking_flush_cache_all(void);
601 extern void viking_flush_cache_mm(struct mm_struct *mm);
602 extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
603                                      unsigned long end);
604 extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
605 extern void viking_flush_page_to_ram(unsigned long page);
606 extern void viking_flush_page_for_dma(unsigned long page);
607 extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
608 extern void viking_flush_page(unsigned long page);
609 extern void viking_mxcc_flush_page(unsigned long page);
610 extern void viking_flush_tlb_all(void);
611 extern void viking_flush_tlb_mm(struct mm_struct *mm);
612 extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
613                                    unsigned long end);
614 extern void viking_flush_tlb_page(struct vm_area_struct *vma,
615                                   unsigned long page);
616 extern void sun4dsmp_flush_tlb_all(void);
617 extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
618 extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
619                                    unsigned long end);
620 extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
621                                   unsigned long page);
622
623 /* hypersparc.S */
624 extern void hypersparc_flush_cache_all(void);
625 extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
626 extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
627 extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
628 extern void hypersparc_flush_page_to_ram(unsigned long page);
629 extern void hypersparc_flush_page_for_dma(unsigned long page);
630 extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
631 extern void hypersparc_flush_tlb_all(void);
632 extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
633 extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
634 extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
635 extern void hypersparc_setup_blockops(void);
636
637 /*
638  * NOTE: All of this startup code assumes the low 16mb (approx.) of
639  *       kernel mappings are done with one single contiguous chunk of
640  *       ram.  On small ram machines (classics mainly) we only get
641  *       around 8mb mapped for us.
642  */
643
644 static void __init early_pgtable_allocfail(char *type)
645 {
646         prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
647         prom_halt();
648 }
649
650 static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
651                                                         unsigned long end)
652 {
653         pgd_t *pgdp;
654         pmd_t *pmdp;
655         pte_t *ptep;
656
657         while (start < end) {
658                 pgdp = pgd_offset_k(start);
659                 if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
660                         pmdp = __srmmu_get_nocache(
661                             SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
662                         if (pmdp == NULL)
663                                 early_pgtable_allocfail("pmd");
664                         memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
665                         pgd_set(__nocache_fix(pgdp), pmdp);
666                 }
667                 pmdp = pmd_offset(__nocache_fix(pgdp), start);
668                 if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
669                         ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
670                         if (ptep == NULL)
671                                 early_pgtable_allocfail("pte");
672                         memset(__nocache_fix(ptep), 0, PTE_SIZE);
673                         pmd_set(__nocache_fix(pmdp), ptep);
674                 }
675                 if (start > (0xffffffffUL - PMD_SIZE))
676                         break;
677                 start = (start + PMD_SIZE) & PMD_MASK;
678         }
679 }
680
681 static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
682                                                   unsigned long end)
683 {
684         pgd_t *pgdp;
685         pmd_t *pmdp;
686         pte_t *ptep;
687
688         while (start < end) {
689                 pgdp = pgd_offset_k(start);
690                 if (pgd_none(*pgdp)) {
691                         pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
692                         if (pmdp == NULL)
693                                 early_pgtable_allocfail("pmd");
694                         memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
695                         pgd_set(pgdp, pmdp);
696                 }
697                 pmdp = pmd_offset(pgdp, start);
698                 if (srmmu_pmd_none(*pmdp)) {
699                         ptep = __srmmu_get_nocache(PTE_SIZE,
700                                                              PTE_SIZE);
701                         if (ptep == NULL)
702                                 early_pgtable_allocfail("pte");
703                         memset(ptep, 0, PTE_SIZE);
704                         pmd_set(pmdp, ptep);
705                 }
706                 if (start > (0xffffffffUL - PMD_SIZE))
707                         break;
708                 start = (start + PMD_SIZE) & PMD_MASK;
709         }
710 }
711
712 /* These flush types are not available on all chips... */
713 static inline unsigned long srmmu_probe(unsigned long vaddr)
714 {
715         unsigned long retval;
716
717         if (sparc_cpu_model != sparc_leon) {
718
719                 vaddr &= PAGE_MASK;
720                 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
721                                      "=r" (retval) :
722                                      "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
723         } else {
724                 retval = leon_swprobe(vaddr, 0);
725         }
726         return retval;
727 }
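/*
 * The "| 0x400" above selects the reference-MMU "probe entire" operation
 * in the type field of the flush/probe address (an assumption based on
 * the SPARC V8 reference MMU spec), so the lda returns the matching PTE,
 * or 0 when no translation exists -- which is what
 * srmmu_inherit_prom_mappings() below relies on.
 */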
728
729 /*
730  * This is much cleaner than poking around physical address space
731  * looking at the prom's page table directly which is what most
732  * other OS's do.  Yuck... this is much better.
733  */
734 static void __init srmmu_inherit_prom_mappings(unsigned long start,
735                                                unsigned long end)
736 {
737         unsigned long probed;
738         unsigned long addr;
739         pgd_t *pgdp;
740         pmd_t *pmdp;
741         pte_t *ptep;
742         int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
743
744         while (start <= end) {
745                 if (start == 0)
746                         break; /* probably wrap around */
747                 if (start == 0xfef00000)
748                         start = KADB_DEBUGGER_BEGVM;
749                 probed = srmmu_probe(start);
750                 if (!probed) {
751                         /* continue probing until we find an entry */
752                         start += PAGE_SIZE;
753                         continue;
754                 }
755
756                 /* A red snapper, see what it really is. */
757                 what = 0;
758                 addr = start - PAGE_SIZE;
759
760                 if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
761                         if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
762                                 what = 1;
763                 }
764
765                 if (!(start & ~(SRMMU_PGDIR_MASK))) {
766                         if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
767                                 what = 2;
768                 }
769
770                 pgdp = pgd_offset_k(start);
771                 if (what == 2) {
772                         *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
773                         start += SRMMU_PGDIR_SIZE;
774                         continue;
775                 }
776                 if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
777                         pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
778                                                    SRMMU_PMD_TABLE_SIZE);
779                         if (pmdp == NULL)
780                                 early_pgtable_allocfail("pmd");
781                         memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
782                         pgd_set(__nocache_fix(pgdp), pmdp);
783                 }
784                 pmdp = pmd_offset(__nocache_fix(pgdp), start);
785                 if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
786                         ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
787                         if (ptep == NULL)
788                                 early_pgtable_allocfail("pte");
789                         memset(__nocache_fix(ptep), 0, PTE_SIZE);
790                         pmd_set(__nocache_fix(pmdp), ptep);
791                 }
792                 if (what == 1) {
793                         /* We bend the rule where all 16 PTPs in a pmd_t point
794                          * inside the same PTE page, and we leak a perfectly
795                          * good hardware PTE piece. Alternatives seem worse.
796                          */
797                         unsigned int x; /* Index of HW PMD in soft cluster */
798                         unsigned long *val;
799                         x = (start >> PMD_SHIFT) & 15;
800                         val = &pmdp->pmdv[x];
801                         *(unsigned long *)__nocache_fix(val) = probed;
802                         start += SRMMU_REAL_PMD_SIZE;
803                         continue;
804                 }
805                 ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
806                 *(pte_t *)__nocache_fix(ptep) = __pte(probed);
807                 start += PAGE_SIZE;
808         }
809 }
810
811 #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
812
813 /* Create a third-level SRMMU 16MB page mapping. */
814 static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
815 {
816         pgd_t *pgdp = pgd_offset_k(vaddr);
817         unsigned long big_pte;
818
819         big_pte = KERNEL_PTE(phys_base >> 4);
820         *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
821 }
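/*
 * Note that this installs a PTE (not a PTD) directly in the pgd slot, so
 * a single entry maps a whole SRMMU_PGDIR_SIZE (16 MB) region: the value
 * is simply (phys_base >> 4) with SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID
 * set.  map_spbank() below repeats this for every 16 MB chunk of a bank.
 */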
822
823 /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
824 static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
825 {
826         unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
827         unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
828         unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
829         /* Map "low" memory only */
830         const unsigned long min_vaddr = PAGE_OFFSET;
831         const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
832
833         if (vstart < min_vaddr || vstart >= max_vaddr)
834                 return vstart;
835
836         if (vend > max_vaddr || vend < min_vaddr)
837                 vend = max_vaddr;
838
839         while (vstart < vend) {
840                 do_large_mapping(vstart, pstart);
841                 vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
842         }
843         return vstart;
844 }
845
846 static void __init map_kernel(void)
847 {
848         int i;
849
850         if (phys_base > 0) {
851                 do_large_mapping(PAGE_OFFSET, phys_base);
852         }
853
854         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
855                 map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
856         }
857 }
858
859 void (*poke_srmmu)(void) __cpuinitdata = NULL;
860
861 extern unsigned long bootmem_init(unsigned long *pages_avail);
862
863 void __init srmmu_paging_init(void)
864 {
865         int i;
866         phandle cpunode;
867         char node_str[128];
868         pgd_t *pgd;
869         pmd_t *pmd;
870         pte_t *pte;
871         unsigned long pages_avail;
872
873         init_mm.context = (unsigned long) NO_CONTEXT;
874         sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */
875
876         if (sparc_cpu_model == sun4d)
877                 num_contexts = 65536; /* We know it is Viking */
878         else {
879                 /* Find the number of contexts on the srmmu. */
880                 cpunode = prom_getchild(prom_root_node);
881                 num_contexts = 0;
882                 while (cpunode != 0) {
883                         prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
884                         if (!strcmp(node_str, "cpu")) {
885                                 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
886                                 break;
887                         }
888                         cpunode = prom_getsibling(cpunode);
889                 }
890         }
891
892         if (!num_contexts) {
893                 prom_printf("Something is wrong, can't find cpu node in paging_init.\n");
894                 prom_halt();
895         }
896
897         pages_avail = 0;
898         last_valid_pfn = bootmem_init(&pages_avail);
899
900         srmmu_nocache_calcsize();
901         srmmu_nocache_init();
902         srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
903         map_kernel();
904
905         /* ctx table has to be physically aligned to its size */
906         srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
907         srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
908
909         for (i = 0; i < num_contexts; i++)
910                 srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
911
912         flush_cache_all();
913         srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
914 #ifdef CONFIG_SMP
915         /* Stop from hanging here... */
916         local_ops->tlb_all();
917 #else
918         flush_tlb_all();
919 #endif
920         poke_srmmu();
921
922         srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
923         srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
924
925         srmmu_allocate_ptable_skeleton(
926                 __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
927         srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
928
929         pgd = pgd_offset_k(PKMAP_BASE);
930         pmd = pmd_offset(pgd, PKMAP_BASE);
931         pte = pte_offset_kernel(pmd, PKMAP_BASE);
932         pkmap_page_table = pte;
933
934         flush_cache_all();
935         flush_tlb_all();
936
937         sparc_context_init(num_contexts);
938
939         kmap_init();
940
941         {
942                 unsigned long zones_size[MAX_NR_ZONES];
943                 unsigned long zholes_size[MAX_NR_ZONES];
944                 unsigned long npages;
945                 int znum;
946
947                 for (znum = 0; znum < MAX_NR_ZONES; znum++)
948                         zones_size[znum] = zholes_size[znum] = 0;
949
950                 npages = max_low_pfn - pfn_base;
951
952                 zones_size[ZONE_DMA] = npages;
953                 zholes_size[ZONE_DMA] = npages - pages_avail;
954
955                 npages = highend_pfn - max_low_pfn;
956                 zones_size[ZONE_HIGHMEM] = npages;
957                 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
958
959                 free_area_init_node(0, zones_size, pfn_base, zholes_size);
960         }
961 }
962
963 void mmu_info(struct seq_file *m)
964 {
965         seq_printf(m,
966                    "MMU type\t: %s\n"
967                    "contexts\t: %d\n"
968                    "nocache total\t: %ld\n"
969                    "nocache used\t: %d\n",
970                    srmmu_name,
971                    num_contexts,
972                    srmmu_nocache_size,
973                    srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
974 }
975
976 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
977 {
978         mm->context = NO_CONTEXT;
979         return 0;
980 }
981
982 void destroy_context(struct mm_struct *mm)
983 {
984
985         if (mm->context != NO_CONTEXT) {
986                 flush_cache_mm(mm);
987                 srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
988                 flush_tlb_mm(mm);
989                 spin_lock(&srmmu_context_spinlock);
990                 free_context(mm->context);
991                 spin_unlock(&srmmu_context_spinlock);
992                 mm->context = NO_CONTEXT;
993         }
994 }
995
996 /* Init various srmmu chip types. */
997 static void __init srmmu_is_bad(void)
998 {
999         prom_printf("Could not determine SRMMU chip type.\n");
1000         prom_halt();
1001 }
1002
1003 static void __init init_vac_layout(void)
1004 {
1005         phandle nd;
1006         int cache_lines;
1007         char node_str[128];
1008 #ifdef CONFIG_SMP
1009         int cpu = 0;
1010         unsigned long max_size = 0;
1011         unsigned long min_line_size = 0x10000000;
1012 #endif
1013
1014         nd = prom_getchild(prom_root_node);
1015         while ((nd = prom_getsibling(nd)) != 0) {
1016                 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
1017                 if (!strcmp(node_str, "cpu")) {
1018                         vac_line_size = prom_getint(nd, "cache-line-size");
1019                         if (vac_line_size == -1) {
1020                                 prom_printf("can't determine cache-line-size, halting.\n");
1021                                 prom_halt();
1022                         }
1023                         cache_lines = prom_getint(nd, "cache-nlines");
1024                         if (cache_lines == -1) {
1025                                 prom_printf("can't determine cache-nlines, halting.\n");
1026                                 prom_halt();
1027                         }
1028
1029                         vac_cache_size = cache_lines * vac_line_size;
1030 #ifdef CONFIG_SMP
1031                         if (vac_cache_size > max_size)
1032                                 max_size = vac_cache_size;
1033                         if (vac_line_size < min_line_size)
1034                                 min_line_size = vac_line_size;
1035                         //FIXME: cpus not contiguous!!
1036                         cpu++;
1037                         if (cpu >= nr_cpu_ids || !cpu_online(cpu))
1038                                 break;
1039 #else
1040                         break;
1041 #endif
1042                 }
1043         }
1044         if (nd == 0) {
1045                 prom_printf("No CPU nodes found, halting.\n");
1046                 prom_halt();
1047         }
1048 #ifdef CONFIG_SMP
1049         vac_cache_size = max_size;
1050         vac_line_size = min_line_size;
1051 #endif
1052         printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
1053                (int)vac_cache_size, (int)vac_line_size);
1054 }
1055
1056 static void __cpuinit poke_hypersparc(void)
1057 {
1058         volatile unsigned long clear;
1059         unsigned long mreg = srmmu_get_mmureg();
1060
1061         hyper_flush_unconditional_combined();
1062
1063         mreg &= ~(HYPERSPARC_CWENABLE);
1064         mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
1065         mreg |= (HYPERSPARC_CMODE);
1066
1067         srmmu_set_mmureg(mreg);
1068
1069 #if 0 /* XXX I think this is bad news... -DaveM */
1070         hyper_clear_all_tags();
1071 #endif
1072
1073         put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
1074         hyper_flush_whole_icache();
1075         clear = srmmu_get_faddr();
1076         clear = srmmu_get_fstatus();
1077 }
1078
1079 static const struct sparc32_cachetlb_ops hypersparc_ops = {
1080         .cache_all      = hypersparc_flush_cache_all,
1081         .cache_mm       = hypersparc_flush_cache_mm,
1082         .cache_page     = hypersparc_flush_cache_page,
1083         .cache_range    = hypersparc_flush_cache_range,
1084         .tlb_all        = hypersparc_flush_tlb_all,
1085         .tlb_mm         = hypersparc_flush_tlb_mm,
1086         .tlb_page       = hypersparc_flush_tlb_page,
1087         .tlb_range      = hypersparc_flush_tlb_range,
1088         .page_to_ram    = hypersparc_flush_page_to_ram,
1089         .sig_insns      = hypersparc_flush_sig_insns,
1090         .page_for_dma   = hypersparc_flush_page_for_dma,
1091 };
1092
1093 static void __init init_hypersparc(void)
1094 {
1095         srmmu_name = "ROSS HyperSparc";
1096         srmmu_modtype = HyperSparc;
1097
1098         init_vac_layout();
1099
1100         is_hypersparc = 1;
1101         sparc32_cachetlb_ops = &hypersparc_ops;
1102
1103         poke_srmmu = poke_hypersparc;
1104
1105         hypersparc_setup_blockops();
1106 }
1107
1108 static void __cpuinit poke_swift(void)
1109 {
1110         unsigned long mreg;
1111
1112         /* Clear any crap from the cache or else... */
1113         swift_flush_cache_all();
1114
1115         /* Enable I & D caches */
1116         mreg = srmmu_get_mmureg();
1117         mreg |= (SWIFT_IE | SWIFT_DE);
1118         /*
1119          * The Swift branch folding logic is completely broken.  At
1120          * trap time, if things are just right, it can mistakenly
1121          * think that a trap is coming from kernel mode when in fact
1122          * it is coming from user mode (it mis-executes the branch in
1123          * the trap code).  So you see things like crashme completely
1124          * hosing your machine which is completely unacceptable.  Turn
1125          * this shit off... nice job Fujitsu.
1126          */
1127         mreg &= ~(SWIFT_BF);
1128         srmmu_set_mmureg(mreg);
1129 }
1130
1131 static const struct sparc32_cachetlb_ops swift_ops = {
1132         .cache_all      = swift_flush_cache_all,
1133         .cache_mm       = swift_flush_cache_mm,
1134         .cache_page     = swift_flush_cache_page,
1135         .cache_range    = swift_flush_cache_range,
1136         .tlb_all        = swift_flush_tlb_all,
1137         .tlb_mm         = swift_flush_tlb_mm,
1138         .tlb_page       = swift_flush_tlb_page,
1139         .tlb_range      = swift_flush_tlb_range,
1140         .page_to_ram    = swift_flush_page_to_ram,
1141         .sig_insns      = swift_flush_sig_insns,
1142         .page_for_dma   = swift_flush_page_for_dma,
1143 };
1144
1145 #define SWIFT_MASKID_ADDR  0x10003018
1146 static void __init init_swift(void)
1147 {
1148         unsigned long swift_rev;
1149
1150         __asm__ __volatile__("lda [%1] %2, %0\n\t"
1151                              "srl %0, 0x18, %0\n\t" :
1152                              "=r" (swift_rev) :
1153                              "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
1154         srmmu_name = "Fujitsu Swift";
1155         switch (swift_rev) {
1156         case 0x11:
1157         case 0x20:
1158         case 0x23:
1159         case 0x30:
1160                 srmmu_modtype = Swift_lots_o_bugs;
1161                 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
1162                 /*
1163                  * Gee george, I wonder why Sun is so hush hush about
1164                  * this hardware bug... really braindamaged stuff going
1165                  * on here.  However I think we can find a way to avoid
1166                  * all of the workaround overhead under Linux.  Basically,
1167                  * any page fault can cause kernel pages to become user
1168                  * accessible (the mmu gets confused and clears some of
1169                  * the ACC bits in kernel ptes).  Aha, sounds pretty
1170                  * horrible eh?  But wait, after extensive testing it appears
1171                  * that if you use pgd_t level large kernel pte's (like the
1172                  * 4MB pages on the Pentium) the bug does not get tripped
1173                  * at all.  This avoids almost all of the major overhead.
1174                  * Welcome to a world where your vendor tells you to,
1175                  * "apply this kernel patch" instead of "sorry for the
1176                  * broken hardware, send it back and we'll give you
1177                  * properly functioning parts"
1178                  */
1179                 break;
1180         case 0x25:
1181         case 0x31:
1182                 srmmu_modtype = Swift_bad_c;
1183                 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
1184                 /*
1185                  * You see Sun allude to this hardware bug but never
1186                  * admit things directly, they'll say things like,
1187                  * "the Swift chip cache problems" or similar.
1188                  */
1189                 break;
1190         default:
1191                 srmmu_modtype = Swift_ok;
1192                 break;
1193         }
1194
1195         sparc32_cachetlb_ops = &swift_ops;
1196         flush_page_for_dma_global = 0;
1197
1198         /*
1199          * Are you now convinced that the Swift is one of the
1200          * biggest VLSI abortions of all time?  Bravo Fujitsu!
1201          * Fujitsu, the !#?!%$'d up processor people.  I bet if
1202          * you examined the microcode of the Swift you'd find
1203          * XXX's all over the place.
1204          */
1205         poke_srmmu = poke_swift;
1206 }
1207
1208 static void turbosparc_flush_cache_all(void)
1209 {
1210         flush_user_windows();
1211         turbosparc_idflash_clear();
1212 }
1213
1214 static void turbosparc_flush_cache_mm(struct mm_struct *mm)
1215 {
1216         FLUSH_BEGIN(mm)
1217         flush_user_windows();
1218         turbosparc_idflash_clear();
1219         FLUSH_END
1220 }
1221
1222 static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1223 {
1224         FLUSH_BEGIN(vma->vm_mm)
1225         flush_user_windows();
1226         turbosparc_idflash_clear();
1227         FLUSH_END
1228 }
1229
1230 static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1231 {
1232         FLUSH_BEGIN(vma->vm_mm)
1233         flush_user_windows();
1234         if (vma->vm_flags & VM_EXEC)
1235                 turbosparc_flush_icache();
1236         turbosparc_flush_dcache();
1237         FLUSH_END
1238 }
1239
1240 /* TurboSparc is copy-back, if we turn it on, but this does not work. */
1241 static void turbosparc_flush_page_to_ram(unsigned long page)
1242 {
1243 #ifdef TURBOSPARC_WRITEBACK
1244         volatile unsigned long clear;
1245
1246         if (srmmu_probe(page))
1247                 turbosparc_flush_page_cache(page);
1248         clear = srmmu_get_fstatus();
1249 #endif
1250 }
1251
1252 static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1253 {
1254 }
1255
1256 static void turbosparc_flush_page_for_dma(unsigned long page)
1257 {
1258         turbosparc_flush_dcache();
1259 }
1260
1261 static void turbosparc_flush_tlb_all(void)
1262 {
1263         srmmu_flush_whole_tlb();
1264 }
1265
1266 static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
1267 {
1268         FLUSH_BEGIN(mm)
1269         srmmu_flush_whole_tlb();
1270         FLUSH_END
1271 }
1272
1273 static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1274 {
1275         FLUSH_BEGIN(vma->vm_mm)
1276         srmmu_flush_whole_tlb();
1277         FLUSH_END
1278 }
1279
1280 static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1281 {
1282         FLUSH_BEGIN(vma->vm_mm)
1283         srmmu_flush_whole_tlb();
1284         FLUSH_END
1285 }
1286
1287
1288 static void __cpuinit poke_turbosparc(void)
1289 {
1290         unsigned long mreg = srmmu_get_mmureg();
1291         unsigned long ccreg;
1292
1293         /* Clear any crap from the cache or else... */
1294         turbosparc_flush_cache_all();
1295         /* Temporarily disable I & D caches */
1296         mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
1297         mreg &= ~(TURBOSPARC_PCENABLE);         /* Don't check parity */
1298         srmmu_set_mmureg(mreg);
1299
1300         ccreg = turbosparc_get_ccreg();
1301
1302 #ifdef TURBOSPARC_WRITEBACK
1303         ccreg |= (TURBOSPARC_SNENABLE);         /* Do DVMA snooping in Dcache */
1304         ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
1305                         /* Write-back D-cache, emulate VLSI
1306                          * abortion number three, not number one */
1307 #else
1308         /* For now let's play safe, optimize later */
1309         ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
1310                         /* Do DVMA snooping in Dcache, Write-thru D-cache */
1311         ccreg &= ~(TURBOSPARC_uS2);
1312                         /* Emulate VLSI abortion number three, not number one */
1313 #endif
1314
1315         switch (ccreg & 7) {
1316         case 0: /* No SE cache */
1317         case 7: /* Test mode */
1318                 break;
1319         default:
1320                 ccreg |= (TURBOSPARC_SCENABLE);
1321         }
1322         turbosparc_set_ccreg(ccreg);
1323
1324         mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
1325         mreg |= (TURBOSPARC_ICSNOOP);           /* Icache snooping on */
1326         srmmu_set_mmureg(mreg);
1327 }
1328
1329 static const struct sparc32_cachetlb_ops turbosparc_ops = {
1330         .cache_all      = turbosparc_flush_cache_all,
1331         .cache_mm       = turbosparc_flush_cache_mm,
1332         .cache_page     = turbosparc_flush_cache_page,
1333         .cache_range    = turbosparc_flush_cache_range,
1334         .tlb_all        = turbosparc_flush_tlb_all,
1335         .tlb_mm         = turbosparc_flush_tlb_mm,
1336         .tlb_page       = turbosparc_flush_tlb_page,
1337         .tlb_range      = turbosparc_flush_tlb_range,
1338         .page_to_ram    = turbosparc_flush_page_to_ram,
1339         .sig_insns      = turbosparc_flush_sig_insns,
1340         .page_for_dma   = turbosparc_flush_page_for_dma,
1341 };
1342
1343 static void __init init_turbosparc(void)
1344 {
1345         srmmu_name = "Fujitsu TurboSparc";
1346         srmmu_modtype = TurboSparc;
1347         sparc32_cachetlb_ops = &turbosparc_ops;
1348         poke_srmmu = poke_turbosparc;
1349 }
1350
1351 static void __cpuinit poke_tsunami(void)
1352 {
1353         unsigned long mreg = srmmu_get_mmureg();
1354
1355         tsunami_flush_icache();
1356         tsunami_flush_dcache();
1357         mreg &= ~TSUNAMI_ITD;
1358         mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
1359         srmmu_set_mmureg(mreg);
1360 }
1361
1362 static const struct sparc32_cachetlb_ops tsunami_ops = {
1363         .cache_all      = tsunami_flush_cache_all,
1364         .cache_mm       = tsunami_flush_cache_mm,
1365         .cache_page     = tsunami_flush_cache_page,
1366         .cache_range    = tsunami_flush_cache_range,
1367         .tlb_all        = tsunami_flush_tlb_all,
1368         .tlb_mm         = tsunami_flush_tlb_mm,
1369         .tlb_page       = tsunami_flush_tlb_page,
1370         .tlb_range      = tsunami_flush_tlb_range,
1371         .page_to_ram    = tsunami_flush_page_to_ram,
1372         .sig_insns      = tsunami_flush_sig_insns,
1373         .page_for_dma   = tsunami_flush_page_for_dma,
1374 };
1375
1376 static void __init init_tsunami(void)
1377 {
1378         /*
1379          * Tsunami's pretty sane, Sun and TI actually got it
1380          * somewhat right this time.  Fujitsu should have
1381          * taken some lessons from them.
1382          */
1383
1384         srmmu_name = "TI Tsunami";
1385         srmmu_modtype = Tsunami;
1386         sparc32_cachetlb_ops = &tsunami_ops;
1387         poke_srmmu = poke_tsunami;
1388
1389         tsunami_setup_blockops();
1390 }
1391
1392 static void __cpuinit poke_viking(void)
1393 {
1394         unsigned long mreg = srmmu_get_mmureg();
1395         static int smp_catch;
1396
1397         if (viking_mxcc_present) {
1398                 unsigned long mxcc_control = mxcc_get_creg();
1399
1400                 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
1401                 mxcc_control &= ~(MXCC_CTL_RRC);
1402                 mxcc_set_creg(mxcc_control);
1403
1404                 /*
1405                  * We don't need memory parity checks.
1406                  * XXX This is a mess, have to dig out later. ecd.
1407                 viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
1408                  */
1409
1410                 /* We do cache ptables on MXCC. */
1411                 mreg |= VIKING_TCENABLE;
1412         } else {
1413                 unsigned long bpreg;
1414
1415                 mreg &= ~(VIKING_TCENABLE);
1416                 if (smp_catch++) {
1417                         /* Must disable mixed-cmd mode here for other cpu's. */
1418                         bpreg = viking_get_bpreg();
1419                         bpreg &= ~(VIKING_ACTION_MIX);
1420                         viking_set_bpreg(bpreg);
1421
1422                         /* Just in case PROM does something funny. */
1423                         msi_set_sync();
1424                 }
1425         }
1426
1427         mreg |= VIKING_SPENABLE;
1428         mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
1429         mreg |= VIKING_SBENABLE;
1430         mreg &= ~(VIKING_ACENABLE);
1431         srmmu_set_mmureg(mreg);
1432 }
1433
1434 static struct sparc32_cachetlb_ops viking_ops = {
1435         .cache_all      = viking_flush_cache_all,
1436         .cache_mm       = viking_flush_cache_mm,
1437         .cache_page     = viking_flush_cache_page,
1438         .cache_range    = viking_flush_cache_range,
1439         .tlb_all        = viking_flush_tlb_all,
1440         .tlb_mm         = viking_flush_tlb_mm,
1441         .tlb_page       = viking_flush_tlb_page,
1442         .tlb_range      = viking_flush_tlb_range,
1443         .page_to_ram    = viking_flush_page_to_ram,
1444         .sig_insns      = viking_flush_sig_insns,
1445         .page_for_dma   = viking_flush_page_for_dma,
1446 };
1447
1448 #ifdef CONFIG_SMP
1449 /* On sun4d the cpu broadcasts local TLB flushes, so we can just
1450  * perform the local TLB flush and all the other cpus will see it.
1451  * But, unfortunately, there is a bug in the sun4d XBUS backplane
1452  * that requires that we add some synchronization to these flushes.
1453  *
1454  * The bug is that the fifo which keeps track of all the pending TLB
1455  * broadcasts in the system is an entry or two too small, so if we
1456  * have too many going at once we'll overflow that fifo and lose a TLB
1457  * flush, resulting in corruption.
1458  *
1459  * Our workaround is to take a global spinlock around the TLB flushes,
1460  * which guarantees we never have too many pending.  It's a big
1461  * hammer, but a semaphore-like scheme that caps the number of
1462  * in-flight flushes at N would require SMP locking anyway, so there
1463  * is no real value in trying harder (see the sketch below the ops table).
1464  */
1465 static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
1466         .cache_all      = viking_flush_cache_all,
1467         .cache_mm       = viking_flush_cache_mm,
1468         .cache_page     = viking_flush_cache_page,
1469         .cache_range    = viking_flush_cache_range,
1470         .tlb_all        = sun4dsmp_flush_tlb_all,
1471         .tlb_mm         = sun4dsmp_flush_tlb_mm,
1472         .tlb_page       = sun4dsmp_flush_tlb_page,
1473         .tlb_range      = sun4dsmp_flush_tlb_range,
1474         .page_to_ram    = viking_flush_page_to_ram,
1475         .sig_insns      = viking_flush_sig_insns,
1476         .page_for_dma   = viking_flush_page_for_dma,
1477 };
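/*
 * Illustrative sketch only -- not part of this file.  Given the comment
 * above, the sun4dsmp_flush_tlb_*() helpers (defined elsewhere) are
 * assumed to serialize the broadcast behind a single global spinlock so
 * the XBUS fifo can never be overrun.  The lock and function names below
 * are hypothetical.
 */
#if 0
static DEFINE_SPINLOCK(sun4d_xbus_flush_lock);          /* hypothetical */

static void sun4dsmp_flush_tlb_all_sketch(void)
{
        unsigned long flags;

        /* Only one broadcast flush in flight at any time. */
        spin_lock_irqsave(&sun4d_xbus_flush_lock, flags);
        local_ops->tlb_all();   /* hardware broadcasts the local flush */
        spin_unlock_irqrestore(&sun4d_xbus_flush_lock, flags);
}
#endif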
1478 #endif
1479
1480 static void __init init_viking(void)
1481 {
1482         unsigned long mreg = srmmu_get_mmureg();
1483
1484         /* Ahhh, the viking.  SRMMU VLSI abortion number two... */
1485         if (mreg & VIKING_MMODE) {
1486                 srmmu_name = "TI Viking";
1487                 viking_mxcc_present = 0;
1488                 msi_set_sync();
1489
1490                 /*
1491                  * We need this to make sure the old viking takes no hits
1492                  * on its cache for DMA snoops, to work around the
1493                  * "load from non-cacheable memory" interrupt bug.
1494                  * This is only necessary because of the new way in
1495                  * which we use the IOMMU.
1496                  */
1497                 viking_ops.page_for_dma = viking_flush_page;
1498 #ifdef CONFIG_SMP
1499                 viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1500 #endif
1501                 flush_page_for_dma_global = 0;
1502         } else {
1503                 srmmu_name = "TI Viking/MXCC";
1504                 viking_mxcc_present = 1;
1505                 srmmu_cache_pagetables = 1;
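                /* The MXCC lets us cache page tables; poke_viking() sets
                 * VIKING_TCENABLE for the same reason. */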
1506         }
1507
1508         sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1509                 &viking_ops;
1510 #ifdef CONFIG_SMP
1511         if (sparc_cpu_model == sun4d)
1512                 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1513                         &viking_sun4d_smp_ops;
1514 #endif
1515
1516         poke_srmmu = poke_viking;
1517 }
1518
1519 /* Probe for the srmmu chip version. */
1520 static void __init get_srmmu_type(void)
1521 {
1522         unsigned long mreg, psr;
1523         unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1524
1525         srmmu_modtype = SRMMU_INVAL_MOD;
1526         hwbug_bitmask = 0;
1527
1528         mreg = srmmu_get_mmureg(); psr = get_psr();
1529         mod_typ = (mreg & 0xf0000000) >> 28;
1530         mod_rev = (mreg & 0x0f000000) >> 24;
1531         psr_typ = (psr >> 28) & 0xf;
1532         psr_vers = (psr >> 24) & 0xf;
1533
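        /*
         * Quick summary of the probe below, derived from the checks that
         * follow:
         *
         *   sparc_leon model                            -> LEON
         *   mod_typ == 1, mod_rev == 7                  -> HyperSparc
         *   mod_typ == 1, any other rev                 -> Cypress (unsupported)
         *   psr_typ == 0, psr_vers == 5                 -> TurboSparc
         *   psr_typ == 0, psr_vers == 4                 -> Swift (unless the PROM
         *                                                  reveals a TurboSparc
         *                                                  emulating Swift)
         *   psr_typ == 4, psr_vers == 0                 -> Viking
         *   psr_typ == 4, psr_vers == 1, mod == 0/0     -> Viking
         *   psr_typ == 4, psr_vers == 1, mod bits set   -> Tsunami
         */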
1534         /* First, check for sparc-leon. */
1535         if (sparc_cpu_model == sparc_leon) {
1536                 init_leon();
1537                 return;
1538         }
1539
1540         /* Second, check for HyperSparc or Cypress. */
1541         if (mod_typ == 1) {
1542                 switch (mod_rev) {
1543                 case 7:
1544                         /* UP or MP Hypersparc */
1545                         init_hypersparc();
1546                         break;
1547                 case 0:
1548                 case 2:
1549                 case 10:
1550                 case 11:
1551                 case 12:
1552                 case 13:
1553                 case 14:
1554                 case 15:
1555                 default:
1556                         prom_printf("Sparc-Linux Cypress support no longer exists.\n");
1557                         prom_halt();
1558                         break;
1559                 }
1560                 return;
1561         }
1562
1563         /* Now Fujitsu TurboSparc.  It may be running in Swift
1564          * emulation mode, in which case we catch it in the Swift check below.
1565          */
1566         if (psr_typ == 0 && psr_vers == 5) {
1567                 init_turbosparc();
1568                 return;
1569         }
1570
1571         /* Next check for Fujitsu Swift. */
1572         if (psr_typ == 0 && psr_vers == 4) {
1573                 phandle cpunode;
1574                 char node_str[128];
1575
1576                 /* Check whether this is really a TurboSparc emulating Swift... */
1577                 cpunode = prom_getchild(prom_root_node);
1578                 while ((cpunode = prom_getsibling(cpunode)) != 0) {
1579                         prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1580                         if (!strcmp(node_str, "cpu")) {
1581                                 if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
1582                                     prom_getintdefault(cpunode, "psr-version", 1) == 5) {
1583                                         init_turbosparc();
1584                                         return;
1585                                 }
1586                                 break;
1587                         }
1588                 }
1589
1590                 init_swift();
1591                 return;
1592         }
1593
1594         /* Now the Viking family of srmmu. */
1595         if (psr_typ == 4 &&
1596            ((psr_vers == 0) ||
1597             ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1598                 init_viking();
1599                 return;
1600         }
1601
1602         /* Finally the Tsunami. */
1603         if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
1604                 init_tsunami();
1605                 return;
1606         }
1607
1608         /* Oh well */
1609         srmmu_is_bad();
1610 }
1611
1612 #ifdef CONFIG_SMP
1613 /* SMP cross-call wrappers around the local cache/TLB ops. */
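/*
 * The xc0()..xc3() helpers are assumed to run the given function on the
 * other online cpus with 0..3 arguments; each wrapper below therefore
 * broadcasts the flush and then invokes the saved local_ops variant on
 * the calling cpu itself.
 */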
1614 static void smp_flush_page_for_dma(unsigned long page)
1615 {
1616         xc1((smpfunc_t) local_ops->page_for_dma, page);
1617         local_ops->page_for_dma(page);
1618 }
1619
1620 static void smp_flush_cache_all(void)
1621 {
1622         xc0((smpfunc_t) local_ops->cache_all);
1623         local_ops->cache_all();
1624 }
1625
1626 static void smp_flush_tlb_all(void)
1627 {
1628         xc0((smpfunc_t) local_ops->tlb_all);
1629         local_ops->tlb_all();
1630 }
1631
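/*
 * Common pattern for the mm/vma based wrappers below: copy the mm's cpu
 * mask, drop the calling cpu from it, and cross-call only when some other
 * cpu has actually used this mm; the local flush always runs afterwards.
 */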
1632 static void smp_flush_cache_mm(struct mm_struct *mm)
1633 {
1634         if (mm->context != NO_CONTEXT) {
1635                 cpumask_t cpu_mask;
1636                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1637                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1638                 if (!cpumask_empty(&cpu_mask))
1639                         xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
1640                 local_ops->cache_mm(mm);
1641         }
1642 }
1643
1644 static void smp_flush_tlb_mm(struct mm_struct *mm)
1645 {
1646         if (mm->context != NO_CONTEXT) {
1647                 cpumask_t cpu_mask;
1648                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1649                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1650                 if (!cpumask_empty(&cpu_mask)) {
1651                         xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
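                        /* If we are the only user left, no other cpu needs
                         * future flushes for this mm, so shrink its cpumask
                         * back to just the current cpu. */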
1652                         if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
1653                                 cpumask_copy(mm_cpumask(mm),
1654                                              cpumask_of(smp_processor_id()));
1655                 }
1656                 local_ops->tlb_mm(mm);
1657         }
1658 }
1659
1660 static void smp_flush_cache_range(struct vm_area_struct *vma,
1661                                   unsigned long start,
1662                                   unsigned long end)
1663 {
1664         struct mm_struct *mm = vma->vm_mm;
1665
1666         if (mm->context != NO_CONTEXT) {
1667                 cpumask_t cpu_mask;
1668                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1669                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1670                 if (!cpumask_empty(&cpu_mask))
1671                         xc3((smpfunc_t) local_ops->cache_range,
1672                             (unsigned long) vma, start, end);
1673                 local_ops->cache_range(vma, start, end);
1674         }
1675 }
1676
1677 static void smp_flush_tlb_range(struct vm_area_struct *vma,
1678                                 unsigned long start,
1679                                 unsigned long end)
1680 {
1681         struct mm_struct *mm = vma->vm_mm;
1682
1683         if (mm->context != NO_CONTEXT) {
1684                 cpumask_t cpu_mask;
1685                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1686                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1687                 if (!cpumask_empty(&cpu_mask))
1688                         xc3((smpfunc_t) local_ops->tlb_range,
1689                             (unsigned long) vma, start, end);
1690                 local_ops->tlb_range(vma, start, end);
1691         }
1692 }
1693
1694 static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1695 {
1696         struct mm_struct *mm = vma->vm_mm;
1697
1698         if (mm->context != NO_CONTEXT) {
1699                 cpumask_t cpu_mask;
1700                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1701                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1702                 if (!cpumask_empty(&cpu_mask))
1703                         xc2((smpfunc_t) local_ops->cache_page,
1704                             (unsigned long) vma, page);
1705                 local_ops->cache_page(vma, page);
1706         }
1707 }
1708
1709 static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1710 {
1711         struct mm_struct *mm = vma->vm_mm;
1712
1713         if (mm->context != NO_CONTEXT) {
1714                 cpumask_t cpu_mask;
1715                 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1716                 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1717                 if (!cpumask_empty(&cpu_mask))
1718                         xc2((smpfunc_t) local_ops->tlb_page,
1719                             (unsigned long) vma, page);
1720                 local_ops->tlb_page(vma, page);
1721         }
1722 }
1723
1724 static void smp_flush_page_to_ram(unsigned long page)
1725 {
1726         /* The current theory is that the callers are the ones who have
1727          * just dirtied their cache with the page's contents in kernel
1728          * space, so we would only need to run this on the local cpu.
1729          *
1730          * XXX This experiment failed, research further... -DaveM
1731          */
1732 #if 1
1733         xc1((smpfunc_t) local_ops->page_to_ram, page);
1734 #endif
1735         local_ops->page_to_ram(page);
1736 }
1737
1738 static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1739 {
1740         cpumask_t cpu_mask;
1741         cpumask_copy(&cpu_mask, mm_cpumask(mm));
1742         cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1743         if (!cpumask_empty(&cpu_mask))
1744                 xc2((smpfunc_t) local_ops->sig_insns,
1745                     (unsigned long) mm, insn_addr);
1746         local_ops->sig_insns(mm, insn_addr);
1747 }
1748
1749 static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
1750         .cache_all      = smp_flush_cache_all,
1751         .cache_mm       = smp_flush_cache_mm,
1752         .cache_page     = smp_flush_cache_page,
1753         .cache_range    = smp_flush_cache_range,
1754         .tlb_all        = smp_flush_tlb_all,
1755         .tlb_mm         = smp_flush_tlb_mm,
1756         .tlb_page       = smp_flush_tlb_page,
1757         .tlb_range      = smp_flush_tlb_range,
1758         .page_to_ram    = smp_flush_page_to_ram,
1759         .sig_insns      = smp_flush_sig_insns,
1760         .page_for_dma   = smp_flush_page_for_dma,
1761 };
1762 #endif
1763
1764 /* Load up routines and constants for sun4m and sun4d mmu */
1765 void __init load_mmu(void)
1766 {
1767         extern void ld_mmu_iommu(void);
1768         extern void ld_mmu_iounit(void);
1769
1770         /* Functions */
1771         get_srmmu_type();
1772
1773 #ifdef CONFIG_SMP
1774         /* El switcheroo: stash the cpu ops for the smp_flush_*() wrappers. */
1775         local_ops = sparc32_cachetlb_ops;
1776
1777         if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
1778                 smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
1779                 smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
1780                 smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
1781                 smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
1782         }
1783
1784         if (poke_srmmu == poke_viking) {
1785                 /* Avoid unnecessary cross calls. */
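                /* (Presumably safe because the Viking keeps its caches
                 * coherent in hardware, so these need no broadcast.) */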
1786                 smp_cachetlb_ops.cache_all = local_ops->cache_all;
1787                 smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
1788                 smp_cachetlb_ops.cache_range = local_ops->cache_range;
1789                 smp_cachetlb_ops.cache_page = local_ops->cache_page;
1790
1791                 smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
1792                 smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
1793                 smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
1794         }
1795
1796         /* It really is const after this point. */
1797         sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1798                 &smp_cachetlb_ops;
1799 #endif
1800
1801         if (sparc_cpu_model == sun4d)
1802                 ld_mmu_iounit();
1803         else
1804                 ld_mmu_iommu();
1805 #ifdef CONFIG_SMP
1806         if (sparc_cpu_model == sun4d)
1807                 sun4d_init_smp();
1808         else if (sparc_cpu_model == sparc_leon)
1809                 leon_init_smp();
1810         else
1811                 sun4m_init_smp();
1812 #endif
1813 }
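/*
 * Illustrative only -- how the ops table chosen above is assumed to be
 * consumed.  The generic flush hooks (presumably in the sparc32
 * cacheflush/tlbflush headers) are expected to dispatch through
 * sparc32_cachetlb_ops roughly like this:
 */
#if 0
#define flush_cache_mm(mm)              sparc32_cachetlb_ops->cache_mm(mm)
#define flush_tlb_page(vma, addr)       sparc32_cachetlb_ops->tlb_page(vma, addr)
#endif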