// SPDX-License-Identifier: GPL-2.0
/*
 * srmmu.c: SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/io.h>

/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>
#include <asm/leon.h>
#include <asm/mxcc.h>
#include <asm/ross.h>

#include "mm_32.h"

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
EXPORT_SYMBOL(vac_cache_size);
int vac_line_size;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
EXPORT_SYMBOL(sparc32_cachetlb_ops);

#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END }
#endif
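
/*
 * The FLUSH_BEGIN/FLUSH_END pair above wrap the flush routines so that,
 * on UP, a flush is skipped entirely for an mm that was never given an
 * MMU context; on SMP the wrappers are empty and the flush always runs.
 */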

int flush_page_for_dma_global = 1;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
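/*
 * With the sparc32 PAGE_SHIFT of 12 this shift is 8: one bitmap bit
 * covers 1 << 8 = 256 bytes of nocache, i.e. 64 four-byte PTEs, which
 * is exactly the granularity stated above.
 */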

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
        pte_t pte;

        pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
        set_pte((pte_t *)ctxp, pte);
}
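
/*
 * SRMMU table descriptors and PTEs hold the physical address shifted
 * right by 4, so that a 36-bit physical address fits the 32-bit entry
 * with the page number landing in bits 31:8; hence the ">> 4" above
 * and in pmd_set()/srmmu_mapioaddr() below.
 */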

/*
 * Locations of MSI Registers.
 */
#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */

/*
 * Useful bits in the MSI Registers.
 */
#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */

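/*
 * Put the MSI into synchronous mode by clearing MSI_ASYNC_MODE in its
 * MBus arbiter enable register: read it, clear the bit, write it back.
 */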
static void msi_set_sync(void)
{
        __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
                              "andn %%g3, %2, %%g3\n\t"
                              "sta %%g3, [%0] %1\n\t" : :
                              "r" (MSI_MBUS_ARBEN),
                              "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
}

void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
        unsigned long ptp = __nocache_pa(ptep) >> 4;

        set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static void *__srmmu_get_nocache(int size, int align)
{
        int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;
        unsigned long addr;

        if (size < minsz) {
                printk(KERN_ERR "Size 0x%x too small for nocache request\n",
                       size);
                size = minsz;
        }
        if (size & (minsz - 1)) {
                printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
                       size);
                size += minsz - 1;
        }
        BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

        offset = bit_map_string_get(&srmmu_nocache_map,
                                    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
                                    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
        if (offset == -1) {
                printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
                       size, (int) srmmu_nocache_size,
                       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
                return NULL;
        }

        addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
        return (void *)addr;
}

void *srmmu_get_nocache(int size, int align)
{
        void *tmp;

        tmp = __srmmu_get_nocache(size, align);

        if (tmp)
                memset(tmp, 0, size);

        return tmp;
}

void srmmu_free_nocache(void *addr, int size)
{
        unsigned long vaddr;
        int offset;

        vaddr = (unsigned long)addr;
        if (vaddr < SRMMU_NOCACHE_VADDR) {
                printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
                       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
                BUG();
        }
        if (vaddr + size > srmmu_nocache_end) {
                printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
                       vaddr, srmmu_nocache_end);
                BUG();
        }
        if (!is_power_of_2(size)) {
                printk("Size 0x%x is not a power of 2\n", size);
                BUG();
        }
        if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
                printk("Size 0x%x is too small\n", size);
                BUG();
        }
        if (vaddr & (size - 1)) {
                printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
                BUG();
        }

        offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
        size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

        bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
                                                 unsigned long end);

/* Return how much physical memory we have. */
static unsigned long __init probe_memory(void)
{
        unsigned long total = 0;
        int i;

        for (i = 0; sp_banks[i].num_bytes; i++)
                total += sp_banks[i].num_bytes;

        return total;
}

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
        unsigned long sysmemavail = probe_memory() / 1024;
        int srmmu_nocache_npages;

        srmmu_nocache_npages =
                sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

        /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
        // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
        if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
                srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

        /* anything above 1280 blows up */
        if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
                srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

        srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
        srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
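
/*
 * Worked example, assuming an SRMMU_NOCACHE_ALCRATIO of 64: with 64 MB
 * of system RAM, 65536 KB / 64 / 1024 * 256 = 256 nocache pages, i.e.
 * 1 MB of nocache per 64 MB of RAM, clamped to the MIN/MAX limits above.
 */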

static void __init srmmu_nocache_init(void)
{
        void *srmmu_nocache_bitmap;
        unsigned int bitmap_bits;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long paddr, vaddr;
        unsigned long pteval;

        bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

        srmmu_nocache_pool = memblock_alloc_or_panic(srmmu_nocache_size,
                                                     SRMMU_NOCACHE_ALIGN_MAX);
        memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

        srmmu_nocache_bitmap =
                memblock_alloc_or_panic(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
                                        SMP_CACHE_BYTES);
        bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

        srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
        memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
        init_mm.pgd = srmmu_swapper_pg_dir;

        srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

        paddr = __pa((unsigned long)srmmu_nocache_pool);
        vaddr = SRMMU_NOCACHE_VADDR;

        while (vaddr < srmmu_nocache_end) {
                pgd = pgd_offset_k(vaddr);
                p4d = p4d_offset(pgd, vaddr);
                pud = pud_offset(p4d, vaddr);
                pmd = pmd_offset(__nocache_fix(pud), vaddr);
                pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

                pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

                if (srmmu_cache_pagetables)
                        pteval |= SRMMU_CACHE;

                set_pte(__nocache_fix(pte), __pte(pteval));

                vaddr += PAGE_SIZE;
                paddr += PAGE_SIZE;
        }

        flush_cache_all();
        flush_tlb_all();
}

pgd_t *get_pgd_fast(void)
{
        pgd_t *pgd = NULL;

        pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
        if (pgd) {
                pgd_t *init = pgd_offset_k(0);
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }

        return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
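/*
 * Since a nocache PTE table is smaller than a page, several tables can
 * share one page.  The page refcount counts the live tables: the ctor
 * runs when the first table on a page is handed out (refcount 1 -> 2)
 * and the dtor when the last one is freed (refcount 2 -> 1).
 */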
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
        pte_t *ptep;
        struct page *page;

        if (!(ptep = pte_alloc_one_kernel(mm)))
                return NULL;
        page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
        spin_lock(&mm->page_table_lock);
        if (page_ref_inc_return(page) == 2 &&
            !pagetable_pte_ctor(mm, page_ptdesc(page))) {
                page_ref_dec(page);
                ptep = NULL;
        }
        spin_unlock(&mm->page_table_lock);

        return ptep;
}

void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
        struct page *page;

        page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
        spin_lock(&mm->page_table_lock);
        if (page_ref_dec_return(page) == 1)
                pagetable_dtor(page_ptdesc(page));
        spin_unlock(&mm->page_table_lock);

        srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
}

/* context handling - a dynamically sized pool is used */
#define NO_CONTEXT -1

struct ctx_list {
        struct ctx_list *next;
        struct ctx_list *prev;
        unsigned int ctx_number;
        struct mm_struct *ctx_mm;
};

static struct ctx_list *ctx_list_pool;
static struct ctx_list ctx_free;
static struct ctx_list ctx_used;

/* At boot time we determine the number of contexts */
static int num_contexts;

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
        entry->next = head;
        (entry->prev = head->prev)->next = entry;
        head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)

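/*
 * Hand out an MMU context: take one from the free list when possible,
 * otherwise steal the oldest entry on the used list (skipping old_mm),
 * flushing the victim's cache and TLB before reassigning its number.
 */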
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
        struct ctx_list *ctxp;

        ctxp = ctx_free.next;
        if (ctxp != &ctx_free) {
                remove_from_ctx_list(ctxp);
                add_to_used_ctxlist(ctxp);
                mm->context = ctxp->ctx_number;
                ctxp->ctx_mm = mm;
                return;
        }
        ctxp = ctx_used.next;
        if (ctxp->ctx_mm == old_mm)
                ctxp = ctxp->next;
        if (ctxp == &ctx_used)
                panic("out of mmu contexts");
        flush_cache_mm(ctxp->ctx_mm);
        flush_tlb_mm(ctxp->ctx_mm);
        remove_from_ctx_list(ctxp);
        add_to_used_ctxlist(ctxp);
        ctxp->ctx_mm->context = NO_CONTEXT;
        ctxp->ctx_mm = mm;
        mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
        struct ctx_list *ctx_old;

        ctx_old = ctx_list_pool + context;
        remove_from_ctx_list(ctx_old);
        add_to_free_ctxlist(ctx_old);
}

static void __init sparc_context_init(int numctx)
{
        int ctx;
        unsigned long size;

        size = numctx * sizeof(struct ctx_list);
        ctx_list_pool = memblock_alloc_or_panic(size, SMP_CACHE_BYTES);

        for (ctx = 0; ctx < numctx; ctx++) {
                struct ctx_list *clist;

                clist = (ctx_list_pool + ctx);
                clist->ctx_number = ctx;
                clist->ctx_mm = NULL;
        }
        ctx_free.next = ctx_free.prev = &ctx_free;
        ctx_used.next = ctx_used.prev = &ctx_used;
        for (ctx = 0; ctx < numctx; ctx++)
                add_to_free_ctxlist(ctx_list_pool + ctx);
}

void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
               struct task_struct *tsk)
{
        unsigned long flags;

        if (mm->context == NO_CONTEXT) {
                spin_lock_irqsave(&srmmu_context_spinlock, flags);
                alloc_context(old_mm, mm);
                spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
                srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
        }

        if (sparc_cpu_model == sparc_leon)
                leon_switch_mm();

        if (is_hypersparc)
                hyper_flush_whole_icache();

        srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
                                   unsigned long virt_addr, int bus_type)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        unsigned long tmp;

        physaddr &= PAGE_MASK;
        pgdp = pgd_offset_k(virt_addr);
        p4dp = p4d_offset(pgdp, virt_addr);
        pudp = pud_offset(p4dp, virt_addr);
        pmdp = pmd_offset(pudp, virt_addr);
        ptep = pte_offset_kernel(pmdp, virt_addr);
        tmp = (physaddr >> 4) | SRMMU_ET_PTE;

        /* I need to test whether this is consistent over all
         * sun4m's.  The bus_type represents the upper 4 bits of
         * 36-bit physical address on the I/O space lines...
         */
        tmp |= (bus_type << 28);
        tmp |= SRMMU_PRIV;
        __flush_page_to_ram(virt_addr);
        set_pte(ptep, __pte(tmp));
}

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len)
{
        while (len != 0) {
                len -= PAGE_SIZE;
                srmmu_mapioaddr(xpa, xva, bus);
                xva += PAGE_SIZE;
                xpa += PAGE_SIZE;
        }
        flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        pgdp = pgd_offset_k(virt_addr);
        p4dp = p4d_offset(pgdp, virt_addr);
        pudp = pud_offset(p4dp, virt_addr);
        pmdp = pmd_offset(pudp, virt_addr);
        ptep = pte_offset_kernel(pmdp, virt_addr);

        /* No need to flush uncacheable page. */
        __pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
        while (len != 0) {
                len -= PAGE_SIZE;
                srmmu_unmapioaddr(virt_addr);
                virt_addr += PAGE_SIZE;
        }
        flush_tlb_all();
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
                                  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cctx, ctx1;

        page &= PAGE_MASK;
        if ((ctx1 = vma->vm_mm->context) != -1) {
                cctx = srmmu_get_context();
                /* Is context # ever different from current context? P3 */
                if (cctx != ctx1) {
                        printk("flush ctx %02x curr %02x\n", ctx1, cctx);
                        srmmu_set_context(ctx1);
                        swift_flush_page(page);
                        __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
                                        "r" (page), "i" (ASI_M_FLUSH_PROBE));
                        srmmu_set_context(cctx);
                } else {
                        /* Rm. prot. bits from virt. c. */
                        /* swift_flush_cache_all(); */
                        /* swift_flush_cache_page(vma, page); */
                        swift_flush_page(page);

                        __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
                                        "r" (page), "i" (ASI_M_FLUSH_PROBE));
                        /* same as above: srmmu_flush_tlb_page() */
                }
        }
}
#endif
/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                                     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
                                    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
        prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
        prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
                                                        unsigned long end)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        while (start < end) {
                pgdp = pgd_offset_k(start);
                p4dp = p4d_offset(pgdp, start);
                pudp = pud_offset(p4dp, start);
                if (pud_none(*__nocache_fix(pudp))) {
                        pmdp = __srmmu_get_nocache(
                            SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
                        pud_set(__nocache_fix(pudp), pmdp);
                }
                pmdp = pmd_offset(__nocache_fix(pudp), start);
                if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
                        ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(__nocache_fix(ptep), 0, PTE_SIZE);
                        pmd_set(__nocache_fix(pmdp), ptep);
                }
                if (start > (0xffffffffUL - PMD_SIZE))
                        break;
                start = (start + PMD_SIZE) & PMD_MASK;
        }
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
                                                  unsigned long end)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        while (start < end) {
                pgdp = pgd_offset_k(start);
                p4dp = p4d_offset(pgdp, start);
                pudp = pud_offset(p4dp, start);
                if (pud_none(*pudp)) {
                        pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
                        pud_set((pud_t *)pgdp, pmdp);
                }
                pmdp = pmd_offset(pudp, start);
                if (srmmu_pmd_none(*pmdp)) {
                        ptep = __srmmu_get_nocache(PTE_SIZE,
                                                   PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(ptep, 0, PTE_SIZE);
                        pmd_set(pmdp, ptep);
                }
                if (start > (0xffffffffUL - PMD_SIZE))
                        break;
                start = (start + PMD_SIZE) & PMD_MASK;
        }
}

/* These flush types are not available on all chips... */
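/*
 * srmmu_probe() asks the MMU itself for the entry mapping vaddr: the
 * 0x400 or'ed into the address selects the "probe entire" operation
 * (the probe type is encoded in VA bits 11:8) and the result is the
 * raw PTE/PTD, or 0 if nothing is mapped.  LEON has no hardware probe
 * and walks the tables in software instead.
 */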
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
        unsigned long retval;

        if (sparc_cpu_model != sparc_leon) {

                vaddr &= PAGE_MASK;
                __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                                     "=r" (retval) :
                                     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
        } else {
                retval = leon_swprobe(vaddr, NULL);
        }
        return retval;
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
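/*
 * For each PROM mapping we also probe one PMD_SIZE and one PGDIR_SIZE
 * away to detect whether the PROM used a normal page, a segment
 * (pmd-level) or a region (pgd-level) mapping, and then replicate the
 * entry at the same level in our own tables ("what" below: 0, 1 or 2).
 */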
static void __init srmmu_inherit_prom_mappings(unsigned long start,
                                               unsigned long end)
{
        unsigned long probed;
        unsigned long addr;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int what;       /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */

        while (start <= end) {
                if (start == 0)
                        break;  /* probably wrap around */
                if (start == 0xfef00000)
                        start = KADB_DEBUGGER_BEGVM;
                probed = srmmu_probe(start);
                if (!probed) {
                        /* continue probing until we find an entry */
                        start += PAGE_SIZE;
                        continue;
                }

                /* A red snapper, see what it really is. */
                what = 0;
                addr = start - PAGE_SIZE;

                if (!(start & ~(PMD_MASK))) {
                        if (srmmu_probe(addr + PMD_SIZE) == probed)
                                what = 1;
                }

                if (!(start & ~(PGDIR_MASK))) {
                        if (srmmu_probe(addr + PGDIR_SIZE) == probed)
                                what = 2;
                }

                pgdp = pgd_offset_k(start);
                p4dp = p4d_offset(pgdp, start);
                pudp = pud_offset(p4dp, start);
                if (what == 2) {
                        *__nocache_fix(pgdp) = __pgd(probed);
                        start += PGDIR_SIZE;
                        continue;
                }
                if (pud_none(*__nocache_fix(pudp))) {
                        pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
                                                   SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
                                early_pgtable_allocfail("pmd");
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
                        pud_set(__nocache_fix(pudp), pmdp);
                }
                pmdp = pmd_offset(__nocache_fix(pudp), start);
                if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
                        ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
                        memset(__nocache_fix(ptep), 0, PTE_SIZE);
                        pmd_set(__nocache_fix(pmdp), ptep);
                }
                ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
                *__nocache_fix(ptep) = __pte(probed);
                start += PAGE_SIZE;
        }
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
        pgd_t *pgdp = pgd_offset_k(vaddr);
        unsigned long big_pte;

        big_pte = KERNEL_PTE(phys_base >> 4);
        *__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
        unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK);
        unsigned long vstart = (vbase & PGDIR_MASK);
        unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
        /* Map "low" memory only */
        const unsigned long min_vaddr = PAGE_OFFSET;
        const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

        if (vstart < min_vaddr || vstart >= max_vaddr)
                return vstart;

        if (vend > max_vaddr || vend < min_vaddr)
                vend = max_vaddr;

        while (vstart < vend) {
                do_large_mapping(vstart, pstart);
                vstart += PGDIR_SIZE; pstart += PGDIR_SIZE;
        }
        return vstart;
}

static void __init map_kernel(void)
{
        int i;

        if (phys_base > 0) {
                do_large_mapping(PAGE_OFFSET, phys_base);
        }

        for (i = 0; sp_banks[i].num_bytes != 0; i++) {
                map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
        }
}

void (*poke_srmmu)(void) = NULL;

void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
{
        max_zone_pfns[ZONE_DMA] = max_low_pfn;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
}

void __init srmmu_paging_init(void)
{
        int i;
        phandle cpunode;
        char node_str[128];
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long pages_avail;

        init_mm.context = (unsigned long) NO_CONTEXT;
        sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */

        if (sparc_cpu_model == sun4d)
                num_contexts = 65536; /* We know it is Viking */
        else {
                /* Find the number of contexts on the srmmu. */
                cpunode = prom_getchild(prom_root_node);
                num_contexts = 0;
                while (cpunode != 0) {
                        prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
                        if (!strcmp(node_str, "cpu")) {
                                num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
                                break;
                        }
                        cpunode = prom_getsibling(cpunode);
                }
        }

        if (!num_contexts) {
                prom_printf("Something wrong, can't find cpu node in paging_init.\n");
                prom_halt();
        }

        pages_avail = 0;
        last_valid_pfn = bootmem_init(&pages_avail);

        srmmu_nocache_calcsize();
        srmmu_nocache_init();
        srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
        map_kernel();

        /* ctx table has to be physically aligned to its size */
        srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
        srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);

        for (i = 0; i < num_contexts; i++)
                srmmu_ctxd_set(__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

        flush_cache_all();
        srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
        /* Stop from hanging here... */
        local_ops->tlb_all();
#else
        flush_tlb_all();
#endif
        poke_srmmu();

        srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
        srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

        srmmu_allocate_ptable_skeleton(
                __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
        srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

        pgd = pgd_offset_k(PKMAP_BASE);
        p4d = p4d_offset(pgd, PKMAP_BASE);
        pud = pud_offset(p4d, PKMAP_BASE);
        pmd = pmd_offset(pud, PKMAP_BASE);
        pte = pte_offset_kernel(pmd, PKMAP_BASE);
        pkmap_page_table = pte;

        flush_cache_all();
        flush_tlb_all();

        sparc_context_init(num_contexts);
}

void mmu_info(struct seq_file *m)
{
        seq_printf(m,
                   "MMU type\t: %s\n"
                   "contexts\t: %d\n"
                   "nocache total\t: %ld\n"
                   "nocache used\t: %d\n",
                   srmmu_name,
                   num_contexts,
                   srmmu_nocache_size,
                   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        mm->context = NO_CONTEXT;
        return 0;
}

void destroy_context(struct mm_struct *mm)
{
        unsigned long flags;

        if (mm->context != NO_CONTEXT) {
                flush_cache_mm(mm);
                srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
                flush_tlb_mm(mm);
                spin_lock_irqsave(&srmmu_context_spinlock, flags);
                free_context(mm->context);
                spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
                mm->context = NO_CONTEXT;
        }
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
        prom_printf("Could not determine SRMMU chip type.\n");
        prom_halt();
}

static void __init init_vac_layout(void)
{
        phandle nd;
        int cache_lines;
        char node_str[128];
#ifdef CONFIG_SMP
        int cpu = 0;
        unsigned long max_size = 0;
        unsigned long min_line_size = 0x10000000;
#endif

        nd = prom_getchild(prom_root_node);
        while ((nd = prom_getsibling(nd)) != 0) {
                prom_getstring(nd, "device_type", node_str, sizeof(node_str));
                if (!strcmp(node_str, "cpu")) {
                        vac_line_size = prom_getint(nd, "cache-line-size");
                        if (vac_line_size == -1) {
                                prom_printf("can't determine cache-line-size, halting.\n");
                                prom_halt();
                        }
                        cache_lines = prom_getint(nd, "cache-nlines");
                        if (cache_lines == -1) {
                                prom_printf("can't determine cache-nlines, halting.\n");
                                prom_halt();
                        }

                        vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
                        if (vac_cache_size > max_size)
                                max_size = vac_cache_size;
                        if (vac_line_size < min_line_size)
                                min_line_size = vac_line_size;
                        //FIXME: cpus not contiguous!!
                        cpu++;
                        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                                break;
#else
                        break;
#endif
                }
        }
        if (nd == 0) {
                prom_printf("No CPU nodes found, halting.\n");
                prom_halt();
        }
#ifdef CONFIG_SMP
        vac_cache_size = max_size;
        vac_line_size = min_line_size;
#endif
        printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
               (int)vac_cache_size, (int)vac_line_size);
}

static void poke_hypersparc(void)
{
        volatile unsigned long clear;
        unsigned long mreg = srmmu_get_mmureg();

        hyper_flush_unconditional_combined();

        mreg &= ~(HYPERSPARC_CWENABLE);
        mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
        mreg |= (HYPERSPARC_CMODE);

        srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
        hyper_clear_all_tags();
#endif

        put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
        hyper_flush_whole_icache();
        clear = srmmu_get_faddr();
        clear = srmmu_get_fstatus();
}

static const struct sparc32_cachetlb_ops hypersparc_ops = {
        .cache_all = hypersparc_flush_cache_all,
        .cache_mm = hypersparc_flush_cache_mm,
        .cache_page = hypersparc_flush_cache_page,
        .cache_range = hypersparc_flush_cache_range,
        .tlb_all = hypersparc_flush_tlb_all,
        .tlb_mm = hypersparc_flush_tlb_mm,
        .tlb_page = hypersparc_flush_tlb_page,
        .tlb_range = hypersparc_flush_tlb_range,
        .page_to_ram = hypersparc_flush_page_to_ram,
        .sig_insns = hypersparc_flush_sig_insns,
        .page_for_dma = hypersparc_flush_page_for_dma,
};

static void __init init_hypersparc(void)
{
        srmmu_name = "ROSS HyperSparc";
        srmmu_modtype = HyperSparc;

        init_vac_layout();

        is_hypersparc = 1;
        sparc32_cachetlb_ops = &hypersparc_ops;

        poke_srmmu = poke_hypersparc;

        hypersparc_setup_blockops();
}

static void poke_swift(void)
{
        unsigned long mreg;

        /* Clear any crap from the cache or else... */
        swift_flush_cache_all();

        /* Enable I & D caches */
        mreg = srmmu_get_mmureg();
        mreg |= (SWIFT_IE | SWIFT_DE);
        /*
         * The Swift branch folding logic is completely broken.  At
         * trap time, if things are just right, it can mistakenly
         * think that a trap is coming from kernel mode when in fact
         * it is coming from user mode (it mis-executes the branch in
         * the trap code).  So you see things like crashme completely
         * hosing your machine which is completely unacceptable.  Turn
         * this shit off... nice job Fujitsu.
         */
        mreg &= ~(SWIFT_BF);
        srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops swift_ops = {
        .cache_all = swift_flush_cache_all,
        .cache_mm = swift_flush_cache_mm,
        .cache_page = swift_flush_cache_page,
        .cache_range = swift_flush_cache_range,
        .tlb_all = swift_flush_tlb_all,
        .tlb_mm = swift_flush_tlb_mm,
        .tlb_page = swift_flush_tlb_page,
        .tlb_range = swift_flush_tlb_range,
        .page_to_ram = swift_flush_page_to_ram,
        .sig_insns = swift_flush_sig_insns,
        .page_for_dma = swift_flush_page_for_dma,
};

#define SWIFT_MASKID_ADDR 0x10003018
static void __init init_swift(void)
{
        unsigned long swift_rev;

        __asm__ __volatile__("lda [%1] %2, %0\n\t"
                             "srl %0, 0x18, %0\n\t" :
                             "=r" (swift_rev) :
                             "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
        srmmu_name = "Fujitsu Swift";
        switch (swift_rev) {
        case 0x11:
        case 0x20:
        case 0x23:
        case 0x30:
                srmmu_modtype = Swift_lots_o_bugs;
                hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
                /*
                 * Gee george, I wonder why Sun is so hush hush about
                 * this hardware bug... really braindamage stuff going
                 * on here.  However I think we can find a way to avoid
                 * all of the workaround overhead under Linux.  Basically,
                 * any page fault can cause kernel pages to become user
                 * accessible (the mmu gets confused and clears some of
                 * the ACC bits in kernel ptes).  Aha, sounds pretty
                 * horrible eh?  But wait, after extensive testing it appears
                 * that if you use pgd_t level large kernel pte's (like the
                 * 4MB pages on the Pentium) the bug does not get tripped
                 * at all.  This avoids almost all of the major overhead.
                 * Welcome to a world where your vendor tells you to,
                 * "apply this kernel patch" instead of "sorry for the
                 * broken hardware, send it back and we'll give you
                 * properly functioning parts"
                 */
                break;
        case 0x25:
        case 0x31:
                srmmu_modtype = Swift_bad_c;
                hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
                /*
                 * You see Sun allude to this hardware bug but never
                 * admit things directly, they'll say things like,
                 * "the Swift chip cache problems" or similar.
                 */
                break;
        default:
                srmmu_modtype = Swift_ok;
                break;
        }

        sparc32_cachetlb_ops = &swift_ops;
        flush_page_for_dma_global = 0;

        /*
         * Are you now convinced that the Swift is one of the
         * biggest VLSI abortions of all time?  Bravo Fujitsu!
         * Fujitsu, the !#?!%$'d up processor people.  I bet if
         * you examined the microcode of the Swift you'd find
         * XXX's all over the place.
         */
        poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
        flush_user_windows();
        turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
        FLUSH_BEGIN(mm)
        flush_user_windows();
        turbosparc_idflash_clear();
        FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        FLUSH_BEGIN(vma->vm_mm)
        flush_user_windows();
        turbosparc_idflash_clear();
        FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        FLUSH_BEGIN(vma->vm_mm)
        flush_user_windows();
        if (vma->vm_flags & VM_EXEC)
                turbosparc_flush_icache();
        turbosparc_flush_dcache();
        FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
        volatile unsigned long clear;

        if (srmmu_probe(page))
                turbosparc_flush_page_cache(page);
        clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
        turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
        srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
        FLUSH_BEGIN(mm)
        srmmu_flush_whole_tlb();
        FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        FLUSH_BEGIN(vma->vm_mm)
        srmmu_flush_whole_tlb();
        FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        FLUSH_BEGIN(vma->vm_mm)
        srmmu_flush_whole_tlb();
        FLUSH_END
}


static void poke_turbosparc(void)
{
        unsigned long mreg = srmmu_get_mmureg();
        unsigned long ccreg;

        /* Clear any crap from the cache or else... */
        turbosparc_flush_cache_all();
        /* Temporarily disable I & D caches */
        mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
        mreg &= ~(TURBOSPARC_PCENABLE);         /* Don't check parity */
        srmmu_set_mmureg(mreg);

        ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
        ccreg |= (TURBOSPARC_SNENABLE);         /* Do DVMA snooping in Dcache */
        ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
                        /* Write-back D-cache, emulate VLSI
                         * abortion number three, not number one */
#else
        /* For now let's play safe, optimize later */
        ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
                        /* Do DVMA snooping in Dcache, Write-thru D-cache */
        ccreg &= ~(TURBOSPARC_uS2);
                        /* Emulate VLSI abortion number three, not number one */
#endif

        switch (ccreg & 7) {
        case 0: /* No SE cache */
        case 7: /* Test mode */
                break;
        default:
                ccreg |= (TURBOSPARC_SCENABLE);
        }
        turbosparc_set_ccreg(ccreg);

        mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
        mreg |= (TURBOSPARC_ICSNOOP);           /* Icache snooping on */
        srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops turbosparc_ops = {
        .cache_all = turbosparc_flush_cache_all,
        .cache_mm = turbosparc_flush_cache_mm,
        .cache_page = turbosparc_flush_cache_page,
        .cache_range = turbosparc_flush_cache_range,
        .tlb_all = turbosparc_flush_tlb_all,
        .tlb_mm = turbosparc_flush_tlb_mm,
        .tlb_page = turbosparc_flush_tlb_page,
        .tlb_range = turbosparc_flush_tlb_range,
        .page_to_ram = turbosparc_flush_page_to_ram,
        .sig_insns = turbosparc_flush_sig_insns,
        .page_for_dma = turbosparc_flush_page_for_dma,
};

static void __init init_turbosparc(void)
{
        srmmu_name = "Fujitsu TurboSparc";
        srmmu_modtype = TurboSparc;
        sparc32_cachetlb_ops = &turbosparc_ops;
        poke_srmmu = poke_turbosparc;
}

static void poke_tsunami(void)
{
        unsigned long mreg = srmmu_get_mmureg();

        tsunami_flush_icache();
        tsunami_flush_dcache();
        mreg &= ~TSUNAMI_ITD;
        mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
        srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops tsunami_ops = {
        .cache_all = tsunami_flush_cache_all,
        .cache_mm = tsunami_flush_cache_mm,
        .cache_page = tsunami_flush_cache_page,
        .cache_range = tsunami_flush_cache_range,
        .tlb_all = tsunami_flush_tlb_all,
        .tlb_mm = tsunami_flush_tlb_mm,
        .tlb_page = tsunami_flush_tlb_page,
        .tlb_range = tsunami_flush_tlb_range,
        .page_to_ram = tsunami_flush_page_to_ram,
        .sig_insns = tsunami_flush_sig_insns,
        .page_for_dma = tsunami_flush_page_for_dma,
};

static void __init init_tsunami(void)
{
        /*
         * Tsunami's pretty sane, Sun and TI actually got it
         * somewhat right this time.  Fujitsu should have
         * taken some lessons from them.
         */

        srmmu_name = "TI Tsunami";
        srmmu_modtype = Tsunami;
        sparc32_cachetlb_ops = &tsunami_ops;
        poke_srmmu = poke_tsunami;

        tsunami_setup_blockops();
}

static void poke_viking(void)
{
        unsigned long mreg = srmmu_get_mmureg();
        static int smp_catch;

        if (viking_mxcc_present) {
                unsigned long mxcc_control = mxcc_get_creg();

                mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
                mxcc_control &= ~(MXCC_CTL_RRC);
                mxcc_set_creg(mxcc_control);

                /*
                 * We don't need memory parity checks.
                 * XXX This is a mess, have to dig out later. ecd.
                 viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
                 */

                /* We do cache ptables on MXCC. */
                mreg |= VIKING_TCENABLE;
        } else {
                unsigned long bpreg;

                mreg &= ~(VIKING_TCENABLE);
                if (smp_catch++) {
                        /* Must disable mixed-cmd mode here for other cpu's. */
                        bpreg = viking_get_bpreg();
                        bpreg &= ~(VIKING_ACTION_MIX);
                        viking_set_bpreg(bpreg);

                        /* Just in case PROM does something funny. */
                        msi_set_sync();
                }
        }

        mreg |= VIKING_SPENABLE;
        mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
        mreg |= VIKING_SBENABLE;
        mreg &= ~(VIKING_ACENABLE);
        srmmu_set_mmureg(mreg);
}

static struct sparc32_cachetlb_ops viking_ops __ro_after_init = {
        .cache_all = viking_flush_cache_all,
        .cache_mm = viking_flush_cache_mm,
        .cache_page = viking_flush_cache_page,
        .cache_range = viking_flush_cache_range,
        .tlb_all = viking_flush_tlb_all,
        .tlb_mm = viking_flush_tlb_mm,
        .tlb_page = viking_flush_tlb_page,
        .tlb_range = viking_flush_tlb_range,
        .page_to_ram = viking_flush_page_to_ram,
        .sig_insns = viking_flush_sig_insns,
        .page_for_dma = viking_flush_page_for_dma,
};

#ifdef CONFIG_SMP
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending.  It's a big
 * hammer, but a semaphore like system to make sure we only have N TLB
 * flushes going at once will require SMP locking anyways so there's
 * no real value in trying any harder than this.
 */
static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = {
        .cache_all = viking_flush_cache_all,
        .cache_mm = viking_flush_cache_mm,
        .cache_page = viking_flush_cache_page,
        .cache_range = viking_flush_cache_range,
        .tlb_all = sun4dsmp_flush_tlb_all,
        .tlb_mm = sun4dsmp_flush_tlb_mm,
        .tlb_page = sun4dsmp_flush_tlb_page,
        .tlb_range = sun4dsmp_flush_tlb_range,
        .page_to_ram = viking_flush_page_to_ram,
        .sig_insns = viking_flush_sig_insns,
        .page_for_dma = viking_flush_page_for_dma,
};
#endif

static void __init init_viking(void)
{
        unsigned long mreg = srmmu_get_mmureg();

        /* Ahhh, the viking.  SRMMU VLSI abortion number two... */
        if (mreg & VIKING_MMODE) {
                srmmu_name = "TI Viking";
                viking_mxcc_present = 0;
                msi_set_sync();

                /*
                 * We need this to make sure old viking takes no hits
                 * on its cache for dma snoops to workaround the
                 * "load from non-cacheable memory" interrupt bug.
                 * This is only necessary because of the new way in
                 * which we use the IOMMU.
                 */
                viking_ops.page_for_dma = viking_flush_page;
#ifdef CONFIG_SMP
                viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
#endif
                flush_page_for_dma_global = 0;
        } else {
                srmmu_name = "TI Viking/MXCC";
                viking_mxcc_present = 1;
                srmmu_cache_pagetables = 1;
        }

        sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
                &viking_ops;
#ifdef CONFIG_SMP
        if (sparc_cpu_model == sun4d)
                sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
                        &viking_sun4d_smp_ops;
#endif

        poke_srmmu = poke_viking;
}

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
        unsigned long mreg, psr;
        unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

        srmmu_modtype = SRMMU_INVAL_MOD;
        hwbug_bitmask = 0;

        mreg = srmmu_get_mmureg(); psr = get_psr();
        mod_typ = (mreg & 0xf0000000) >> 28;
        mod_rev = (mreg & 0x0f000000) >> 24;
        psr_typ = (psr >> 28) & 0xf;
        psr_vers = (psr >> 24) & 0xf;

        /* First, check for sparc-leon. */
        if (sparc_cpu_model == sparc_leon) {
                init_leon();
                return;
        }

        /* Second, check for HyperSparc or Cypress. */
        if (mod_typ == 1) {
                switch (mod_rev) {
                case 7:
                        /* UP or MP Hypersparc */
                        init_hypersparc();
                        break;
                case 0:
                case 2:
                case 10:
                case 11:
                case 12:
                case 13:
                case 14:
                case 15:
                default:
                        prom_printf("Sparc-Linux Cypress support no longer exists.\n");
                        prom_halt();
                        break;
                }
                return;
        }

        /* Now Fujitsu TurboSparc. It might happen that it is
         * in Swift emulation mode, so we will check later...
         */
        if (psr_typ == 0 && psr_vers == 5) {
                init_turbosparc();
                return;
        }

        /* Next check for Fujitsu Swift. */
        if (psr_typ == 0 && psr_vers == 4) {
                phandle cpunode;
                char node_str[128];

                /* Look if it is not a TurboSparc emulating Swift... */
                cpunode = prom_getchild(prom_root_node);
                while ((cpunode = prom_getsibling(cpunode)) != 0) {
                        prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
                        if (!strcmp(node_str, "cpu")) {
                                if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
                                    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
                                        init_turbosparc();
                                        return;
                                }
                                break;
                        }
                }

                init_swift();
                return;
        }

        /* Now the Viking family of srmmu. */
        if (psr_typ == 4 &&
           ((psr_vers == 0) ||
            ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
                init_viking();
                return;
        }

        /* Finally the Tsunami. */
        if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
                init_tsunami();
                return;
        }

        /* Oh well */
        srmmu_is_bad();
}

#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
        xc1(local_ops->page_for_dma, page);
        local_ops->page_for_dma(page);
}

static void smp_flush_cache_all(void)
{
        xc0(local_ops->cache_all);
        local_ops->cache_all();
}

static void smp_flush_tlb_all(void)
{
        xc0(local_ops->tlb_all);
        local_ops->tlb_all();
}

static bool any_other_mm_cpus(struct mm_struct *mm)
{
        return cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids;
}

static void smp_flush_cache_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                if (any_other_mm_cpus(mm))
                        xc1(local_ops->cache_mm, (unsigned long)mm);
                local_ops->cache_mm(mm);
        }
}

static void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                if (any_other_mm_cpus(mm)) {
                        xc1(local_ops->tlb_mm, (unsigned long)mm);
                        if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
                                cpumask_copy(mm_cpumask(mm),
                                             cpumask_of(smp_processor_id()));
                }
                local_ops->tlb_mm(mm);
        }
}

static void smp_flush_cache_range(struct vm_area_struct *vma,
                                  unsigned long start,
                                  unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                if (any_other_mm_cpus(mm))
                        xc3(local_ops->cache_range, (unsigned long)vma, start,
                            end);
                local_ops->cache_range(vma, start, end);
        }
}

static void smp_flush_tlb_range(struct vm_area_struct *vma,
                                unsigned long start,
                                unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                if (any_other_mm_cpus(mm))
                        xc3(local_ops->tlb_range, (unsigned long)vma, start,
                            end);
                local_ops->tlb_range(vma, start, end);
        }
}

static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                if (any_other_mm_cpus(mm))
                        xc2(local_ops->cache_page, (unsigned long)vma, page);
                local_ops->cache_page(vma, page);
        }
}

static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                if (any_other_mm_cpus(mm))
                        xc2(local_ops->tlb_page, (unsigned long)vma, page);
                local_ops->tlb_page(vma, page);
        }
}

static void smp_flush_page_to_ram(unsigned long page)
{
        /* Current theory is that those who call this are the ones
         * who have just dirtied their cache with the page's contents
         * in kernel space, therefore we only run this on the local cpu.
         *
         * XXX This experiment failed, research further... -DaveM
         */
#if 1
        xc1(local_ops->page_to_ram, page);
#endif
        local_ops->page_to_ram(page);
}

static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        if (any_other_mm_cpus(mm))
                xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
        local_ops->sig_insns(mm, insn_addr);
}

static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = {
        .cache_all = smp_flush_cache_all,
        .cache_mm = smp_flush_cache_mm,
        .cache_page = smp_flush_cache_page,
        .cache_range = smp_flush_cache_range,
        .tlb_all = smp_flush_tlb_all,
        .tlb_mm = smp_flush_tlb_mm,
        .tlb_page = smp_flush_tlb_page,
        .tlb_range = smp_flush_tlb_range,
        .page_to_ram = smp_flush_page_to_ram,
        .sig_insns = smp_flush_sig_insns,
        .page_for_dma = smp_flush_page_for_dma,
};
#endif

/* Load up routines and constants for sun4m and sun4d mmu */
void __init load_mmu(void)
{
        /* Functions */
        get_srmmu_type();

#ifdef CONFIG_SMP
        /* El switcheroo... */
        local_ops = sparc32_cachetlb_ops;

        if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
                smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
                smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
                smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
                smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
        }

        if (poke_srmmu == poke_viking) {
                /* Avoid unnecessary cross calls. */
                smp_cachetlb_ops.cache_all = local_ops->cache_all;
                smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
                smp_cachetlb_ops.cache_range = local_ops->cache_range;
                smp_cachetlb_ops.cache_page = local_ops->cache_page;

                smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
                smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
                smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
        }

        /* It really is const after this point. */
        sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
                &smp_cachetlb_ops;
#endif

        if (sparc_cpu_model != sun4d)
                ld_mmu_iommu();
#ifdef CONFIG_SMP
        if (sparc_cpu_model == sun4d)
                sun4d_init_smp();
        else if (sparc_cpu_model == sparc_leon)
                leon_init_smp();
        else
                sun4m_init_smp();
#endif
}