// SPDX-License-Identifier: GPL-2.0
/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/io.h>
/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>
#include <asm/leon.h>
#include <asm/mxcc.h>
#include <asm/ross.h>

#include "mm_32.h"
enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
EXPORT_SYMBOL(vac_cache_size);
int vac_line_size;
extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
EXPORT_SYMBOL(sparc32_cachetlb_ops);
#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
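/*
 * Illustration (not in the original file): on UP, FLUSH_BEGIN/FLUSH_END
 * deliberately expand to an unbalanced "if (...) {" and "}" pair so that
 * a per-mm flush body is skipped for mms that never received a hardware
 * context, e.g.:
 *
 *	static void example_flush_tlb_mm(struct mm_struct *mm)
 *	{
 *		FLUSH_BEGIN(mm)
 *		srmmu_flush_whole_tlb();
 *		FLUSH_END
 *	}
 *
 * On SMP both macros expand to nothing and the body always runs; the
 * cross-call wrappers near the bottom of this file do the context check.
 */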
int flush_page_for_dma_global = 1;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;
int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
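/*
 * Worked example of the granularity above (illustration only, unused):
 * with PAGE_SHIFT == 12, one bitmap bit covers 1 << (12 - 4) == 256
 * bytes, i.e. 64 four-byte PTEs.  The hypothetical helper below shows
 * the byte<->bit conversion used by __srmmu_get_nocache() and
 * srmmu_free_nocache().
 */
static inline int srmmu_nocache_bytes_to_bits(int bytes)
{
	/* 256 bytes of nocache per bitmap bit */
	return bytes >> SRMMU_NOCACHE_BITMAP_SHIFT;
}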
void *srmmu_nocache_pool;
static struct bit_map srmmu_nocache_map;
static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	pte_t pte;

	pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
	set_pte((pte_t *)ctxp, pte);
}
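/*
 * Worked example (illustration only): a context table entry is a PTD,
 * i.e. the next-level table's physical address shifted right by 4 with
 * SRMMU_ET_PTD (0x1) in the entry-type field.  A pgd at physical
 * 0x12345000 is thus encoded as 0x01234500 | 0x1.  The 4-bit shift is
 * what lets a 32-bit entry address 36 bits of physical space.
 */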
/*
 * Locations of MSI Registers.
 */
#define MSI_MBUS_ARBEN	0xe0001008	/* MBus Arbiter Enable register */

/*
 * Useful bits in the MSI Registers.
 */
#define MSI_ASYNC_MODE  0x80000000	/* Operate the MSI asynchronously */

static void msi_set_sync(void)
{
	__asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
			      "andn %%g3, %2, %%g3\n\t"
			      "sta %%g3, [%0] %1\n\t" : :
			      "r" (MSI_MBUS_ARBEN),
			      "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
}
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp = __nocache_pa(ptep) >> 4;

	set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
}
/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((pmd_val(*dir) & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
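/*
 * Illustrative walk (hypothetical helper, unused): locating the PTE
 * that maps a kernel virtual address through the folded page-table
 * levels, exactly as srmmu_paging_init() does for PKMAP_BASE.
 */
static inline pte_t *srmmu_kvaddr_to_pte_example(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);

	return pte_offset_kernel(pmd, vaddr);
}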
/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static void *__srmmu_get_nocache(int size, int align)
{
	int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;
	unsigned long addr;

	if (size < minsz) {
		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
		       size);
		size = minsz;
	}
	if (size & (minsz - 1)) {
		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
		       size);
		size += minsz - 1;
		size &= ~(minsz - 1);
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return NULL;
	}

	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
	return (void *)addr;
}
void *srmmu_get_nocache(int size, int align)
{
	void *tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset(tmp, 0, size);

	return tmp;
}
void srmmu_free_nocache(void *addr, int size)
{
	unsigned long vaddr;
	int offset;

	vaddr = (unsigned long)addr;
	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}
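/*
 * Usage sketch (hypothetical helper, unused): the allocator hands out
 * zeroed, uncached, naturally aligned chunks; callers must free with the
 * exact size they allocated, as pte_free() does below with
 * SRMMU_PTE_TABLE_SIZE.
 */
static inline int srmmu_nocache_roundtrip_example(void)
{
	void *p = srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE,
				    SRMMU_PTE_TABLE_SIZE);

	if (!p)
		return -ENOMEM;
	srmmu_free_nocache(p, SRMMU_PTE_TABLE_SIZE);
	return 0;
}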
static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

/* Return how much physical memory we have.  */
static unsigned long __init probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}
/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
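/*
 * Worked sizing example (illustration only): assuming the usual
 * SRMMU_NOCACHE_ALCRATIO of 64 (256 nocache pages per 64 MB of RAM), a
 * 64 MB machine gives sysmemavail == 65536, so
 * 65536 / 64 / 1024 * 256 == 256 pages == 1 MB of nocache, which is
 * then clamped to [SRMMU_MIN_NOCACHE_PAGES, SRMMU_MAX_NOCACHE_PAGES].
 */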
static void __init srmmu_nocache_init(void)
{
	void *srmmu_nocache_bitmap;
	unsigned int bitmap_bits;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
					    SRMMU_NOCACHE_ALIGN_MAX);
	if (!srmmu_nocache_pool)
		panic("%s: Failed to allocate %lu bytes align=0x%x\n",
		      __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap =
		memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
			       SMP_CACHE_BYTES);
	if (!srmmu_nocache_bitmap)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      BITS_TO_LONGS(bitmap_bits) * sizeof(long));
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		pmd = pmd_offset(__nocache_fix(pud), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}
pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}
/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	pte_t *ptep;
	struct page *page;

	if ((ptep = pte_alloc_one_kernel(mm)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	if (page_ref_inc_return(page) == 2 && !pgtable_pte_page_ctor(page)) {
		page_ref_dec(page);
		ptep = NULL;
	}
	spin_unlock(&mm->page_table_lock);

	return ptep;
}

void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
	struct page *page;

	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	if (page_ref_dec_return(page) == 1)
		pgtable_pte_page_dtor(page);
	spin_unlock(&mm->page_table_lock);

	srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
}
/* context handling - a dynamically sized pool is used */
#define NO_CONTEXT	-1

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};
static struct ctx_list *ctx_list_pool;
static struct ctx_list ctx_free;
static struct ctx_list ctx_used;

/* At boot time we determine the number of contexts */
static int num_contexts;
static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}
static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}
static void __init sparc_context_init(int numctx)
{
	int ctx;
	unsigned long size;

	size = numctx * sizeof(struct ctx_list);
	ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
	if (!ctx_list_pool)
		panic("%s: Failed to allocate %lu bytes\n", __func__, size);

	for (ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = NULL;
	}
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for (ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
}
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	unsigned long flags;

	if (mm->context == NO_CONTEXT) {
		spin_lock_irqsave(&srmmu_context_spinlock, flags);
		alloc_context(old_mm, mm);
		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}
/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	p4dp = p4d_offset(pgdp, virt_addr);
	pudp = pud_offset(p4dp, virt_addr);
	pmdp = pmd_offset(pudp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}
void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}
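/*
 * Usage sketch (hypothetical helper, unused; all addresses made up):
 * map two pages of a device living in 36-bit physical space.  The bus
 * nibble supplies physical address bits [35:32] via the bus_type << 28
 * packing above.
 */
static inline void srmmu_mapiorange_example(void)
{
	srmmu_mapiorange(0xf /* bus */, 0x00400000 /* xpa */,
			 0xfd000000 /* xva */, 2 * PAGE_SIZE /* len */);
}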
static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	p4dp = p4d_offset(pgdp, virt_addr);
	pudp = pud_offset(p4dp, virt_addr);
	pmdp = pmd_offset(pudp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}
/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);
/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
		/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif
/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */
/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);
/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);
/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */
static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		p4dp = p4d_offset(pgdp, start);
		pudp = pud_offset(p4dp, start);
		if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
			pmdp = __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pud_set(__nocache_fix(pudp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pudp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		p4dp = p4d_offset(pgdp, start);
		pudp = pud_offset(p4dp, start);
		if (pud_none(*pudp)) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pud_set((pud_t *)pgdp, pmdp);
		}
		pmdp = pmd_offset(pudp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = __srmmu_get_nocache(PTE_SIZE,
						   PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
/* These flush types are not available on all chips... */
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
	unsigned long retval;

	if (sparc_cpu_model != sparc_leon) {
		vaddr &= PAGE_MASK;
		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
				     "=r" (retval) :
				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	} else {
		retval = leon_swprobe(vaddr, NULL);
	}
	return retval;
}
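/*
 * Usage sketch (hypothetical helper, unused): srmmu_probe() returns the
 * raw PTE/PTD that the hardware table walk yields for vaddr, or 0 when
 * nothing is mapped, so it doubles as a cheap "is this address mapped?"
 * test, e.g. for PROM mappings below.
 */
static inline int srmmu_is_mapped_example(unsigned long vaddr)
{
	return srmmu_probe(vaddr) != 0;
}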
/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	unsigned long probed;
	unsigned long addr;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what;	/* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */

	while (start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		probed = srmmu_probe(start);
		if (!probed) {
			/* continue probing until we find an entry */
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;
		addr = start - PAGE_SIZE;

		if (!(start & ~(PMD_MASK))) {
			if (srmmu_probe(addr + PMD_SIZE) == probed)
				what = 1;
		}

		if (!(start & ~(PGDIR_MASK))) {
			if (srmmu_probe(addr + PGDIR_SIZE) == probed)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		p4dp = p4d_offset(pgdp, start);
		pudp = pud_offset(p4dp, start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
			start += PGDIR_SIZE;
			continue;
		}
		if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
						   SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pud_set(__nocache_fix(pudp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (what == 1) {
			*(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);
			start += PMD_SIZE;
			continue;
		}
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(probed);
		start += PAGE_SIZE;
	}
}
#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
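/*
 * Worked example (illustration only): KERNEL_PTE() takes a physical base
 * already shifted right by 4 (the SRMMU PTP/PPN convention) and tags it
 * valid, cacheable and privileged.  E.g. do_large_mapping() below turns
 * phys_base 0x04000000 into
 * KERNEL_PTE(0x04000000 >> 4) == 0x00400000 | SRMMU_CACHE | SRMMU_PRIV |
 * SRMMU_VALID, one pgd-level entry covering a full 16MB region.
 */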
/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK);
	unsigned long vstart = (vbase & PGDIR_MASK);
	unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += PGDIR_SIZE; pstart += PGDIR_SIZE;
	}
	return vstart;
}
static void __init map_kernel(void)
{
	int i;

	do_large_mapping(PAGE_OFFSET, phys_base);

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

void (*poke_srmmu)(void) = NULL;
void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	init_mm.context = (unsigned long) NO_CONTEXT;
	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	p4d = p4d_offset(pgd, PKMAP_BASE);
	pud = pud_offset(p4d, PKMAP_BASE);
	pmd = pmd_offset(pud, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

		max_zone_pfn[ZONE_DMA] = max_low_pfn;
		max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
		max_zone_pfn[ZONE_HIGHMEM] = highend_pfn;

		free_area_init(max_zone_pfn);
	}
}
void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock_irqsave(&srmmu_context_spinlock, flags);
		free_context(mm->context);
		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
		mm->context = NO_CONTEXT;
	}
}
/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}
static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}
static void poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}
static const struct sparc32_cachetlb_ops hypersparc_ops = {
	.cache_all	= hypersparc_flush_cache_all,
	.cache_mm	= hypersparc_flush_cache_mm,
	.cache_page	= hypersparc_flush_cache_page,
	.cache_range	= hypersparc_flush_cache_range,
	.tlb_all	= hypersparc_flush_tlb_all,
	.tlb_mm		= hypersparc_flush_tlb_mm,
	.tlb_page	= hypersparc_flush_tlb_page,
	.tlb_range	= hypersparc_flush_tlb_range,
	.page_to_ram	= hypersparc_flush_page_to_ram,
	.sig_insns	= hypersparc_flush_sig_insns,
	.page_for_dma	= hypersparc_flush_page_for_dma,
};
static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;
	sparc32_cachetlb_ops = &hypersparc_ops;

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}
static void poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}
static const struct sparc32_cachetlb_ops swift_ops = {
	.cache_all	= swift_flush_cache_all,
	.cache_mm	= swift_flush_cache_mm,
	.cache_page	= swift_flush_cache_page,
	.cache_range	= swift_flush_cache_range,
	.tlb_all	= swift_flush_tlb_all,
	.tlb_mm		= swift_flush_tlb_mm,
	.tlb_page	= swift_flush_tlb_page,
	.tlb_range	= swift_flush_tlb_range,
	.page_to_ram	= swift_flush_page_to_ram,
	.sig_insns	= swift_flush_sig_insns,
	.page_for_dma	= swift_flush_page_for_dma,
};
#define SWIFT_MASKID_ADDR  0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	sparc32_cachetlb_ops = &swift_ops;
	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_probe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}
static void poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	/* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}
static const struct sparc32_cachetlb_ops turbosparc_ops = {
	.cache_all	= turbosparc_flush_cache_all,
	.cache_mm	= turbosparc_flush_cache_mm,
	.cache_page	= turbosparc_flush_cache_page,
	.cache_range	= turbosparc_flush_cache_range,
	.tlb_all	= turbosparc_flush_tlb_all,
	.tlb_mm		= turbosparc_flush_tlb_mm,
	.tlb_page	= turbosparc_flush_tlb_page,
	.tlb_range	= turbosparc_flush_tlb_range,
	.page_to_ram	= turbosparc_flush_page_to_ram,
	.sig_insns	= turbosparc_flush_sig_insns,
	.page_for_dma	= turbosparc_flush_page_for_dma,
};
static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;
	sparc32_cachetlb_ops = &turbosparc_ops;
	poke_srmmu = poke_turbosparc;
}
static void poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}
static const struct sparc32_cachetlb_ops tsunami_ops = {
	.cache_all	= tsunami_flush_cache_all,
	.cache_mm	= tsunami_flush_cache_mm,
	.cache_page	= tsunami_flush_cache_page,
	.cache_range	= tsunami_flush_cache_range,
	.tlb_all	= tsunami_flush_tlb_all,
	.tlb_mm		= tsunami_flush_tlb_mm,
	.tlb_page	= tsunami_flush_tlb_page,
	.tlb_range	= tsunami_flush_tlb_range,
	.page_to_ram	= tsunami_flush_page_to_ram,
	.sig_insns	= tsunami_flush_sig_insns,
	.page_for_dma	= tsunami_flush_page_for_dma,
};
static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;
	sparc32_cachetlb_ops = &tsunami_ops;
	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}
static void poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		 * viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}
static struct sparc32_cachetlb_ops viking_ops __ro_after_init = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= viking_flush_tlb_all,
	.tlb_mm		= viking_flush_tlb_mm,
	.tlb_page	= viking_flush_tlb_page,
	.tlb_range	= viking_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
#ifdef CONFIG_SMP
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending.  It's a big
 * hammer, but a semaphore like system to make sure we only have N TLB
 * flushes going at once will require SMP locking anyways so there's
 * no real value in trying any harder than this.
 */
static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= sun4dsmp_flush_tlb_all,
	.tlb_mm		= sun4dsmp_flush_tlb_mm,
	.tlb_page	= sun4dsmp_flush_tlb_page,
	.tlb_range	= sun4dsmp_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
#endif
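#ifdef CONFIG_SMP
/*
 * Sketch of the workaround described above (the real sun4dsmp_flush_tlb_*
 * routines live in assembly); the lock and function here are hypothetical
 * and unused.  One global spinlock serializes broadcast flushes so the
 * XBUS fifo can never hold too many at once.
 */
static inline void sun4d_serialized_tlb_flush_sketch(void)
{
	static DEFINE_SPINLOCK(flush_lock);
	unsigned long flags;

	spin_lock_irqsave(&flush_lock, flags);
	local_ops->tlb_all();	/* the hardware broadcasts this flush */
	spin_unlock_irqrestore(&flush_lock, flags);
}
#endif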
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure old viking takes no hits
		 * on its cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		viking_ops.page_for_dma = viking_flush_page;
#ifdef CONFIG_SMP
		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
#endif
		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;
		srmmu_cache_pagetables = 1;
	}

	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&viking_ops;
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
			&viking_sun4d_smp_ops;
#endif

	poke_srmmu = poke_viking;
}
/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		default:
			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
			prom_halt();
			break;
		}
		return;
	}

	/* Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}
#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) local_ops->page_for_dma, page);
	local_ops->page_for_dma(page);
}

static void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) local_ops->cache_all);
	local_ops->cache_all();
}

static void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) local_ops->tlb_all);
	local_ops->tlb_all();
}
static void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
		local_ops->cache_mm(mm);
	}
}

static void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_ops->tlb_mm(mm);
	}
}
static void smp_flush_cache_range(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->cache_range,
			    (unsigned long) vma, start, end);
		local_ops->cache_range(vma, start, end);
	}
}

static void smp_flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start,
				unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->tlb_range,
			    (unsigned long) vma, start, end);
		local_ops->tlb_range(vma, start, end);
	}
}
static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->cache_page,
			    (unsigned long) vma, page);
		local_ops->cache_page(vma, page);
	}
}

static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->tlb_page,
			    (unsigned long) vma, page);
		local_ops->tlb_page(vma, page);
	}
}
static void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the one's
	 * who have just dirtied their cache with the pages contents
	 * in kernel space, therefore we only run this on local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) local_ops->page_to_ram, page);
#endif
	local_ops->page_to_ram(page);
}

static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;

	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) local_ops->sig_insns,
		    (unsigned long) mm, insn_addr);
	local_ops->sig_insns(mm, insn_addr);
}
static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = {
	.cache_all	= smp_flush_cache_all,
	.cache_mm	= smp_flush_cache_mm,
	.cache_page	= smp_flush_cache_page,
	.cache_range	= smp_flush_cache_range,
	.tlb_all	= smp_flush_tlb_all,
	.tlb_mm		= smp_flush_tlb_mm,
	.tlb_page	= smp_flush_tlb_page,
	.tlb_range	= smp_flush_tlb_range,
	.page_to_ram	= smp_flush_page_to_ram,
	.sig_insns	= smp_flush_sig_insns,
	.page_for_dma	= smp_flush_page_for_dma,
};
#endif
/* Load up routines and constants for sun4m and sun4d mmu */
void __init load_mmu(void)
{
	/* Functions */
	get_srmmu_type();

#ifdef CONFIG_SMP
	/* El switcheroo... */
	local_ops = sparc32_cachetlb_ops;

	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
	}

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		smp_cachetlb_ops.cache_all = local_ops->cache_all;
		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
		smp_cachetlb_ops.cache_range = local_ops->cache_range;
		smp_cachetlb_ops.cache_page = local_ops->cache_page;

		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
	}

	/* It really is const after this point. */
	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&smp_cachetlb_ops;
#endif

	if (sparc_cpu_model != sun4d)
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}