// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

static int __read_mostly pae_exists;
/*
 * Utility routine to erase a J-TLB entry
 * Caller needs to set up the Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
        write_aux_reg(ARC_REG_TLBPD1, 0);

        if (is_pae40_enabled())
                write_aux_reg(ARC_REG_TLBPD1HI, 0);

        write_aux_reg(ARC_REG_TLBPD0, 0);
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static void utlb_invalidate(void)
{
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}
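/*
 * Background note (summarising behaviour assumed elsewhere in this file):
 * the uDTLB/uITLB are small hardware-managed caches of J-TLB entries for
 * data/instruction accesses (their sizes are decoded from the BCR below).
 * TLBIVUTLB drops them wholesale so that no stale translation survives a
 * J-TLB update; TLBWriteNI is the command variant that skips exactly this step.
 */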
#ifdef CONFIG_ARC_MMU_V3

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
        unsigned int idx;

        write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

        write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
        idx = read_aux_reg(ARC_REG_TLBINDEX);

        return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
        unsigned int idx;

        /* Locate the TLB entry for this vaddr + ASID */
        idx = tlb_entry_lkup(vaddr_n_asid);

        /* No error means entry found, zero it out */
        if (likely(!(idx & TLB_LKUP_ERR))) {
                __tlb_entry_erase();
        } else {
                /* Duplicate entry error */
                WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
                     vaddr_n_asid);
        }
}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
        unsigned int idx;

        /*
         * First verify if entry for this vaddr+ASID already exists
         * This also sets up PD0 (vaddr, ASID..) for final commit
         */
        idx = tlb_entry_lkup(pd0);

        /*
         * If not already present, get a free slot from MMU.
         * Otherwise, Probe would have located the entry and set INDEX Reg
         * with existing location. This will cause Write CMD to overwrite
         * the existing entry with new PD0 and PD1
         */
        if (likely(idx & TLB_LKUP_ERR))
                write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

        /* setup the other half of TLB entry (pfn, rwx..) */
        write_aux_reg(ARC_REG_TLBPD1, pd1);

        /*
         * Commit the Entry to MMU
         * It doesn't sound safe to use the TLBWriteNI cmd here
         * which doesn't flush uTLBs. I'd rather be safe than sorry.
         */
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
#else	/* MMUv4 */

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
        write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
        write_aux_reg(ARC_REG_TLBPD0, pd0);
        write_aux_reg(ARC_REG_TLBPD1, pd1);

        if (is_pae40_enabled())
                write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);

        write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif
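/*
 * Both variants program a TLB entry as a PD0/PD1 pair: PD0 carries the
 * vaddr + ASID (and valid/size flags), PD1 carries the pfn + rwx permission
 * bits (with the upper PFN bits going to PD1HI under PAE40). They differ
 * only in how an entry is located and committed: MMUv3 needs an explicit
 * Probe / GetIndex / Write sequence, while MMUv4 has single-shot
 * Insert/Delete commands.
 */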
/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
        unsigned long flags;
        unsigned int entry;
        int num_tlb = mmu->sets * mmu->ways;

        local_irq_save(flags);

        /* Load PD0 and PD1 with template for a Blank Entry */
        write_aux_reg(ARC_REG_TLBPD1, 0);

        if (is_pae40_enabled())
                write_aux_reg(ARC_REG_TLBPD1HI, 0);

        write_aux_reg(ARC_REG_TLBPD0, 0);

        for (entry = 0; entry < num_tlb; entry++) {
                /* write this entry to the TLB */
                write_aux_reg(ARC_REG_TLBINDEX, entry);
                write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
        }

        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                const int stlb_idx = 0x800;

                /* Blank sTLB entry */
                write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

                for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
                        write_aux_reg(ARC_REG_TLBINDEX, entry);
                        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
                }
        }

        utlb_invalidate();

        local_irq_restore(flags);
}
/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */

noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
        /*
         * Small optimisation courtesy IA64
         * flush_mm called during fork, exit, munmap etc, multiple times as well.
         * Only for fork( ) do we need to move parent to a new MMU ctxt,
         * all other cases are NOPs, hence this check.
         */
        if (atomic_read(&mm->mm_users) == 0)
                return;

        /*
         * - Move to a new ASID, but only if the mm is still wired in
         *   (Android Binder ended up calling this for vma->mm != tsk->mm,
         *    causing h/w - s/w ASID to get out of sync)
         * - Also get_new_mmu_context() new implementation allocates a new
         *   ASID only if it is not allocated already - so deallocate first
         */
        destroy_context(mm);
        if (current->mm == mm)
                get_new_mmu_context(mm);
}
/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        const unsigned int cpu = smp_processor_id();
        unsigned long flags;

        /* If range @start to @end is more than 32 TLB entries deep,
         * it's better to move to a new ASID rather than searching for
         * individual entries and then shooting them down
         *
         * The calc above is rough, doesn't account for unaligned parts,
         * since this is heuristics based anyways
         */
        if (unlikely((end - start) >= PAGE_SIZE * 32)) {
                local_flush_tlb_mm(vma->vm_mm);
                return;
        }
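        /*
         * For scale (assuming the typical 8K MMU page mentioned later in this
         * file): 32 entries cover 256 KB, so anything larger is retired by
         * simply moving the mm to a fresh ASID instead of a per-page shootdown.
         */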
        /*
         * @start moved to page start: this alone suffices for checking
         * loop end condition below, w/o need for aligning @end to end
         * e.g. 2000 to 4001 will anyhow loop twice
         */
        start &= PAGE_MASK;

        local_irq_save(flags);

        if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
                while (start < end) {
                        tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
                        start += PAGE_SIZE;
                }
        }

        local_irq_restore(flags);
}
/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;

        /* exactly same as above, except for TLB entry not taking ASID */

        if (unlikely((end - start) >= PAGE_SIZE * 32)) {
                local_flush_tlb_all();
                return;
        }

        start &= PAGE_MASK;

        local_irq_save(flags);
        while (start < end) {
                tlb_entry_erase(start);
                start += PAGE_SIZE;
        }

        local_irq_restore(flags);
}
/*
 * Delete TLB entry in MMU for a given page (user virtual address)
 * NOTE One TLB entry contains translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        const unsigned int cpu = smp_processor_id();
        unsigned long flags;

        /* Note that it is critical that interrupts are DISABLED between
         * checking the ASID and using it to flush the TLB entry
         */
        local_irq_save(flags);

        if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
                tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
        }

        local_irq_restore(flags);
}
#ifdef CONFIG_SMP

struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
        struct tlb_args *ta = arg;

        local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
void flush_tlb_all(void)
{
        on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
                         mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        struct tlb_args ta = {
                .ta_vma = vma,
                .ta_start = uaddr
        };

        on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        struct tlb_args ta = {
                .ta_vma = vma,
                .ta_start = start,
                .ta_end = end
        };

        on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
{
        struct tlb_args ta = {
                .ta_vma = vma,
                .ta_start = start,
                .ta_end = end
        };

        on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct tlb_args ta = {
                .ta_start = start,
                .ta_end = end
        };

        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}

#endif
/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
        unsigned long flags;
        unsigned int asid_or_sasid, rwx;
        unsigned long pd0;
        pte_t pd1;

        /*
         * create_tlb() assumes that current->mm == vma->vm_mm, since
         * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
         * -it completes the lazy write to the SASID reg (again valid for curr tsk)
         *
         * Removing the assumption involves
         * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
         * -Fix the TLB paranoid debug code to not trigger false negatives.
         * -More importantly it makes this handler inconsistent with fast-path
         *  TLB Refill handler which always deals with "current"
         *
         * Let's see the use cases when current->mm != vma->vm_mm and we land here
         *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
         *     Here VM wants to pre-install a TLB entry for user stack while
         *     current->mm still points to pre-execve mm (hence the condition).
         *     However the stack vaddr is soon relocated (randomization) and
         *     move_page_tables() tries to undo that TLB entry.
         *     Thus not creating TLB entry is not any worse.
         *
         *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
         *     breakpoint in debugged task. Not creating a TLB now is not
         *     performance critical.
         *
         * Neither of the above cases is worth the code churn.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);

        vaddr &= PAGE_MASK;

        /* update this PTE credentials */
        pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

        /* Create HW TLB(PD0,PD1) from PTE */

        /* ASID for this task */
        asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

        pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

        /*
         * ARC MMU provides fully orthogonal access bits for K/U mode,
         * however Linux only saves 1 set to save PTE real-estate
         * Here we convert 3 PTE bits into 6 MMU bits:
         * -Kernel only entries have Kr Kw Kx 0 0 0
         * -User entries have mirrored K and U bits
         */
        rwx = pte_val(*ptep) & PTE_BITS_RWX;

        if (pte_val(*ptep) & _PAGE_GLOBAL)
                rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
        else
                rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
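        /*
         * Worked example (bit positions illustrative, low 3 bits = r w x):
         * a user PTE with read+write+exec (rwx = 0b111) and !_PAGE_GLOBAL
         * expands to 0b111111 (Kr Kw Kx Ur Uw Ux), while a global kernel
         * mapping with the same perms becomes 0b111000 (K bits only).
         */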
        pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

        tlb_entry_insert(pd0, pd1);

        local_irq_restore(flags);
}
/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *   flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                      pte_t *ptep)
{
        unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
        phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
        struct page *page = pfn_to_page(pte_pfn(*ptep));

        create_tlb(vma, vaddr, ptep);

        if (page == ZERO_PAGE(0)) {
                return;
        }

        /*
         * Exec page : Independent of aliasing/page-color considerations,
         *             since icache doesn't snoop dcache on ARC, any dirty
         *             K-mapping of a code page needs to be wback+inv so that
         *             icache fetch by userspace sees code correctly.
         * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
         *             so userspace sees the right data.
         *  (Avoids the flush for Non-exec + congruent mapping case)
         */
        if ((vma->vm_flags & VM_EXEC) ||
            addr_not_cache_congruent(paddr, vaddr)) {

                int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
                if (dirty) {
                        /* wback + inv dcache lines (K-mapping) */
                        __flush_dcache_page(paddr, paddr);

                        /* invalidate any existing icache lines (U-mapping) */
                        if (vma->vm_flags & VM_EXEC)
                                __inv_icache_page(paddr, vaddr);
                }
        }
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in the TLB, with a
 * new bit "SZ" in the TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * RTL builds.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for the above default, the THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 *
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
        pte_t pte = __pte(pmd_val(*pmd));
        update_mmu_cache(vma, addr, &pte);
}
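/*
 * Deposit/withdraw maintain a per-PMD stash of pre-allocated PTE tables
 * (same scheme as sparc, per the comment above): a table deposited when a
 * huge PMD is established can be withdrawn later (e.g. on THP split) without
 * risking an allocation failure. Deposited tables are chained through their
 * first two PTE slots, reused as a struct list_head and zeroed on withdraw.
 */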
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }

        pte_val(pgtable[0]) = 0;
        pte_val(pgtable[1]) = 0;

        return pgtable;
}
void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                               unsigned long end)
{
        unsigned int cpu;
        unsigned long flags;

        local_irq_save(flags);

        cpu = smp_processor_id();

        if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
                unsigned int asid = hw_pid(vma->vm_mm, cpu);

                /* No need to loop here: this will always be for 1 Huge Page */
                tlb_entry_erase(start | _PAGE_HW_SZ | asid);
        }

        local_irq_restore(flags);
}

#endif
/* Read the MMU Build Configuration Register, decode it and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCR
 */
void read_decode_mmu_bcr(void)
{
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
        unsigned int tmp;
        struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
                     u_itlb:4, u_dtlb:4;
#else
        unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
                     ways:4, ver:8;
#endif
        } *mmu3;

        struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
                     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
        /*           DTLB      ITLB      JES        JE         JA      */
        unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
                     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
        } *mmu4;

        tmp = read_aux_reg(ARC_REG_MMU_BCR);
        mmu->ver = (tmp >> 24);

        if (is_isa_arcompact() && mmu->ver == 3) {
                mmu3 = (struct bcr_mmu_3 *)&tmp;
                mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
                mmu->sets = 1 << mmu3->sets;
                mmu->ways = 1 << mmu3->ways;
                mmu->u_dtlb = mmu3->u_dtlb;
                mmu->u_itlb = mmu3->u_itlb;
                mmu->sasid = mmu3->sasid;
        } else {
                mmu4 = (struct bcr_mmu_4 *)&tmp;
                mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
                mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
                mmu->sets = 64 << mmu4->n_entry;
                mmu->ways = mmu4->n_ways * 2;
                mmu->u_dtlb = mmu4->u_dtlb * 4;
                mmu->u_itlb = mmu4->u_itlb * 4;
                mmu->sasid = mmu4->sasid;
                pae_exists = mmu->pae = mmu4->pae;
        }
}
char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
        int n = 0;
        struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
        char super_pg[64] = "";

        if (p_mmu->s_pg_sz_m)
                scnprintf(super_pg, 64, "%dM Super Page %s",
                          p_mmu->s_pg_sz_m,
                          IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));

        n += scnprintf(buf + n, len - n,
                       "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
                       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
                       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
                       p_mmu->u_dtlb, p_mmu->u_itlb,
                       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

        return buf;
}
int pae40_exist_but_not_enab(void)
{
        return pae_exists && !is_pae40_enabled();
}
void arc_mmu_init(void)
{
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
        char str[256];
        int compat = 0;

        pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));

        /*
         * Can't be done in processor.h due to header include dependencies
         */
        BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

        /*
         * stack top size sanity check,
         * Can't be done in processor.h due to header include dependencies
         */
        BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

        /*
         * Ensure that MMU features assumed by kernel exist in hardware.
         *  - For older ARC700 cpus, only v3 supported
         *  - For HS cpus, v4 was baseline and v5 is backwards compatible
         *    (will run older software).
         */
        if (is_isa_arcompact() && mmu->ver == 3)
                compat = 1;
        else if (is_isa_arcv2() && mmu->ver >= 4)
                compat = 1;

        if (!compat)
                panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);

        if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
                panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
                panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
                      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

        if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
                panic("Hardware doesn't support PAE40\n");

        /* Enable the MMU */
        write_aux_reg(ARC_REG_PID, MMU_ENABLE);

        /* In smp we use this reg for interrupt 1 scratch */
#ifdef ARC_USE_SCRATCH_REG
        /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
        write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif

        if (pae40_exist_but_not_enab())
                write_aux_reg(ARC_REG_TLBPD1HI, 0);
}
/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~		    ~	~	  ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
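/* e.g. with the 4-way geometry above: set 1, way 2 -> linear index 1 * 4 + 2 = 6 */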
/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of a lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *      the duplicate one.
 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
                          struct pt_regs *regs)
{
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
        unsigned long flags;
        int set, n_ways = mmu->ways;

        n_ways = min(n_ways, 4);
        BUG_ON(mmu->ways > 4);

        local_irq_save(flags);

        /* loop thru all sets of TLB */
        for (set = 0; set < mmu->sets; set++) {

                int is_valid, way;
                unsigned int pd0[4];

                /* read out all the ways of current set */
                for (way = 0, is_valid = 0; way < n_ways; way++) {
                        write_aux_reg(ARC_REG_TLBINDEX,
                                      SET_WAY_TO_IDX(mmu, set, way));
                        write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
                        pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
                        is_valid |= pd0[way] & _PAGE_PRESENT;
                        pd0[way] &= PAGE_MASK;
                }

                /* If all the WAYS in SET are empty, skip to next SET */
                if (!is_valid)
                        continue;

                /* Scan the set for duplicate ways: needs a nested loop */
                for (way = 0; way < n_ways - 1; way++) {

                        int n;

                        if (!pd0[way])
                                continue;

                        for (n = way + 1; n < n_ways; n++) {
                                if (pd0[way] != pd0[n])
                                        continue;

                                if (!dup_pd_silent)
                                        pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
                                                pd0[way], set, way, n);

                                /*
                                 * clear entry @way and not @n.
                                 * This is critical to our optimised loop
                                 */
                                pd0[way] = 0;
                                write_aux_reg(ARC_REG_TLBINDEX,
                                              SET_WAY_TO_IDX(mmu, set, way));
                                __tlb_entry_erase();
                        }
                }
        }

        local_irq_restore(flags);
}
/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
        pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
                 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

        __asm__ __volatile__("flag 1");
}
void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
        unsigned int mmu_asid;

        mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

        /*
         * At the time of a TLB miss/installation
         *   - HW version needs to match SW version
         *   - SW needs to have a valid ASID
         */
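        /*
         * Only user addresses are checked: 0x70000000 is (with the default
         * ARC memory map / CONFIG_ARC_KVADDR_SIZE) where the vmalloc/kernel
         * region starts, and those translations are global, i.e. carry no ASID.
         */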
        if (addr < 0x70000000 &&
            ((mm_asid == MM_CTXT_NO_ASID) ||
             (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
                print_asid_mismatch(mm_asid, mmu_asid, 0);
}

#endif