// SPDX-License-Identifier: GPL-2.0
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
 */

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>
#include <asm/tlbmisc.h>
#include <asm/setup.h>

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

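/*
 * A note on the idiom shared by the flush routines below: each R4k-style
 * TLB entry maps an even/odd pair of virtual pages through EntryLo0 and
 * EntryLo1, which is why addresses are aligned with (PAGE_MASK << 1) and
 * loops advance by (PAGE_SIZE << 1). An entry cannot simply be erased;
 * instead its VPN2 is rewritten with UNIQUE_ENTRYHI(idx), an unmapped
 * per-index address, and its EntryLo values zeroed, so that no two
 * entries ever match the same VPN2.
 */
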
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, old_mmid;
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}

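/*
 * The size check above (and in local_flush_tlb_kernel_range() below) is
 * a cost cutoff: once a range covers more than half the TLB, or an
 * eighth of it on CPUs with an FTLB, probing double-page by double-page
 * costs more than invalidating everything via drop_mmu_context() or
 * local_flush_tlb_all() respectively.
 */
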
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}

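/*
 * Note that the probe loop above loads EntryHi with the VPN2 alone:
 * kernel mappings are entered with the global (G) bit set, and a probe
 * against a global entry matches regardless of the ASID field.
 */
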
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long old_mmid;
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx >= 0) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			tlbw_use_hazard();
		}
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, *ptemap = NULL;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;

		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		/*
		 * EntryLo1 maps the second half of the huge page: the PFN
		 * field encodes (PA >> 12) starting at bit 6, so an offset
		 * of HPAGE_SIZE/2 folds to HPAGE_SIZE >> 7 (ignoring the
		 * RI/XI rotation pte_to_entrylo() may apply).
		 */
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptemap = ptep = pte_offset_map(pmdp, address);
		/*
		 * update_mmu_cache() is called between pte_offset_map_lock()
		 * and pte_unmap_unlock(), so we can assume that ptep is not
		 * NULL here: and what should be done below if it were NULL?
		 */

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);

	if (ptemap)
		pte_unmap(ptemap);
	local_irq_restore(flags);
}

void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int old_mmid;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

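/*
 * Usage sketch with hypothetical values: platform code can use
 * add_wired_entry() to pin a mapping that survives every flush, e.g. a
 * 16MB device window at 0xe0000000:
 *
 *	add_wired_entry(lo0, lo1, 0xe0000000, PM_16M);
 *
 * where lo0/lo1 would be EntryLo values for the two 16MB physical halves
 * with the dirty, valid and global bits set, and PM_16M assumes a CPU
 * whose c0_pagemask supports 16MB pages.
 */
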
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		/*
		 * Probe the MMU: a PageMask value it does not implement
		 * reads back modified, so a successful round-trip of
		 * PM_HUGE_MASK means huge pages are usable.
		 */
		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}

	return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	htw_start();
	local_irq_restore(flags);
	return ret;
}

static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

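/*
 * Example: booting with "ntlb=16" on a CPU with a 48-entry TLB makes
 * tlb_init() below wire the first 32 entries, leaving only 16 subject to
 * random replacement; this is mainly a debugging aid for TLB replacement
 * issues.
 */
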
/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	*/
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;

			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else {
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
		}
	}

	build_tlb_refill_handler();
}

static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* CPU is coming back online; its TLB setup may have been lost. */
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);