// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from previous arch/powerpc/mm/mmu_context.c
 *  and arch/powerpc/include/asm/mmu_context.h
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_CLAMP_LAST_CONTEXT   31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be compiled in always nowadays
 * and this would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while(0)
#define pr_hardcont(args...)	do { } while(0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/smp.h>

#include <mm/mmu_decl.h>

/*
 * Room for two PTE table pointers, usually the kernel and current user
 * pointer to their respective root page table (pgdir).
 */
void *abatron_pteptrs[2];
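
/* Note: abatron_pteptrs is only consumed when CONFIG_BDI_SWITCH is
 * enabled (Abatron BDI2000 debugger support); switch_mmu_context()
 * below keeps slot 1 pointing at the current user pgdir.
 */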

/*
 * The MPC8xx has only 16 contexts. We rotate through them on each task switch.
 * A better way would be to keep track of tasks that own contexts, and implement
 * an LRU usage. That way very active tasks don't always have to pay the TLB
 * reload overhead. The kernel pages are mapped shared, so the kernel can run on
 * behalf of any task that makes a kernel entry. Shared does not mean they are
 * not protected, just that the ASID comparison is not performed. -- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these as a way of
 * "switching" contexts. If the TID of the TLB is zero, the PID/TID comparison
 * is disabled, so we can use a TID of zero to represent all kernel pages as
 * shared among all contexts. -- Dan
 *
 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We should
 * normally never have to steal, though the facility is present if needed.
 */

#define FIRST_CONTEXT 1
#ifdef DEBUG_CLAMP_LAST_CONTEXT
#define LAST_CONTEXT DEBUG_CLAMP_LAST_CONTEXT
#elif defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
#else
#define LAST_CONTEXT 255
#endif
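
/*
 * Bookkeeping for the allocator below: context_map is the bitmap of
 * in-use context IDs, context_mm maps an ID back to the mm that owns
 * it, and stale_map[cpu] marks IDs whose TLB entries on that CPU may
 * be stale and must be flushed before the ID is used there again.
 * All of it is protected by context_lock.
 */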
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (LAST_CONTEXT / BITS_PER_LONG + 1))
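
/*
 * CTX_MAP_SIZE rounds the bitmap up to whole longs so that bits
 * 0..LAST_CONTEXT are always covered, e.g. with LAST_CONTEXT = 255 and
 * 32-bit longs: 255 / 32 + 1 = 8 longs = 32 bytes (256 bits).
 */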

/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * (which happens when all possible contexts are in use).
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU.
 */
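
/* How the stale map is consumed: when an ID is stolen on SMP we only
 * mark it stale on every CPU that ran the victim mm; the actual TLB
 * flush is deferred to switch_mmu_context(), which checks the current
 * CPU's stale map, does a local flush and clears the bit before the
 * recycled ID is used on that CPU.
 */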

static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = LAST_CONTEXT - FIRST_CONTEXT;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > LAST_CONTEXT)
				id = FIRST_CONTEXT;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}

static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();
	unsigned int id;

	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
		if (id != FIRST_CONTEXT) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
		}
		if (IS_ENABLED(CONFIG_SMP))
			__clear_bit(id, stale_map[cpu]);
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT;

	return FIRST_CONTEXT;
}

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	if (IS_ENABLED(CONFIG_SMP))
		__clear_bit(id, stale_map[cpu]);

	return id;
}

static void set_context(unsigned long id, pgd_t *pgd)
{
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		s16 offset = (s16)(__pa(swapper_pg_dir));

		/*
		 * Register M_TWB will contain base address of level 1 table minus the
		 * lower part of the kernel PGDIR base address, so that all accesses to
		 * level 1 table are done relative to lower part of kernel PGDIR base
		 * address.
		 */
		mtspr(SPRN_M_TWB, __pa(pgd) - offset);

		/* Update context, hardware CASIDs being 0-based (hence id - 1) */
		mtspr(SPRN_M_CASID, id - 1);
		mb();
	} else {
		if (IS_ENABLED(CONFIG_40x))
			mb();	/* sync */

		mtspr(SPRN_PID, id);
		isync();
	}
}

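/* Callers of switch_mmu_context() (switch_mm()/switch_mm_irqs_off())
 * are expected to have interrupts disabled, which is why a plain
 * raw_spin_lock() is sufficient here while destroy_context() below
 * uses the irqsave variant.
 */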
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int id;
	unsigned int i, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

	if (IS_ENABLED(CONFIG_SMP)) {
		/* Mark us active and the previous one not anymore */
		next->context.active++;
		if (prev) {
			pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
			WARN_ON(prev->context.active < 1);
			prev->context.active--;
		}
	}

 again:

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT))
		goto ctxt_ok;

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > LAST_CONTEXT)
		id = FIRST_CONTEXT;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
		if (IS_ENABLED(CONFIG_PPC_8xx))
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, LAST_CONTEXT+1, id);
		if (id > LAST_CONTEXT)
			id = FIRST_CONTEXT;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = next->pgd;
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	/*
	 * We have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 */
	if (mm->context.id == 0)
		slice_init_new_context_exec(mm);
	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;
	pte_frag_set(&mm->context, NULL);
	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
	/* We don't touch CPU 0 map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
	stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
	return 0;
}

static int mmu_ctx_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
	kfree(stale_map[cpu]);
	stale_map[cpu] = NULL;

	/* We also clear the cpu_vm_mask bits of CPUs going away */
	clear_tasks_mm_cpumask(cpu);
#endif
	return 0;
}

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
	if (!context_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      CTX_MAP_SIZE);
	context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
				    SMP_CACHE_BYTES);
	if (!context_mm)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(void *) * (LAST_CONTEXT + 1));
	if (IS_ENABLED(CONFIG_SMP)) {
		stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
		if (!stale_map[boot_cpuid])
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      CTX_MAP_SIZE);

		cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
					  "powerpc/mmu/ctx:prepare",
					  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
	}

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (LAST_CONTEXT + 1)),
	       LAST_CONTEXT - FIRST_CONTEXT + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_context = FIRST_CONTEXT;
	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}