#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

/*
 *	TLB flushing, formerly SMP-only
 *		including cross-CPU TLB flushing.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts.  We do the
 * necessary invalidation by clearing out the 'ctx_id' which
 * forces a TLB flush when the context is loaded.
 */
void clear_asid_other(void)
{
	u16 asid;

	/*
	 * This is only expected to be set if we have disabled
	 * kernel _PAGE_GLOBAL pages.
	 */
	if (!static_cpu_has(X86_FEATURE_PTI)) {
		WARN_ON_ONCE(1);
		return;
	}

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		/* Do not need to flush the current asid */
		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
			continue;
		/*
		 * Make sure the next time we go to switch to
		 * this asid, we do a flush:
		 */
		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
	}
	this_cpu_write(cpu_tlbstate.invalidate_other, false);
}

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

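/*
 * Each mm is tagged with a unique, never-recycled ctx_id taken from
 * last_mm_ctx_id.  The sketch below is an assumption about how
 * init_new_context() (asm/mmu_context.h) consumes it, shown only to
 * make the ctx_id comparisons in this file easier to follow:
 *
 *	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
 *
 * Unlike the small, recycled ASID space, a ctx_id identifies an mm
 * unambiguously for the whole lifetime of the system.
 */
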
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
			    u16 *new_asid, bool *need_flush)
{
	u16 asid;

	if (!static_cpu_has(X86_FEATURE_PCID)) {
		*new_asid = 0;
		*need_flush = true;
		return;
	}

	if (this_cpu_read(cpu_tlbstate.invalidate_other))
		clear_asid_other();

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
		    next->context.ctx_id)
			continue;

		*new_asid = asid;
		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
			       next_tlb_gen);
		return;
	}

	/*
	 * We don't currently own an ASID slot on this CPU.
	 * Allocate a slot.
	 */
	*new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
	if (*new_asid >= TLB_NR_DYN_ASIDS) {
		*new_asid = 0;
		this_cpu_write(cpu_tlbstate.next_asid, 1);
	}
	*need_flush = true;
}

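/*
 * A rough worked example of the eviction policy above, assuming
 * TLB_NR_DYN_ASIDS == 6: next_asid starts at 1, so successive
 * allocations hand out slots 1, 2, 3, 4, 5, then wrap to 0 and reset
 * next_asid to 1.  Victims are chosen round-robin; there is no
 * attempt to evict the least-recently-used slot.
 */
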
static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
	unsigned long new_mm_cr3;

	if (need_flush) {
		invalidate_user_asid(new_asid);
		new_mm_cr3 = build_cr3(pgdir, new_asid);
	} else {
		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
	}

	/*
	 * Caution: many callers of this function expect
	 * that load_cr3() is serializing and orders TLB
	 * fills with respect to the mm_cpumask writes.
	 */
	write_cr3(new_mm_cr3);
}

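/*
 * For reference, the CR3 values built above are assumed to look
 * roughly like this (build_cr3() and build_cr3_noflush() in
 * asm/tlbflush.h are authoritative):
 *
 *	cr3         = __pa(pgdir) | kern_pcid(asid)	(PCID in bits 11:0)
 *	cr3_noflush = cr3 | CR3_NOFLUSH			(bit 63 set: skip the
 *							 implicit flush of the
 *							 new PCID's entries)
 */
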
void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	/* Warn if we're not lazy. */
	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

static void sync_current_stack_to_mm(struct mm_struct *mm)
{
	unsigned long sp = current_stack_pointer;
	pgd_t *pgd = pgd_offset(mm, sp);

	if (pgtable_l5_enabled()) {
		if (unlikely(pgd_none(*pgd))) {
			pgd_t *pgd_ref = pgd_offset_k(sp);

			set_pgd(pgd, *pgd_ref);
		}
	} else {
		/*
		 * "pgd" is faked.  The top level entries are "p4d"s, so sync
		 * the p4d.  This compiles to approximately the same code as
		 * the 5-level case.
		 */
		p4d_t *p4d = p4d_offset(pgd, sp);

		if (unlikely(p4d_none(*p4d))) {
			pgd_t *pgd_ref = pgd_offset_k(sp);
			p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);

			set_p4d(p4d, *p4d_ref);
		}
	}
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
	unsigned cpu = smp_processor_id();
	u64 next_tlb_gen;
	bool need_flush;
	u16 new_asid;

	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, we don't assume that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	/* We don't want flush_tlb_func_* to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Verify that CR3 is what we think it is.  This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 *
		 * (This is far from being a fully correct recovery.
		 *  Architecturally, the CPU could prefetch something
		 *  back into an incorrect ASID slot and leave it there
		 *  to cause trouble down the road.  It's better than
		 *  nothing, though.)
		 */
		__flush_tlb_all();
	}
#endif
	this_cpu_write(cpu_tlbstate.is_lazy, false);

	/*
	 * The membarrier system call requires a full memory barrier and
	 * core serialization before returning to user-space, after
	 * storing to rq->curr. Writing to CR3 provides that full
	 * memory barrier and core serializing instruction.
	 */
	if (real_prev == next) {
		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			   next->context.ctx_id);

		/*
		 * Even in lazy TLB mode, the CPU should stay set in the
		 * mm_cpumask.  The TLB shootdown code can figure out from
		 * cpu_tlbstate.is_lazy whether or not to send an IPI.
		 */
		if (WARN_ON_ONCE(real_prev != &init_mm &&
				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * If the CPU is not in lazy TLB mode, we are just switching
		 * from one thread in a process to another thread in the same
		 * process.  No TLB flush required.
		 */
		if (!was_lazy)
			return;

		/*
		 * Read the tlb_gen to check whether a flush is needed.
		 * If the TLB is up to date, just use it.
		 * The barrier synchronizes with the tlb_gen increment in
		 * the TLB shootdown code.
		 */
		smp_mb();
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
		if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
				next_tlb_gen)
			return;

		/*
		 * TLB contents went out of date while we were in lazy
		 * mode.  Fall through to the TLB switching code below.
		 */
		new_asid = prev_asid;
		need_flush = true;
	} else {
		u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);

		/*
		 * Avoid user/user BTB poisoning by flushing the branch
		 * predictor when switching between processes.  This stops
		 * one process from doing Spectre-v2 attacks on another.
		 *
		 * As an optimization, flush indirect branches only when
		 * switching into processes that disable dumping.  This
		 * protects high value processes like gpg, without having
		 * too high performance overhead.  IBPB is *expensive*!
		 *
		 * This will not flush branches when switching into kernel
		 * threads.  It will also not flush if we switch to the idle
		 * thread and back to the same process.  It will flush if we
		 * switch to a different non-dumpable process.
		 */
		if (tsk && tsk->mm &&
		    tsk->mm->context.ctx_id != last_ctx_id &&
		    get_dumpable(tsk->mm) != SUID_DUMP_USER)
			indirect_branch_prediction_barrier();

		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			sync_current_stack_to_mm(next);
		}

		/* Stop remote flushes for the previous mm */
		VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
				real_prev != &init_mm);
		cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

		/*
		 * Start remote flushes and then read tlb_gen.
		 */
		cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
	}

	if (need_flush) {
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
		load_new_mm_cr3(next->pgd, new_asid, true);

		/*
		 * NB: This gets called via leave_mm() in the idle path
		 * where RCU functions differently.  Tracing normally
		 * uses RCU, so we need to use the _rcuidle variant.
		 *
		 * (There is no good reason for this.  The idle code should
		 *  be rearranged to call this before rcu_idle_enter().)
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	} else {
		/* The new ASID is already up to date. */
		load_new_mm_cr3(next->pgd, new_asid, false);

		/* See above wrt _rcuidle. */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
	}

	/*
	 * Record the last user mm's context id, so we can avoid
	 * flushing the branch buffer with IBPB if we switch back
	 * to the same user.
	 */
	if (next != &init_mm)
		this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);

	this_cpu_write(cpu_tlbstate.loaded_mm, next);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);

	load_mm_cr4(next);
	switch_ldt(real_prev, next);
}

/*
 * Please ignore the name of this function.  It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm.  Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row.  It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
		return;

	if (tlb_defer_switch_to_init_mm()) {
		/*
		 * There's a significant optimization that may be possible
		 * here.  We have accurate enough TLB flush tracking that we
		 * don't need to maintain coherence of the TLB per se when
		 * we're lazy.  We do, however, need to maintain coherence of
		 * paging-structure caches.  We could, in principle, leave our
		 * old mm loaded and only switch to init_mm when
		 * tlb_remove_page() happens.
		 */
		this_cpu_write(cpu_tlbstate.is_lazy, true);
	} else {
		switch_mm(NULL, &init_mm, NULL);
	}
}

/*
 * Call this when reinitializing a CPU.  It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear.  CPU hotplug can do this.)
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
	int i;
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
	unsigned long cr3 = __read_cr3();

	/* Assert that CR3 already references the right mm. */
	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

	/*
	 * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
	 * doesn't work like other CR4 bits because it can only be set from
	 * long mode.)
	 */
	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm->pgd, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}

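/*
 * After the reset above, the per-CPU state is expected to look like:
 *
 *	loaded_mm_asid	= 0
 *	next_asid	= 1
 *	ctxs[0]		= { mm->context.ctx_id, init_mm's tlb_gen }
 *	ctxs[1..N-1]	= { ctx_id 0, stale tlb_gen }
 *
 * A ctx_id of 0 never matches a real mm, so those slots read as free.
 */
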
/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/*
	 * We have three different tlb_gen values in here.  They are:
	 *
	 * - mm_tlb_gen:     the latest generation.
	 * - local_tlb_gen:  the generation that this CPU has already caught
	 *                   up to.
	 * - f->new_tlb_gen: the generation that the requester of the flush
	 *                   wants us to catch up to.
	 */
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);

	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (unlikely(loaded_mm == &init_mm))
		return;

	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
		   loaded_mm->context.ctx_id);

	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
		/*
		 * We're in lazy mode.  We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB.  Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 *
		 * This should be rare, with native_flush_tlb_others() skipping
		 * IPIs to lazy TLB mode CPUs.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}

	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
		/*
		 * There's nothing to do: we're already up to date.  This can
		 * happen if two concurrent flushes happen -- the first flush to
		 * be handled can catch us all the way up, leaving no work for
		 * the second flush.
		 */
		trace_tlb_flush(reason, 0);
		return;
	}

	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

	/*
	 * If we get to this point, we know that our TLB is out of date.
	 * This does not strictly imply that we need to flush (it's
	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
	 * going to need to flush in the very near future, so we might
	 * as well get it over with.
	 *
	 * The only question is whether to do a full or partial flush.
	 *
	 * We do a partial flush if requested and two extra conditions
	 * are met:
	 *
	 * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
	 *    we've always done all needed flushes to catch up to
	 *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
	 *    f->new_tlb_gen == 3, then we know that the flush needed to bring
	 *    us up to date for tlb_gen 3 is the partial flush we're
	 *    processing.
	 *
	 *    As an example of why this check is needed, suppose that there
	 *    are two concurrent flushes.  The first is a full flush that
	 *    changes context.tlb_gen from 1 to 2.  The second is a partial
	 *    flush that changes context.tlb_gen from 2 to 3.  If they get
	 *    processed on this CPU in reverse order, we'll see
	 *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
	 *    If we were to use __flush_tlb_one_user() and set local_tlb_gen
	 *    to 3, we'd break the invariant: we'd update local_tlb_gen above
	 *    1 without the full flush that's needed for tlb_gen 2.
	 *
	 * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
	 *    Partial TLB flushes are not all that much cheaper than full TLB
	 *    flushes, so it seems unlikely that it would be a performance win
	 *    to do a partial flush if that won't bring our TLB fully up to
	 *    date.  By doing a full flush instead, we can increase
	 *    local_tlb_gen all the way to mm_tlb_gen and we can probably
	 *    avoid another flush in the very near future.
	 */
	if (f->end != TLB_FLUSH_ALL &&
	    f->new_tlb_gen == local_tlb_gen + 1 &&
	    f->new_tlb_gen == mm_tlb_gen) {
		/* Partial flush */
		unsigned long addr;
		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

		addr = f->start;
		while (addr < f->end) {
			__flush_tlb_one_user(addr);
			addr += PAGE_SIZE;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
		trace_tlb_flush(reason, nr_pages);
	} else {
		/* Full flush. */
		local_flush_tlb();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		trace_tlb_flush(reason, TLB_FLUSH_ALL);
	}

	/* Both paths above update our state to mm_tlb_gen. */
	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}

static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
	const struct flush_tlb_info *f = info;

	flush_tlb_func_common(f, true, reason);
}

static void flush_tlb_func_remote(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info)
{
	cpumask_var_t lazymask;
	unsigned int cpu;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		/*
		 * This whole special case is confused.  UV has a "Broadcast
		 * Assist Unit", which seems to be a fancy way to send IPIs.
		 * Back when x86 used an explicit TLB flush IPI, UV was
		 * optimized to use its own mechanism.  These days, x86 uses
		 * smp_call_function_many(), but UV still uses a manual IPI,
		 * and that IPI's action is out of date -- it does a manual
		 * flush instead of calling flush_tlb_func_remote().  This
		 * means that the percpu tlb_gen variables won't be updated
		 * and we'll do pointless flushes on future context switches.
		 *
		 * Rather than hooking native_flush_tlb_others() here, I think
		 * that UV should be updated so that smp_call_function_many(),
		 * etc, are optimal on UV.
		 */
		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func_remote,
					       (void *)info, 1);
		return;
	}

	/*
	 * A temporary cpumask is used in order to skip sending IPIs
	 * to CPUs in lazy TLB state, while keeping them in mm_cpumask(mm).
	 * If the allocation fails, simply IPI every CPU in mm_cpumask.
	 */
	if (!alloc_cpumask_var(&lazymask, GFP_ATOMIC)) {
		smp_call_function_many(cpumask, flush_tlb_func_remote,
				       (void *)info, 1);
		return;
	}

	cpumask_copy(lazymask, cpumask);

	for_each_cpu(cpu, lazymask) {
		if (per_cpu(cpu_tlbstate.is_lazy, cpu))
			cpumask_clear_cpu(cpu, lazymask);
	}

	smp_call_function_many(lazymask, flush_tlb_func_remote,
			       (void *)info, 1);

	free_cpumask_var(lazymask);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

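/*
 * Worked example: flushing 30 pages costs roughly 30 * 100 ns = 3,000 ns
 * of INVLPGs and stays under the ceiling, so it is done page by page;
 * a 64-page range exceeds the ceiling and is turned into a single full
 * TLB flush instead (see flush_tlb_mm_range() below).
 */
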
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	int cpu;

	struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
		.mm = mm,
	};

	cpu = get_cpu();

	/* This is also a barrier that synchronizes with switch_mm(). */
	info.new_tlb_gen = inc_mm_tlb_gen(mm);

	/* Should we flush just the requested range? */
	if ((end != TLB_FLUSH_ALL) &&
	    !(vmflag & VM_HUGETLB) &&
	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
		info.start = start;
		info.end = end;
	} else {
		info.start = 0UL;
		info.end = TLB_FLUSH_ALL;
	}

	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);

	put_cpu();
}

void tlb_flush_remove_tables_local(void *arg)
{
	struct mm_struct *mm = arg;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm &&
			this_cpu_read(cpu_tlbstate.is_lazy)) {
		/*
		 * We're in lazy mode.  We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB.  Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
	}
}

void tlb_flush_remove_tables(struct mm_struct *mm)
{
	int cpu = get_cpu();
	/*
	 * XXX: this really only needs to be called for CPUs in lazy TLB mode.
	 */
	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		smp_call_function_many(mm_cpumask(mm), tlb_flush_remove_tables_local, (void *)mm, 1);

	put_cpu();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_one_kernel(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as for a user space task's flush; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.start = start;
		info.end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

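/*
 * Typical callers are the vmalloc/vfree paths, which unmap kernel
 * pages lazily and then need the stale translations gone (an
 * assumption about usage; see mm/vmalloc.c).
 */
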
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info info = {
		.mm = NULL,
		.start = 0UL,
		.end = TLB_FLUSH_ALL,
	};

	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &info);

	cpumask_clear(&batch->cpumask);

	put_cpu();
}

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

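/*
 * Assuming debugfs is mounted at /sys/kernel/debug, the knob created
 * below can be used like this:
 *
 *	# cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	33
 *	# echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */
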
static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);