// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/tlbflush.h>
#include <asm/switch_to.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

		/*
		 * .sp1 is cpu_current_top_of_stack. The init task never
		 * runs user code, but cpu_current_top_of_stack should still
		 * be well defined before the first context switch.
		 */
		.sp1 = TOP_OF_INIT_STACK,

		.io_bitmap_base	= IO_BITMAP_OFFSET_INVALID,
	},
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
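
/*
 * Note: __tss_limit_invalid tracks, per CPU, whether the TSS segment
 * limit currently covers the full I/O bitmap. A VMEXIT can silently
 * shrink the limit to 0x67; see the comment at the end of
 * native_tss_update_io_bitmap() below for how this gets repaired.
 */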
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif
	return fpu__copy(dst, src);
}
/*
 * Free thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	struct fpu *fpu = &t->fpu;

	if (test_thread_flag(TIF_IO_BITMAP))
		io_bitmap_exit(tsk);

	free_vm86(t);

	fpu__drop(fpu);
}
static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);
	else
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}
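
/*
 * Note the asymmetry above: a 32-bit (ia32) clone passes a pointer to a
 * struct user_desc describing a GDT entry, while a 64-bit clone passes
 * the new FS base address directly in the tls argument.
 */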
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		    unsigned long arg, struct task_struct *p, unsigned long tls)
{
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	int ret = 0;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

#ifdef CONFIG_X86_64
	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : current->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : current->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
	/*
	 * Clear all status flags including IF and set fixed bit. 64bit
	 * does not have this initialization as the frame does not contain
	 * flags. The flags consistency (especially vs. AC) is there
	 * ensured via objtool, which lacks 32bit support.
	 */
	frame->flags = X86_EFLAGS_FIXED;
#endif

	/* Kernel thread ? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

#ifdef CONFIG_X86_32
	task_user_gs(p) = get_user_gs(current_pt_regs());
#endif

	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS)
		ret = set_new_tls(p, tls);

	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		io_bitmap_share(p);

	return ret;
}
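
/*
 * The fork_frame set up above is what makes the first switch to the
 * child work: p->thread.sp points at the inactive_task_frame, so the
 * first switch_to() "returns" into ret_from_fork, which either calls
 * the kthread function or drops into the copied user pt_regs.
 */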
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear_all(&tsk->thread.fpu);
}
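
/*
 * flush_thread() runs on exec: hardware breakpoints, TLS entries and
 * FPU state belong to the old program and must not leak into the new
 * one.
 */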
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
	unsigned int val = PR_TSC_ENABLE;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;

	return put_user(val, (unsigned int __user *)adr);
}
int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
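
/*
 * Illustrative userspace usage of the interface above (a sketch, not
 * part of this file):
 *
 *	int mode;
 *	prctl(PR_GET_TSC, &mode);		// PR_TSC_ENABLE or PR_TSC_SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// subsequent RDTSC raises SIGSEGV
 */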
DEFINE_PER_CPU(u64, msr_misc_features_shadow);
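
/*
 * The shadow above mirrors MSR_MISC_FEATURES_ENABLES so the CPUID-fault
 * bit can be updated with a read-modify-write on the cached value
 * instead of an extra RDMSR on every toggle.
 */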
static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}
static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}
static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}
static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}
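
/*
 * These modes are reached via arch_prctl(2); illustrative userspace
 * usage (a sketch, not part of this file):
 *
 *	arch_prctl(ARCH_SET_CPUID, 0);	// CPUID now raises SIGSEGV
 *	arch_prctl(ARCH_GET_CPUID, 0);	// returns 1 if CPUID is enabled
 */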
/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(task_thread_info(current)->flags);
	}
}
#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}
static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming task's bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}
/**
 * tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
 */
void native_tss_update_io_bitmap(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	struct thread_struct *t = &current->thread;
	u16 *base = &tss->x86_tss.io_bitmap_base;

	if (!test_thread_flag(TIF_IO_BITMAP)) {
		native_tss_invalidate_io_bitmap();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
		*base = IO_BITMAP_OFFSET_VALID_ALL;
	} else {
		struct io_bitmap *iobm = t->io_bitmap;

		/*
		 * Only copy bitmap data when the sequence number differs. The
		 * update time is accounted to the incoming task.
		 */
		if (tss->io_bitmap.prev_sequence != iobm->sequence)
			tss_copy_io_bitmap(tss, iobm);

		/* Enable the bitmap */
		*base = IO_BITMAP_OFFSET_VALID_MAP;
	}
	/*
	 * Make sure that the TSS limit is covering the IO bitmap. It might have
	 * been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O
	 * access from user space to trigger a #GP because the bitmap is outside
	 * the TSS limit.
	 */
	refresh_tss_limit();
}
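
/*
 * Deferring the bitmap copy to the exit-to-user path, instead of doing
 * it in the context switch itself, means tasks that never call
 * ioperm()/iopl() pay nothing for it, and the sequence number check
 * above skips the memcpy when the same bitmap is still resident.
 */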
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif
#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);
void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}
/*
 * Logic is: First HT sibling enables SSBD for both siblings in the core
 * and last sibling to disable it, disables it for the whole core. This is
 * how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif
static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}
/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}
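
/*
 * Note the pattern above: the SSBD and STIBP bits are accumulated into
 * a single value and MSR_IA32_SPEC_CTRL is written at most once, since
 * the WRMSR is comparatively expensive.
 */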
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags. */
	return task_thread_info(tsk)->flags;
}
void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different. */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}
/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}
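
/*
 * Illustrative userspace path into the update above via prctl(2)
 * (a sketch, not part of this file):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);	// sets TIF_SSBD; the MSRs follow
 */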
static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	newval = cr4 ^ mask;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	unsigned long tifp, tifn;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);

	switch_to_bitmap(tifp);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}

	if ((tifp ^ tifn) & _TIF_SLD)
		switch_to_sld(tifn);
}
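
/*
 * __switch_to_xtra() is the slow path of the context switch: callers
 * check the relevant TIF work masks first, so the common case pays
 * nothing for the MSR and CR4 updates above.
 */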
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}
/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();
	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}
/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}
/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}
/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
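
/*
 * The polling handshake above matters: MONITOR arms the write monitor
 * on this thread's flags word, so a remote set_tsk_need_resched() wakes
 * the MWAIT without an IPI, and current_set_polling_and_test() closes
 * the race where the reschedule arrives before MONITOR is armed.
 */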
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}
void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}
void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option idle=halt is given, halt is
		 * forced to be used for CPU idle. In that case the CPU
		 * C2/C3 states won't be entered. The cpuidle driver can
		 * still be loaded; it consults boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * With idle=nomwait, MWAIT is disabled for the CPU C2/C3
		 * states; only boot_option_idle_override is set, the idle
		 * routine itself is left alone.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}
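
/*
 * brk ends up at a random page-aligned offset within a 32 MiB
 * (0x02000000 byte) window above mm->brk.
 */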
/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}
long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long cpuid_enabled)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, cpuid_enabled);
	}

	return -EINVAL;
}