// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/api.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>
#include <asm/unwind.h>

#include "process.h"
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

#ifdef CONFIG_X86_32
		.sp1 = TOP_OF_INIT_STACK,

		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
#endif
		.io_bitmap_base	= IO_BITMAP_OFFSET_INVALID,
	 },
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif
	/* Drop the copied pointer to current's fpstate */
	dst->thread.fpu.fpstate = NULL;

	return 0;
}
#ifdef CONFIG_X86_64
void arch_release_task_struct(struct task_struct *tsk)
{
	if (fpu_state_size_dynamic())
		fpstate_free(&tsk->thread.fpu);
}
#endif
/*
 * Free thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	struct fpu *fpu = &t->fpu;

	if (test_thread_flag(TIF_IO_BITMAP))
		io_bitmap_exit(tsk);

	free_vm86(t);

	fpu__drop(fpu);
}
static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);
	else
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}
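
/*
 * Illustrative userspace view (not part of this file): @tls is the raw
 * CLONE_SETTLS argument of clone(2). A 64-bit caller passes the new FS
 * base directly, e.g. via the glibc wrapper:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	clone(fn, stack_top, CLONE_VM | CLONE_SETTLS | SIGCHLD, arg,
 *	      &parent_tid, tls_base, &child_tid);
 *
 * while a 32-bit (ia32) caller passes a struct user_desc pointer instead,
 * which is why the dispatch above keys off in_ia32_syscall().
 */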
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long sp = args->stack;
	unsigned long arg = args->stack_size;
	unsigned long tls = args->tls;
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	int ret = 0;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = encode_frame_pointer(childregs);
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	p->thread.iopl_warn = 0;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

#ifdef CONFIG_X86_64
	current_save_fsgs();
	p->thread.fsindex = current->thread.fsindex;
	p->thread.fsbase = current->thread.fsbase;
	p->thread.gsindex = current->thread.gsindex;
	p->thread.gsbase = current->thread.gsbase;

	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
	/*
	 * Clear all status flags including IF and set fixed bit. 64bit
	 * does not have this initialization as the frame does not contain
	 * flags. The flags consistency (especially vs. AC) is there
	 * ensured via objtool, which lacks 32bit support.
	 */
	frame->flags = X86_EFLAGS_FIXED;
#endif

	fpu_clone(p, clone_flags);

	/* Kernel thread ? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		p->thread.pkru = pkru_get_init_value();
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	/*
	 * Clone current's PKRU value from hardware. tsk->thread.pkru
	 * is only valid when scheduled out.
	 */
	p->thread.pkru = read_pkru();

	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

#ifdef CONFIG_X86_32
	task_user_gs(p) = get_user_gs(current_pt_regs());
#endif

	if (unlikely(p->flags & PF_IO_WORKER)) {
		/*
		 * An IO thread is a user space thread, but it doesn't
		 * return to ret_from_fork().
		 *
		 * In order to indicate that to tools like gdb,
		 * we reset the stack and instruction pointers.
		 *
		 * It does the same kernel frame setup to return to a kernel
		 * function that a kernel thread does.
		 */
		childregs->sp = 0;
		childregs->ip = 0;
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS)
		ret = set_new_tls(p, tls);

	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		io_bitmap_share(p);

	return ret;
}
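
/*
 * Sketch of what copy_thread() built (illustrative; field layout per
 * struct fork_frame in asm/switch_to.h):
 *
 *	p->thread.sp -> fork_frame
 *	                  .frame.ret_addr = ret_from_fork
 *	                  .regs           = child's pt_regs (user state)
 *
 * When the scheduler first switches to the child, the switch code loads
 * p->thread.sp and "returns" through frame->ret_addr, i.e. into
 * ret_from_fork(), which finishes setup and exits to user mode (or calls
 * the kthread function for kernel threads).
 */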
static void pkru_flush_thread(void)
{
	/*
	 * If PKRU is enabled the default PKRU value has to be loaded into
	 * the hardware right here (similar to context switch).
	 */
	pkru_write_default();
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu_flush_thread();
	pkru_flush_thread();
}
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
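
/*
 * Illustrative userspace usage (not part of this file): the two helpers
 * above back prctl(PR_GET_TSC)/prctl(PR_SET_TSC), e.g.:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
 *	// RDTSC in this task now raises SIGSEGV: disable_TSC() sets
 *	// CR4.TSD, and __switch_to_xtra() keeps it in sync on switches.
 */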
DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}
static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}
static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}
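
/*
 * Illustrative userspace usage (not part of this file): these two back
 * arch_prctl(2) on hardware with CPUID faulting, e.g.:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);	// CPUID now faults
 *	long on = syscall(SYS_arch_prctl, ARCH_GET_CPUID, 0);
 *
 * set_cpuid_mode() returns -ENODEV when X86_FEATURE_CPUID_FAULT is absent.
 */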
/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(read_thread_flags());
	}
}
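
/*
 * Illustrative trigger (not part of this file): the TIF_SSBD handling above
 * implements the "disable until exec" speculation mode, e.g.:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE_NOEXEC, 0, 0);
 *	execve(path, argv, envp);	// SSBD is dropped across this exec
 */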
#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}
static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming task's bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap
	 * and a pointer to the bitmap itself.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}
/**
 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
 */
void native_tss_update_io_bitmap(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	struct thread_struct *t = &current->thread;
	u16 *base = &tss->x86_tss.io_bitmap_base;

	if (!test_thread_flag(TIF_IO_BITMAP)) {
		native_tss_invalidate_io_bitmap();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
		*base = IO_BITMAP_OFFSET_VALID_ALL;
	} else {
		struct io_bitmap *iobm = t->io_bitmap;

		/*
		 * Only copy bitmap data when the sequence number differs. The
		 * update time is accounted to the incoming task.
		 */
		if (tss->io_bitmap.prev_sequence != iobm->sequence)
			tss_copy_io_bitmap(tss, iobm);

		/* Enable the bitmap */
		*base = IO_BITMAP_OFFSET_VALID_MAP;
	}

	/*
	 * Make sure that the TSS limit is covering the IO bitmap. It might have
	 * been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O
	 * access from user space to trigger a #GP because the bitmap is outside
	 * the TSS limit.
	 */
	refresh_tss_limit();
}
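
/*
 * Illustrative trigger (not part of this file): an I/O bitmap is attached
 * to a task by ioperm(2) (or via iopl(2) emulation), e.g. as root:
 *
 *	#include <sys/io.h>
 *
 *	ioperm(0x70, 2, 1);	// permit ports 0x70-0x71 (RTC index/data)
 *
 * That sets TIF_IO_BITMAP; the bitmap is then copied into the per-CPU TSS
 * lazily here, on the next exit to user mode on whatever CPU the task runs.
 */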
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif

#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);
void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}
/*
 * Logic is: First HT sibling enables SSBD for both siblings in the core
 * and last sibling to disable it, disables it for the whole core. This is
 * how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}
/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}
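
/*
 * Worked example (illustrative): with conditional STIBP enabled, if the
 * incoming task has TIF_SSBD and TIF_SPEC_IB set and the outgoing one had
 * neither, both bits are in tif_diff, updmsr becomes true, and a single
 *
 *	wrmsrl(MSR_IA32_SPEC_CTRL,
 *	       x86_spec_ctrl_base | SPEC_CTRL_SSBD | SPEC_CTRL_STIBP);
 *
 * covers both mitigations, rather than one WRMSR per flag.
 */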
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags */
	return read_task_thread_flags(tsk);
}
void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}
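
/*
 * Note on the ~tif trick (illustrative): __speculation_ctrl_update() only
 * acts on bits of tifp ^ tifn. Passing ~tif as the "previous" flags makes
 *
 *	tif_diff = ~tif ^ tif;	// all ones: every flag appears changed
 *
 * so every mitigation is re-evaluated against the current flags, which is
 * exactly what a forced update from prctl()/seccomp needs.
 */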
static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	newval = cr4 ^ mask;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	unsigned long tifp, tifn;

	tifn = read_task_thread_flags(next_p);
	tifp = read_task_thread_flags(prev_p);

	switch_to_bitmap(tifp);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}

	if ((tifp ^ tifn) & _TIF_SLD)
		switch_to_sld(tifn);
}
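
/*
 * Worked example for the BLOCKSTEP update above (illustrative): the flag
 * bit is moved between bit positions with pure arithmetic, no branch:
 *
 *	msk = tifn & _TIF_BLOCKSTEP;		// 1 << TIF_BLOCKSTEP or 0
 *	debugctl |= (msk >> TIF_BLOCKSTEP)	// now 1 or 0
 *			<< DEBUGCTLMSR_BTF_SHIFT;	// into the BTF position
 *
 * so MSR_IA32_DEBUGCTLMSR.BTF ends up tracking the next task's flag.
 */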
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}
/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	raw_safe_halt();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif
void __noreturn stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 *
	 * Test the CPUID bit directly because the machine might've cleared
	 * X86_FEATURE_SME due to cmdline options.
	 */
	if (cpuid_eax(0x8000001f) & BIT(0))
		native_wbinvd();
	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}
/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 *
 * XXX this function is completely buggered vs RCU and tracing.
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	raw_local_irq_disable();
	tick_broadcast_exit();
	raw_local_irq_enable();
}
/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}
/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			raw_local_irq_enable();
	} else {
		raw_local_irq_enable();
	}
	__current_clr_polling();
}
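
/*
 * The MONITOR/MWAIT idiom above in schematic form (illustrative):
 *
 *	__monitor(&current_thread_info()->flags, 0, 0);	// arm the monitor
 *	if (!need_resched())
 *		__sti_mwait(0, 0);	// sleep until the armed line is written
 *
 * A remote set_tsk_need_resched() writes the monitored flags word, so the
 * CPU wakes without an IPI; current_set_polling_and_test() is what lets the
 * scheduler know the wakeup IPI can be skipped.
 */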
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}
void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}
void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected, the K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
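
/*
 * Usage (kernel command line), e.g.:
 *
 *	idle=poll	# busy-poll in idle: lowest latency, highest power
 *	idle=halt	# force HLT, no MWAIT-based C-states
 *	idle=nomwait	# keep cpuidle but avoid MWAIT for C2/C3
 */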
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}
/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long __get_wchan(struct task_struct *p)
{
	struct unwind_state state;
	unsigned long addr = 0;

	if (!try_get_task_stack(p))
		return 0;

	for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		if (in_sched_functions(addr))
			continue;
		break;
	}

	put_task_stack(p);

	return addr;
}
long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long arg2)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, arg2);
	case ARCH_GET_XCOMP_SUPP:
	case ARCH_GET_XCOMP_PERM:
	case ARCH_REQ_XCOMP_PERM:
	case ARCH_GET_XCOMP_GUEST_PERM:
	case ARCH_REQ_XCOMP_GUEST_PERM:
		return fpu_xstate_prctl(task, option, arg2);
	}

	return -EINVAL;
}
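
/*
 * Illustrative caller view (not part of this file): the ARCH_*_XCOMP_*
 * options route to the dynamic-xstate permission code, e.g. a process
 * requesting AMX tile data before using it:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
 *
 * A later allocation then grows the task's fpstate, which is what
 * arch_release_task_struct() ultimately frees via fpstate_free().
 */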