/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *      Andi Kleen.
 *
 *      CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif
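
/*
 * Per-CPU scratch slot: the 64-bit SYSCALL entry path stashes the user
 * stack pointer here while it switches to the kernel stack (see the
 * SYSCALL entry code in entry_64.S).
 */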
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        show_iret_regs(regs);

        if (regs->orig_ax != -1)
                pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
        else
                pr_cont("\n");

        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        if (!all)
                return;

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
               es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
               cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))) {
                printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
                       d0, d1, d2);
                printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
                       d3, d6, d7);
        }

        if (boot_cpu_has(X86_FEATURE_OSPKE))
                printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}
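
/*
 * Free any architecture-specific thread data structures.  On x86-64 there
 * is nothing left to free by this point, so this only sanity-checks that
 * the dead task did not leave an LDT behind.
 */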
void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
                                dead_task->mm->context.ldt->nr_entries);
                        BUG();
                }
#endif
        }
}

enum which_selector {
        FS,
        GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
                                             unsigned short selector,
                                             enum which_selector which)
{
        if (likely(selector == 0)) {
                /*
                 * On Intel (without X86_BUG_NULL_SEG), the segment base could
                 * be the pre-existing saved base or it could be zero.  On AMD
                 * (with X86_BUG_NULL_SEG), the segment base could be almost
                 * anything.
                 *
                 * This branch is very hot (it's hit twice on almost every
                 * context switch between 64-bit programs), and avoiding
                 * the RDMSR helps a lot, so we just assume that whatever
                 * value is already saved is correct.  This matches historical
                 * Linux behavior, so it won't break existing applications.
                 *
                 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
                 * report that the base is zero, it needs to actually be zero:
                 * see the corresponding logic in load_seg_legacy.
                 */
        } else {
                /*
                 * If the selector is 1, 2, or 3, then the base is zero on
                 * !X86_BUG_NULL_SEG CPUs and could be anything on
                 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
                 * has never attempted to preserve the base across context
                 * switches.
                 *
                 * If selector > 3, then it refers to a real segment, and
                 * saving the base isn't necessary.
                 */
                if (which == FS)
                        prev_p->thread.fsbase = 0;
                else
                        prev_p->thread.gsbase = 0;
        }
}
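
/*
 * Snapshot the outgoing task's FS/GS selectors and, via save_base_legacy(),
 * decide whether the bases cached in thread_struct can still be trusted.
 */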
static __always_inline void save_fsgs(struct task_struct *task)
{
        savesegment(fs, task->thread.fsindex);
        savesegment(gs, task->thread.gsindex);
        save_base_legacy(task, task->thread.fsindex, FS);
        save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and
 * current->thread.gsbase may not match the corresponding CPU registers (see
 * save_base_legacy()).  KVM wants an efficient way to save and restore
 * FSBASE and GSBASE.  When FSGSBASE extensions are enabled, this will have
 * to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
        save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif
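
/*
 * KVM invokes save_fsgs_for_kvm() on its VM-entry path so that the host's
 * FS/GS state is accurately cached in thread_struct before the guest can
 * clobber the live registers (at the time of writing, from KVM's host-state
 * save logic).
 */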

static __always_inline void loadseg(enum which_selector which,
                                    unsigned short sel)
{
        if (which == FS)
                loadsegment(fs, sel);
        else
                load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
                                            unsigned long prev_base,
                                            unsigned short next_index,
                                            unsigned long next_base,
                                            enum which_selector which)
{
        if (likely(next_index <= 3)) {
                /*
                 * The next task is using 64-bit TLS, is not using this
                 * segment at all, or is having fun with arcane CPU features.
                 */
                if (next_base == 0) {
                        /*
                         * Nasty case: on AMD CPUs, we need to forcibly zero
                         * the base.
                         */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                loadseg(which, __USER_DS);
                                loadseg(which, next_index);
                        } else {
                                /*
                                 * We could try to exhaustively detect cases
                                 * under which we can skip the segment load,
                                 * but there's really only one case that matters
                                 * for performance: if both the previous and
                                 * next states are fully zeroed, we can skip
                                 * the load.
                                 *
                                 * (This assumes that prev_base == 0 has no
                                 * false positives.  This is the case on
                                 * Intel-style CPUs.)
                                 */
                                if (likely(prev_index | next_index | prev_base))
                                        loadseg(which, next_index);
                        }
                } else {
                        if (prev_index != next_index)
                                loadseg(which, next_index);
                        wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
                               next_base);
                }
        } else {
                /*
                 * The next task is using a real segment.  Loading the selector
                 * is sufficient.
                 */
                loadseg(which, next_index);
        }
}
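
/*
 * Set up the kernel stack for a new task: childregs holds the user-mode
 * register state and fork_frame the callee-saved registers that the switch
 * code will restore, so the child wakes up in ret_from_fork.  For kernel
 * threads, frame->bx and frame->r12 carry the thread function and its
 * argument instead.
 */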
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        int err;
        struct pt_regs *childregs;
        struct fork_frame *fork_frame;
        struct inactive_task_frame *frame;
        struct task_struct *me = current;

        childregs = task_pt_regs(p);
        fork_frame = container_of(childregs, struct fork_frame, regs);
        frame = &fork_frame->frame;
        frame->bp = 0;
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp = (unsigned long) fork_frame;
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
        savesegment(fs, p->thread.fsindex);
        p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                frame->bx = sp;         /* function */
                frame->r12 = arg;
                return 0;
        }
        frame->bx = 0;
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (in_ia32_syscall())
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)tls, 0);
                else
#endif
                        err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}
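
/*
 * Reset the segment registers and rewrite the IRET frame in pt_regs so that
 * the next return to user mode enters the new program at new_ip with the
 * stack at new_sp.  On X86_BUG_NULL_SEG CPUs, FS and GS are first loaded
 * with a real selector so that the subsequent NULL loads also clear the
 * cached bases.
 */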
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        WARN_ON_ONCE(regs != current_pt_regs());

        if (static_cpu_has(X86_BUG_NULL_SEG)) {
                /* Loading zero below won't clear the base. */
                loadsegment(fs, __USER_DS);
                load_gs_index(__USER_DS);
        }

        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);

        regs->ip        = new_ip;
        regs->sp        = new_sp;
        regs->cs        = _cs;
        regs->ss        = _ss;
        regs->flags     = X86_EFLAGS_IF;
        force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);
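
/*
 * start_thread() is the 64-bit entry point used by binary-format loaders:
 * e.g. the ELF loader calls it at the end of exec to hand control to the
 * freshly mapped image.
 */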

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif

/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here.  Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);

        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);

        switch_fpu_prepare(prev_fpu, cpu);

        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        save_fsgs(prev_p);

        /*
         * Load TLS before restoring any segments so that segment loads
         * reference the correct GDT entries.
         */
        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here.  This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them, and it must
         * be done before fpu__restore(), so the TS bit is up to
         * date.
         */
        arch_end_context_switch(next_p);

        /* Switch DS and ES.
         *
         * Reading them only returns the selectors, but writing them (if
         * nonzero) loads the full descriptor from the GDT or LDT.  The
         * LDT for next is loaded in switch_mm, and the GDT is loaded
         * unconditionally.
         *
         * We therefore need to write new values to the segment
         * registers on every context switch unless both the new and old
         * values are zero.
         *
         * Note that we don't need to do anything for CS and SS, as
         * those are saved and restored as part of pt_regs.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        load_seg_legacy(prev->fsindex, prev->fsbase,
                        next->fsindex, next->fsbase, FS);
        load_seg_legacy(prev->gsindex, prev->gsbase,
                        next->gsindex, next->gsbase, GS);

        switch_fpu_finish(next_fpu, cpu);

        /*
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);
        this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

        /* Reload sp0. */
        update_task_stack(next_p);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN_PV
        /*
         * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
         * current_pt_regs()->flags may not match the current task's
         * intended IOPL.  We need to switch it manually.
         */
        if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
                     prev->iopl != next->iopl))
                xen_set_iopl_mask(next->iopl);
#endif

        if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
                /*
                 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
                 * does not update the cached descriptor.  As a result, if we
                 * do SYSRET while SS is NULL, we'll end up in user mode with
                 * SS apparently equal to __USER_DS but actually unusable.
                 *
                 * The straightforward workaround would be to fix it up just
                 * before SYSRET, but that would slow down the system call
                 * fast paths.  Instead, we ensure that SS is never NULL in
                 * system call context.  We do this by replacing NULL SS
                 * selectors at every context switch.  SYSCALL sets up a valid
                 * SS, so the only way to get NULL is to re-enter the kernel
                 * from CPL 3 through an interrupt.  Since that can't happen
                 * in the same task as a running syscall, we are guaranteed to
                 * context switch between every interrupt vector entry and a
                 * subsequent SYSRET.
                 *
                 * We read SS first because SS reads are much faster than
                 * writes.  Out of caution, we force SS to __KERNEL_DS even if
                 * it previously had a different non-NULL value.
                 */
                unsigned short ss_sel;
                savesegment(ss, ss_sel);
                if (ss_sel != __KERNEL_DS)
                        loadsegment(ss, __KERNEL_DS);
        }

        /* Load the Intel cache allocation PQR MSR. */
        intel_rdt_sched_in();

        return prev_p;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);
        /* Pretend that this comes from a 64bit execve */
        task_pt_regs(current)->orig_ax = __NR_execve;
        current_thread_info()->status &= ~TS_COMPAT;

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /* TBD: overwrites user setup.  Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad.  The main problem is just that
           32bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
        clear_thread_flag(TIF_IA32);
        set_thread_flag(TIF_X32);
        if (current->mm)
                current->mm->context.ia32_compat = TIF_X32;
        current->personality &= ~READ_IMPLIES_EXEC;
        /*
         * in_compat_syscall() uses the presence of the x32 syscall bit
         * flag to determine compat status.  The x86 mmap() code relies on
         * the syscall bitness so set x32 syscall bit right here to make
         * in_compat_syscall() work during exec().
         *
         * Pretend to come from an x32 execve.
         */
        task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
        current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
        set_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_X32);
        if (current->mm)
                current->mm->context.ia32_compat = TIF_IA32;
        current->personality |= force_personality32;
        /* Prepare the first "return" to user space */
        task_pt_regs(current)->orig_ax = __NR_ia32_execve;
        current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_ADDR32);

        if (x32)
                __set_personality_x32();
        else
                __set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
        int ret;

        ret = map_vdso_once(image, addr);
        if (ret)
                return ret;

        return (long)image->size;
}
#endif

long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (option) {
        case ARCH_SET_GS:
                if (arg2 >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.gsindex = 0;
                task->thread.gsbase = arg2;
                if (doit) {
                        load_gs_index(0);
                        ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2);
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (arg2 >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.fsindex = 0;
                task->thread.fsbase = arg2;
                if (doit) {
                        /* set the selector to 0 to not confuse __switch_to */
                        loadsegment(fs, 0);
                        ret = wrmsrl_safe(MSR_FS_BASE, arg2);
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fsbase;
                ret = put_user(base, (unsigned long __user *)arg2);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
                        base = task->thread.gsbase;
                ret = put_user(base, (unsigned long __user *)arg2);
                break;
        }

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
        case ARCH_MAP_VDSO_X32:
                return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case ARCH_MAP_VDSO_32:
                return prctl_map_vdso(&vdso_image_32, arg2);
# endif
        case ARCH_MAP_VDSO_64:
                return prctl_map_vdso(&vdso_image_64, arg2);
#endif

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
        long ret;

        ret = do_arch_prctl_64(current, option, arg2);
        if (ret == -EINVAL)
                ret = do_arch_prctl_common(current, option, arg2);

        return ret;
}
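
/*
 * Userspace reaches this syscall through the arch_prctl(2) wrapper.  As an
 * illustration only (this is not kernel code), a minimal caller might look
 * like:
 *
 *      #include <asm/prctl.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      static int set_fs_base(unsigned long base)
 *      {
 *              // set_fs_base() is a hypothetical helper, not a libc API
 *              return syscall(SYS_arch_prctl, ARCH_SET_FS, base);
 *      }
 *
 * A successful ARCH_SET_FS both caches the base in thread_struct and, for
 * the current task, writes MSR_FS_BASE immediately.
 */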
706 #ifdef CONFIG_IA32_EMULATION
707 COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
709 return do_arch_prctl_common(current, option, arg2);

unsigned long KSTK_ESP(struct task_struct *task)
{
        return task_pt_regs(task)->sp;
}