/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt.h>
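
/*
 * Per-CPU scratch slot: the 64-bit SYSCALL entry path stashes the user
 * RSP here while it sets up the kernel stack (see entry_64.S).
 */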
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
                        regs->sp, regs->flags);
        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);
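
        /*
         * The data segment selectors aren't captured in pt_regs on x86-64,
         * so read the live registers directly.
         */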
        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));
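
        /*
         * The FS/GS base addresses live in MSRs, independent of the selector
         * values; MSR_KERNEL_GS_BASE holds the currently inactive GS base
         * (the pair is exchanged by SWAPGS).
         */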
        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
                        es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
                        cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))) {
                printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
                       d0, d1, d2);
                printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
                       d3, d6, d7);
        }

        if (boot_cpu_has(X86_FEATURE_OSPKE))
                printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
                                dead_task->mm->context.ldt->size);
                        BUG();
                }
#endif
        }
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        int err;
        struct pt_regs *childregs;
        struct fork_frame *fork_frame;
        struct inactive_task_frame *frame;
        struct task_struct *me = current;

        p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        childregs = task_pt_regs(p);
        fork_frame = container_of(childregs, struct fork_frame, regs);
        frame = &fork_frame->frame;
        frame->bp = 0;
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp = (unsigned long) fork_frame;
        p->thread.io_bitmap_ptr = NULL;
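
        /*
         * Inherit the parent's segment state.  If a selector is nonzero,
         * the base comes from the descriptor table rather than the saved
         * MSR base, so only copy the base when the selector is zero.
         */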
        savesegment(gs, p->thread.gsindex);
        p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
        savesegment(fs, p->thread.fsindex);
        p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                frame->bx = sp;         /* function */
                frame->r12 = arg;
                return 0;
        }
        frame->bx = 0;
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (in_ia32_syscall())
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)tls, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}
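
/*
 * Note: the new task first runs in ret_from_fork; for a user thread it
 * then drops through fork_frame into childregs, so the zeroed
 * childregs->ax above is what makes fork() return 0 in the child.
 */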

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip        = new_ip;
        regs->sp        = new_sp;
        regs->cs        = _cs;
        regs->ss        = _ss;
        regs->flags     = X86_EFLAGS_IF;
        force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}
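
/*
 * Entry state for compat (ia32 and x32) user threads: x32 keeps the
 * 64-bit code segment, while ia32 tasks use __USER32_CS.
 */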
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here.  Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
        unsigned prev_fsindex, prev_gsindex;
        fpu_switch_t fpu_switch;
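
        /*
         * switch_fpu_prepare() saves the previous task's FPU state and
         * decides how the next task's state will be loaded; the matching
         * switch_fpu_finish() below completes the restore.
         */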
        fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        savesegment(fs, prev_fsindex);
        savesegment(gs, prev_gsindex);

        /*
         * Load TLS before restoring any segments so that segment loads
         * reference the correct GDT entries.
         */
        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here.  This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them, and it must
         * be done before fpu__restore(), so the TS bit is up to
         * date.
         */
        arch_end_context_switch(next_p);

        /* Switch DS and ES.
         *
         * Reading them only returns the selectors, but writing them (if
         * nonzero) loads the full descriptor from the GDT or LDT.  The
         * LDT for next is loaded in switch_mm, and the GDT is loaded
         * above.
         *
         * We therefore need to write new values to the segment
         * registers on every context switch unless both the new and old
         * values are zero.
         *
         * Note that we don't need to do anything for CS and SS, as
         * those are saved and restored as part of pt_regs.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /*
         * Switch FS and GS.
         *
         * These are even more complicated than DS and ES: they have
         * 64-bit bases that are controlled by arch_prctl.  The bases
         * don't necessarily match the selectors, as user code can do
         * any number of things to cause them to be inconsistent.
         *
         * We don't promise to preserve the bases if the selectors are
         * nonzero.  We also don't promise to preserve the base if the
         * selector is zero and the base doesn't match whatever was
         * most recently passed to ARCH_SET_FS/GS.  (If/when the
         * FSGSBASE instructions are enabled, we'll need to offer
         * stronger guarantees.)
         *
         * As an invariant,
         * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
         * impossible.
         */
        if (next->fsindex) {
                /* Loading a nonzero value into FS sets the index and base. */
                loadsegment(fs, next->fsindex);
        } else {
                if (next->fsbase) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_fsindex)
                                loadsegment(fs, 0);
                        wrmsrl(MSR_FS_BASE, next->fsbase);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                /*
                                 * We don't know the previous base and can't
                                 * find out without RDMSR.  Forcibly clear it.
                                 */
                                loadsegment(fs, __USER_DS);
                                loadsegment(fs, 0);
                        } else {
                                /*
                                 * If the previous index is zero and ARCH_SET_FS
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
                                if (prev->fsbase || prev_fsindex)
                                        loadsegment(fs, 0);
                        }
                }
        }

        /*
         * Save the old state and preserve the invariant.
         * NB: if prev_fsindex == 0, then we can't reliably learn the base
         * without RDMSR because Intel user code can zero it without telling
         * us and AMD user code can program any 32-bit value without telling
         * us.
         */
        if (prev_fsindex)
                prev->fsbase = 0;
        prev->fsindex = prev_fsindex;

        if (next->gsindex) {
                /* Loading a nonzero value into GS sets the index and base. */
                load_gs_index(next->gsindex);
        } else {
                if (next->gsbase) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_gsindex)
                                load_gs_index(0);
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                /*
                                 * We don't know the previous base and can't
                                 * find out without RDMSR.  Forcibly clear it.
                                 *
                                 * This contains a pointless SWAPGS pair.
                                 * Fixing it would involve an explicit check
                                 * for Xen or a new pvop.
                                 */
                                load_gs_index(__USER_DS);
                                load_gs_index(0);
                        } else {
                                /*
                                 * If the previous index is zero and ARCH_SET_GS
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
                                if (prev->gsbase || prev_gsindex)
                                        load_gs_index(0);
                        }
                }
        }

        /*
         * Save the old state and preserve the invariant.
         * NB: if prev_gsindex == 0, then we can't reliably learn the base
         * without RDMSR because Intel user code can zero it without telling
         * us and AMD user code can program any 32-bit value without telling
         * us.
         */
        if (prev_gsindex)
                prev->gsbase = 0;
        prev->gsindex = prev_gsindex;

        switch_fpu_finish(next_fpu, fpu_switch);

        /*
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);

        /* Reload esp0 and ss1.  This changes current_thread_info(). */
        load_sp0(tss, next);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
        /*
         * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
         * current_pt_regs()->flags may not match the current task's
         * intended IOPL.  We need to switch it manually.
         */
        if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
                     prev->iopl != next->iopl))
                xen_set_iopl_mask(next->iopl);
#endif

        if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
                /*
                 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
                 * does not update the cached descriptor.  As a result, if we
                 * do SYSRET while SS is NULL, we'll end up in user mode with
                 * SS apparently equal to __USER_DS but actually unusable.
                 *
                 * The straightforward workaround would be to fix it up just
                 * before SYSRET, but that would slow down the system call
                 * fast paths.  Instead, we ensure that SS is never NULL in
                 * system call context.  We do this by replacing NULL SS
                 * selectors at every context switch.  SYSCALL sets up a valid
                 * SS, so the only way to get NULL is to re-enter the kernel
                 * from CPL 3 through an interrupt.  Since that can't happen
                 * in the same task as a running syscall, we are guaranteed to
                 * context switch between every interrupt vector entry and a
                 * subsequent SYSRET.
                 *
                 * We read SS first because SS reads are much faster than
                 * writes.  Out of caution, we force SS to __KERNEL_DS even if
                 * it previously had a different non-NULL value.
                 */
                unsigned short ss_sel;
                savesegment(ss, ss_sel);
                if (ss_sel != __KERNEL_DS)
                        loadsegment(ss, __KERNEL_DS);
        }

        /* Load the Intel cache allocation PQR MSR. */
        intel_rdt_sched_in();

        return prev_p;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32-bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
        /* inherit personality from parent */

        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_ADDR32);

        /* Mark the associated mm as containing 32-bit tasks. */
        if (x32) {
                clear_thread_flag(TIF_IA32);
                set_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_X32;
                current->personality &= ~READ_IMPLIES_EXEC;
                /* in_compat_syscall() uses the presence of the x32
                   syscall bit flag to determine compat status */
                current->thread.status &= ~TS_COMPAT;
        } else {
                set_thread_flag(TIF_IA32);
                clear_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_IA32;
                current->personality |= force_personality32;
                /* Prepare the first "return" to user space */
                current->thread.status |= TS_COMPAT;
        }
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
        int ret;

        ret = map_vdso_once(image, addr);
        if (ret)
                return ret;

        return (long)image->size;
}
#endif

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.gsindex = 0;
                task->thread.gsbase = addr;
                if (doit) {
                        load_gs_index(0);
                        ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.fsindex = 0;
                task->thread.fsbase = addr;
                if (doit) {
                        /* set the selector to 0 to not confuse __switch_to */
                        loadsegment(fs, 0);
                        ret = wrmsrl_safe(MSR_FS_BASE, addr);
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fsbase;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
                        base = task->thread.gsbase;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
        case ARCH_MAP_VDSO_X32:
                return prctl_map_vdso(&vdso_image_x32, addr);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case ARCH_MAP_VDSO_32:
                return prctl_map_vdso(&vdso_image_32, addr);
# endif
        case ARCH_MAP_VDSO_64:
                return prctl_map_vdso(&vdso_image_64, addr);
#endif

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
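
/*
 * Userspace reaches do_arch_prctl() through the arch_prctl(2) syscall
 * below.  As a rough sketch (userspace side, error handling omitted),
 * a threading library would set its TLS base like this:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *
 * Note that ARCH_GET_FS/GS treat addr as a pointer: the current base is
 * written back with put_user() rather than returned.
 */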
long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}

unsigned long KSTK_ESP(struct task_struct *task)
{
        return task_pt_regs(task)->sp;
}