// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/resctrl_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif
/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);
	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
		       fs, shadowgs);
		return;
	}
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04lx DS: %04x ES: %04x CR0: %016lx\n",
	       regs->cs, ds, es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n",
	       cr2, cr3, cr4);
	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}
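
/*
 * Called when the last reference to a dead task is dropped: by this point
 * the exiting task's LDT should already have been torn down, so finding one
 * is a bug.
 */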
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->nr_entries);
			BUG();
		}
#endif
	}
}
enum which_selector {
	FS,
	GS
};
/*
 * Out of line to be protected from kprobes. It is not used on Xen
 * paravirt. When paravirt support is needed, it needs to be renamed
 * with native_ prefix.
 */
static noinline unsigned long __rdgsbase_inactive(void)
{
	unsigned long gsbase;

	lockdep_assert_irqs_disabled();

	native_swapgs();
	gsbase = rdgsbase();
	native_swapgs();

	return gsbase;
}
NOKPROBE_SYMBOL(__rdgsbase_inactive);
/*
 * Out of line to be protected from kprobes. It is not used on Xen
 * paravirt. When paravirt support is needed, it needs to be renamed
 * with native_ prefix.
 */
static noinline void __wrgsbase_inactive(unsigned long gsbase)
{
	lockdep_assert_irqs_disabled();

	native_swapgs();
	wrgsbase(gsbase);
	native_swapgs();
}
NOKPROBE_SYMBOL(__wrgsbase_inactive);
/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero. On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct. This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}
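
/*
 * Save the outgoing task's FS/GS selectors and reconcile its saved bases
 * (see save_base_legacy() above).
 */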
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}
#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and
 * current->thread.gsbase may not match the corresponding CPU registers
 * (see save_base_legacy()). KVM wants an efficient way to save and restore
 * FSBASE and GSBASE.
 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif
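
/* Load a user selector into %fs or %gs; %gs must go through load_gs_index(). */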
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}
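
/*
 * Restore FS or GS for the incoming task on non-FSGSBASE CPUs, skipping
 * redundant selector loads and MSR writes where that is provably safe.
 */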
static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives. This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment. Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}
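
/* Restore both FS and GS segment state for the incoming thread. */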
static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
					      struct thread_struct *next)
{
	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);
}
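
/*
 * Decode a non-running task's segment base from its selector: TLS selectors
 * are resolved against the task's saved TLS array, LDT selectors against
 * its LDT.
 */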
static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
					    unsigned short selector)
{
	unsigned short idx = selector >> 3;
	unsigned long base;

	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
		if (unlikely(idx >= GDT_ENTRIES))
			return 0;

		/*
		 * There are no user segments in the GDT with nonzero bases
		 * other than the TLS segments.
		 */
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			return 0;

		idx -= GDT_ENTRY_TLS_MIN;
		base = get_desc_base(&task->thread.tls_array[idx]);
	} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/*
		 * If performance here mattered, we could protect the LDT
		 * with RCU. This is a slow path, though, so we can just
		 * take the mutex.
		 */
		mutex_lock(&task->mm->context.lock);
		ldt = task->mm->context.ldt;
		if (unlikely(!ldt || idx >= ldt->nr_entries))
			base = 0;
		else
			base = get_desc_base(ldt->entries + idx);
		mutex_unlock(&task->mm->context.lock);
#else
		base = 0;
#endif
	}

	return base;
}
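
/*
 * Read the inactive GS base (the one SWAPGS would switch to), using RDGSBASE
 * wrapped in SWAPGS when FSGSBASE is available and the MSR_KERNEL_GS_BASE
 * MSR otherwise.
 */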
unsigned long x86_gsbase_read_cpu_inactive(void)
{
	unsigned long gsbase;

	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
		unsigned long flags;

		/* Interrupts are disabled here. */
		local_irq_save(flags);
		gsbase = __rdgsbase_inactive();
		local_irq_restore(flags);
	} else {
		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
	}

	return gsbase;
}
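
/* Write the inactive GS base; the counterpart of the read function above. */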
void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
		unsigned long flags;

		/* Interrupts are disabled here. */
		local_irq_save(flags);
		__wrgsbase_inactive(gsbase);
		local_irq_restore(flags);
	} else {
		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
	}
}
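
/* Fetch a task's FS base, reading live CPU state when the task is current. */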
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
	unsigned long fsbase;

	if (task == current)
		fsbase = x86_fsbase_read_cpu();
	else if (task->thread.fsindex == 0)
		fsbase = task->thread.fsbase;
	else
		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

	return fsbase;
}
unsigned long x86_gsbase_read_task(struct task_struct *task)
{
	unsigned long gsbase;

	if (task == current)
		gsbase = x86_gsbase_read_cpu_inactive();
	else if (task->thread.gsindex == 0)
		gsbase = task->thread.gsbase;
	else
		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

	return gsbase;
}
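
/*
 * The *_write_task() helpers below only update the saved thread state; they
 * must not be used on the currently running task.
 */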
void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.fsbase = fsbase;
}
void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.gsbase = gsbase;
}
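
/*
 * Set up a new child's kernel stack frame, segment state and, for
 * CLONE_SETTLS, its TLS at fork/clone time.
 */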
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}
	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
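
/* Reset the register and segment state of a thread that is about to exec. */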
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	force_iret();
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_prepare(prev_fpu, cpu);
	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them.
	 */
	arch_end_context_switch(next_p);
	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * in full.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);
	x86_fsgsbase_load(prev, next);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	switch_fpu_finish(next_fpu);

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);
#ifdef CONFIG_XEN_PV
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL. We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif
	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor. As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths. Instead, we ensure that SS is never NULL in
		 * system call context. We do this by replacing NULL SS
		 * selectors at every context switch. SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt. Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes. Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}
	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}
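
/* Mark the current task as a native 64-bit process after exec. */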
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: overwrites user setup. Should have two bits.
	 * But 64bit processes have always behaved this way,
	 * so it's not too bad. The main problem is just that
	 * 32bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}
static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_32bit_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status. The x86 mmap() code relies on
	 * the syscall bitness, so set the x32 syscall bit right here to make
	 * in_32bit_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}
static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}
void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
#ifdef CONFIG_CHECKPOINT_RESTORE
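/* Map one of the vDSO images at a caller-chosen address (checkpoint/restore). */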
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif
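
/*
 * Handle the 64-bit arch_prctl() options: getting and setting the FS/GS
 * bases, plus the checkpoint/restore vDSO mapping requests.
 */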
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;

	switch (option) {
	case ARCH_SET_GS: {
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * ARCH_SET_GS has always overwritten the index
		 * and the base. Zero is the most sensible value
		 * to put in the index, and is the only value that
		 * makes any sense if FSGSBASE is unavailable.
		 */
		if (task == current) {
			loadseg(GS, 0);
			x86_gsbase_write_cpu_inactive(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.gsbase.
			 */
			task->thread.gsbase = arg2;
		} else {
			task->thread.gsindex = 0;
			x86_gsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_SET_FS: {
		/*
		 * Not strictly needed for %fs, but do it for symmetry
		 * with %gs.
		 */
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * Set the selector to 0 for the same reason
		 * as %gs above.
		 */
		if (task == current) {
			loadseg(FS, 0);
			x86_fsbase_write_cpu(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.fsbase.
			 */
			task->thread.fsbase = arg2;
		} else {
			task->thread.fsindex = 0;
			x86_fsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_GET_FS: {
		unsigned long base = x86_fsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base = x86_gsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
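
/* arch_prctl() entry: try the 64-bit options, then fall back to common ones. */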
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}
#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif
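
/* Report the task's saved user stack pointer (e.g. for /proc/<pid>/stat). */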
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}
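
/*
 * Usage sketch (illustrative only, not part of this file): from userspace,
 * the handlers above are reached through the arch_prctl(2) system call.
 * Assuming glibc's syscall() wrapper and the constants from <asm/prctl.h>,
 * a thread can move its GS base and read it back like this:
 *
 *	unsigned long base;
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, 0x123000UL);
 *	syscall(SYS_arch_prctl, ARCH_GET_GS, &base);	// base == 0x123000
 *
 * ARCH_SET_GS lands in do_arch_prctl_64(), which zeroes the GS selector and
 * writes the new base via MSR_KERNEL_GS_BASE (or WRGSBASE when available).
 */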