Merge remote-tracking branch 'tip/perf/urgent' into perf/core
author	Arnaldo Carvalho de Melo <acme@redhat.com>
Tue, 24 Jul 2018 17:34:32 +0000 (14:34 -0300)
committer	Arnaldo Carvalho de Melo <acme@redhat.com>
Tue, 24 Jul 2018 17:34:32 +0000 (14:34 -0300)
To pick up fixes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
58 files changed:
Documentation/kprobes.txt
arch/arc/include/asm/kprobes.h
arch/arc/kernel/kprobes.c
arch/arm/include/asm/hw_breakpoint.h
arch/arm/include/asm/kprobes.h
arch/arm/include/asm/probes.h
arch/arm/kernel/hw_breakpoint.c
arch/arm/probes/kprobes/core.c
arch/arm/probes/kprobes/test-core.c
arch/arm64/include/asm/hw_breakpoint.h
arch/arm64/include/asm/kprobes.h
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/probes/kprobes.c
arch/ia64/include/asm/kprobes.h
arch/ia64/include/uapi/asm/break.h
arch/ia64/kernel/Makefile
arch/ia64/kernel/jprobes.S [deleted file]
arch/ia64/kernel/kprobes.c
arch/mips/include/asm/kprobes.h
arch/mips/kernel/kprobes.c
arch/powerpc/include/asm/hw_breakpoint.h
arch/powerpc/include/asm/kprobes.h
arch/powerpc/kernel/hw_breakpoint.c
arch/powerpc/kernel/kprobes-ftrace.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/trace/ftrace_64_mprofile.S
arch/powerpc/perf/core-book3s.c
arch/s390/include/asm/kprobes.h
arch/s390/kernel/kprobes.c
arch/sh/include/asm/hw_breakpoint.h
arch/sh/include/asm/kprobes.h
arch/sh/kernel/hw_breakpoint.c
arch/sh/kernel/kprobes.c
arch/sparc/include/asm/kprobes.h
arch/sparc/kernel/kprobes.c
arch/x86/events/intel/lbr.c
arch/x86/events/perf_event.h
arch/x86/include/asm/hw_breakpoint.h
arch/x86/include/asm/kprobes.h
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/kprobes/common.h
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/ftrace.c
arch/x86/kernel/kprobes/opt.c
arch/xtensa/include/asm/hw_breakpoint.h
arch/xtensa/kernel/hw_breakpoint.c
include/linux/kprobes.h
include/linux/perf_event.h
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/events/uprobes.c
kernel/fail_function.c
kernel/kprobes.c
kernel/test_kprobes.c
kernel/trace/trace_kprobe.c
lib/Kconfig.debug
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-record.txt

index cb3b0de..10f4499 100644 (file)
@@ -80,6 +80,26 @@ After the instruction is single-stepped, Kprobes executes the
 "post_handler," if any, that is associated with the kprobe.
 Execution then continues with the instruction following the probepoint.
 
+Changing Execution Path
+-----------------------
+
+Since kprobes can probe running kernel code, it can change the
+register set, including the instruction pointer. This operation
+requires maximum care, such as keeping the stack frame intact and
+recovering the execution path. Since it operates on a running
+kernel and needs deep knowledge of computer architecture and
+concurrent computing, you can easily shoot yourself in the foot.
+
+If you change the instruction pointer (and set up other related
+registers) in a pre_handler, you must return !0 so that kprobes stops
+single stepping and just returns to the given address.
+This also means the post_handler will no longer be called.
+
+Note that this operation may be harder on architectures that use a
+TOC (Table of Contents) for function calls, since you have to set up
+a new TOC for your function in your module, and restore the old one
+after returning from it.
+
 Return Probes
 -------------
 
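As a minimal sketch of the rule described in the "Changing Execution Path" section above (illustrative only, not taken from this commit), a pre_handler that diverts execution could look like the following; my_detour() and the probed symbol are hypothetical placeholders:

    #include <linux/kprobes.h>
    #include <linux/ptrace.h>

    /* Hypothetical routine we divert execution to. */
    static void my_detour(void)
    {
            /* ...replacement work... */
    }

    static int divert_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            /* Redirect the instruction pointer to our routine... */
            instruction_pointer_set(regs, (unsigned long)my_detour);
            /*
             * ...and return !0: kprobes then skips single stepping and
             * the post_handler, resuming at the address just installed.
             */
            return 1;
    }

    static struct kprobe kp = {
            .symbol_name = "some_probed_function",  /* hypothetical target */
            .pre_handler = divert_pre_handler,
    };
    /* register_kprobe(&kp) from module init would arm the probe. */
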
@@ -262,7 +282,7 @@ is optimized, that modification is ignored.  Thus, if you want to
 tweak the kernel's execution path, you need to suppress optimization,
 using one of the following techniques:
 
-- Specify an empty function for the kprobe's post_handler or break_handler.
+- Specify an empty function for the kprobe's post_handler.
 
 or
 
@@ -474,7 +494,7 @@ error occurs during registration, all probes in the array, up to
 the bad probe, are safely unregistered before the register_*probes
 function returns.
 
-- kps/rps/jps: an array of pointers to ``*probe`` data structures
+- kps/rps: an array of pointers to ``*probe`` data structures
 - num: the number of the array entries.
 
 .. note::
@@ -566,12 +586,11 @@ the same handler) may run concurrently on different CPUs.
 Kprobes does not use mutexes or allocate memory except during
 registration and unregistration.
 
-Probe handlers are run with preemption disabled.  Depending on the
-architecture and optimization state, handlers may also run with
-interrupts disabled (e.g., kretprobe handlers and optimized kprobe
-handlers run without interrupt disabled on x86/x86-64).  In any case,
-your handler should not yield the CPU (e.g., by attempting to acquire
-a semaphore).
+Probe handlers are run with preemption disabled, or with interrupts
+disabled, depending on the architecture and optimization state (e.g.,
+kretprobe handlers and optimized kprobe handlers run with interrupts
+enabled on x86/x86-64).  In any case, your handler should not yield
+the CPU (e.g., by attempting to acquire a semaphore or waiting on I/O).
 
 Since a return probe is implemented by replacing the return
 address with the trampoline's address, stack backtraces and calls
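
The paragraph above describes the return-probe trampoline mechanism; a minimal kretprobe sketch may make it concrete (illustrative only; the probed symbol is a placeholder):

    #include <linux/kprobes.h>

    static int my_ret_handler(struct kretprobe_instance *ri,
                              struct pt_regs *regs)
    {
            /* Runs when the probed function returns via the trampoline. */
            pr_info("return value: %lu\n", regs_return_value(regs));
            return 0;
    }

    static struct kretprobe my_kretprobe = {
            .handler        = my_ret_handler,
            .kp.symbol_name = "some_probed_function",  /* placeholder */
            .maxactive      = 20,   /* concurrent instances to track */
    };
    /* register_kretprobe(&my_kretprobe) arms it; while armed, stack
     * backtraces taken inside the probed function show the trampoline
     * address in place of the real return address. */
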
index 2e52d18..2c1b479 100644 (file)
@@ -45,8 +45,6 @@ struct prev_kprobe {
 
 struct kprobe_ctlblk {
        unsigned int kprobe_status;
-       struct pt_regs jprobe_saved_regs;
-       char jprobes_stack[MAX_STACK_SIZE];
        struct prev_kprobe prev_kprobe;
 };
 
index 42b0504..df35d4c 100644 (file)
@@ -225,24 +225,18 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
 
                /* If we have no pre-handler or it returned 0, we continue with
                 * normal processing. If we have a pre-handler and it returned
-                * non-zero - which is expected from setjmp_pre_handler for
-                * jprobe, we return without single stepping and leave that to
-                * the break-handler which is invoked by a kprobe from
-                * jprobe_return
+                * non-zero - which means the user handler set up registers
+                * to resume at another instruction - we must skip single stepping.
                 */
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        setup_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_HIT_SS;
+               } else {
+                       reset_current_kprobe();
+                       preempt_enable_no_resched();
                }
 
                return 1;
-       } else if (kprobe_running()) {
-               p = __this_cpu_read(current_kprobe);
-               if (p->break_handler && p->break_handler(p, regs)) {
-                       setup_singlestep(p, regs);
-                       kcb->kprobe_status = KPROBE_HIT_SS;
-                       return 1;
-               }
        }
 
        /* no_kprobe: */
@@ -386,38 +380,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long sp_addr = regs->sp;
-
-       kcb->jprobe_saved_regs = *regs;
-       memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
-       regs->ret = (unsigned long)(jp->entry);
-
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       __asm__ __volatile__("unimp_s");
-       return;
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long sp_addr;
-
-       *regs = kcb->jprobe_saved_regs;
-       sp_addr = regs->sp;
-       memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
-       preempt_enable_no_resched();
-
-       return 1;
-}
-
 static void __used kretprobe_trampoline_holder(void)
 {
        __asm__ __volatile__(".global kretprobe_trampoline\n"
@@ -483,9 +445,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->ret = orig_ret_address;
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index e46e4e7..ac54c06 100644 (file)
@@ -111,14 +111,17 @@ static inline void decode_ctrl_reg(u32 reg,
        asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
 } while (0)
 
+struct perf_event_attr;
 struct notifier_block;
 struct perf_event;
 struct pmu;
 
 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                                  int *gen_len, int *gen_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                           unsigned long val, void *data);
 
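The interface change above replaces arch_validate_hwbkpt_settings() with hw_breakpoint_arch_parse(), which receives the attributes to parse plus a scratch arch_hw_breakpoint to fill, so architectures no longer reach into counter_arch_bp(bp) themselves. A rough sketch of the call pattern the generic core is expected to use, inferred from the signatures alone:

    /* Assumed call pattern; not quoted from this commit's generic code. */
    static int parse_bp_settings(struct perf_event *bp,
                                 const struct perf_event_attr *attr,
                                 struct arch_hw_breakpoint *hw)
    {
            int err;

            /* Arch code fills and validates 'hw' from 'attr'. */
            err = hw_breakpoint_arch_parse(bp, attr, hw);
            if (err)
                    return err;

            /* The kernel-space check now operates on the parsed copy. */
            if (arch_check_bp_in_kernelspace(hw) && attr->exclude_kernel)
                    return -EINVAL;

            return 0;
    }
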
index 5965545..82290f2 100644 (file)
@@ -44,8 +44,6 @@ struct prev_kprobe {
 struct kprobe_ctlblk {
        unsigned int kprobe_status;
        struct prev_kprobe prev_kprobe;
-       struct pt_regs jprobe_saved_regs;
-       char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
index 1e5b9bb..991c912 100644 (file)
@@ -51,7 +51,6 @@ struct arch_probes_insn {
  * We assume one instruction can consume at most 64 bytes stack, which is
  * 'push {r0-r15}'. Instructions that consume more or unknown stack space,
  * like 'str r0, [sp, #-80]' and 'str r0, [sp, r1]', are prohibited from probing.
- * Both kprobe and jprobe use this macro.
  */
 #define MAX_STACK_SIZE                 64
 
index 629e251..1d5fbf1 100644 (file)
@@ -456,14 +456,13 @@ static int get_hbp_len(u8 hbp_len)
 /*
  * Check whether bp virtual address is in kernel space.
  */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
        unsigned int len;
        unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-       va = info->address;
-       len = get_hbp_len(info->ctrl.len);
+       va = hw->address;
+       len = get_hbp_len(hw->ctrl.len);
 
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -518,42 +517,42 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
 /*
  * Construct an arch_hw_breakpoint from a perf_event.
  */
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+                             const struct perf_event_attr *attr,
+                             struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_X:
-               info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+               hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
-               info->ctrl.type = ARM_BREAKPOINT_LOAD;
+               hw->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
-               info->ctrl.type = ARM_BREAKPOINT_STORE;
+               hw->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
-               info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+               hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }
 
        /* Len */
-       switch (bp->attr.bp_len) {
+       switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_8;
-               if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
+               if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
                        && max_watchpoint_len >= 8)
                        break;
        default:
@@ -566,24 +565,24 @@ static int arch_build_bp_info(struct perf_event *bp)
         * by the hardware and must be aligned to the appropriate number of
         * bytes.
         */
-       if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
-           info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
-           info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+       if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+           hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+           hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
                return -EINVAL;
 
        /* Address */
-       info->address = bp->attr.bp_addr;
+       hw->address = attr->bp_addr;
 
        /* Privilege */
-       info->ctrl.privilege = ARM_BREAKPOINT_USER;
-       if (arch_check_bp_in_kernelspace(bp))
-               info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
+       hw->ctrl.privilege = ARM_BREAKPOINT_USER;
+       if (arch_check_bp_in_kernelspace(hw))
+               hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
 
        /* Enabled? */
-       info->ctrl.enabled = !bp->attr.disabled;
+       hw->ctrl.enabled = !attr->disabled;
 
        /* Mismatch */
-       info->ctrl.mismatch = 0;
+       hw->ctrl.mismatch = 0;
 
        return 0;
 }
@@ -591,9 +590,10 @@ static int arch_build_bp_info(struct perf_event *bp)
 /*
  * Validate the arch-specific HW Breakpoint register settings.
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret = 0;
        u32 offset, alignment_mask = 0x3;
 
@@ -602,14 +602,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                return -ENODEV;
 
        /* Build the arch_hw_breakpoint. */
-       ret = arch_build_bp_info(bp);
+       ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                goto out;
 
        /* Check address alignment. */
-       if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
                alignment_mask = 0x7;
-       offset = info->address & alignment_mask;
+       offset = hw->address & alignment_mask;
        switch (offset) {
        case 0:
                /* Aligned */
@@ -617,19 +617,19 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        case 1:
        case 2:
                /* Allow halfword watchpoints and breakpoints. */
-               if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+               if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                        break;
        case 3:
                /* Allow single byte watchpoint. */
-               if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+               if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
                        break;
        default:
                ret = -EINVAL;
                goto out;
        }
 
-       info->address &= ~alignment_mask;
-       info->ctrl.len <<= offset;
+       hw->address &= ~alignment_mask;
+       hw->ctrl.len <<= offset;
 
        if (is_default_overflow_handler(bp)) {
                /*
@@ -640,7 +640,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                        return -EINVAL;
 
                /* We don't allow mismatch breakpoints in kernel space. */
-               if (arch_check_bp_in_kernelspace(bp))
+               if (arch_check_bp_in_kernelspace(hw))
                        return -EPERM;
 
                /*
@@ -655,8 +655,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                 * reports them.
                 */
                if (!debug_exception_updates_fsr() &&
-                   (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
-                    info->ctrl.type == ARM_BREAKPOINT_STORE))
+                   (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
+                    hw->ctrl.type == ARM_BREAKPOINT_STORE))
                        return -EINVAL;
        }
 
index e90cc8a..f8bd523 100644 (file)
@@ -47,9 +47,6 @@
                           (unsigned long)(addr) +      \
                           (size))
 
-/* Used as a marker in ARM_pc to note when we're in a jprobe. */
-#define JPROBE_MAGIC_ADDR              0xffffffff
-
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -289,8 +286,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
                                break;
                        case KPROBE_REENTER:
                                /* A nested probe was hit in FIQ, it is a BUG */
-                               pr_warn("Unrecoverable kprobe detected at %p.\n",
-                                       p->addr);
+                               pr_warn("Unrecoverable kprobe detected.\n");
+                               dump_kprobe(p);
                                /* fall through */
                        default:
                                /* impossible cases */
@@ -303,10 +300,10 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 
                        /*
                         * If we have no pre-handler or it returned 0, we
-                        * continue with normal processing.  If we have a
-                        * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry,
-                        * so get out doing nothing more here.
+                        * continue with normal processing. If we have a
+                        * pre-handler and it returned non-zero, it modified
+                        * the execution path and there is no need to single
+                        * step. Let's just reset the current kprobe and exit.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                                kcb->kprobe_status = KPROBE_HIT_SS;
@@ -315,20 +312,9 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
                                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                                        p->post_handler(p, regs, 0);
                                }
-                               reset_current_kprobe();
-                       }
-               }
-       } else if (cur) {
-               /* We probably hit a jprobe.  Call its break handler. */
-               if (cur->break_handler && cur->break_handler(cur, regs)) {
-                       kcb->kprobe_status = KPROBE_HIT_SS;
-                       singlestep(cur, regs, kcb);
-                       if (cur->post_handler) {
-                               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-                               cur->post_handler(cur, regs, 0);
                        }
+                       reset_current_kprobe();
                }
-               reset_current_kprobe();
        } else {
                /*
                 * The probe was removed and a race is in progress.
@@ -521,117 +507,6 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
        regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       long sp_addr = regs->ARM_sp;
-       long cpsr;
-
-       kcb->jprobe_saved_regs = *regs;
-       memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
-       regs->ARM_pc = (long)jp->entry;
-
-       cpsr = regs->ARM_cpsr | PSR_I_BIT;
-#ifdef CONFIG_THUMB2_KERNEL
-       /* Set correct Thumb state in cpsr */
-       if (regs->ARM_pc & 1)
-               cpsr |= PSR_T_BIT;
-       else
-               cpsr &= ~PSR_T_BIT;
-#endif
-       regs->ARM_cpsr = cpsr;
-
-       preempt_disable();
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       __asm__ __volatile__ (
-               /*
-                * Setup an empty pt_regs. Fill SP and PC fields as
-                * they're needed by longjmp_break_handler.
-                *
-                * We allocate some slack between the original SP and start of
-                * our fabricated regs. To be precise we want to have worst case
-                * covered which is STMFD with all 16 regs so we allocate 2 *
-                * sizeof(struct_pt_regs)).
-                *
-                * This is to prevent any simulated instruction from writing
-                * over the regs when they are accessing the stack.
-                */
-#ifdef CONFIG_THUMB2_KERNEL
-               "sub    r0, %0, %1              \n\t"
-               "mov    sp, r0                  \n\t"
-#else
-               "sub    sp, %0, %1              \n\t"
-#endif
-               "ldr    r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
-               "str    %0, [sp, %2]            \n\t"
-               "str    r0, [sp, %3]            \n\t"
-               "mov    r0, sp                  \n\t"
-               "bl     kprobe_handler          \n\t"
-
-               /*
-                * Return to the context saved by setjmp_pre_handler
-                * and restored by longjmp_break_handler.
-                */
-#ifdef CONFIG_THUMB2_KERNEL
-               "ldr    lr, [sp, %2]            \n\t" /* lr = saved sp */
-               "ldrd   r0, r1, [sp, %5]        \n\t" /* r0,r1 = saved lr,pc */
-               "ldr    r2, [sp, %4]            \n\t" /* r2 = saved psr */
-               "stmdb  lr!, {r0, r1, r2}       \n\t" /* push saved lr and */
-                                                     /* rfe context */
-               "ldmia  sp, {r0 - r12}          \n\t"
-               "mov    sp, lr                  \n\t"
-               "ldr    lr, [sp], #4            \n\t"
-               "rfeia  sp!                     \n\t"
-#else
-               "ldr    r0, [sp, %4]            \n\t"
-               "msr    cpsr_cxsf, r0           \n\t"
-               "ldmia  sp, {r0 - pc}           \n\t"
-#endif
-               :
-               : "r" (kcb->jprobe_saved_regs.ARM_sp),
-                 "I" (sizeof(struct pt_regs) * 2),
-                 "J" (offsetof(struct pt_regs, ARM_sp)),
-                 "J" (offsetof(struct pt_regs, ARM_pc)),
-                 "J" (offsetof(struct pt_regs, ARM_cpsr)),
-                 "J" (offsetof(struct pt_regs, ARM_lr))
-               : "memory", "cc");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       long stack_addr = kcb->jprobe_saved_regs.ARM_sp;
-       long orig_sp = regs->ARM_sp;
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-
-       if (regs->ARM_pc == JPROBE_MAGIC_ADDR) {
-               if (orig_sp != stack_addr) {
-                       struct pt_regs *saved_regs =
-                               (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp;
-                       printk("current sp %lx does not match saved sp %lx\n",
-                              orig_sp, stack_addr);
-                       printk("Saved registers for jprobe %p\n", jp);
-                       show_regs(saved_regs);
-                       printk("Current registers\n");
-                       show_regs(regs);
-                       BUG();
-               }
-               *regs = kcb->jprobe_saved_regs;
-               memcpy((void *)stack_addr, kcb->jprobes_stack,
-                      MIN_STACK_SIZE(stack_addr));
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-
 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
 {
        return 0;
index 14db141..cc237fa 100644 (file)
@@ -1461,7 +1461,6 @@ fail:
        print_registers(&result_regs);
 
        if (mem) {
-               pr_err("current_stack=%p\n", current_stack);
                pr_err("expected_memory:\n");
                print_memory(expected_memory, mem_size);
                pr_err("result_memory:\n");
index 4177076..6a53e59 100644 (file)
@@ -119,13 +119,16 @@ static inline void decode_ctrl_reg(u32 reg,
 
 struct task_struct;
 struct notifier_block;
+struct perf_event_attr;
 struct perf_event;
 struct pmu;
 
 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                                  int *gen_len, int *gen_type, int *offset);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                           unsigned long val, void *data);
 
index 6deb8d7..d5a44cf 100644 (file)
@@ -48,7 +48,6 @@ struct kprobe_ctlblk {
        unsigned long saved_irqflag;
        struct prev_kprobe prev_kprobe;
        struct kprobe_step_ctx ss_ctx;
-       struct pt_regs jprobe_saved_regs;
 };
 
 void arch_remove_kprobe(struct kprobe *);
index 413dbe5..8c96443 100644 (file)
@@ -343,14 +343,13 @@ static int get_hbp_len(u8 hbp_len)
 /*
  * Check whether bp virtual address is in kernel space.
  */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
        unsigned int len;
        unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-       va = info->address;
-       len = get_hbp_len(info->ctrl.len);
+       va = hw->address;
+       len = get_hbp_len(hw->ctrl.len);
 
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -421,53 +420,53 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
 /*
  * Construct an arch_hw_breakpoint from a perf_event.
  */
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+                             const struct perf_event_attr *attr,
+                             struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_X:
-               info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+               hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
-               info->ctrl.type = ARM_BREAKPOINT_LOAD;
+               hw->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
-               info->ctrl.type = ARM_BREAKPOINT_STORE;
+               hw->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
-               info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+               hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }
 
        /* Len */
-       switch (bp->attr.bp_len) {
+       switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_3:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_3;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
                break;
        case HW_BREAKPOINT_LEN_4:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_5:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_5;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
                break;
        case HW_BREAKPOINT_LEN_6:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_6;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
                break;
        case HW_BREAKPOINT_LEN_7:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_7;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
                break;
        case HW_BREAKPOINT_LEN_8:
-               info->ctrl.len = ARM_BREAKPOINT_LEN_8;
+               hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
@@ -478,37 +477,37 @@ static int arch_build_bp_info(struct perf_event *bp)
         * AArch32 also requires breakpoints of length 2 for Thumb.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes.
         */
-       if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+       if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                if (is_compat_bp(bp)) {
-                       if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
-                           info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+                       if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+                           hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
                                return -EINVAL;
-               } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
+               } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
                        /*
                         * FIXME: Some tools (I'm looking at you perf) assume
                         *        that breakpoints should be sizeof(long). This
                         *        is nonsense. For now, we fix up the parameter
                         *        but we should probably return -EINVAL instead.
                         */
-                       info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+                       hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                }
        }
 
        /* Address */
-       info->address = bp->attr.bp_addr;
+       hw->address = attr->bp_addr;
 
        /*
         * Privilege
         * Note that we disallow combined EL0/EL1 breakpoints because
         * that would complicate the stepping code.
         */
-       if (arch_check_bp_in_kernelspace(bp))
-               info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
+       if (arch_check_bp_in_kernelspace(hw))
+               hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
        else
-               info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
+               hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;
 
        /* Enabled? */
-       info->ctrl.enabled = !bp->attr.disabled;
+       hw->ctrl.enabled = !attr->disabled;
 
        return 0;
 }
@@ -516,14 +515,15 @@ static int arch_build_bp_info(struct perf_event *bp)
 /*
  * Validate the arch-specific HW Breakpoint register settings.
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret;
        u64 alignment_mask, offset;
 
        /* Build the arch_hw_breakpoint. */
-       ret = arch_build_bp_info(bp);
+       ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                return ret;
 
@@ -537,42 +537,42 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
         * that here.
         */
        if (is_compat_bp(bp)) {
-               if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+               if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
                        alignment_mask = 0x7;
                else
                        alignment_mask = 0x3;
-               offset = info->address & alignment_mask;
+               offset = hw->address & alignment_mask;
                switch (offset) {
                case 0:
                        /* Aligned */
                        break;
                case 1:
                        /* Allow single byte watchpoint. */
-                       if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
                                break;
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
-                       if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
                default:
                        return -EINVAL;
                }
        } else {
-               if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
+               if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
                        alignment_mask = 0x3;
                else
                        alignment_mask = 0x7;
-               offset = info->address & alignment_mask;
+               offset = hw->address & alignment_mask;
        }
 
-       info->address &= ~alignment_mask;
-       info->ctrl.len <<= offset;
+       hw->address &= ~alignment_mask;
+       hw->ctrl.len <<= offset;
 
        /*
         * Disallow per-task kernel breakpoints since these would
         * complicate the stepping code.
         */
-       if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
+       if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
                return -EINVAL;
 
        return 0;
index d849d98..e78c3ef 100644 (file)
@@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
-               pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+               pr_warn("Unrecoverable kprobe detected.\n");
                dump_kprobe(p);
                BUG();
                break;
@@ -395,9 +395,9 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing.  If we have a
-                        * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry,
-                        * so get out doing nothing more here.
+                        * pre-handler and it returned non-zero, it modified
+                        * the execution path and there is no need to single
+                        * step. Let's just reset the current kprobe and exit.
                         *
                         * pre_handler can hit a breakpoint and can step thru
                         * before return, keep PSTATE D-flag enabled until
@@ -405,16 +405,8 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                                setup_singlestep(p, regs, kcb, 0);
-                               return;
-                       }
-               }
-       } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
-           BRK64_OPCODE_KPROBES) && cur_kprobe) {
-               /* We probably hit a jprobe.  Call its break handler. */
-               if (cur_kprobe->break_handler  &&
-                    cur_kprobe->break_handler(cur_kprobe, regs)) {
-                       setup_singlestep(cur_kprobe, regs, kcb, 0);
-                       return;
+                       } else
+                               reset_current_kprobe();
                }
        }
        /*
@@ -465,74 +457,6 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
        return DBG_HOOK_HANDLED;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       kcb->jprobe_saved_regs = *regs;
-       /*
-        * Since we can't be sure where in the stack frame "stacked"
-        * pass-by-value arguments are stored we just don't try to
-        * duplicate any of the stack. Do not use jprobes on functions that
-        * use more than 64 bytes (after padding each to an 8 byte boundary)
-        * of arguments, or pass individual arguments larger than 16 bytes.
-        */
-
-       instruction_pointer_set(regs, (unsigned long) jp->entry);
-       preempt_disable();
-       pause_graph_tracing();
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       /*
-        * Jprobe handler return by entering break exception,
-        * encoded same as kprobe, but with following conditions
-        * -a special PC to identify it from the other kprobes.
-        * -restore stack addr to original saved pt_regs
-        */
-       asm volatile("                          mov sp, %0      \n"
-                    "jprobe_return_break:      brk %1          \n"
-                    :
-                    : "r" (kcb->jprobe_saved_regs.sp),
-                      "I" (BRK64_ESR_KPROBES)
-                    : "memory");
-
-       unreachable();
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       long stack_addr = kcb->jprobe_saved_regs.sp;
-       long orig_sp = kernel_stack_pointer(regs);
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       extern const char jprobe_return_break[];
-
-       if (instruction_pointer(regs) != (u64) jprobe_return_break)
-               return 0;
-
-       if (orig_sp != stack_addr) {
-               struct pt_regs *saved_regs =
-                   (struct pt_regs *)kcb->jprobe_saved_regs.sp;
-               pr_err("current sp %lx does not match saved sp %lx\n",
-                      orig_sp, stack_addr);
-               pr_err("Saved registers for jprobe %p\n", jp);
-               __show_regs(saved_regs);
-               pr_err("Current registers\n");
-               __show_regs(regs);
-               BUG();
-       }
-       unpause_graph_tracing();
-       *regs = kcb->jprobe_saved_regs;
-       preempt_enable_no_resched();
-       return 1;
-}
-
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
        if ((addr >= (unsigned long)__kprobes_text_start &&
index 0302b36..580356a 100644 (file)
@@ -82,8 +82,6 @@ struct prev_kprobe {
 #define ARCH_PREV_KPROBE_SZ 2
 struct kprobe_ctlblk {
        unsigned long kprobe_status;
-       struct pt_regs jprobe_saved_regs;
-       unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
        unsigned long *bsp;
        unsigned long cfm;
        atomic_t prev_kprobe_index;
index 5d742bc..4ca110f 100644 (file)
@@ -14,7 +14,6 @@
  */
 #define __IA64_BREAK_KDB               0x80100
 #define __IA64_BREAK_KPROBE            0x81000 /* .. 0x81fff */
-#define __IA64_BREAK_JPROBE            0x82000
 
 /*
  * OS-specific break numbers:
index 498f3da..d0c0ccd 100644 (file)
@@ -25,7 +25,7 @@ obj-$(CONFIG_NUMA)            += numa.o
 obj-$(CONFIG_PERFMON)          += perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)     += cyclone.o
 obj-$(CONFIG_IA64_MCA_RECOVERY)        += mca_recovery.o
-obj-$(CONFIG_KPROBES)          += kprobes.o jprobes.o
+obj-$(CONFIG_KPROBES)          += kprobes.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
deleted file mode 100644 (file)
index f69389c..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Jprobe specific operations
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) Intel Corporation, 2005
- *
- * 2005-May     Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
- *              <anil.s.keshavamurthy@intel.com> initial implementation
- *
- * Jprobes (a.k.a. "jump probes" which is built on-top of kprobes) allow a
- * probe to be inserted into the beginning of a function call.  The fundamental
- * difference between a jprobe and a kprobe is the jprobe handler is executed
- * in the same context as the target function, while the kprobe handlers
- * are executed in interrupt context.
- *
- * For jprobes we initially gain control by placing a break point in the
- * first instruction of the targeted function.  When we catch that specific
- * break, we:
- *        * set the return address to our jprobe_inst_return() function
- *        * jump to the jprobe handler function
- *
- * Since we fixed up the return address, the jprobe handler will return to our
- * jprobe_inst_return() function, giving us control again.  At this point we
- * are back in the parents frame marker, so we do yet another call to our
- * jprobe_break() function to fix up the frame marker as it would normally
- * exist in the target function.
- *
- * Our jprobe_return function then transfers control back to kprobes.c by
- * executing a break instruction using one of our reserved numbers.  When we
- * catch that break in kprobes.c, we continue like we do for a normal kprobe
- * by single stepping the emulated instruction, and then returning execution
- * to the correct location.
- */
-#include <asm/asmmacro.h>
-#include <asm/break.h>
-
-       /*
-        * void jprobe_break(void)
-        */
-       .section .kprobes.text, "ax"
-ENTRY(jprobe_break)
-       break.m __IA64_BREAK_JPROBE
-END(jprobe_break)
-
-       /*
-        * void jprobe_inst_return(void)
-        */
-GLOBAL_ENTRY(jprobe_inst_return)
-       br.call.sptk.many b0=jprobe_break
-END(jprobe_inst_return)
-
-GLOBAL_ENTRY(invalidate_stacked_regs)
-       movl r16=invalidate_restore_cfm
-       ;;
-       mov b6=r16
-       ;;
-       br.ret.sptk.many b6
-       ;;
-invalidate_restore_cfm:
-       mov r16=ar.rsc
-       ;;
-       mov ar.rsc=r0
-       ;;
-       loadrs
-       ;;
-       mov ar.rsc=r16
-       ;;
-       br.cond.sptk.many rp
-END(invalidate_stacked_regs)
-
-GLOBAL_ENTRY(flush_register_stack)
-       // flush dirty regs to backing store (must be first in insn group)
-       flushrs
-       ;;
-       br.ret.sptk.many rp
-END(flush_register_stack)
-
index f5f3a5e..aa41bd5 100644 (file)
@@ -35,8 +35,6 @@
 #include <asm/sections.h>
 #include <asm/exception.h>
 
-extern void jprobe_inst_return(void);
-
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -480,12 +478,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
                         */
                        break;
        }
-
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
@@ -819,14 +814,6 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
                        prepare_ss(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
-               } else if (args->err == __IA64_BREAK_JPROBE) {
-                       /*
-                        * jprobe instrumented function just completed
-                        */
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs)) {
-                               goto ss_probe;
-                       }
                } else if (!is_ia64_break_inst(regs)) {
                        /* The breakpoint instruction was removed by
                         * another cpu right after we hit, no further
@@ -861,15 +848,12 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
        set_current_kprobe(p, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
-       if (p->pre_handler && p->pre_handler(p, regs))
-               /*
-                * Our pre-handler is specifically requesting that we just
-                * do a return.  This is used for both the jprobe pre-handler
-                * and the kretprobe trampoline
-                */
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
-ss_probe:
 #if !defined(CONFIG_PREEMPT)
        if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
                /* Boost up -- we can execute copied instructions directly */
@@ -992,7 +976,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        case DIE_BREAK:
                /* err is break number from ia64_bad_break() */
                if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
-                       || args->err == __IA64_BREAK_JPROBE
                        || args->err == 0)
                        if (pre_kprobes_handler(args))
                                ret = NOTIFY_STOP;
@@ -1040,74 +1023,6 @@ unsigned long arch_deref_entry_point(void *entry)
        return ((struct fnptr *)entry)->ip;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       unsigned long addr = arch_deref_entry_point(jp->entry);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       struct param_bsp_cfm pa;
-       int bytes;
-
-       /*
-        * Callee owns the argument space and could overwrite it, eg
-        * tail call optimization. So to be absolutely safe
-        * we save the argument space before transferring the control
-        * to instrumented jprobe function which runs in
-        * the process context
-        */
-       pa.ip = regs->cr_iip;
-       unw_init_running(ia64_get_bsp_cfm, &pa);
-       bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
-                               - (char *)pa.bsp;
-       memcpy( kcb->jprobes_saved_stacked_regs,
-               pa.bsp,
-               bytes );
-       kcb->bsp = pa.bsp;
-       kcb->cfm = pa.cfm;
-
-       /* save architectural state */
-       kcb->jprobe_saved_regs = *regs;
-
-       /* after rfi, execute the jprobe instrumented function */
-       regs->cr_iip = addr & ~0xFULL;
-       ia64_psr(regs)->ri = addr & 0xf;
-       regs->r1 = ((struct fnptr *)(jp->entry))->gp;
-
-       /*
-        * fix the return address to our jprobe_inst_return() function
-        * in the jprobes.S file
-        */
-       regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
-
-       return 1;
-}
-
-/* ia64 does not need this */
-void __kprobes jprobe_return(void)
-{
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       int bytes;
-
-       /* restoring architectural state */
-       *regs = kcb->jprobe_saved_regs;
-
-       /* restoring the original argument space */
-       flush_register_stack();
-       bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
-                               - (char *)kcb->bsp;
-       memcpy( kcb->bsp,
-               kcb->jprobes_saved_stacked_regs,
-               bytes );
-       invalidate_stacked_regs();
-
-       preempt_enable_no_resched();
-       return 1;
-}
-
 static struct kprobe trampoline_p = {
        .pre_handler = trampoline_probe_handler
 };
index ad1a999..a72dfbf 100644 (file)
@@ -68,16 +68,6 @@ struct prev_kprobe {
        unsigned long saved_epc;
 };
 
-#define MAX_JPROBES_STACK_SIZE 128
-#define MAX_JPROBES_STACK_ADDR \
-       (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 - sizeof(struct pt_regs))
-
-#define MIN_JPROBES_STACK_SIZE(ADDR)                                   \
-       ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR)   \
-               ? MAX_JPROBES_STACK_ADDR - (ADDR)                       \
-               : MAX_JPROBES_STACK_SIZE)
-
-
 #define SKIP_DELAYSLOT 0x0001
 
 /* per-cpu kprobe control block */
@@ -86,12 +76,9 @@ struct kprobe_ctlblk {
        unsigned long kprobe_old_SR;
        unsigned long kprobe_saved_SR;
        unsigned long kprobe_saved_epc;
-       unsigned long jprobe_saved_sp;
-       struct pt_regs jprobe_saved_regs;
        /* Per-thread fields, used while emulating branches */
        unsigned long flags;
        unsigned long target_epc;
-       u8 jprobes_stack[MAX_JPROBES_STACK_SIZE];
        struct prev_kprobe prev_kprobe;
 };
 
index f5c8bce..54cd675 100644 (file)
@@ -326,19 +326,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                                preempt_enable_no_resched();
                        }
                        return 1;
-               } else {
-                       if (addr->word != breakpoint_insn.word) {
-                               /*
-                                * The breakpoint instruction was removed by
-                                * another cpu right after we hit, no further
-                                * handling of this interrupt is appropriate
-                                */
-                               ret = 1;
-                               goto no_kprobe;
-                       }
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs))
-                               goto ss_probe;
+               } else if (addr->word != breakpoint_insn.word) {
+                       /*
+                        * The breakpoint instruction was removed by
+                        * another cpu right after we hit, no further
+                        * handling of this interrupt is appropriate
+                        */
+                       ret = 1;
                }
                goto no_kprobe;
        }
@@ -364,10 +358,11 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
        if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
        }
 
-ss_probe:
        prepare_singlestep(p, regs, kcb);
        if (kcb->flags & SKIP_DELAYSLOT) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
@@ -468,51 +463,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       kcb->jprobe_saved_regs = *regs;
-       kcb->jprobe_saved_sp = regs->regs[29];
-
-       memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
-              MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
-
-       regs->cp0_epc = (unsigned long)(jp->entry);
-
-       return 1;
-}
-
-/* Defined in the inline asm below. */
-void jprobe_return_end(void);
-
-void __kprobes jprobe_return(void)
-{
-       /* Assembler quirk necessitates this '0,code' business.  */
-       asm volatile(
-               "break 0,%0\n\t"
-               ".globl jprobe_return_end\n"
-               "jprobe_return_end:\n"
-               : : "n" (BRK_KPROBE_BP) : "memory");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       if (regs->cp0_epc >= (unsigned long)jprobe_return &&
-           regs->cp0_epc <= (unsigned long)jprobe_return_end) {
-               *regs = kcb->jprobe_saved_regs;
-               memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
-                      MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
-               preempt_enable_no_resched();
-
-               return 1;
-       }
-       return 0;
-}
-
 /*
  * Function return probe trampoline:
  *     - init_kprobes() establishes a probepoint here
@@ -595,9 +545,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        instruction_pointer(regs) = orig_ret_address;
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index 8e7b097..27d6e3c 100644 (file)
@@ -52,6 +52,7 @@ struct arch_hw_breakpoint {
 #include <asm/reg.h>
 #include <asm/debug.h>
 
+struct perf_event_attr;
 struct perf_event;
 struct pmu;
 struct perf_sample_data;
@@ -60,8 +61,10 @@ struct perf_sample_data;
 
 extern int hw_breakpoint_slots(int type);
 extern int arch_bp_generic_fields(int type, int *gen_bp_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                                unsigned long val, void *data);
 int arch_install_hw_breakpoint(struct perf_event *bp);
index 9f3be5c..785c464 100644 (file)
@@ -88,7 +88,6 @@ struct prev_kprobe {
 struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_saved_msr;
-       struct pt_regs jprobe_saved_regs;
        struct prev_kprobe prev_kprobe;
 };
 
@@ -103,17 +102,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int __is_active_jprobe(unsigned long addr);
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                          struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                                 struct kprobe_ctlblk *kcb)
-{
-       return 0;
-}
-#endif
 #else
 static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
 static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
index 80547da..fec8a67 100644 (file)
@@ -119,11 +119,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
 /*
  * Check for virtual address in kernel space.
  */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
-       return is_kernel_addr(info->address);
+       return is_kernel_addr(hw->address);
 }
 
 int arch_bp_generic_fields(int type, int *gen_bp_type)
@@ -141,30 +139,31 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
        int ret = -EINVAL, length_max;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
        if (!bp)
                return ret;
 
-       info->type = HW_BRK_TYPE_TRANSLATE;
-       if (bp->attr.bp_type & HW_BREAKPOINT_R)
-               info->type |= HW_BRK_TYPE_READ;
-       if (bp->attr.bp_type & HW_BREAKPOINT_W)
-               info->type |= HW_BRK_TYPE_WRITE;
-       if (info->type == HW_BRK_TYPE_TRANSLATE)
+       hw->type = HW_BRK_TYPE_TRANSLATE;
+       if (attr->bp_type & HW_BREAKPOINT_R)
+               hw->type |= HW_BRK_TYPE_READ;
+       if (attr->bp_type & HW_BREAKPOINT_W)
+               hw->type |= HW_BRK_TYPE_WRITE;
+       if (hw->type == HW_BRK_TYPE_TRANSLATE)
                /* must set at least read or write */
                return ret;
-       if (!(bp->attr.exclude_user))
-               info->type |= HW_BRK_TYPE_USER;
-       if (!(bp->attr.exclude_kernel))
-               info->type |= HW_BRK_TYPE_KERNEL;
-       if (!(bp->attr.exclude_hv))
-               info->type |= HW_BRK_TYPE_HYP;
-       info->address = bp->attr.bp_addr;
-       info->len = bp->attr.bp_len;
+       if (!attr->exclude_user)
+               hw->type |= HW_BRK_TYPE_USER;
+       if (!attr->exclude_kernel)
+               hw->type |= HW_BRK_TYPE_KERNEL;
+       if (!attr->exclude_hv)
+               hw->type |= HW_BRK_TYPE_HYP;
+       hw->address = attr->bp_addr;
+       hw->len = attr->bp_len;
 
        /*
         * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -178,12 +177,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        if (cpu_has_feature(CPU_FTR_DAWR)) {
                length_max = 512 ; /* 64 doublewords */
                /* DAWR region can't cross 512 boundary */
-               if ((bp->attr.bp_addr >> 9) !=
-                   ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
+               if ((attr->bp_addr >> 9) !=
+                   ((attr->bp_addr + attr->bp_len - 1) >> 9))
                        return -EINVAL;
        }
-       if (info->len >
-           (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
+       if (hw->len >
+           (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
                return -EINVAL;
        return 0;
 }
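
The new split API decouples parsing from the live event: the generic layer can validate a candidate perf_event_attr into a scratch arch_hw_breakpoint and only commit the result if parsing succeeds. A minimal sketch of that calling pattern (illustrative, not the generic core's exact code; it assumes the usual bp->hw.info placement behind counter_arch_bp()):

    static int parse_and_commit(struct perf_event *bp,
                                const struct perf_event_attr *attr)
    {
            struct arch_hw_breakpoint hw = { };
            int err;

            err = hw_breakpoint_arch_parse(bp, attr, &hw);
            if (err)
                    return err;

            /* The live event is untouched until parsing has succeeded. */
            bp->hw.info = hw;
            return 0;
    }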
index 7a1f99f..e4a49c0 100644 (file)
 #include <linux/preempt.h>
 #include <linux/ftrace.h>
 
-/*
- * This is called from ftrace code after invoking registered handlers to
- * disambiguate regs->nip changes done by jprobes and livepatch. We check if
- * there is an active jprobe at the provided address (mcount location).
- */
-int __is_active_jprobe(unsigned long addr)
-{
-       if (!preemptible()) {
-               struct kprobe *p = raw_cpu_read(current_kprobe);
-               return (p && (unsigned long)p->addr == addr) ? 1 : 0;
-       }
-
-       return 0;
-}
-
-static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                     struct kprobe_ctlblk *kcb, unsigned long orig_nip)
-{
-       /*
-        * Emulate singlestep (and also recover regs->nip)
-        * as if there is a nop
-        */
-       regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
-       if (unlikely(p->post_handler)) {
-               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-               p->post_handler(p, regs, 0);
-       }
-       __this_cpu_write(current_kprobe, NULL);
-       if (orig_nip)
-               regs->nip = orig_nip;
-       return 1;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                   struct kprobe_ctlblk *kcb)
-{
-       if (kprobe_ftrace(p))
-               return __skip_singlestep(p, regs, kcb, 0);
-       else
-               return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
 /* Ftrace callback handler for kprobes */
 void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
                           struct ftrace_ops *ops, struct pt_regs *regs)
@@ -76,18 +32,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
 
-       preempt_disable();
-
        p = get_kprobe((kprobe_opcode_t *)nip);
        if (unlikely(!p) || kprobe_disabled(p))
-               goto end;
+               return;
 
        kcb = get_kprobe_ctlblk();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(p);
        } else {
-               unsigned long orig_nip = regs->nip;
-
                /*
                 * On powerpc, NIP is *before* this instruction for the
                 * pre handler
@@ -96,19 +48,23 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 
                __this_cpu_write(current_kprobe, p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-               if (!p->pre_handler || !p->pre_handler(p, regs))
-                       __skip_singlestep(p, regs, kcb, orig_nip);
-               else {
+               if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        /*
-                        * If pre_handler returns !0, it sets regs->nip and
-                        * resets current kprobe. In this case, we should not
-                        * re-enable preemption.
+                        * Emulate singlestep (and also recover regs->nip)
+                        * as if there is a nop
                         */
-                       return;
+                       regs->nip += MCOUNT_INSN_SIZE;
+                       if (unlikely(p->post_handler)) {
+                               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+                               p->post_handler(p, regs, 0);
+                       }
                }
+               /*
+                * If pre_handler returns !0, it changes regs->nip. We have to
+                * skip emulating post_handler.
+                */
+               __this_cpu_write(current_kprobe, NULL);
        }
-end:
-       preempt_enable_no_resched();
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
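
With the jprobe hooks gone, a probe that wants to divert execution does so directly from its pre_handler: write the new instruction pointer and return non-zero, which (per the comment above) skips both the single-step emulation and the post_handler. A hedged sketch; my_fixup is a hypothetical diversion target, and instruction_pointer_set() is the arch-neutral way to set the IP:

    static void my_fixup(void);     /* hypothetical replacement routine */

    static int divert_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            /* Resume at my_fixup instead of the probed instruction. */
            instruction_pointer_set(regs, (unsigned long)my_fixup);
            return 1;       /* non-zero: skip single-step and post_handler */
    }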
 
index e4c5bf3..5c60bb0 100644 (file)
@@ -317,25 +317,17 @@ int kprobe_handler(struct pt_regs *regs)
                        }
                        prepare_singlestep(p, regs);
                        return 1;
-               } else {
-                       if (*addr != BREAKPOINT_INSTRUCTION) {
-                               /* If trap variant, then it belongs not to us */
-                               kprobe_opcode_t cur_insn = *addr;
-                               if (is_trap(cur_insn))
-                                       goto no_kprobe;
-                               /* The breakpoint instruction was removed by
-                                * another cpu right after we hit, no further
-                                * handling of this interrupt is appropriate
-                                */
-                               ret = 1;
+               } else if (*addr != BREAKPOINT_INSTRUCTION) {
+                       /* If trap variant, then it belongs not to us */
+                       kprobe_opcode_t cur_insn = *addr;
+
+                       if (is_trap(cur_insn))
                                goto no_kprobe;
-                       }
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs)) {
-                               if (!skip_singlestep(p, regs, kcb))
-                                       goto ss_probe;
-                               ret = 1;
-                       }
+                       /* The breakpoint instruction was removed by
+                        * another cpu right after we hit, no further
+                        * handling of this interrupt is appropriate
+                        */
+                       ret = 1;
                }
                goto no_kprobe;
        }
@@ -350,7 +342,7 @@ int kprobe_handler(struct pt_regs *regs)
                         */
                        kprobe_opcode_t cur_insn = *addr;
                        if (is_trap(cur_insn))
-                               goto no_kprobe;
+                               goto no_kprobe;
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
@@ -366,11 +358,13 @@ int kprobe_handler(struct pt_regs *regs)
 
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p, regs, kcb);
-       if (p->pre_handler && p->pre_handler(p, regs))
-               /* handler has already set things up, so skip ss setup */
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               /* handler changed execution path, so skip ss setup */
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
-ss_probe:
        if (p->ainsn.boostable >= 0) {
                ret = try_to_emulate(p, regs);
 
@@ -611,60 +605,6 @@ unsigned long arch_deref_entry_point(void *entry)
 }
 NOKPROBE_SYMBOL(arch_deref_entry_point);
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
-       /* setup return addr to the jprobe handler routine */
-       regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef PPC64_ELF_ABI_v2
-       regs->gpr[12] = (unsigned long)jp->entry;
-#elif defined(PPC64_ELF_ABI_v1)
-       regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-#endif
-
-       /*
-        * jprobes use jprobe_return() which skips the normal return
-        * path of the function, and this messes up the accounting of the
-        * function graph tracer.
-        *
-        * Pause function graph tracing while performing the jprobe function.
-        */
-       pause_graph_tracing();
-
-       return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void __used jprobe_return(void)
-{
-       asm volatile("jprobe_return_trap:\n"
-                    "trap\n"
-                    ::: "memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) {
-               pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n",
-                               regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap"));
-               return 0;
-       }
-
-       memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
-       /* It's OK to start function graph tracing again */
-       unpause_graph_tracing();
-       preempt_enable_no_resched();
-       return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
 static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
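
Former jprobe users can get similar entry-time visibility from a plain kprobe: at function entry the arguments are still in the ABI-defined argument registers, so a pre_handler can read them out of pt_regs. A sketch under that assumption (the probed symbol is illustrative; on ppc64 the first integer argument arrives in r3):

    static int args_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("%s: arg0=0x%lx\n", p->symbol_name, regs->gpr[3]);
            return 0;       /* 0: continue with normal single-stepping */
    }

    static struct kprobe entry_probe = {
            .symbol_name = "do_sys_open",   /* illustrative target */
            .pre_handler = args_pre_handler,
    };
    /* register_kprobe(&entry_probe) from module init, as usual. */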
index 9a5b5a5..32476a6 100644 (file)
@@ -104,39 +104,13 @@ ftrace_regs_call:
        bl      ftrace_stub
        nop
 
-       /* Load the possibly modified NIP */
-       ld      r15, _NIP(r1)
-
+       /* Load ctr with the possibly modified NIP */
+       ld      r3, _NIP(r1)
+       mtctr   r3
 #ifdef CONFIG_LIVEPATCH
-       cmpd    r14, r15        /* has NIP been altered? */
+       cmpd    r14, r3         /* has NIP been altered? */
 #endif
 
-#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
-       /* NIP has not been altered, skip over further checks */
-       beq     1f
-
-       /* Check if there is an active jprobe on us */
-       subi    r3, r14, 4
-       bl      __is_active_jprobe
-       nop
-
-       /*
-        * If r3 == 1, then this is a kprobe/jprobe.
-        * else, this is livepatched function.
-        *
-        * The conditional branch for livepatch_handler below will use the
-        * result of this comparison. For kprobe/jprobe, we just need to branch to
-        * the new NIP, not call livepatch_handler. The branch below is bne, so we
-        * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
-        * CR0[EQ] = (r3 == 1).
-        */
-       cmpdi   r3, 1
-1:
-#endif
-
-       /* Load CTR with the possibly modified NIP */
-       mtctr   r15
-
        /* Restore gprs */
        REST_GPR(0,r1)
        REST_10GPRS(2,r1)
@@ -154,10 +128,7 @@ ftrace_regs_call:
        addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-        /*
-        * Based on the cmpd or cmpdi above, if the NIP was altered and we're
-        * not on a kprobe/jprobe, then handle livepatch.
-        */
+        /* Based on the cmpd above, if the NIP was altered handle livepatch */
        bne-    livepatch_handler
 #endif
 
index 3f66fcf..19d8ab4 100644 (file)
@@ -1469,7 +1469,7 @@ static int collect_events(struct perf_event *group, int max_count,
 }
 
 /*
- * Add a event to the PMU.
+ * Add an event to the PMU.
  * If all events are not already frozen, then we disable and
  * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
@@ -1548,7 +1548,7 @@ nocheck:
 }
 
 /*
- * Remove a event from the PMU.
+ * Remove an event from the PMU.
  */
 static void power_pmu_del(struct perf_event *event, int ef_flags)
 {
@@ -1742,7 +1742,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
- * A event can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
  * that a limited PMC can count, doesn't require interrupts, and
  * doesn't exclude any processor mode.
  */
index 13de80c..b106aa2 100644 (file)
@@ -68,8 +68,6 @@ struct kprobe_ctlblk {
        unsigned long kprobe_saved_imask;
        unsigned long kprobe_saved_ctl[3];
        struct prev_kprobe prev_kprobe;
-       struct pt_regs jprobe_saved_regs;
-       kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *p);
index 60f60af..7c0a095 100644 (file)
@@ -321,38 +321,20 @@ static int kprobe_handler(struct pt_regs *regs)
                         * If we have no pre-handler or it returned 0, we
                         * continue with single stepping. If we have a
                         * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry
-                        * for jprobe processing, so get out doing nothing
-                        * more here.
+                        * for changing the execution path, so get out doing
+                        * nothing more here.
                         */
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-                       if (p->pre_handler && p->pre_handler(p, regs))
+                       if (p->pre_handler && p->pre_handler(p, regs)) {
+                               pop_kprobe(kcb);
+                               preempt_enable_no_resched();
                                return 1;
+                       }
                        kcb->kprobe_status = KPROBE_HIT_SS;
                }
                enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
                return 1;
-       } else if (kprobe_running()) {
-               p = __this_cpu_read(current_kprobe);
-               if (p->break_handler && p->break_handler(p, regs)) {
-                       /*
-                        * Continuation after the jprobe completed and
-                        * caused the jprobe_return trap. The jprobe
-                        * break_handler "returns" to the original
-                        * function that still has the kprobe breakpoint
-                        * installed. We continue with single stepping.
-                        */
-                       kcb->kprobe_status = KPROBE_HIT_SS;
-                       enable_singlestep(kcb, regs,
-                                         (unsigned long) p->ainsn.insn);
-                       return 1;
-               } /* else:
-                  * No kprobe at this address and the current kprobe
-                  * has no break handler (no jprobe!). The kernel just
-                  * exploded, let the standard trap handler pick up the
-                  * pieces.
-                  */
        } /* else:
           * No kprobe at this address and no active kprobe. The trap has
           * not been caused by a kprobe breakpoint. The race of breakpoint
@@ -452,9 +434,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
        regs->psw.addr = orig_ret_address;
 
-       pop_kprobe(get_kprobe_ctlblk());
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
@@ -661,60 +641,6 @@ int kprobe_exceptions_notify(struct notifier_block *self,
 }
 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long stack;
-
-       memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
-       /* setup return addr to the jprobe handler routine */
-       regs->psw.addr = (unsigned long) jp->entry;
-       regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
-
-       /* r15 is the stack pointer */
-       stack = (unsigned long) regs->gprs[15];
-
-       memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
-
-       /*
-        * jprobes use jprobe_return() which skips the normal return
-        * path of the function, and this messes up the accounting of the
-        * function graph tracer to get messed up.
-        *
-        * Pause function graph tracing while performing the jprobe function.
-        */
-       pause_graph_tracing();
-       return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
-       asm volatile(".word 0x0002");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long stack;
-
-       /* It's OK to start function graph tracing again */
-       unpause_graph_tracing();
-
-       stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
-
-       /* Put the regs back */
-       memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
-       /* put the stack back */
-       memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
-       preempt_enable_no_resched();
-       return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
 static struct kprobe trampoline = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
index 7431c17..199d17b 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/types.h>
 
 struct arch_hw_breakpoint {
-       char            *name; /* Contains name of the symbol to set bkpt */
        unsigned long   address;
        u16             len;
        u16             type;
@@ -41,6 +40,7 @@ struct sh_ubc {
        struct clk      *clk;   /* optional interface clock / MSTP bit */
 };
 
+struct perf_event_attr;
 struct perf_event;
 struct task_struct;
 struct pmu;
@@ -54,8 +54,10 @@ static inline int hw_breakpoint_slots(int type)
 }
 
 /* arch/sh/kernel/hw_breakpoint.c */
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                           unsigned long val, void *data);
 
index 85d8bca..6171682 100644 (file)
@@ -27,7 +27,6 @@ struct kprobe;
 
 void arch_remove_kprobe(struct kprobe *);
 void kretprobe_trampoline(void);
-void jprobe_return_end(void);
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
@@ -43,9 +42,6 @@ struct prev_kprobe {
 /* per-cpu kprobe control block */
 struct kprobe_ctlblk {
        unsigned long kprobe_status;
-       unsigned long jprobe_saved_r15;
-       struct pt_regs jprobe_saved_regs;
-       kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
        struct prev_kprobe prev_kprobe;
 };
 
index 8648ed0..d9ff3b4 100644 (file)
@@ -124,14 +124,13 @@ static int get_hbp_len(u16 hbp_len)
 /*
  * Check for virtual address in kernel space.
  */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
        unsigned int len;
        unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-       va = info->address;
-       len = get_hbp_len(info->len);
+       va = hw->address;
+       len = get_hbp_len(hw->len);
 
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -174,40 +173,40 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
        return 0;
 }
 
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+                             const struct perf_event_attr *attr,
+                             struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
-       info->address = bp->attr.bp_addr;
+       hw->address = attr->bp_addr;
 
        /* Len */
-       switch (bp->attr.bp_len) {
+       switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
-               info->len = SH_BREAKPOINT_LEN_1;
+               hw->len = SH_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
-               info->len = SH_BREAKPOINT_LEN_2;
+               hw->len = SH_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
-               info->len = SH_BREAKPOINT_LEN_4;
+               hw->len = SH_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
-               info->len = SH_BREAKPOINT_LEN_8;
+               hw->len = SH_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }
 
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_R:
-               info->type = SH_BREAKPOINT_READ;
+               hw->type = SH_BREAKPOINT_READ;
                break;
        case HW_BREAKPOINT_W:
-               info->type = SH_BREAKPOINT_WRITE;
+               hw->type = SH_BREAKPOINT_WRITE;
                break;
        case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
-               info->type = SH_BREAKPOINT_RW;
+               hw->type = SH_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
@@ -219,19 +218,20 @@ static int arch_build_bp_info(struct perf_event *bp)
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned int align;
        int ret;
 
-       ret = arch_build_bp_info(bp);
+       ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                return ret;
 
        ret = -EINVAL;
 
-       switch (info->len) {
+       switch (hw->len) {
        case SH_BREAKPOINT_LEN_1:
                align = 0;
                break;
@@ -248,18 +248,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                return ret;
        }
 
-       /*
-        * For kernel-addresses, either the address or symbol name can be
-        * specified.
-        */
-       if (info->name)
-               info->address = (unsigned long)kallsyms_lookup_name(info->name);
-
        /*
         * Check that the low-order bits of the address are appropriate
         * for the alignment implied by len.
         */
-       if (info->address & align)
+       if (hw->address & align)
                return -EINVAL;
 
        return 0;
@@ -346,7 +339,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
                perf_bp_event(bp, args->regs);
 
                /* Deliver the signal to userspace */
-               if (!arch_check_bp_in_kernelspace(bp)) {
+               if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
                        force_sig_fault(SIGTRAP, TRAP_HWBKPT,
                                        (void __user *)NULL, current);
                }
index 52a5e11..241e903 100644 (file)
@@ -248,11 +248,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                        prepare_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
-               } else {
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs)) {
-                               goto ss_probe;
-                       }
                }
                goto no_kprobe;
        }
@@ -277,11 +272,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
-       if (p->pre_handler && p->pre_handler(p, regs))
+       if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
-ss_probe:
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;
@@ -358,8 +355,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
        regs->pc = orig_ret_address;
        kretprobe_hash_unlock(current, &flags);
 
-       preempt_enable_no_resched();
-
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
@@ -508,14 +503,8 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                if (post_kprobe_handler(args->regs))
                                        ret = NOTIFY_STOP;
                        } else {
-                               if (kprobe_handler(args->regs)) {
+                               if (kprobe_handler(args->regs))
                                        ret = NOTIFY_STOP;
-                               } else {
-                                       p = __this_cpu_read(current_kprobe);
-                                       if (p->break_handler &&
-                                           p->break_handler(p, args->regs))
-                                               ret = NOTIFY_STOP;
-                               }
                        }
                }
        }
@@ -523,57 +512,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       unsigned long addr;
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       kcb->jprobe_saved_regs = *regs;
-       kcb->jprobe_saved_r15 = regs->regs[15];
-       addr = kcb->jprobe_saved_r15;
-
-       /*
-        * TBD: As Linus pointed out, gcc assumes that the callee
-        * owns the argument space and could overwrite it, e.g.
-        * tailcall optimization. So, to be absolutely safe
-        * we also save and restore enough stack bytes to cover
-        * the argument area.
-        */
-       memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
-              MIN_STACK_SIZE(addr));
-
-       regs->pc = (unsigned long)(jp->entry);
-
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long stack_addr = kcb->jprobe_saved_r15;
-       u8 *addr = (u8 *)regs->pc;
-
-       if ((addr >= (u8 *)jprobe_return) &&
-           (addr <= (u8 *)jprobe_return_end)) {
-               *regs = kcb->jprobe_saved_regs;
-
-               memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
-                      MIN_STACK_SIZE(stack_addr));
-
-               kcb->kprobe_status = KPROBE_HIT_SS;
-               preempt_enable_no_resched();
-               return 1;
-       }
-
-       return 0;
-}
-
 static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *)&kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
index 3704490..bfcaa63 100644 (file)
@@ -44,7 +44,6 @@ struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_orig_tnpc;
        unsigned long kprobe_orig_tstate_pil;
-       struct pt_regs jprobe_saved_regs;
        struct prev_kprobe prev_kprobe;
 };
 
index ab4ba43..dfbca24 100644 (file)
@@ -147,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                        kcb->kprobe_status = KPROBE_REENTER;
                        prepare_singlestep(p, regs, kcb);
                        return 1;
-               } else {
-                       if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+               } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                        /* The breakpoint instruction was removed by
                         * another cpu right after we hit, no further
                         * handling of this interrupt is appropriate
                         */
-                               ret = 1;
-                               goto no_kprobe;
-                       }
-                       p = __this_cpu_read(current_kprobe);
-                       if (p->break_handler && p->break_handler(p, regs))
-                               goto ss_probe;
+                       ret = 1;
                }
                goto no_kprobe;
        }
@@ -181,10 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-       if (p->pre_handler && p->pre_handler(p, regs))
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               reset_current_kprobe();
+               preempt_enable_no_resched();
                return 1;
+       }
 
-ss_probe:
        prepare_singlestep(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;
@@ -441,53 +437,6 @@ out:
        exception_exit(prev_state);
 }
 
-/* Jprobes support.  */
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
-
-       regs->tpc  = (unsigned long) jp->entry;
-       regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
-       regs->tstate |= TSTATE_PIL;
-
-       return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       register unsigned long orig_fp asm("g1");
-
-       orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
-       __asm__ __volatile__("\n"
-"1:    cmp             %%sp, %0\n\t"
-       "blu,a,pt       %%xcc, 1b\n\t"
-       " restore\n\t"
-       ".globl         jprobe_return_trap_instruction\n"
-"jprobe_return_trap_instruction:\n\t"
-       "ta             0x70"
-       : /* no outputs */
-       : "r" (orig_fp));
-}
-
-extern void jprobe_return_trap_instruction(void);
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       u32 *addr = (u32 *) regs->tpc;
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       if (addr == (u32 *) jprobe_return_trap_instruction) {
-               memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-
 /* The value stored in the return address register is actually 2
  * instructions before where the callee will return to.
  * Sequences usually look something like this
@@ -562,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        regs->tpc = orig_ret_address;
        regs->tnpc = orig_ret_address + 4;
 
-       reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
-       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index cf372b9..f3e006b 100644 (file)
@@ -216,6 +216,8 @@ static void intel_pmu_lbr_reset_64(void)
 
 void intel_pmu_lbr_reset(void)
 {
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
        if (!x86_pmu.lbr_nr)
                return;
 
@@ -223,6 +225,9 @@ void intel_pmu_lbr_reset(void)
                intel_pmu_lbr_reset_32();
        else
                intel_pmu_lbr_reset_64();
+
+       cpuc->last_task_ctx = NULL;
+       cpuc->last_log_id = 0;
 }
 
 /*
@@ -334,6 +339,7 @@ static inline u64 rdlbr_to(unsigned int idx)
 
 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 {
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int i;
        unsigned lbr_idx, mask;
        u64 tos;
@@ -344,9 +350,21 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
                return;
        }
 
-       mask = x86_pmu.lbr_nr - 1;
        tos = task_ctx->tos;
-       for (i = 0; i < tos; i++) {
+       /*
+        * Do not restore the LBR registers if:
+        * - no one else touched them, and
+        * - the core did not enter C6 (which would have cleared them)
+        */
+       if ((task_ctx == cpuc->last_task_ctx) &&
+           (task_ctx->log_id == cpuc->last_log_id) &&
+           rdlbr_from(tos)) {
+               task_ctx->lbr_stack_state = LBR_NONE;
+               return;
+       }
+
+       mask = x86_pmu.lbr_nr - 1;
+       for (i = 0; i < task_ctx->valid_lbrs; i++) {
                lbr_idx = (tos - i) & mask;
                wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
                wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
@@ -354,14 +372,24 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
+
+       for (; i < x86_pmu.lbr_nr; i++) {
+               lbr_idx = (tos - i) & mask;
+               wrlbr_from(lbr_idx, 0);
+               wrlbr_to(lbr_idx, 0);
+               if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+                       wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
+       }
+
        wrmsrl(x86_pmu.lbr_tos, tos);
        task_ctx->lbr_stack_state = LBR_NONE;
 }
 
 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        unsigned lbr_idx, mask;
-       u64 tos;
+       u64 tos, from;
        int i;
 
        if (task_ctx->lbr_callstack_users == 0) {
@@ -371,15 +399,22 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 
        mask = x86_pmu.lbr_nr - 1;
        tos = intel_pmu_lbr_tos();
-       for (i = 0; i < tos; i++) {
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
                lbr_idx = (tos - i) & mask;
-               task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+               from = rdlbr_from(lbr_idx);
+               if (!from)
+                       break;
+               task_ctx->lbr_from[i] = from;
                task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
+       task_ctx->valid_lbrs = i;
        task_ctx->tos = tos;
        task_ctx->lbr_stack_state = LBR_VALID;
+
+       cpuc->last_task_ctx = task_ctx;
+       cpuc->last_log_id = ++task_ctx->log_id;
 }
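
The save loop above treats the LBR register file as a ring: with lbr_nr a power of two, (tos - i) & mask walks backwards from the top-of-stack, and the first all-zero FROM value terminates the walk, with the count recorded in valid_lbrs for the restore side. A stand-alone illustration of just the index arithmetic (not kernel code):

    /* Visit lbr_nr ring slots newest-first, starting at tos. */
    static void walk_lbr_ring(unsigned int tos, unsigned int lbr_nr)
    {
            unsigned int mask = lbr_nr - 1; /* assumes lbr_nr is a power of two */
            unsigned int i;

            for (i = 0; i < lbr_nr; i++) {
                    unsigned int idx = (tos - i) & mask;

                    (void)idx;      /* idx == tos at i == 0; wraps modulo lbr_nr */
            }
    }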
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
@@ -531,7 +566,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
  */
 static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 {
-       bool need_info = false;
+       bool need_info = false, call_stack = false;
        unsigned long mask = x86_pmu.lbr_nr - 1;
        int lbr_format = x86_pmu.intel_cap.lbr_format;
        u64 tos = intel_pmu_lbr_tos();
@@ -542,7 +577,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
        if (cpuc->lbr_sel) {
                need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
                if (cpuc->lbr_sel->config & LBR_CALL_STACK)
-                       num = tos;
+                       call_stack = true;
        }
 
        for (i = 0; i < num; i++) {
@@ -555,6 +590,13 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
                from = rdlbr_from(lbr_idx);
                to   = rdlbr_to(lbr_idx);
 
+               /*
+                * Read LBR call stack entries
+                * until invalid entry (0s) is detected.
+                */
+               if (call_stack && !from)
+                       break;
+
                if (lbr_format == LBR_FORMAT_INFO && need_info) {
                        u64 info;
 
index 9f37114..2430398 100644 (file)
@@ -163,6 +163,7 @@ struct intel_excl_cntrs {
        unsigned        core_id;        /* per-core: core id */
 };
 
+struct x86_perf_task_context;
 #define MAX_LBR_ENTRIES                32
 
 enum {
@@ -214,6 +215,8 @@ struct cpu_hw_events {
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
        struct er_account               *lbr_sel;
        u64                             br_sel;
+       struct x86_perf_task_context    *last_task_ctx;
+       int                             last_log_id;
 
        /*
         * Intel host/guest exclude bits
@@ -648,8 +651,10 @@ struct x86_perf_task_context {
        u64 lbr_to[MAX_LBR_ENTRIES];
        u64 lbr_info[MAX_LBR_ENTRIES];
        int tos;
+       int valid_lbrs;
        int lbr_callstack_users;
        int lbr_stack_state;
+       int log_id;
 };
 
 #define x86_add_quirk(func_)                                           \
index f59c398..a1f0e90 100644 (file)
@@ -49,11 +49,14 @@ static inline int hw_breakpoint_slots(int type)
        return HBP_NUM;
 }
 
+struct perf_event_attr;
 struct perf_event;
 struct pmu;
 
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+                                   const struct perf_event_attr *attr,
+                                   struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                           unsigned long val, void *data);
 
index 367d99c..c8cec1b 100644 (file)
@@ -78,7 +78,7 @@ struct arch_specific_insn {
         * boostable = true: This instruction has been boosted: we have
         * added a relative jump after the instruction copy in insn,
         * so no single-step and fixup are needed (unless there's
-        * a post_handler or break_handler).
+        * a post_handler).
         */
        bool boostable;
        bool if_modifier;
@@ -111,9 +111,6 @@ struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_old_flags;
        unsigned long kprobe_saved_flags;
-       unsigned long *jprobe_saved_sp;
-       struct pt_regs jprobe_saved_regs;
-       kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
        struct prev_kprobe prev_kprobe;
 };
 
index 8771766..34a5c17 100644 (file)
@@ -169,28 +169,29 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
                set_dr_addr_mask(0, i);
 }
 
-/*
- * Check for virtual address in kernel space.
- */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+static int arch_bp_generic_len(int x86_len)
 {
-       unsigned int len;
-       unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
-       va = info->address;
-       len = bp->attr.bp_len;
-
-       /*
-        * We don't need to worry about va + len - 1 overflowing:
-        * we already require that va is aligned to a multiple of len.
-        */
-       return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+       switch (x86_len) {
+       case X86_BREAKPOINT_LEN_1:
+               return HW_BREAKPOINT_LEN_1;
+       case X86_BREAKPOINT_LEN_2:
+               return HW_BREAKPOINT_LEN_2;
+       case X86_BREAKPOINT_LEN_4:
+               return HW_BREAKPOINT_LEN_4;
+#ifdef CONFIG_X86_64
+       case X86_BREAKPOINT_LEN_8:
+               return HW_BREAKPOINT_LEN_8;
+#endif
+       default:
+               return -EINVAL;
+       }
 }
 
 int arch_bp_generic_fields(int x86_len, int x86_type,
                           int *gen_len, int *gen_type)
 {
+       int len;
+
        /* Type */
        switch (x86_type) {
        case X86_BREAKPOINT_EXECUTE:
@@ -211,42 +212,47 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
        }
 
        /* Len */
-       switch (x86_len) {
-       case X86_BREAKPOINT_LEN_1:
-               *gen_len = HW_BREAKPOINT_LEN_1;
-               break;
-       case X86_BREAKPOINT_LEN_2:
-               *gen_len = HW_BREAKPOINT_LEN_2;
-               break;
-       case X86_BREAKPOINT_LEN_4:
-               *gen_len = HW_BREAKPOINT_LEN_4;
-               break;
-#ifdef CONFIG_X86_64
-       case X86_BREAKPOINT_LEN_8:
-               *gen_len = HW_BREAKPOINT_LEN_8;
-               break;
-#endif
-       default:
+       len = arch_bp_generic_len(x86_len);
+       if (len < 0)
                return -EINVAL;
-       }
+       *gen_len = len;
 
        return 0;
 }
 
-
-static int arch_build_bp_info(struct perf_event *bp)
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+       unsigned long va;
+       int len;
 
-       info->address = bp->attr.bp_addr;
+       va = hw->address;
+       len = arch_bp_generic_len(hw->len);
+       WARN_ON_ONCE(len < 0);
+
+       /*
+        * We don't need to worry about va + len - 1 overflowing:
+        * we already require that va is aligned to a multiple of len.
+        */
+       return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
+}
+
+static int arch_build_bp_info(struct perf_event *bp,
+                             const struct perf_event_attr *attr,
+                             struct arch_hw_breakpoint *hw)
+{
+       hw->address = attr->bp_addr;
+       hw->mask = 0;
 
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_W:
-               info->type = X86_BREAKPOINT_WRITE;
+               hw->type = X86_BREAKPOINT_WRITE;
                break;
        case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
-               info->type = X86_BREAKPOINT_RW;
+               hw->type = X86_BREAKPOINT_RW;
                break;
        case HW_BREAKPOINT_X:
                /*
@@ -254,23 +260,23 @@ static int arch_build_bp_info(struct perf_event *bp)
                 * acceptable for kprobes.  On non-kprobes kernels, we don't
                 * allow kernel breakpoints at all.
                 */
-               if (bp->attr.bp_addr >= TASK_SIZE_MAX) {
+               if (attr->bp_addr >= TASK_SIZE_MAX) {
 #ifdef CONFIG_KPROBES
-                       if (within_kprobe_blacklist(bp->attr.bp_addr))
+                       if (within_kprobe_blacklist(attr->bp_addr))
                                return -EINVAL;
 #else
                        return -EINVAL;
 #endif
                }
 
-               info->type = X86_BREAKPOINT_EXECUTE;
+               hw->type = X86_BREAKPOINT_EXECUTE;
                /*
                 * x86 inst breakpoints need to have a specific undefined len.
                 * But we still need to check userspace is not trying to setup
                 * an unsupported length, to get a range breakpoint for example.
                 */
-               if (bp->attr.bp_len == sizeof(long)) {
-                       info->len = X86_BREAKPOINT_LEN_X;
+               if (attr->bp_len == sizeof(long)) {
+                       hw->len = X86_BREAKPOINT_LEN_X;
                        return 0;
                }
        default:
@@ -278,28 +284,26 @@ static int arch_build_bp_info(struct perf_event *bp)
        }
 
        /* Len */
-       info->mask = 0;
-
-       switch (bp->attr.bp_len) {
+       switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
-               info->len = X86_BREAKPOINT_LEN_1;
+               hw->len = X86_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
-               info->len = X86_BREAKPOINT_LEN_2;
+               hw->len = X86_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
-               info->len = X86_BREAKPOINT_LEN_4;
+               hw->len = X86_BREAKPOINT_LEN_4;
                break;
 #ifdef CONFIG_X86_64
        case HW_BREAKPOINT_LEN_8:
-               info->len = X86_BREAKPOINT_LEN_8;
+               hw->len = X86_BREAKPOINT_LEN_8;
                break;
 #endif
        default:
                /* AMD range breakpoint */
-               if (!is_power_of_2(bp->attr.bp_len))
+               if (!is_power_of_2(attr->bp_len))
                        return -EINVAL;
-               if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
+               if (attr->bp_addr & (attr->bp_len - 1))
                        return -EINVAL;
 
                if (!boot_cpu_has(X86_FEATURE_BPEXT))
@@ -312,8 +316,8 @@ static int arch_build_bp_info(struct perf_event *bp)
                 * breakpoints, then we'll have to check for kprobe-blacklisted
                 * addresses anywhere in the range.
                 */
-               info->mask = bp->attr.bp_len - 1;
-               info->len = X86_BREAKPOINT_LEN_1;
+               hw->mask = attr->bp_len - 1;
+               hw->len = X86_BREAKPOINT_LEN_1;
        }
 
        return 0;
@@ -322,22 +326,23 @@ static int arch_build_bp_info(struct perf_event *bp)
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned int align;
        int ret;
 
 
-       ret = arch_build_bp_info(bp);
+       ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                return ret;
 
-       switch (info->len) {
+       switch (hw->len) {
        case X86_BREAKPOINT_LEN_1:
                align = 0;
-               if (info->mask)
-                       align = info->mask;
+               if (hw->mask)
+                       align = hw->mask;
                break;
        case X86_BREAKPOINT_LEN_2:
                align = 1;
@@ -358,7 +363,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
         * Check that the low-order bits of the address are appropriate
         * for the alignment implied by len.
         */
-       if (info->address & align)
+       if (hw->address & align)
                return -EINVAL;
 
        return 0;
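
The attr-based parse path is what in-kernel users ultimately exercise through the hw_breakpoint API. A hedged sketch modeled on samples/hw_breakpoint/data_breakpoint.c: fill in a perf_event_attr and let the core run it through hw_breakpoint_arch_parse():

    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>

    static void watch_triggered(struct perf_event *bp,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
    {
            pr_info("hw-breakpoint: 0x%llx accessed\n", bp->attr.bp_addr);
    }

    static struct perf_event * __percpu *watch_kernel_addr(unsigned long addr)
    {
            struct perf_event_attr attr;

            hw_breakpoint_init(&attr);      /* sane defaults for a bp event */
            attr.bp_addr = addr;
            attr.bp_len  = HW_BREAKPOINT_LEN_4;
            attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;

            return register_wide_hw_breakpoint(&attr, watch_triggered, NULL);
    }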
index ae38dcc..2b949f4 100644 (file)
@@ -105,14 +105,4 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
 }
 #endif
 
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                          struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                                 struct kprobe_ctlblk *kcb)
-{
-       return 0;
-}
-#endif
 #endif
index 6f4d423..b0d1e81 100644 (file)
@@ -66,8 +66,6 @@
 
 #include "common.h"
 
-void jprobe_return_end(void);
-
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -395,8 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
                          - (u8 *) real;
                if ((s64) (s32) newdisp != newdisp) {
                        pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
-                       pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
-                               src, real, insn->displacement.value);
                        return 0;
                }
                disp = (u8 *) dest + insn_offset_displacement(insn);
@@ -596,7 +592,6 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
                 * stepping.
                 */
                regs->ip = (unsigned long)p->ainsn.insn;
-               preempt_enable_no_resched();
                return;
        }
 #endif
@@ -640,8 +635,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
                 * Raise a BUG or we'll continue in an endless reentering loop
                 * and eventually a stack overflow.
                 */
-               printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
-                      p->addr);
+               pr_err("Unrecoverable kprobe detected.\n");
                dump_kprobe(p);
                BUG();
        default:
@@ -669,12 +663,10 @@ int kprobe_int3_handler(struct pt_regs *regs)
 
        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
        /*
-        * We don't want to be preempted for the entire
-        * duration of kprobe processing. We conditionally
-        * re-enable preemption at the end of this function,
-        * and also in reenter_kprobe() and setup_singlestep().
+        * We don't want to be preempted for the entire duration of kprobe
+        * processing. Since the int3 and debug traps disable irqs and we
+        * clear IF while single-stepping, this code is not preemptible.
         */
-       preempt_disable();
 
        kcb = get_kprobe_ctlblk();
        p = get_kprobe(addr);
@@ -690,13 +682,14 @@ int kprobe_int3_handler(struct pt_regs *regs)
                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing.  If we have a
-                        * pre-handler and it returned non-zero, it prepped
-                        * for calling the break_handler below on re-entry
-                        * for jprobe processing, so get out doing nothing
-                        * more here.
+                        * pre-handler and it returned non-zero, that means
+                        * the user handler set up the registers to resume at
+                        * another instruction, so we must skip single-stepping.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs))
                                setup_singlestep(p, regs, kcb, 0);
+                       else
+                               reset_current_kprobe();
                        return 1;
                }
        } else if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -710,18 +703,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
                 * the original instruction.
                 */
                regs->ip = (unsigned long)addr;
-               preempt_enable_no_resched();
                return 1;
-       } else if (kprobe_running()) {
-               p = __this_cpu_read(current_kprobe);
-               if (p->break_handler && p->break_handler(p, regs)) {
-                       if (!skip_singlestep(p, regs, kcb))
-                               setup_singlestep(p, regs, kcb, 0);
-                       return 1;
-               }
        } /* else: not a kprobe fault; let the kernel handle it */
 
-       preempt_enable_no_resched();
        return 0;
 }
 NOKPROBE_SYMBOL(kprobe_int3_handler);
@@ -972,8 +956,6 @@ int kprobe_debug_handler(struct pt_regs *regs)
        }
        reset_current_kprobe();
 out:
-       preempt_enable_no_resched();
-
        /*
         * if somebody else is singlestepping across a probe point, flags
         * will have TF set, in which case, continue the remaining processing
@@ -1020,7 +1002,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
-               preempt_enable_no_resched();
        } else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
                   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
                /*
@@ -1083,93 +1064,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
 }
 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       unsigned long addr;
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       kcb->jprobe_saved_regs = *regs;
-       kcb->jprobe_saved_sp = stack_addr(regs);
-       addr = (unsigned long)(kcb->jprobe_saved_sp);
-
-       /*
-        * As Linus pointed out, gcc assumes that the callee
-        * owns the argument space and could overwrite it, e.g.
-        * tailcall optimization. So, to be absolutely safe
-        * we also save and restore enough stack bytes to cover
-        * the argument area.
-        * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
-        * raw stack chunk with redzones:
-        */
-       __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
-       regs->ip = (unsigned long)(jp->entry);
-
-       /*
-        * jprobes use jprobe_return() which skips the normal return
-        * path of the function, and this messes up the accounting of the
-        * function graph tracer to get messed up.
-        *
-        * Pause function graph tracing while performing the jprobe function.
-        */
-       pause_graph_tracing();
-       return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void jprobe_return(void)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-       /* Unpoison stack redzones in the frames we are going to jump over. */
-       kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
-
-       asm volatile (
-#ifdef CONFIG_X86_64
-                       "       xchg   %%rbx,%%rsp      \n"
-#else
-                       "       xchgl   %%ebx,%%esp     \n"
-#endif
-                       "       int3                    \n"
-                       "       .globl jprobe_return_end\n"
-                       "       jprobe_return_end:      \n"
-                       "       nop                     \n"::"b"
-                       (kcb->jprobe_saved_sp):"memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-NOKPROBE_SYMBOL(jprobe_return_end);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       u8 *addr = (u8 *) (regs->ip - 1);
-       struct jprobe *jp = container_of(p, struct jprobe, kp);
-       void *saved_sp = kcb->jprobe_saved_sp;
-
-       if ((addr > (u8 *) jprobe_return) &&
-           (addr < (u8 *) jprobe_return_end)) {
-               if (stack_addr(regs) != saved_sp) {
-                       struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
-                       printk(KERN_ERR
-                              "current sp %p does not match saved sp %p\n",
-                              stack_addr(regs), saved_sp);
-                       printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
-                       show_regs(saved_regs);
-                       printk(KERN_ERR "Current registers\n");
-                       show_regs(regs);
-                       BUG();
-               }
-               /* It's OK to start function graph tracing again */
-               unpause_graph_tracing();
-               *regs = kcb->jprobe_saved_regs;
-               __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
        bool is_in_entry_trampoline_section = false;
index 8dc0161..ef819e1 100644 (file)
 
 #include "common.h"
 
-static nokprobe_inline
-void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                     struct kprobe_ctlblk *kcb, unsigned long orig_ip)
-{
-       /*
-        * Emulate singlestep (and also recover regs->ip)
-        * as if there is a 5byte nop
-        */
-       regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
-       if (unlikely(p->post_handler)) {
-               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-               p->post_handler(p, regs, 0);
-       }
-       __this_cpu_write(current_kprobe, NULL);
-       if (orig_ip)
-               regs->ip = orig_ip;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                   struct kprobe_ctlblk *kcb)
-{
-       if (kprobe_ftrace(p)) {
-               __skip_singlestep(p, regs, kcb, 0);
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
 /* Ftrace callback handler for kprobes -- called with preemption disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *ops, struct pt_regs *regs)
@@ -75,18 +45,25 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                /* Kprobe handler expects regs->ip = ip + 1 as if a breakpoint hit */
                regs->ip = ip + sizeof(kprobe_opcode_t);
 
-               /* To emulate trap based kprobes, preempt_disable here */
-               preempt_disable();
                __this_cpu_write(current_kprobe, p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
-                       __skip_singlestep(p, regs, kcb, orig_ip);
-                       preempt_enable_no_resched();
+                       /*
+                        * Emulate singlestep (and also recover regs->ip)
+                        * as if there were a 5-byte nop
+                        */
+                       regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+                       if (unlikely(p->post_handler)) {
+                               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+                               p->post_handler(p, regs, 0);
+                       }
+                       regs->ip = orig_ip;
                }
                /*
-                * If pre_handler returns !0, it sets regs->ip and
-                * resets current kprobe, and keep preempt count +1.
+                * If pre_handler returns !0, it changes regs->ip. We have to
+                * skip emulating post_handler.
                 */
+               __this_cpu_write(current_kprobe, NULL);
        }
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
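Under this convention, a pre_handler that rewrites regs->ip returns nonzero, and the ftrace-based handler above skips the post_handler emulation; no preempt fixup is needed on either path. A minimal sketch of such a handler follows (not part of this patch; the probed symbol and replacement function are hypothetical, and the replacement must share the probed function's prototype and calling convention so it can return to the original caller):

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Hypothetical stand-in; must match the probed function's prototype. */
static long my_replacement(long arg)
{
	pr_info("redirected, arg=%ld\n", arg);
	return 0;
}

static int redirect_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Divert execution to the replacement function. */
	instruction_pointer_set(regs, (unsigned long)my_replacement);
	return 1;	/* regs->ip changed: no single stepping, no post_handler */
}

static struct kprobe kp = {
	.symbol_name	= "my_probed_func",	/* hypothetical probe point */
	.pre_handler	= redirect_pre,
};

Setup and teardown are the usual register_kprobe(&kp) and unregister_kprobe(&kp).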
index 203d398..eaf02f2 100644 (file)
@@ -491,7 +491,6 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
                regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
                if (!reenter)
                        reset_current_kprobe();
-               preempt_enable_no_resched();
                return 1;
        }
        return 0;
index dbe3053..9f119c1 100644 (file)
@@ -30,13 +30,16 @@ struct arch_hw_breakpoint {
        u16 type;
 };
 
+struct perf_event_attr;
 struct perf_event;
 struct pt_regs;
 struct task_struct;
 
 int hw_breakpoint_slots(int type);
-int arch_check_bp_in_kernelspace(struct perf_event *bp);
-int arch_validate_hwbkpt_settings(struct perf_event *bp);
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw);
 int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data);
 
index b35656a..c2e387c 100644 (file)
@@ -33,14 +33,13 @@ int hw_breakpoint_slots(int type)
        }
 }
 
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
        unsigned int len;
        unsigned long va;
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
-       va = info->address;
-       len = bp->attr.bp_len;
+       va = hw->address;
+       len = hw->len;
 
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -48,50 +47,41 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
 /*
  * Construct an arch_hw_breakpoint from a perf_event's attributes.
  */
-static int arch_build_bp_info(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+                            const struct perf_event_attr *attr,
+                            struct arch_hw_breakpoint *hw)
 {
-       struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
        /* Type */
-       switch (bp->attr.bp_type) {
+       switch (attr->bp_type) {
        case HW_BREAKPOINT_X:
-               info->type = XTENSA_BREAKPOINT_EXECUTE;
+               hw->type = XTENSA_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
-               info->type = XTENSA_BREAKPOINT_LOAD;
+               hw->type = XTENSA_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
-               info->type = XTENSA_BREAKPOINT_STORE;
+               hw->type = XTENSA_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
-               info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
+               hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }
 
        /* Len */
-       info->len = bp->attr.bp_len;
-       if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len))
+       hw->len = attr->bp_len;
+       if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
                return -EINVAL;
 
        /* Address */
-       info->address = bp->attr.bp_addr;
-       if (info->address & (info->len - 1))
+       hw->address = attr->bp_addr;
+       if (hw->address & (hw->len - 1))
                return -EINVAL;
 
        return 0;
 }
 
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
-{
-       int ret;
-
-       /* Build the arch_hw_breakpoint. */
-       ret = arch_build_bp_info(bp);
-       return ret;
-}
-
 int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
 {
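From a consumer's point of view, the split into hw_breakpoint_arch_parse() and arch_check_bp_in_kernelspace() is invisible: both run internally when a breakpoint event is created from a perf_event_attr. A sketch of the consumer side, loosely following samples/hw_breakpoint/data_breakpoint.c (the watched symbol is only an example):

#include <linux/kernel.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/kallsyms.h>

static struct perf_event * __percpu *wp;

static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	pr_info("watched data was written\n");
	dump_stack();
}

static int __init wp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);			/* sane defaults */
	attr.bp_addr = kallsyms_lookup_name("jiffies");	/* example target */
	attr.bp_len  = HW_BREAKPOINT_LEN_8;
	attr.bp_type = HW_BREAKPOINT_W;

	/* hw_breakpoint_arch_parse() validates attr behind this call */
	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
	return IS_ERR((void __force *)wp) ? PTR_ERR((void __force *)wp) : 0;
}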
index 9440a2f..e909413 100644 (file)
@@ -63,7 +63,6 @@ struct pt_regs;
 struct kretprobe;
 struct kretprobe_instance;
 typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
-typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
 typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
                                       unsigned long flags);
 typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
@@ -101,12 +100,6 @@ struct kprobe {
         */
        kprobe_fault_handler_t fault_handler;
 
-       /*
-        * ... called if breakpoint trap occurs in probe handler.
-        * Return 1 if it handled break, otherwise kernel will see it.
-        */
-       kprobe_break_handler_t break_handler;
-
        /* Saved opcode (which has been replaced with breakpoint) */
        kprobe_opcode_t opcode;
 
@@ -154,24 +147,6 @@ static inline int kprobe_ftrace(struct kprobe *p)
        return p->flags & KPROBE_FLAG_FTRACE;
 }
 
-/*
- * Special probe type that uses setjmp-longjmp type tricks to resume
- * execution at a specified entry with a matching prototype corresponding
- * to the probed function - a trick to enable arguments to become
- * accessible seamlessly by probe handling logic.
- * Note:
- * Because of the way compilers allocate stack space for local variables
- * etc upfront, regardless of sub-scopes within a function, this mirroring
- * principle currently works only for probes placed on function entry points.
- */
-struct jprobe {
-       struct kprobe kp;
-       void *entry;    /* probe handling code to jump to */
-};
-
-/* For backward compatibility with old code using JPROBE_ENTRY() */
-#define JPROBE_ENTRY(handler)  (handler)
-
 /*
  * Function-return probe -
  * Note:
@@ -389,9 +364,6 @@ int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
 int register_kprobes(struct kprobe **kps, int num);
 void unregister_kprobes(struct kprobe **kps, int num);
-int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
-int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-void jprobe_return(void);
 unsigned long arch_deref_entry_point(void *);
 
 int register_kretprobe(struct kretprobe *rp);
@@ -439,9 +411,6 @@ static inline void unregister_kprobe(struct kprobe *p)
 static inline void unregister_kprobes(struct kprobe **kps, int num)
 {
 }
-static inline void jprobe_return(void)
-{
-}
 static inline int register_kretprobe(struct kretprobe *rp)
 {
        return -ENOSYS;
@@ -468,20 +437,6 @@ static inline int enable_kprobe(struct kprobe *kp)
        return -ENOSYS;
 }
 #endif /* CONFIG_KPROBES */
-static inline int register_jprobe(struct jprobe *p)
-{
-       return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
-       return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
 static inline int disable_kretprobe(struct kretprobe *rp)
 {
        return disable_kprobe(&rp->kp);
@@ -490,14 +445,6 @@ static inline int enable_kretprobe(struct kretprobe *rp)
 {
        return enable_kprobe(&rp->kp);
 }
-static inline int disable_jprobe(struct jprobe *jp)
-{
-       return -ENOSYS;
-}
-static inline int enable_jprobe(struct jprobe *jp)
-{
-       return -ENOSYS;
-}
 
 #ifndef CONFIG_KPROBES
 static inline bool is_kprobe_insn_slot(unsigned long addr)
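With struct jprobe and its setjmp/longjmp handlers gone, code that wants the probed function's arguments reads them from the saved register set in a plain kprobe pre_handler instead. A hedged migration sketch (assumes the x86-64 calling convention, first argument in %rdi; the probed symbol is illustrative):

static int entry_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long arg1 = regs->di;	/* first argument on x86-64 */

	pr_info("%s entered, arg1=0x%lx\n", p->symbol_name, arg1);
	return 0;	/* continue normal execution */
}

static struct kprobe entry_probe = {
	.symbol_name	= "do_sys_open",	/* example probe point */
	.pre_handler	= entry_pre_handler,
};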
index 1fa1288..e6dd3a2 100644 (file)
@@ -490,7 +490,7 @@ struct perf_addr_filters_head {
 };
 
 /**
- * enum perf_event_state - the states of a event
+ * enum perf_event_state - the states of an event:
  */
 enum perf_event_state {
        PERF_EVENT_STATE_DEAD           = -4,
index 8f0434a..95dc727 100644 (file)
@@ -1656,7 +1656,7 @@ perf_event_groups_next(struct perf_event *event)
                                typeof(*event), group_node))
 
 /*
- * Add a event from the lists for its context.
+ * Add an event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
@@ -1844,7 +1844,7 @@ static void perf_group_attach(struct perf_event *event)
 }
 
 /*
- * Remove a event from the lists for its context.
+ * Remove an event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
@@ -2148,7 +2148,7 @@ static void __perf_event_disable(struct perf_event *event,
 }
 
 /*
- * Disable a event.
+ * Disable an event.
  *
  * If event->ctx is a cloned context, callers must make sure that
  * every task struct that event->ctx->task could possibly point to
@@ -2677,7 +2677,7 @@ static void __perf_event_enable(struct perf_event *event,
 }
 
 /*
- * Enable a event.
+ * Enable an event.
  *
  * If event->ctx is a cloned context, callers must make sure that
  * every task struct that event->ctx->task could possibly point to
@@ -2755,7 +2755,7 @@ static int __perf_event_stop(void *info)
         * events will refuse to restart because of rb::aux_mmap_count==0,
         * see comments in perf_aux_output_begin().
         *
-        * Since this is happening on a event-local CPU, no trace is lost
+        * Since this is happening on an event-local CPU, no trace is lost
         * while restarting.
         */
        if (sd->restart)
@@ -4827,7 +4827,7 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
        int ret;
 
        /*
-        * Return end-of-file for a read on a event that is in
+        * Return end-of-file for a read on an event that is in
         * error state (i.e. because it was pinned but it couldn't be
         * scheduled on to the CPU at some point).
         */
@@ -5273,11 +5273,11 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(perf_event_update_userpage);
 
-static int perf_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
 {
        struct perf_event *event = vmf->vma->vm_file->private_data;
        struct ring_buffer *rb;
-       int ret = VM_FAULT_SIGBUS;
+       vm_fault_t ret = VM_FAULT_SIGBUS;
 
        if (vmf->flags & FAULT_FLAG_MKWRITE) {
                if (vmf->pgoff == 0)
@@ -9898,7 +9898,7 @@ enabled:
 }
 
 /*
- * Allocate and initialize a event structure
+ * Allocate and initialize an event structure
  */
 static struct perf_event *
 perf_event_alloc(struct perf_event_attr *attr, int cpu,
@@ -11229,7 +11229,7 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * Inherit a event from parent task to child task.
+ * Inherit an event from parent task to child task.
  *
  * Returns:
  *  - valid pointer on success
index 6e28d28..b3814fc 100644 (file)
@@ -345,13 +345,13 @@ void release_bp_slot(struct perf_event *bp)
        mutex_unlock(&nr_bp_mutex);
 }
 
-static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
 {
        int err;
 
        __release_bp_slot(bp, old_type);
 
-       err = __reserve_bp_slot(bp, bp->attr.bp_type);
+       err = __reserve_bp_slot(bp, new_type);
        if (err) {
                /*
                 * Reserve the old_type slot back in case
@@ -367,12 +367,12 @@ static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
        return err;
 }
 
-static int modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
 {
        int ret;
 
        mutex_lock(&nr_bp_mutex);
-       ret = __modify_bp_slot(bp, old_type);
+       ret = __modify_bp_slot(bp, old_type, new_type);
        mutex_unlock(&nr_bp_mutex);
        return ret;
 }
@@ -400,16 +400,18 @@ int dbg_release_bp_slot(struct perf_event *bp)
        return 0;
 }
 
-static int validate_hw_breakpoint(struct perf_event *bp)
+static int hw_breakpoint_parse(struct perf_event *bp,
+                              const struct perf_event_attr *attr,
+                              struct arch_hw_breakpoint *hw)
 {
-       int ret;
+       int err;
 
-       ret = arch_validate_hwbkpt_settings(bp);
-       if (ret)
-               return ret;
+       err = hw_breakpoint_arch_parse(bp, attr, hw);
+       if (err)
+               return err;
 
-       if (arch_check_bp_in_kernelspace(bp)) {
-               if (bp->attr.exclude_kernel)
+       if (arch_check_bp_in_kernelspace(hw)) {
+               if (attr->exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
@@ -424,19 +426,22 @@ static int validate_hw_breakpoint(struct perf_event *bp)
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
-       int ret;
-
-       ret = reserve_bp_slot(bp);
-       if (ret)
-               return ret;
+       struct arch_hw_breakpoint hw;
+       int err;
 
-       ret = validate_hw_breakpoint(bp);
+       err = reserve_bp_slot(bp);
+       if (err)
+               return err;
 
-       /* if arch_validate_hwbkpt_settings() fails then release bp slot */
-       if (ret)
+       err = hw_breakpoint_parse(bp, &bp->attr, &hw);
+       if (err) {
                release_bp_slot(bp);
+               return err;
+       }
 
-       return ret;
+       bp->hw.info = hw;
+
+       return 0;
 }
 
 /**
@@ -456,35 +461,44 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
+static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
+                                   struct perf_event_attr *from)
+{
+       to->bp_addr = from->bp_addr;
+       to->bp_type = from->bp_type;
+       to->bp_len  = from->bp_len;
+       to->disabled = from->disabled;
+}
+
 int
 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
                                bool check)
 {
-       u64 old_addr = bp->attr.bp_addr;
-       u64 old_len  = bp->attr.bp_len;
-       int old_type = bp->attr.bp_type;
-       bool modify  = attr->bp_type != old_type;
-       int err = 0;
+       struct arch_hw_breakpoint hw;
+       int err;
 
-       bp->attr.bp_addr = attr->bp_addr;
-       bp->attr.bp_type = attr->bp_type;
-       bp->attr.bp_len  = attr->bp_len;
+       err = hw_breakpoint_parse(bp, attr, &hw);
+       if (err)
+               return err;
 
-       if (check && memcmp(&bp->attr, attr, sizeof(*attr)))
-               return -EINVAL;
+       if (check) {
+               struct perf_event_attr old_attr;
 
-       err = validate_hw_breakpoint(bp);
-       if (!err && modify)
-               err = modify_bp_slot(bp, old_type);
+               old_attr = bp->attr;
+               hw_breakpoint_copy_attr(&old_attr, attr);
+               if (memcmp(&old_attr, attr, sizeof(*attr)))
+                       return -EINVAL;
+       }
 
-       if (err) {
-               bp->attr.bp_addr = old_addr;
-               bp->attr.bp_type = old_type;
-               bp->attr.bp_len  = old_len;
-               return err;
+       if (bp->attr.bp_type != attr->bp_type) {
+               err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
+               if (err)
+                       return err;
        }
 
-       bp->attr.disabled = attr->disabled;
+       hw_breakpoint_copy_attr(&bp->attr, attr);
+       bp->hw.info = hw;
+
        return 0;
 }
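The rewritten path parses into a scratch arch_hw_breakpoint first and commits bp->attr and bp->hw.info only once validation (and any slot move) has succeeded, so a failed modification leaves the breakpoint exactly as it was. Callers keep going through modify_user_hw_breakpoint(); roughly:

/*
 * Sketch: relocate an existing user watchpoint. On error the old
 * address and type stay armed.
 */
static int move_watchpoint(struct perf_event *bp, unsigned long new_addr)
{
	struct perf_event_attr attr = bp->attr;

	attr.bp_addr = new_addr;
	return modify_user_hw_breakpoint(bp, &attr);
}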
 
index ccc579a..aed1ba5 100644 (file)
@@ -918,7 +918,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
 EXPORT_SYMBOL_GPL(uprobe_register);
 
 /*
- * uprobe_apply - unregister a already registered probe.
+ * uprobe_apply - add or remove the breakpoints of an already registered probe.
  * @inode: the file in which the probe has to be removed.
  * @offset: offset from the start of the file.
  * @uc: consumer which wants to add more or remove some breakpoints
@@ -947,7 +947,7 @@ int uprobe_apply(struct inode *inode, loff_t offset,
 }
 
 /*
- * uprobe_unregister - unregister a already registered probe.
+ * uprobe_unregister - unregister an already registered probe.
  * @inode: the file in which the probe has to be removed.
  * @offset: offset from the start of the file.
  * @uc: identify which probe if multiple probes are colocated.
@@ -1403,7 +1403,7 @@ static struct return_instance *free_ret_instance(struct return_instance *ri)
 
 /*
  * Called with no locks held.
- * Called in context of a exiting or a exec-ing thread.
+ * Called in context of an exiting or an exec-ing thread.
  */
 void uprobe_free_utask(struct task_struct *t)
 {
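For context, a minimal sketch of the consumer-based API these comments document; the inode/offset pair names the probed instruction in the binary, per the parameter descriptions above:

static int my_uprobe_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
{
	pr_info("uprobe hit, ip=0x%lx\n", instruction_pointer(regs));
	return 0;
}

static struct uprobe_consumer my_consumer = {
	.handler = my_uprobe_handler,
};

static int probe_binary(struct inode *inode, loff_t offset)
{
	return uprobe_register(inode, offset, &my_consumer);
}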
index 5349c91..bc80a4e 100644 (file)
@@ -184,9 +184,6 @@ static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
        if (should_fail(&fei_fault_attr, 1)) {
                regs_set_return_value(regs, attr->retval);
                override_function_with_return(regs);
-               /* Kprobe specific fixup */
-               reset_current_kprobe();
-               preempt_enable_no_resched();
                return 1;
        }
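An error-injection handler that diverts the return path now needs no kprobe-specific cleanup: setting the return value, rewriting regs->ip and returning 1 is the whole job. A sketch of such a pre_handler (assumes an architecture with HAVE_FUNCTION_ERROR_INJECTION, where override_function_with_return() is available):

static int inject_enoent(struct kprobe *kp, struct pt_regs *regs)
{
	regs_set_return_value(regs, -ENOENT);	/* fake an error return */
	override_function_with_return(regs);	/* return to the caller now */
	return 1;	/* regs->ip changed: skip single stepping */
}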
 
index ea61902..ab257be 100644 (file)
@@ -627,8 +627,8 @@ static void optimize_kprobe(struct kprobe *p)
            (kprobe_disabled(p) || kprobes_all_disarmed))
                return;
 
-       /* Both of break_handler and post_handler are not supported. */
-       if (p->break_handler || p->post_handler)
+       /* kprobes with post_handler can not be optimized */
+       /* kprobes with a post_handler cannot be optimized */
                return;
 
        op = container_of(p, struct optimized_kprobe, kp);
@@ -710,9 +710,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
         * there is still a relative jump) and disabled.
         */
        op = container_of(ap, struct optimized_kprobe, kp);
-       if (unlikely(list_empty(&op->list)))
-               printk(KERN_WARNING "Warning: found a stray unused "
-                       "aggrprobe@%p\n", ap->addr);
+       WARN_ON_ONCE(list_empty(&op->list));
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
@@ -985,7 +983,8 @@ static int arm_kprobe_ftrace(struct kprobe *p)
        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                                   (unsigned long)p->addr, 0, 0);
        if (ret) {
-               pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+               pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
+                        p->addr, ret);
                return ret;
        }
 
@@ -1025,7 +1024,8 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
 
        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                           (unsigned long)p->addr, 1, 0);
-       WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+       WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
+                 p->addr, ret);
        return ret;
 }
 #else  /* !CONFIG_KPROBES_ON_FTRACE */
@@ -1116,20 +1116,6 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 }
 NOKPROBE_SYMBOL(aggr_fault_handler);
 
-static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-       struct kprobe *cur = __this_cpu_read(kprobe_instance);
-       int ret = 0;
-
-       if (cur && cur->break_handler) {
-               if (cur->break_handler(cur, regs))
-                       ret = 1;
-       }
-       reset_kprobe_instance();
-       return ret;
-}
-NOKPROBE_SYMBOL(aggr_break_handler);
-
 /* Walks the list and increments nmissed count for multiprobe case */
 void kprobes_inc_nmissed_count(struct kprobe *p)
 {
@@ -1270,24 +1256,15 @@ static void cleanup_rp_inst(struct kretprobe *rp)
 }
 NOKPROBE_SYMBOL(cleanup_rp_inst);
 
-/*
-* Add the new probe to ap->list. Fail if this is the
-* second jprobe at the address - two jprobes can't coexist
-*/
+/* Add the new probe to ap->list */
 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
        BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
 
-       if (p->break_handler || p->post_handler)
+       if (p->post_handler)
                unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */
 
-       if (p->break_handler) {
-               if (ap->break_handler)
-                       return -EEXIST;
-               list_add_tail_rcu(&p->list, &ap->list);
-               ap->break_handler = aggr_break_handler;
-       } else
-               list_add_rcu(&p->list, &ap->list);
+       list_add_rcu(&p->list, &ap->list);
        if (p->post_handler && !ap->post_handler)
                ap->post_handler = aggr_post_handler;
 
@@ -1310,8 +1287,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
        /* We don't care about a kprobe which has gone. */
        if (p->post_handler && !kprobe_gone(p))
                ap->post_handler = aggr_post_handler;
-       if (p->break_handler && !kprobe_gone(p))
-               ap->break_handler = aggr_break_handler;
 
        INIT_LIST_HEAD(&ap->list);
        INIT_HLIST_NODE(&ap->hlist);
@@ -1706,8 +1681,6 @@ static int __unregister_kprobe_top(struct kprobe *p)
                goto disarmed;
        else {
                /* If disabling probe has special handlers, update aggrprobe */
-               if (p->break_handler && !kprobe_gone(p))
-                       ap->break_handler = NULL;
                if (p->post_handler && !kprobe_gone(p)) {
                        list_for_each_entry_rcu(list_p, &ap->list, list) {
                                if ((list_p != p) && (list_p->post_handler))
@@ -1812,77 +1785,6 @@ unsigned long __weak arch_deref_entry_point(void *entry)
        return (unsigned long)entry;
 }
 
-#if 0
-int register_jprobes(struct jprobe **jps, int num)
-{
-       int ret = 0, i;
-
-       if (num <= 0)
-               return -EINVAL;
-
-       for (i = 0; i < num; i++) {
-               ret = register_jprobe(jps[i]);
-
-               if (ret < 0) {
-                       if (i > 0)
-                               unregister_jprobes(jps, i);
-                       break;
-               }
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(register_jprobes);
-
-int register_jprobe(struct jprobe *jp)
-{
-       unsigned long addr, offset;
-       struct kprobe *kp = &jp->kp;
-
-       /*
-        * Verify probepoint as well as the jprobe handler are
-        * valid function entry points.
-        */
-       addr = arch_deref_entry_point(jp->entry);
-
-       if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
-           kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
-               kp->pre_handler = setjmp_pre_handler;
-               kp->break_handler = longjmp_break_handler;
-               return register_kprobe(kp);
-       }
-
-       return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(register_jprobe);
-
-void unregister_jprobe(struct jprobe *jp)
-{
-       unregister_jprobes(&jp, 1);
-}
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-
-void unregister_jprobes(struct jprobe **jps, int num)
-{
-       int i;
-
-       if (num <= 0)
-               return;
-       mutex_lock(&kprobe_mutex);
-       for (i = 0; i < num; i++)
-               if (__unregister_kprobe_top(&jps[i]->kp) < 0)
-                       jps[i]->kp.addr = NULL;
-       mutex_unlock(&kprobe_mutex);
-
-       synchronize_sched();
-       for (i = 0; i < num; i++) {
-               if (jps[i]->kp.addr)
-                       __unregister_kprobe_bottom(&jps[i]->kp);
-       }
-}
-EXPORT_SYMBOL_GPL(unregister_jprobes);
-#endif
-
 #ifdef CONFIG_KRETPROBES
 /*
  * This kprobe pre_handler is registered with every kretprobe. When probe
@@ -1982,7 +1884,6 @@ int register_kretprobe(struct kretprobe *rp)
        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
-       rp->kp.break_handler = NULL;
 
        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
@@ -2105,7 +2006,6 @@ static void kill_kprobe(struct kprobe *p)
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->flags |= KPROBE_FLAG_GONE;
                p->post_handler = NULL;
-               p->break_handler = NULL;
                kill_optimized_kprobe(p);
        }
        /*
@@ -2169,11 +2069,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(enable_kprobe);
 
+/* Callers must NOT use this in the usual path; it is only for critical cases */
 void dump_kprobe(struct kprobe *kp)
 {
-       printk(KERN_WARNING "Dumping kprobe:\n");
-       printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
-              kp->symbol_name, kp->addr, kp->offset);
+       pr_err("Dumping kprobe:\n");
+       pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
+              kp->symbol_name, kp->offset, kp->addr);
 }
 NOKPROBE_SYMBOL(dump_kprobe);
 
@@ -2196,11 +2097,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
                entry = arch_deref_entry_point((void *)*iter);
 
                if (!kernel_text_address(entry) ||
-                   !kallsyms_lookup_size_offset(entry, &size, &offset)) {
-                       pr_err("Failed to find blacklist at %p\n",
-                               (void *)entry);
+                   !kallsyms_lookup_size_offset(entry, &size, &offset))
                        continue;
-               }
 
                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
@@ -2326,21 +2224,23 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
                const char *sym, int offset, char *modname, struct kprobe *pp)
 {
        char *kprobe_type;
+       void *addr = p->addr;
 
        if (p->pre_handler == pre_handler_kretprobe)
                kprobe_type = "r";
-       else if (p->pre_handler == setjmp_pre_handler)
-               kprobe_type = "j";
        else
                kprobe_type = "k";
 
+       if (!kallsyms_show_value())
+               addr = NULL;
+
        if (sym)
-               seq_printf(pi, "%p  %s  %s+0x%x  %s ",
-                       p->addr, kprobe_type, sym, offset,
+               seq_printf(pi, "%px  %s  %s+0x%x  %s ",
+                       addr, kprobe_type, sym, offset,
                        (modname ? modname : " "));
-       else
-               seq_printf(pi, "%p  %s  %p ",
-                       p->addr, kprobe_type, p->addr);
+       else    /* try to use %pS */
+               seq_printf(pi, "%px  %s  %pS ",
+                       addr, kprobe_type, p->addr);
 
        if (!pp)
                pp = p;
@@ -2428,8 +2328,16 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
        struct kprobe_blacklist_entry *ent =
                list_entry(v, struct kprobe_blacklist_entry, list);
 
-       seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
-                  (void *)ent->end_addr, (void *)ent->start_addr);
+       /*
+        * If /proc/kallsyms is not showing kernel addresses, we won't
+        * show them here either.
+        */
+       if (!kallsyms_show_value())
+               seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
+                          (void *)ent->start_addr);
+       else
+               seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
+                          (void *)ent->end_addr, (void *)ent->start_addr);
        return 0;
 }
 
@@ -2611,7 +2519,7 @@ static int __init debugfs_kprobe_init(void)
        if (!dir)
                return -ENOMEM;
 
-       file = debugfs_create_file("list", 0444, dir, NULL,
+       file = debugfs_create_file("list", 0400, dir, NULL,
                                &debugfs_kprobes_operations);
        if (!file)
                goto error;
@@ -2621,7 +2529,7 @@ static int __init debugfs_kprobe_init(void)
        if (!file)
                goto error;
 
-       file = debugfs_create_file("blacklist", 0444, dir, NULL,
+       file = debugfs_create_file("blacklist", 0400, dir, NULL,
                                &debugfs_kprobe_blacklist_ops);
        if (!file)
                goto error;
@@ -2637,6 +2545,3 @@ late_initcall(debugfs_kprobe_init);
 #endif /* CONFIG_DEBUG_FS */
 
 module_init(init_kprobes);
-
-/* defined in arch/.../kernel/kprobes.c */
-EXPORT_SYMBOL_GPL(jprobe_return);
index dd53e35..7bca480 100644 (file)
@@ -162,90 +162,6 @@ static int test_kprobes(void)
 
 }
 
-#if 0
-static u32 jph_val;
-
-static u32 j_kprobe_target(u32 value)
-{
-       if (preemptible()) {
-               handler_errors++;
-               pr_err("jprobe-handler is preemptible\n");
-       }
-       if (value != rand1) {
-               handler_errors++;
-               pr_err("incorrect value in jprobe handler\n");
-       }
-
-       jph_val = rand1;
-       jprobe_return();
-       return 0;
-}
-
-static struct jprobe jp = {
-       .entry          = j_kprobe_target,
-       .kp.symbol_name = "kprobe_target"
-};
-
-static int test_jprobe(void)
-{
-       int ret;
-
-       ret = register_jprobe(&jp);
-       if (ret < 0) {
-               pr_err("register_jprobe returned %d\n", ret);
-               return ret;
-       }
-
-       ret = target(rand1);
-       unregister_jprobe(&jp);
-       if (jph_val == 0) {
-               pr_err("jprobe handler not called\n");
-               handler_errors++;
-       }
-
-       return 0;
-}
-
-static struct jprobe jp2 = {
-       .entry          = j_kprobe_target,
-       .kp.symbol_name = "kprobe_target2"
-};
-
-static int test_jprobes(void)
-{
-       int ret;
-       struct jprobe *jps[2] = {&jp, &jp2};
-
-       /* addr and flags should be cleard for reusing kprobe. */
-       jp.kp.addr = NULL;
-       jp.kp.flags = 0;
-       ret = register_jprobes(jps, 2);
-       if (ret < 0) {
-               pr_err("register_jprobes returned %d\n", ret);
-               return ret;
-       }
-
-       jph_val = 0;
-       ret = target(rand1);
-       if (jph_val == 0) {
-               pr_err("jprobe handler not called\n");
-               handler_errors++;
-       }
-
-       jph_val = 0;
-       ret = target2(rand1);
-       if (jph_val == 0) {
-               pr_err("jprobe handler2 not called\n");
-               handler_errors++;
-       }
-       unregister_jprobes(jps, 2);
-
-       return 0;
-}
-#else
-#define test_jprobe() (0)
-#define test_jprobes() (0)
-#endif
 #ifdef CONFIG_KRETPROBES
 static u32 krph_val;
 
@@ -383,16 +299,6 @@ int init_test_probes(void)
        if (ret < 0)
                errors++;
 
-       num_tests++;
-       ret = test_jprobe();
-       if (ret < 0)
-               errors++;
-
-       num_tests++;
-       ret = test_jprobes();
-       if (ret < 0)
-               errors++;
-
 #ifdef CONFIG_KRETPROBES
        num_tests++;
        ret = test_kretprobe();
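With the jprobe self-tests removed, kretprobes remain the supported way to observe function returns. A sketch in the style of the remaining tests (the symbol mirrors the test target and is illustrative):

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
		regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.kp.symbol_name	= "kprobe_target",	/* illustrative */
	.maxactive	= 20,	/* instances to pre-allocate */
};

/* register_kretprobe(&my_kretprobe) / unregister_kretprobe(&my_kretprobe) */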
index 21f7184..b37b92e 100644 (file)
@@ -1217,16 +1217,11 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 
                /*
                 * We need to check and see if we modified the pc of the
-                * pt_regs, and if so clear the kprobe and return 1 so that we
-                * don't do the single stepping.
-                * The ftrace kprobe handler leaves it up to us to re-enable
-                * preemption here before returning if we've modified the ip.
+                * pt_regs, and if so return 1 so that we don't do the
+                * single stepping.
                 */
-               if (orig_ip != instruction_pointer(regs)) {
-                       reset_current_kprobe();
-                       preempt_enable_no_resched();
+               if (orig_ip != instruction_pointer(regs))
                        return 1;
-               }
                if (!ret)
                        return 0;
        }
index 8838d11..0b066b3 100644 (file)
@@ -1718,7 +1718,7 @@ config KPROBES_SANITY_TEST
        default n
        help
          This option provides for testing basic kprobes functionality on
-         boot. A sample kprobe, jprobe and kretprobe are inserted and
+         boot. A sample kprobe and a sample kretprobe are inserted and
          verified for functionality.
 
          Say N if you are unsure.
index 11300db..14e1351 100644 (file)
@@ -234,7 +234,7 @@ perf also supports group leader sampling using the :S specifier.
   perf record -e '{cycles,instructions}:S' ...
   perf report --group
 
-Normally all events in a event group sample, but with :S only
+Normally all events in an event group sample, but with :S only
 the first event (the leader) samples, and it only reads the values of the
 other events in the group.
 
index 04168da..246dee0 100644 (file)
@@ -94,7 +94,7 @@ OPTIONS
          "perf report" to view group events together.
 
 --filter=<filter>::
-        Event filter. This option should follow a event selector (-e) which
+        Event filter. This option should follow an event selector (-e) which
        selects either tracepoint event(s) or a hardware trace PMU
        (e.g. Intel PT or CoreSight).
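        For example, with a tracepoint event (the field name and value are
        illustrative; available fields come from the tracepoint's format file):

          perf record -e sched:sched_switch --filter 'prev_comm == "bash"'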
 
@@ -153,7 +153,7 @@ OPTIONS
 
 --exclude-perf::
        Don't record events issued by perf itself. This option should follow
-       a event selector (-e) which selects tracepoint event(s). It adds a
+       an event selector (-e) which selects tracepoint event(s). It adds a
        filter expression 'common_pid != $PERFPID' to filters. If other
        '--filter' exists, the new filter expression will be combined with
        them by '&&'.
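        For example (illustrative):

          perf record -e sched:sched_switch --exclude-perf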