kprobes: treewide: Use 'kprobe_opcode_t *' for the code address in get_optimized_kprobe()
author: Masami Hiramatsu <mhiramat@kernel.org>
Tue, 14 Sep 2021 14:40:07 +0000 (23:40 +0900)
committer: Steven Rostedt (VMware) <rostedt@goodmis.org>
Fri, 1 Oct 2021 01:24:05 +0000 (21:24 -0400)
Since get_optimized_kprobe() is only used inside kprobes,
it doesn't need to use 'unsigned long' type for 'addr' parameter.
Make it use 'kprobe_opcode_t *' for the 'addr' parameter, and make the
subsequent call to arch_within_optimized_kprobe() also use
'kprobe_opcode_t *'.

Note that MAX_OPTIMIZED_LENGTH and RELATIVEJUMP_SIZE are defined
by byte-size, but the size of 'kprobe_opcode_t' depends on the
architecture. Therefore, we must be careful when calculating
addresses using those macros.

Link: https://lkml.kernel.org/r/163163040680.489837.12133032364499833736.stgit@devnote2
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
arch/arm/probes/kprobes/opt-arm.c
arch/powerpc/kernel/optprobes.c
arch/x86/kernel/kprobes/opt.c
include/linux/kprobes.h
kernel/kprobes.c

index c781801..dbef34e 100644 (file)
@@ -347,10 +347,11 @@ void arch_unoptimize_kprobes(struct list_head *oplist,
 }
 
 int arch_within_optimized_kprobe(struct optimized_kprobe *op,
-                               unsigned long addr)
+                                kprobe_opcode_t *addr)
 {
-       return ((unsigned long)op->kp.addr <= addr &&
-               (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
+       return (op->kp.addr <= addr &&
+               op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
+
 }
 
 void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
index c79899a..325ba54 100644 (file)
@@ -301,8 +301,8 @@ void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_li
        }
 }
 
-int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
 {
-       return ((unsigned long)op->kp.addr <= addr &&
-               (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
+       return (op->kp.addr <= addr &&
+               op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
 }
index 71425eb..b4a54a5 100644 (file)
@@ -367,10 +367,10 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
 
 /* Check the addr is within the optimized instructions. */
 int arch_within_optimized_kprobe(struct optimized_kprobe *op,
-                                unsigned long addr)
+                                kprobe_opcode_t *addr)
 {
-       return ((unsigned long)op->kp.addr <= addr &&
-               (unsigned long)op->kp.addr + op->optinsn.size > addr);
+       return (op->kp.addr <= addr &&
+               op->kp.addr + op->optinsn.size > addr);
 }
 
 /* Free optimized instruction slot */
index 9c28fbb..6a5995f 100644 (file)
@@ -329,7 +329,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
                                    struct list_head *done_list);
 extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
 extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
-                                       unsigned long addr);
+                                       kprobe_opcode_t *addr);
 
 extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
 
index ec3d97f..b6f1dcf 100644 (file)
@@ -485,15 +485,15 @@ static int kprobe_queued(struct kprobe *p)
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including 'addr' (exclude breakpoint).
  */
-static struct kprobe *get_optimized_kprobe(unsigned long addr)
+static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
 {
        int i;
        struct kprobe *p = NULL;
        struct optimized_kprobe *op;
 
        /* Don't check i == 0, since that is a breakpoint case. */
-       for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
-               p = get_kprobe((void *)(addr - i));
+       for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
+               p = get_kprobe(addr - i);
 
        if (p && kprobe_optready(p)) {
                op = container_of(p, struct optimized_kprobe, kp);
@@ -967,7 +967,7 @@ static void __arm_kprobe(struct kprobe *p)
        lockdep_assert_held(&text_mutex);
 
        /* Find the overlapping optimized kprobes. */
-       _p = get_optimized_kprobe((unsigned long)p->addr);
+       _p = get_optimized_kprobe(p->addr);
        if (unlikely(_p))
                /* Fallback to unoptimized kprobe */
                unoptimize_kprobe(_p, true);
@@ -989,7 +989,7 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
        if (!kprobe_queued(p)) {
                arch_disarm_kprobe(p);
                /* If another kprobe was blocked, re-optimize it. */
-               _p = get_optimized_kprobe((unsigned long)p->addr);
+               _p = get_optimized_kprobe(p->addr);
                if (unlikely(_p) && reopt)
                        optimize_kprobe(_p);
        }