From 9dd97064e21fc9cba391d4f4983aff4861a7cce8 Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Fri, 13 Nov 2020 14:42:22 +0800
Subject: [PATCH] riscv: Make stack walk callback consistent with generic code

In order to use generic arch_stack_walk() code, make stack walk
callback consistent with it.

Signed-off-by: Kefeng Wang
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/stacktrace.h |  2 +-
 arch/riscv/kernel/perf_callchain.c  |  2 +-
 arch/riscv/kernel/stacktrace.c      | 10 +++++-----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h
index f09c1e31bde9..470a65c4ccdc 100644
--- a/arch/riscv/include/asm/stacktrace.h
+++ b/arch/riscv/include/asm/stacktrace.h
@@ -12,6 +12,6 @@ struct stackframe {
 };
 
 extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
-				    bool (*fn)(unsigned long, void *), void *arg);
+				    bool (*fn)(void *, unsigned long), void *arg);
 
 #endif /* _ASM_RISCV_STACKTRACE_H */
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
index 623ecd36a720..0bb1854dce83 100644
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -70,7 +70,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 		fp = user_backtrace(entry, fp, 0);
 }
 
-static bool fill_callchain(unsigned long pc, void *entry)
+static bool fill_callchain(void *entry, unsigned long pc)
 {
 	return perf_callchain_store(entry, pc);
 }
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 345a202c92bb..81c48144c504 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -43,7 +43,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 		unsigned long low, high;
 		struct stackframe *frame;
 
-		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+		if (unlikely(!__kernel_text_address(pc) || fn(arg, pc)))
 			break;
 
 		/* Validate frame pointer */
@@ -63,7 +63,7 @@ void notrace walk_stackframe(struct task_struct *task,
 #else /* !CONFIG_FRAME_POINTER */
 
 void notrace walk_stackframe(struct task_struct *task,
-	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+	struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
 {
 	unsigned long sp, pc;
 	unsigned long *ksp;
@@ -85,7 +85,7 @@ void notrace walk_stackframe(struct task_struct *task,
 
 	ksp = (unsigned long *)sp;
 	while (!kstack_end(ksp)) {
-		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+		if (__kernel_text_address(pc) && unlikely(fn(arg, pc)))
 			break;
 		pc = (*ksp++) - 0x4;
 	}
@@ -107,7 +107,7 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 	walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
 }
 
-static bool save_wchan(unsigned long pc, void *arg)
+static bool save_wchan(void *arg, unsigned long pc)
 {
 	if (!in_sched_functions(pc)) {
 		unsigned long *p = arg;
@@ -143,7 +143,7 @@ static bool __save_trace(unsigned long pc, void *arg, bool nosched)
 	return (trace->nr_entries >= trace->max_entries);
 }
 
-static bool save_trace(unsigned long pc, void *arg)
+static bool save_trace(void *arg, unsigned long pc)
 {
 	return __save_trace(pc, arg, false);
 }
-- 
2.20.1
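
For reference, the argument order introduced here (cookie first, address second) matches stack_trace_consume_fn, the consumer callback type used by the generic stack walking code in include/linux/stacktrace.h. Below is a minimal sketch, not part of this patch, of how a RISC-V arch_stack_walk() under CONFIG_ARCH_STACKWALK could hand a generic callback straight to walk_stackframe() once the signatures agree:

/* Sketch only, assuming CONFIG_ARCH_STACKWALK; not part of this patch. */
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>

/*
 * Generic code declares the consumer callback as:
 *   typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr);
 *
 * With walk_stackframe() now taking bool (*fn)(void *, unsigned long),
 * the generic callback can be forwarded without a wrapper:
 */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	walk_stackframe(task, regs, consume_entry, cookie);
}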