parisc: Add static branch and JUMP_LABEL feature
authorHelge Deller <deller@gmx.de>
Fri, 3 May 2019 21:51:00 +0000 (23:51 +0200)
committerHelge Deller <deller@gmx.de>
Sun, 5 May 2019 22:10:03 +0000 (00:10 +0200)
Signed-off-by: Helge Deller <deller@gmx.de>
arch/parisc/Kconfig
arch/parisc/include/asm/jump_label.h [new file with mode: 0644]
arch/parisc/kernel/Makefile
arch/parisc/kernel/jump_label.c [new file with mode: 0644]
arch/parisc/kernel/vmlinux.lds.S

index 26c2155..c971256 100644 (file)
@@ -45,6 +45,8 @@ config PARISC
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_HASH
+       select HAVE_ARCH_JUMP_LABEL
+       select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
new file mode 100644 (file)
index 0000000..7efb1aa
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_JUMP_LABEL_H
+#define _ASM_PARISC_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/assembly.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+       asm_volatile_goto("1:\n\t"
+                "nop\n\t"      /* default-disabled site: patched to a branch at runtime */
+                ".pushsection __jump_table,  \"aw\"\n\t"
+                ".word 1b - ., %l[l_yes] - .\n\t"      /* 32-bit relative code and target offsets */
+                __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"     /* native-word relative key offset */
+                ".popsection\n\t"
+                : :  "i" (&((char *)key)[branch]) :  : l_yes); /* key address + branch (0/1) in low bit */
+
+       return false;
+l_yes:
+       return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+       asm_volatile_goto("1:\n\t"
+                "b,n %l[l_yes]\n\t"    /* default-enabled site: branch (delay slot nullified), patched to NOP when disabled */
+                ".pushsection __jump_table,  \"aw\"\n\t"
+                ".word 1b - ., %l[l_yes] - .\n\t"      /* 32-bit relative code and target offsets */
+                __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"     /* native-word relative key offset */
+                ".popsection\n\t"
+                : :  "i" (&((char *)key)[branch]) :  : l_yes); /* key address + branch (0/1) in low bit */
+
+       return false;
+l_yes:
+       return true;
+}
+
+#endif  /* __ASSEMBLY__ */
+#endif
index b818b28..fc0df5c 100644 (file)
@@ -33,5 +33,6 @@ obj-$(CONFIG_64BIT)   += perf.o perf_asm.o $(obj64-y)
 obj-$(CONFIG_PARISC_CPU_TOPOLOGY)      += topology.o
 obj-$(CONFIG_FUNCTION_TRACER)          += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
+obj-$(CONFIG_JUMP_LABEL)               += jump_label.o
 obj-$(CONFIG_KGDB)                     += kgdb.o
 obj-$(CONFIG_KPROBES)                  += kprobes.o
diff --git a/arch/parisc/kernel/jump_label.c b/arch/parisc/kernel/jump_label.c
new file mode 100644 (file)
index 0000000..d2f3cb1
--- /dev/null
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Helge Deller <deller@gmx.de>
+ *
+ * Based on arch/arm64/kernel/jump_label.c
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <asm/alternative.h>
+#include <asm/patch.h>
+
+static inline int reassemble_17(int as17)
+{
+       return (((as17 & 0x10000) >> 16) |      /* shuffle a 17-bit word displacement into the */
+               ((as17 & 0x0f800) << 5) |       /* scattered w/w1/w2 fields of a PA-RISC branch */
+               ((as17 & 0x00400) >> 8) |       /* instruction (same layout as module relocs) */
+               ((as17 & 0x003ff) << 3));
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+{
+       void *addr = (void *)jump_entry_code(entry);    /* instruction to patch */
+       u32 insn;
+
+       if (type == JUMP_LABEL_JMP) {
+               void *target = (void *)jump_entry_target(entry);
+               int distance = target - addr;   /* byte displacement to branch target */
+               /*
+                * Encode the PA1.1 "b,n" instruction with a 17-bit
+                * displacement.  In case we hit the BUG(), we could use
+                * another branch instruction with a 22-bit displacement on
+                * 64-bit CPUs instead. But this seems sufficient for now.
+                */
+               distance -= 8;  /* PA-RISC branch displacement is relative to insn address + 8 */
+               BUG_ON(distance > 262143 || distance < -262144);        /* +/-256KB: 17-bit word displacement */
+               insn = 0xe8000002 | reassemble_17(distance >> 2);       /* b,n opcode | word-offset fields */
+       } else {
+               insn = INSN_NOP;        /* disable: overwrite the branch with a NOP */
+       }
+
+       patch_text(addr, insn); /* write the new instruction and flush caches */
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+                                     enum jump_label_type type)    /* both parameters intentionally unused */
+{
+       /*
+        * We use the architected NOP in arch_static_branch, so there's no
+        * need to patch an identical NOP over the top of it here. The core
+        * will call arch_jump_label_transform from a module notifier if the
+        * NOP needs to be replaced by a branch.
+        */
+}
index c3b1b9c..a8be7a4 100644 (file)
@@ -18,6 +18,9 @@
                                *(.data..vm0.pgd) \
                                *(.data..vm0.pte)
 
+/* No __ro_after_init data in the .rodata section - which will always be ro */
+#define RO_AFTER_INIT_DATA
+
 #include <asm-generic/vmlinux.lds.h>
 
 /* needed for the processor specific cache alignment size */