arm64: avoid double ISB on kernel entry
author Peter Collingbourne <pcc@google.com>
Tue, 27 Jul 2021 20:54:39 +0000 (13:54 -0700)
committer Catalin Marinas <catalin.marinas@arm.com>
Wed, 28 Jul 2021 17:40:12 +0000 (18:40 +0100)
Although an ISB is required in order to make the MTE-related system
register update to GCR_EL1 effective, and the same is true for
PAC-related updates to SCTLR_EL1 or APIAKey{Hi,Lo}_EL1, we issue two
ISBs on machines that support both features while we only need to
issue one. To avoid the unnecessary additional ISB, remove the ISBs
from the PAC- and MTE-specific alternative blocks and add a couple
of additional blocks so that only a single ISB is executed even when
both features are supported.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/Idee7e8114d5ae5a0b171d06220a0eb4bb015a51c
Link: https://lore.kernel.org/r/20210727205439.2557419-1-pcc@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
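
As a sketch of the resulting pattern (an annotated restatement of the
entry.S hunk below; the comments are editorial, not part of the patch),
exactly one ISB is executed when either feature is present, and none
when neither is:

alternative_if ARM64_MTE
	isb			// MTE present: synchronize here ...
	b	1f		// ... and skip the PAC-only ISB
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb			// PAC present (without MTE): the single ISB
alternative_else_nop_endif
1:				// neither feature: both blocks are NOPs, no ISB

Since all of the GCR_EL1, SCTLR_EL1 and APIAKey updates are made before
this point, one context synchronization event covers them all, which is
why a single ISB suffices even when both features are enabled.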
arch/arm64/kernel/entry.S

index 8c8581e..468fae0 100644
@@ -184,7 +184,6 @@ alternative_else_nop_endif
        ldr_l   \tmp, gcr_kernel_excl
 
        mte_set_gcr \tmp, \tmp2
-       isb
 1:
 #endif
        .endm
@@ -257,7 +256,6 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
        orr     x0, x0, SCTLR_ELx_ENIA
        msr     sctlr_el1, x0
 2:
-       isb
 alternative_else_nop_endif
 #endif
 
@@ -265,6 +263,19 @@ alternative_else_nop_endif
 
        mte_set_kernel_gcr x22, x23
 
+       /*
+        * Any non-self-synchronizing system register updates required for
+        * kernel entry should be placed before this point.
+        */
+alternative_if ARM64_MTE
+       isb
+       b       1f
+alternative_else_nop_endif
+alternative_if ARM64_HAS_ADDRESS_AUTH
+       isb
+alternative_else_nop_endif
+1:
+
        scs_load tsk
        .else
        add     x21, sp, #PT_REGS_SIZE