Merge branch 'for-next/cpuidle' into for-next/core
author Will Deacon <will@kernel.org>
Thu, 24 Jun 2021 12:36:39 +0000 (13:36 +0100)
committer Will Deacon <will@kernel.org>
Thu, 24 Jun 2021 12:36:39 +0000 (13:36 +0100)
Fix resume from idle when pseudo-NMI (pNMI) is being used.

* for-next/cpuidle:
  arm64: suspend: Use cpuidle context helpers in cpu_suspend()
  PSCI: Use cpuidle context helpers in psci_cpu_suspend_enter()
  arm64: Convert cpu_do_idle() to using cpuidle context helpers
  arm64: Add cpuidle context save/restore helpers
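
As a rough illustration of what the cpuidle context helpers above provide (the exact
helper and field names live in the commits; the ones below are approximations, not
the in-tree API), the idea is to capture the interrupt-masking state, including the
ICC_PMR_EL1 priority mask used for pseudo-NMI, before entering a low-power state and
to restore it on the resume path:

/*
 * Illustrative sketch only; names and accessors are stand-ins for the
 * real helpers added by this branch.
 */
struct cpuidle_irq_context {
        u64 pmr;        /* saved ICC_PMR_EL1 (pseudo-NMI priority mask) */
        u64 daif;       /* saved PSTATE.DAIF interrupt mask bits */
};

/* Assumed register accessors standing in for read_sysreg()/write_sysreg(). */
extern u64 read_pmr(void);
extern void write_pmr(u64 val);
extern u64 read_daif(void);
extern void write_daif(u64 val);

static inline void cpuidle_save_irq_context(struct cpuidle_irq_context *ctx)
{
        /* record the current masking state before the WFI/PSCI suspend */
        ctx->pmr = read_pmr();
        ctx->daif = read_daif();
}

static inline void cpuidle_restore_irq_context(const struct cpuidle_irq_context *ctx)
{
        /* re-establish PMR first so pseudo-NMI masking is correct on wake */
        write_pmr(ctx->pmr);
        write_daif(ctx->daif);
}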

60 files changed:
Makefile
arch/arm64/include/asm/alternative-macros.h
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/linkage.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/scs.h
arch/arm64/include/asm/sdei.h
arch/arm64/include/asm/smp.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/hibernate-asm.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/idreg-override.c
arch/arm64/kernel/image-vars.h
arch/arm64/kernel/insn.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/perf_callchain.c
arch/arm64/kernel/probes/uprobes.c
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/sdei.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/sys_compat.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/cache.S
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/nvhe/tlb.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/lib/Makefile
arch/arm64/lib/clear_user.S
arch/arm64/lib/memchr.S
arch/arm64/lib/memcmp.S
arch/arm64/lib/memcpy.S
arch/arm64/lib/memmove.S [deleted file]
arch/arm64/lib/strcmp.S
arch/arm64/lib/strlen.S
arch/arm64/lib/strncmp.S
arch/arm64/lib/uaccess_flushcache.c
arch/arm64/mm/cache.S
arch/arm64/mm/flush.c
arch/arm64/mm/proc.S
arch/arm64/tools/cpucaps
scripts/tools-support-relr.sh

index e446835..e38c74d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1031,7 +1031,7 @@ LDFLAGS_vmlinux   += $(call ld-option, -X,)
 endif
 
 ifeq ($(CONFIG_RELR),y)
-LDFLAGS_vmlinux        += --pack-dyn-relocs=relr
+LDFLAGS_vmlinux        += --pack-dyn-relocs=relr --use-android-relr-tags
 endif
 
 # We never want expected sections to be placed heuristically by the
index 8a078fc..4777035 100644 (file)
@@ -197,11 +197,6 @@ alternative_endif
 #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)  \
        alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
 
-.macro user_alt, label, oldinstr, newinstr, cond
-9999:  alternative_insn "\oldinstr", "\newinstr", \cond
-       _asm_extable 9999b, \label
-.endm
-
 #endif  /*  __ASSEMBLY__  */
 
 /*
index 934b9be..4ad22c3 100644 (file)
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
 #define gic_read_lpir(c)               readq_relaxed(c)
 #define gic_write_lpir(v, c)           writeq_relaxed(v, c)
 
-#define gic_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+#define gic_flush_dcache_to_poc(a,l)   \
+       dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
 
 #define gits_read_baser(c)             readq_relaxed(c)
 #define gits_write_baser(v, c)         writeq_relaxed(v, c)
index 8418c1b..89faca0 100644 (file)
@@ -130,15 +130,27 @@ alternative_endif
        .endm
 
 /*
- * Emit an entry into the exception table
+ * Create an exception table entry for `insn`, which will branch to `fixup`
+ * when an unhandled fault is taken.
  */
-       .macro          _asm_extable, from, to
+       .macro          _asm_extable, insn, fixup
        .pushsection    __ex_table, "a"
        .align          3
-       .long           (\from - .), (\to - .)
+       .long           (\insn - .), (\fixup - .)
        .popsection
        .endm
 
+/*
+ * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
+ * do nothing.
+ */
+       .macro          _cond_extable, insn, fixup
+       .ifnc           \fixup,
+       _asm_extable    \insn, \fixup
+       .endif
+       .endm
+
+
 #define USER(l, x...)                          \
 9999:  x;                                      \
        _asm_extable    9999b, l
@@ -232,15 +244,23 @@ lr        .req    x30             // link register
         * @dst: destination register
         */
 #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
-       .macro  this_cpu_offset, dst
+       .macro  get_this_cpu_offset, dst
        mrs     \dst, tpidr_el2
        .endm
 #else
-       .macro  this_cpu_offset, dst
+       .macro  get_this_cpu_offset, dst
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        mrs     \dst, tpidr_el1
 alternative_else
        mrs     \dst, tpidr_el2
+alternative_endif
+       .endm
+
+       .macro  set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+       msr     tpidr_el1, \src
+alternative_else
+       msr     tpidr_el2, \src
 alternative_endif
        .endm
 #endif
@@ -253,7 +273,7 @@ alternative_endif
        .macro adr_this_cpu, dst, sym, tmp
        adrp    \tmp, \sym
        add     \dst, \tmp, #:lo12:\sym
-       this_cpu_offset \tmp
+       get_this_cpu_offset \tmp
        add     \dst, \dst, \tmp
        .endm
 
@@ -264,7 +284,7 @@ alternative_endif
         */
        .macro ldr_this_cpu dst, sym, tmp
        adr_l   \dst, \sym
-       this_cpu_offset \tmp
+       get_this_cpu_offset \tmp
        ldr     \dst, [\dst, \tmp]
        .endm
 
@@ -375,51 +395,53 @@ alternative_cb_end
        bfi     \tcr, \tmp0, \pos, #3
        .endm
 
+       .macro __dcache_op_workaround_clean_cache, op, addr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+       dc      \op, \addr
+alternative_else
+       dc      civac, \addr
+alternative_endif
+       .endm
+
 /*
  * Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
+ * [start, end)
  *
  *     op:             operation passed to dc instruction
 *     domain:         domain used in dsb instruction
- *     kaddr:          starting virtual address of the region
- *     size:           size of the region
- *     Corrupts:       kaddr, size, tmp1, tmp2
+ *     start:          starting virtual address of the region
+ *     end:            end virtual address of the region
+ *     fixup:          optional label to branch to on user fault
+ *     Corrupts:       start, end, tmp1, tmp2
  */
-       .macro __dcache_op_workaround_clean_cache, op, kaddr
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
-       dc      \op, \kaddr
-alternative_else
-       dc      civac, \kaddr
-alternative_endif
-       .endm
-
-       .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+       .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
        dcache_line_size \tmp1, \tmp2
-       add     \size, \kaddr, \size
        sub     \tmp2, \tmp1, #1
-       bic     \kaddr, \kaddr, \tmp2
-9998:
+       bic     \start, \start, \tmp2
+.Ldcache_op\@:
        .ifc    \op, cvau
-       __dcache_op_workaround_clean_cache \op, \kaddr
+       __dcache_op_workaround_clean_cache \op, \start
        .else
        .ifc    \op, cvac
-       __dcache_op_workaround_clean_cache \op, \kaddr
+       __dcache_op_workaround_clean_cache \op, \start
        .else
        .ifc    \op, cvap
-       sys     3, c7, c12, 1, \kaddr   // dc cvap
+       sys     3, c7, c12, 1, \start   // dc cvap
        .else
        .ifc    \op, cvadp
-       sys     3, c7, c13, 1, \kaddr   // dc cvadp
+       sys     3, c7, c13, 1, \start   // dc cvadp
        .else
-       dc      \op, \kaddr
+       dc      \op, \start
        .endif
        .endif
        .endif
        .endif
-       add     \kaddr, \kaddr, \tmp1
-       cmp     \kaddr, \size
-       b.lo    9998b
+       add     \start, \start, \tmp1
+       cmp     \start, \end
+       b.lo    .Ldcache_op\@
        dsb     \domain
+
+       _cond_extable .Ldcache_op\@, \fixup
        .endm
 
 /*
@@ -427,20 +449,22 @@ alternative_endif
  * [start, end)
  *
  *     start, end:     virtual addresses describing the region
- *     label:          A label to branch to on user fault.
+ *     fixup:          optional label to branch to on user fault
  *     Corrupts:       tmp1, tmp2
  */
-       .macro invalidate_icache_by_line start, end, tmp1, tmp2, label
+       .macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
        icache_line_size \tmp1, \tmp2
        sub     \tmp2, \tmp1, #1
        bic     \tmp2, \start, \tmp2
-9997:
-USER(\label, ic        ivau, \tmp2)                    // invalidate I line PoU
+.Licache_op\@:
+       ic      ivau, \tmp2                     // invalidate I line PoU
        add     \tmp2, \tmp2, \tmp1
        cmp     \tmp2, \end
-       b.lo    9997b
+       b.lo    .Licache_op\@
        dsb     ish
        isb
+
+       _cond_extable .Licache_op\@, \fixup
        .endm
 
 /*
@@ -745,7 +769,7 @@ USER(\label, ic     ivau, \tmp2)                    // invalidate I line PoU
        cbz             \tmp, \lbl
 #endif
        adr_l           \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-       this_cpu_offset \tmp2
+       get_this_cpu_offset     \tmp2
        ldr             w\tmp, [\tmp, \tmp2]
        cbnz            w\tmp, \lbl     // yield on pending softirq in task context
 .Lnoyield_\@:
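
In C terms, the reworked dcache_by_line_op above now walks the half-open interval
[start, end) one D-cache line at a time and only emits an exception-table entry when
a fixup label is supplied. A rough C equivalent of the loop (illustrative only; the
real code is assembly and also handles the ARM64_WORKAROUND_CLEAN_CACHE alternative)
looks like:

/* Sketch of the by-line maintenance loop over [start, end). */
static void dcache_op_by_line(unsigned long start, unsigned long end,
                              unsigned long line_size,
                              void (*dc_op)(unsigned long addr))
{
        unsigned long addr = start & ~(line_size - 1);  /* align down to a line */

        for (; addr < end; addr += line_size)
                dc_op(addr);            /* e.g. DC CIVAC on this line */

        /* the assembly version then issues a DSB for the requested domain */
}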
index 52e5c16..543c997 100644 (file)
  *     the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *     VIPT I-cache.
  *
- *     flush_icache_range(start, end)
+ *     All functions below apply to the interval [start, end)
+ *             - start  - virtual start address (inclusive)
+ *             - end    - virtual end address (exclusive)
  *
- *             Ensure coherency between the I-cache and the D-cache in the
- *             region described by start, end.
- *             - start  - virtual start address
- *             - end    - virtual end address
+ *     caches_clean_inval_pou(start, end)
  *
- *     invalidate_icache_range(start, end)
+ *             Ensure coherency between the I-cache and the D-cache region to
+ *             the Point of Unification.
  *
- *             Invalidate the I-cache in the region described by start, end.
- *             - start  - virtual start address
- *             - end    - virtual end address
+ *     caches_clean_inval_user_pou(start, end)
  *
- *     __flush_cache_user_range(start, end)
+ *             Ensure coherency between the I-cache and the D-cache region to
+ *             the Point of Unification.
+ *             Use only if the region might access user memory.
  *
- *             Ensure coherency between the I-cache and the D-cache in the
- *             region described by start, end.
- *             - start  - virtual start address
- *             - end    - virtual end address
+ *     icache_inval_pou(start, end)
  *
- *     __flush_dcache_area(kaddr, size)
+ *             Invalidate I-cache region to the Point of Unification.
  *
- *             Ensure that the data held in page is written back.
- *             - kaddr  - page address
- *             - size   - region size
+ *     dcache_clean_inval_poc(start, end)
+ *
+ *             Clean and invalidate D-cache region to the Point of Coherency.
+ *
+ *     dcache_inval_poc(start, end)
+ *
+ *             Invalidate D-cache region to the Point of Coherency.
+ *
+ *     dcache_clean_poc(start, end)
+ *
+ *             Clean D-cache region to the Point of Coherency.
+ *
+ *     dcache_clean_pop(start, end)
+ *
+ *             Clean D-cache region to the Point of Persistence.
+ *
+ *     dcache_clean_pou(start, end)
+ *
+ *             Clean D-cache region to the Point of Unification.
  */
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int  invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(void *addr, size_t len);
-extern void __inval_dcache_area(void *addr, size_t len);
-extern void __clean_dcache_area_poc(void *addr, size_t len);
-extern void __clean_dcache_area_pop(void *addr, size_t len);
-extern void __clean_dcache_area_pou(void *addr, size_t len);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
-extern void sync_icache_aliases(void *kaddr, unsigned long len);
+extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
+extern void icache_inval_pou(unsigned long start, unsigned long end);
+extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_pop(unsigned long start, unsigned long end);
+extern void dcache_clean_pou(unsigned long start, unsigned long end);
+extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
+extern void sync_icache_aliases(unsigned long start, unsigned long end);
 
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
-       __flush_icache_range(start, end);
+       caches_clean_inval_pou(start, end);
 
        /*
         * IPI all online CPUs so that they undergo a context synchronization
@@ -122,7 +135,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static __always_inline void __flush_icache_all(void)
+static __always_inline void icache_inval_all_pou(void)
 {
        if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
                return;
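
Callers throughout the series are converted from the old (addr, size) signatures to
the renamed [start, end) interfaces in the obvious way; a minimal compatibility-style
wrapper (a sketch, not part of the patch) makes the mapping explicit:

/* Sketch: old __flush_dcache_area(addr, size) in terms of the new API. */
static inline void flush_dcache_area_compat(void *addr, size_t size)
{
        dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}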
index 7faae6f..0f6d16f 100644 (file)
 /*
  * Records attributes of an individual CPU.
  */
-struct cpuinfo_arm64 {
-       struct cpu      cpu;
-       struct kobject  kobj;
-       u32             reg_ctr;
-       u32             reg_cntfrq;
-       u32             reg_dczid;
-       u32             reg_midr;
-       u32             reg_revidr;
-
-       u64             reg_id_aa64dfr0;
-       u64             reg_id_aa64dfr1;
-       u64             reg_id_aa64isar0;
-       u64             reg_id_aa64isar1;
-       u64             reg_id_aa64mmfr0;
-       u64             reg_id_aa64mmfr1;
-       u64             reg_id_aa64mmfr2;
-       u64             reg_id_aa64pfr0;
-       u64             reg_id_aa64pfr1;
-       u64             reg_id_aa64zfr0;
-
+struct cpuinfo_32bit {
        u32             reg_id_dfr0;
        u32             reg_id_dfr1;
        u32             reg_id_isar0;
@@ -54,6 +35,30 @@ struct cpuinfo_arm64 {
        u32             reg_mvfr0;
        u32             reg_mvfr1;
        u32             reg_mvfr2;
+};
+
+struct cpuinfo_arm64 {
+       struct cpu      cpu;
+       struct kobject  kobj;
+       u64             reg_ctr;
+       u64             reg_cntfrq;
+       u64             reg_dczid;
+       u64             reg_midr;
+       u64             reg_revidr;
+       u64             reg_gmid;
+
+       u64             reg_id_aa64dfr0;
+       u64             reg_id_aa64dfr1;
+       u64             reg_id_aa64isar0;
+       u64             reg_id_aa64isar1;
+       u64             reg_id_aa64mmfr0;
+       u64             reg_id_aa64mmfr1;
+       u64             reg_id_aa64mmfr2;
+       u64             reg_id_aa64pfr0;
+       u64             reg_id_aa64pfr1;
+       u64             reg_id_aa64zfr0;
+
+       struct cpuinfo_32bit    aarch32;
 
        /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
        u64             reg_zcr;
index 338840c..9bb9d11 100644 (file)
@@ -619,6 +619,13 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
        return val > 0;
 }
 
+static inline bool id_aa64pfr1_mte(u64 pfr1)
+{
+       u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
+
+       return val >= ID_AA64PFR1_MTE;
+}
+
 void __init setup_cpu_features(void);
 void check_local_cpu_capabilities(void);
 
@@ -630,9 +637,15 @@ static inline bool cpu_supports_mixed_endian_el0(void)
        return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
 }
 
+const struct cpumask *system_32bit_el0_cpumask(void);
+DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
 static inline bool system_supports_32bit_el0(void)
 {
-       return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+       u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+       return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+              id_aa64pfr0_32bit_el0(pfr0);
 }
 
 static inline bool system_supports_4kb_granule(void)
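
With mismatched 32-bit EL0 allowed, system_supports_32bit_el0() alone no longer means
every CPU can run compat tasks; callers that care about placement are expected to
consult the cpumask as well. A hedged usage sketch (not taken from the patch):

/* Sketch: check whether a particular CPU can run a 32-bit EL0 task. */
static bool cpu_can_run_compat(int cpu)
{
        if (!system_supports_32bit_el0())
                return false;

        /* cpu_possible_mask unless the mismatched-32bit static key is set */
        return cpumask_test_cpu(cpu, system_32bit_el0_cpumask());
}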
index 3578aba..1bed37e 100644 (file)
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);
 
 static inline void efi_capsule_flush_cache_range(void *addr, int size)
 {
-       __flush_dcache_area(addr, size);
+       dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
 }
 
 #endif /* _ASM_EFI_H */
index 25ed956..f4cbfa9 100644 (file)
@@ -180,7 +180,8 @@ static inline void *__kvm_vector_slot2addr(void *base,
 
 struct kvm;
 
-#define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l)   \
+       dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
@@ -208,12 +209,12 @@ static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
 {
        if (icache_is_aliasing()) {
                /* any kind of VIPT cache */
-               __flush_icache_all();
+               icache_inval_all_pou();
        } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
                /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
                void *va = page_address(pfn_to_page(pfn));
 
-               invalidate_icache_range((unsigned long)va,
+               icache_inval_pou((unsigned long)va,
                                        (unsigned long)va + size);
        }
 }
index ba89a9a..9906541 100644 (file)
                SYM_FUNC_START_ALIAS(__pi_##x); \
                SYM_FUNC_START_WEAK(x)
 
+#define SYM_FUNC_START_WEAK_ALIAS_PI(x)                \
+               SYM_FUNC_START_ALIAS(__pi_##x); \
+               SYM_START(x, SYM_L_WEAK, SYM_A_ALIGN)
+
 #define SYM_FUNC_END_PI(x)                     \
                SYM_FUNC_END(x);                \
                SYM_FUNC_END_ALIAS(__pi_##x)
 
+#define SYM_FUNC_END_ALIAS_PI(x)               \
+               SYM_FUNC_END_ALIAS(x);          \
+               SYM_FUNC_END_ALIAS(__pi_##x)
+
 #endif
index 9df3fee..7a094aa 100644 (file)
@@ -329,13 +329,13 @@ long get_tagged_addr_ctrl(struct task_struct *task);
  * of header definitions for the use of task_stack_page.
  */
 
-#define current_top_of_stack()                                                 \
-({                                                                             \
-       struct stack_info _info;                                                \
-       BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info));   \
-       _info.high;                                                             \
+#define current_top_of_stack()                                                         \
+({                                                                                     \
+       struct stack_info _info;                                                        \
+       BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info));        \
+       _info.high;                                                                     \
 })
-#define on_thread_stack()      (on_task_stack(current, current_stack_pointer, NULL))
+#define on_thread_stack()      (on_task_stack(current, current_stack_pointer, 1, NULL))
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
index eaa2cd9..8297bcc 100644 (file)
@@ -9,18 +9,18 @@
 #ifdef CONFIG_SHADOW_CALL_STACK
        scs_sp  .req    x18
 
-       .macro scs_load tsk, tmp
+       .macro scs_load tsk
        ldr     scs_sp, [\tsk, #TSK_TI_SCS_SP]
        .endm
 
-       .macro scs_save tsk, tmp
+       .macro scs_save tsk
        str     scs_sp, [\tsk, #TSK_TI_SCS_SP]
        .endm
 #else
-       .macro scs_load tsk, tmp
+       .macro scs_load tsk
        .endm
 
-       .macro scs_save tsk, tmp
+       .macro scs_save tsk
        .endm
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
index 63e0b92..8bc30a5 100644 (file)
@@ -42,8 +42,9 @@ unsigned long sdei_arch_get_entry_point(int conduit);
 
 struct stack_info;
 
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
-static inline bool on_sdei_stack(unsigned long sp,
+bool _on_sdei_stack(unsigned long sp, unsigned long size,
+                   struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
        if (!IS_ENABLED(CONFIG_VMAP_STACK))
@@ -51,7 +52,7 @@ static inline bool on_sdei_stack(unsigned long sp,
        if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
                return false;
        if (in_nmi())
-               return _on_sdei_stack(sp, info);
+               return _on_sdei_stack(sp, size, info);
 
        return false;
 }
index 0e35775..fc55f5a 100644 (file)
@@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void);
 
 /*
  * Initial data for bringing up a secondary CPU.
- * @stack  - sp for the secondary CPU
  * @status - Result passed back from the secondary CPU to
  *           indicate failure.
  */
 struct secondary_data {
-       void *stack;
        struct task_struct *task;
        long status;
 };
index 4b33ca6..1801399 100644 (file)
@@ -69,14 +69,14 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 
 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-static inline bool on_stack(unsigned long sp, unsigned long low,
-                               unsigned long high, enum stack_type type,
-                               struct stack_info *info)
+static inline bool on_stack(unsigned long sp, unsigned long size,
+                           unsigned long low, unsigned long high,
+                           enum stack_type type, struct stack_info *info)
 {
        if (!low)
                return false;
 
-       if (sp < low || sp >= high)
+       if (sp < low || sp + size < sp || sp + size > high)
                return false;
 
        if (info) {
@@ -87,38 +87,38 @@ static inline bool on_stack(unsigned long sp, unsigned long low,
        return true;
 }
 
-static inline bool on_irq_stack(unsigned long sp,
+static inline bool on_irq_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
        unsigned long high = low + IRQ_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_IRQ, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
 }
 
 static inline bool on_task_stack(const struct task_struct *tsk,
-                                unsigned long sp,
+                                unsigned long sp, unsigned long size,
                                 struct stack_info *info)
 {
        unsigned long low = (unsigned long)task_stack_page(tsk);
        unsigned long high = low + THREAD_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_TASK, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
 }
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
        unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
 }
 #else
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
                        struct stack_info *info) { return false; }
 #endif
 
@@ -128,21 +128,21 @@ static inline bool on_overflow_stack(unsigned long sp,
  * context.
  */
 static inline bool on_accessible_stack(const struct task_struct *tsk,
-                                      unsigned long sp,
+                                      unsigned long sp, unsigned long size,
                                       struct stack_info *info)
 {
        if (info)
                info->type = STACK_TYPE_UNKNOWN;
 
-       if (on_task_stack(tsk, sp, info))
+       if (on_task_stack(tsk, sp, size, info))
                return true;
        if (tsk != current || preemptible())
                return false;
-       if (on_irq_stack(sp, info))
+       if (on_irq_stack(sp, size, info))
                return true;
-       if (on_overflow_stack(sp, info))
+       if (on_overflow_stack(sp, size, info))
                return true;
-       if (on_sdei_stack(sp, info))
+       if (on_sdei_stack(sp, size, info))
                return true;
 
        return false;
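
The new size argument lets the unwinder check that a whole object, not just its base
address, lies on the stack. The extra "sp + size < sp" test rejects sizes large enough
to wrap the address space; the check reduces to the following (mirroring on_stack()
above):

/* Overflow-safe check that [sp, sp + size) fits inside [low, high). */
static bool range_on_stack(unsigned long sp, unsigned long size,
                           unsigned long low, unsigned long high)
{
        if (!low)
                return false;

        return !(sp < low || sp + size < sp || sp + size > high);
}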
index c906d20..3fb79b7 100644 (file)
@@ -181,7 +181,7 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
         */
        if (!is_module) {
                dsb(ish);
-               __flush_icache_all();
+               icache_inval_all_pou();
                isb();
 
                /* Ignore ARM64_CB bit from feature mask */
index 0cb34cc..bd0fc23 100644 (file)
@@ -27,6 +27,7 @@
 int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,                offsetof(struct task_struct, active_mm));
+  DEFINE(TSK_CPU,              offsetof(struct task_struct, cpu));
   BLANK();
   DEFINE(TSK_TI_FLAGS,         offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,       offsetof(struct task_struct, thread_info.preempt_count));
@@ -99,7 +100,6 @@ int main(void)
   DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
   DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
-  DEFINE(CPU_BOOT_STACK,       offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,                offsetof(struct secondary_data, task));
   BLANK();
   DEFINE(FTR_OVR_VAL_OFFSET,   offsetof(struct arm64_ftr_override, val));
index efed283..dbae006 100644 (file)
@@ -107,6 +107,24 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
 bool arm64_use_ng_mappings = false;
 EXPORT_SYMBOL(arm64_use_ng_mappings);
 
+/*
+ * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
+ * support it?
+ */
+static bool __read_mostly allow_mismatched_32bit_el0;
+
+/*
+ * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
+ * seen at least one CPU capable of 32-bit EL0.
+ */
+DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
+/*
+ * Mask of CPUs supporting 32-bit EL0.
+ * Only valid if arm64_mismatched_32bit_el0 is enabled.
+ */
+static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
+
 /*
  * Flag to indicate if we have computed the system wide
  * capabilities based on the boot time active CPUs. This
@@ -400,6 +418,11 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
        ARM64_FTR_END,
 };
 
+static const struct arm64_ftr_bits ftr_gmid[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, SYS_GMID_EL1_BS_SHIFT, 4, 0),
+       ARM64_FTR_END,
+};
+
 static const struct arm64_ftr_bits ftr_id_isar0[] = {
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
@@ -617,6 +640,9 @@ static const struct __ftr_reg_entry {
        /* Op1 = 0, CRn = 1, CRm = 2 */
        ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
 
+       /* Op1 = 1, CRn = 0, CRm = 0 */
+       ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
+
        /* Op1 = 3, CRn = 0, CRm = 0 */
        { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
        ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
@@ -767,7 +793,7 @@ static void __init sort_ftr_regs(void)
  * Any bits that are not covered by an arm64_ftr_bits entry are considered
  * RES0 for the system-wide value, and must strictly match.
  */
-static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
 {
        u64 val = 0;
        u64 strict_mask = ~0x0ULL;
@@ -863,6 +889,31 @@ static void __init init_cpu_hwcaps_indirect_list(void)
 
 static void __init setup_boot_cpu_capabilities(void);
 
+static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
+{
+       init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+       init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+       init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+       init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+       init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+       init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+       init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+       init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+       init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+       init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+       init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+       init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+       init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+       init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+       init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+       init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+       init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+       init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+       init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+       init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+       init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+}
+
 void __init init_cpu_features(struct cpuinfo_arm64 *info)
 {
        /* Before we start using the tables, make sure it is sorted */
@@ -882,35 +933,17 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
        init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
        init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
 
-       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-               init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
-               init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
-               init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
-               init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
-               init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
-               init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
-               init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
-               init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
-               init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
-               init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
-               init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
-               init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
-               init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
-               init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
-               init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
-               init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
-               init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
-               init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
-               init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
-               init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
-               init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
-       }
+       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+               init_32bit_cpu_features(&info->aarch32);
 
        if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
                init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
                sve_init_vq_map();
        }
 
+       if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+               init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
+
        /*
         * Initialize the indirect array of CPU hwcaps capabilities pointers
         * before we handle the boot CPU below.
@@ -975,20 +1008,28 @@ static void relax_cpu_ftr_reg(u32 sys_id, int field)
        WARN_ON(!ftrp->width);
 }
 
-static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
-                                    struct cpuinfo_arm64 *boot)
+static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info,
+                                        struct cpuinfo_arm64 *boot)
+{
+       static bool boot_cpu_32bit_regs_overridden = false;
+
+       if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
+               return;
+
+       if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
+               return;
+
+       boot->aarch32 = info->aarch32;
+       init_32bit_cpu_features(&boot->aarch32);
+       boot_cpu_32bit_regs_overridden = true;
+}
+
+static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
+                                    struct cpuinfo_32bit *boot)
 {
        int taint = 0;
        u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
-       /*
-        * If we don't have AArch32 at all then skip the checks entirely
-        * as the register values may be UNKNOWN and we're not going to be
-        * using them for anything.
-        */
-       if (!id_aa64pfr0_32bit_el0(pfr0))
-               return taint;
-
        /*
         * If we don't have AArch32 at EL1, then relax the strictness of
         * EL1-dependent register fields to avoid spurious sanity check fails.
@@ -1135,10 +1176,29 @@ void update_cpu_features(int cpu,
        }
 
        /*
+        * The kernel uses the LDGM/STGM instructions and the number of tags
+        * they read/write depends on the GMID_EL1.BS field. Check that the
+        * value is the same on all CPUs.
+        */
+       if (IS_ENABLED(CONFIG_ARM64_MTE) &&
+           id_aa64pfr1_mte(info->reg_id_aa64pfr1)) {
+               taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
+                                             info->reg_gmid, boot->reg_gmid);
+       }
+
+       /*
+        * If we don't have AArch32 at all then skip the checks entirely
+        * as the register values may be UNKNOWN and we're not going to be
+        * using them for anything.
+        *
         * This relies on a sanitised view of the AArch64 ID registers
         * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
         */
-       taint |= update_32bit_cpu_features(cpu, info, boot);
+       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+               lazy_init_32bit_cpu_features(info, boot);
+               taint |= update_32bit_cpu_features(cpu, &info->aarch32,
+                                                  &boot->aarch32);
+       }
 
        /*
         * Mismatched CPU features are a recipe for disaster. Don't even
@@ -1248,6 +1308,28 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
        return feature_matches(val, entry);
 }
 
+const struct cpumask *system_32bit_el0_cpumask(void)
+{
+       if (!system_supports_32bit_el0())
+               return cpu_none_mask;
+
+       if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+               return cpu_32bit_el0_mask;
+
+       return cpu_possible_mask;
+}
+
+static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       if (!has_cpuid_feature(entry, scope))
+               return allow_mismatched_32bit_el0;
+
+       if (scope == SCOPE_SYSTEM)
+               pr_info("detected: 32-bit EL0 Support\n");
+
+       return true;
+}
+
 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
 {
        bool has_sre;
@@ -1866,10 +1948,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .cpu_enable = cpu_copy_el2regs,
        },
        {
-               .desc = "32-bit EL0 Support",
-               .capability = ARM64_HAS_32BIT_EL0,
+               .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
-               .matches = has_cpuid_feature,
+               .matches = has_32bit_el0,
                .sys_reg = SYS_ID_AA64PFR0_EL1,
                .sign = FTR_UNSIGNED,
                .field_pos = ID_AA64PFR0_EL0_SHIFT,
@@ -2378,7 +2459,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
        {},
 };
 
-static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
+static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
 {
        switch (cap->hwcap_type) {
        case CAP_HWCAP:
@@ -2423,7 +2504,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
        return rc;
 }
 
-static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
+static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
 {
        /* We support emulation of accesses to CPU ID feature registers */
        cpu_set_named_feature(CPUID);
@@ -2598,7 +2679,7 @@ static void check_early_cpu_features(void)
 }
 
 static void
-verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
+__verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
 {
 
        for (; caps->matches; caps++)
@@ -2609,6 +2690,14 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
                }
 }
 
+static void verify_local_elf_hwcaps(void)
+{
+       __verify_local_elf_hwcaps(arm64_elf_hwcaps);
+
+       if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1)))
+               __verify_local_elf_hwcaps(compat_elf_hwcaps);
+}
+
 static void verify_sve_features(void)
 {
        u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
@@ -2673,11 +2762,7 @@ static void verify_local_cpu_capabilities(void)
         * on all secondary CPUs.
         */
        verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
-
-       verify_local_elf_hwcaps(arm64_elf_hwcaps);
-
-       if (system_supports_32bit_el0())
-               verify_local_elf_hwcaps(compat_elf_hwcaps);
+       verify_local_elf_hwcaps();
 
        if (system_supports_sve())
                verify_sve_features();
@@ -2812,6 +2897,34 @@ void __init setup_cpu_features(void)
                        ARCH_DMA_MINALIGN);
 }
 
+static int enable_mismatched_32bit_el0(unsigned int cpu)
+{
+       struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+       bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
+
+       if (cpu_32bit) {
+               cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
+               static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
+               setup_elf_hwcaps(compat_elf_hwcaps);
+       }
+
+       return 0;
+}
+
+static int __init init_32bit_el0_mask(void)
+{
+       if (!allow_mismatched_32bit_el0)
+               return 0;
+
+       if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                                "arm64/mismatched_32bit_el0:online",
+                                enable_mismatched_32bit_el0, NULL);
+}
+subsys_initcall_sync(init_32bit_el0_mask);
+
 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
 {
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
@@ -2905,8 +3018,8 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
 }
 
 static struct undef_hook mrs_hook = {
-       .instr_mask = 0xfff00000,
-       .instr_val  = 0xd5300000,
+       .instr_mask = 0xffff0000,
+       .instr_val  = 0xd5380000,
        .pstate_mask = PSR_AA32_MODE_MASK,
        .pstate_val = PSR_MODE_EL0t,
        .fn = emulate_mrs,
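
For reference, an undef hook matches when (insn & instr_mask) == instr_val. The
tightened mask/value pair above should match only MRS reads of op0==3, op1==0 system
registers (the ID register space handled by emulate_mrs()), where the old pair matched
any MRS encoding; a small sketch of the comparison:

/* Sketch of how a trapped instruction is tested against an undef hook. */
static bool undef_hook_matches(u32 insn, u32 mask, u32 val)
{
        return (insn & mask) == val;
}

/* e.g. undef_hook_matches(insn, 0xffff0000, 0xd5380000) for ID-space MRS */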
index 51fcf99..87731fe 100644 (file)
@@ -246,7 +246,7 @@ static struct kobj_type cpuregs_kobj_type = {
                struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj);             \
                                                                                \
                if (info->reg_midr)                                             \
-                       return sprintf(buf, "0x%016x\n", info->reg_##_field);   \
+                       return sprintf(buf, "0x%016llx\n", info->reg_##_field); \
                else                                                            \
                        return 0;                                               \
        }                                                                       \
@@ -344,6 +344,32 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
        pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
 }
 
+static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info)
+{
+       info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+       info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
+       info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+       info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+       info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+       info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+       info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+       info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+       info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
+       info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+       info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+       info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+       info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+       info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
+       info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
+       info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+       info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+       info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
+
+       info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+       info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+       info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+}
+
 static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 {
        info->reg_cntfrq = arch_timer_get_cntfrq();
@@ -371,31 +397,11 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
        info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
 
-       /* Update the 32bit ID registers only if AArch32 is implemented */
-       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-               info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
-               info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
-               info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
-               info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
-               info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
-               info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
-               info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
-               info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
-               info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
-               info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
-               info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
-               info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
-               info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
-               info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
-               info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
-               info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
-               info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
-               info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
-
-               info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
-               info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
-               info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
-       }
+       if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+               info->reg_gmid = read_cpuid(GMID_EL1);
+
+       if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+               __cpuinfo_store_cpu_32bit(&info->aarch32);
 
        if (IS_ENABLED(CONFIG_ARM64_SVE) &&
            id_aa64pfr0_sve(info->reg_id_aa64pfr0))
index 0073b24..61a87fa 100644 (file)
@@ -28,7 +28,8 @@ SYM_CODE_START(efi_enter_kernel)
         * stale icache entries from before relocation.
         */
        ldr     w1, =kernel_size
-       bl      __clean_dcache_area_poc
+       add     x1, x0, x1
+       bl      dcache_clean_poc
        ic      ialluis
 
        /*
@@ -36,8 +37,8 @@ SYM_CODE_START(efi_enter_kernel)
         * so that we can safely disable the MMU and caches.
         */
        adr     x0, 0f
-       ldr     w1, 3f
-       bl      __clean_dcache_area_poc
+       adr     x1, 3f
+       bl      dcache_clean_poc
 0:
        /* Turn off Dcache and MMU */
        mrs     x0, CurrentEL
@@ -64,5 +65,5 @@ SYM_CODE_START(efi_enter_kernel)
        mov     x2, xzr
        mov     x3, xzr
        br      x19
+3:
 SYM_CODE_END(efi_enter_kernel)
-3:     .long   . - 0b
index 3513984..3153f14 100644 (file)
@@ -275,7 +275,7 @@ alternative_else_nop_endif
 
        mte_set_kernel_gcr x22, x23
 
-       scs_load tsk, x20
+       scs_load tsk
        .else
        add     x21, sp, #PT_REGS_SIZE
        get_current_task tsk
@@ -285,7 +285,7 @@ alternative_else_nop_endif
        stp     lr, x21, [sp, #S_LR]
 
        /*
-        * For exceptions from EL0, create a terminal frame record.
+        * For exceptions from EL0, create a final frame record.
         * For exceptions from EL1, create a synthetic frame record so the
         * interrupted code shows up in the backtrace.
         */
@@ -375,7 +375,7 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-       scs_save tsk, x0
+       scs_save tsk
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -979,8 +979,8 @@ SYM_FUNC_START(cpu_switch_to)
        mov     sp, x9
        msr     sp_el0, x1
        ptrauth_keys_install_kernel x1, x8, x9, x10
-       scs_save x0, x8
-       scs_load x1, x8
+       scs_save x0
+       scs_load x1
        ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
index 96873df..a6ccd65 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/asm_pointer_auth.h>
 #include <asm/assembler.h>
 #include <asm/boot.h>
+#include <asm/bug.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
@@ -117,8 +118,8 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
        dmb     sy                              // needed before dc ivac with
                                                // MMU off
 
-       mov     x1, #0x20                       // 4 x 8 bytes
-       b       __inval_dcache_area             // tail call
+       add     x1, x0, #0x20                   // 4 x 8 bytes
+       b       dcache_inval_poc                // tail call
 SYM_CODE_END(preserve_boot_args)
 
 /*
@@ -268,8 +269,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
         */
        adrp    x0, init_pg_dir
        adrp    x1, init_pg_end
-       sub     x1, x1, x0
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        /*
         * Clear the init page tables.
@@ -382,39 +382,57 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 
        adrp    x0, idmap_pg_dir
        adrp    x1, idmap_pg_end
-       sub     x1, x1, x0
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        adrp    x0, init_pg_dir
        adrp    x1, init_pg_end
-       sub     x1, x1, x0
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        ret     x28
 SYM_FUNC_END(__create_page_tables)
 
+       /*
+        * Initialize CPU registers with task-specific and cpu-specific context.
+        *
+        * Create a final frame record at task_pt_regs(current)->stackframe, so
+        * that the unwinder can identify the final frame record of any task by
+        * its location in the task stack. We reserve the entire pt_regs space
+        * for consistency with user tasks and kthreads.
+        */
+       .macro  init_cpu_task tsk, tmp1, tmp2
+       msr     sp_el0, \tsk
+
+       ldr     \tmp1, [\tsk, #TSK_STACK]
+       add     sp, \tmp1, #THREAD_SIZE
+       sub     sp, sp, #PT_REGS_SIZE
+
+       stp     xzr, xzr, [sp, #S_STACKFRAME]
+       add     x29, sp, #S_STACKFRAME
+
+       scs_load \tsk
+
+       adr_l   \tmp1, __per_cpu_offset
+       ldr     w\tmp2, [\tsk, #TSK_CPU]
+       ldr     \tmp1, [\tmp1, \tmp2, lsl #3]
+       set_this_cpu_offset \tmp1
+       .endm
+
 /*
  * The following fragment of code is executed with the MMU enabled.
  *
  *   x0 = __PHYS_OFFSET
  */
 SYM_FUNC_START_LOCAL(__primary_switched)
-       adrp    x4, init_thread_union
-       add     sp, x4, #THREAD_SIZE
-       adr_l   x5, init_task
-       msr     sp_el0, x5                      // Save thread_info
+       adr_l   x4, init_task
+       init_cpu_task x4, x5, x6
 
        adr_l   x8, vectors                     // load VBAR_EL1 with virtual
        msr     vbar_el1, x8                    // vector table address
        isb
 
-       stp     xzr, x30, [sp, #-16]!
+       stp     x29, x30, [sp, #-16]!
        mov     x29, sp
 
-#ifdef CONFIG_SHADOW_CALL_STACK
-       adr_l   scs_sp, init_shadow_call_stack  // Set shadow call stack
-#endif
-
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
 
        ldr_l   x4, kimage_vaddr                // Save the offset between
@@ -446,10 +464,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 0:
 #endif
        bl      switch_to_vhe                   // Prefer VHE if possible
-       add     sp, sp, #16
-       mov     x29, #0
-       mov     x30, #0
-       b       start_kernel
+       ldp     x29, x30, [sp], #16
+       bl      start_kernel
+       ASM_BUG()
 SYM_FUNC_END(__primary_switched)
 
        .pushsection ".rodata", "a"
@@ -632,21 +649,17 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
        isb
 
        adr_l   x0, secondary_data
-       ldr     x1, [x0, #CPU_BOOT_STACK]       // get secondary_data.stack
-       cbz     x1, __secondary_too_slow
-       mov     sp, x1
        ldr     x2, [x0, #CPU_BOOT_TASK]
        cbz     x2, __secondary_too_slow
-       msr     sp_el0, x2
-       scs_load x2, x3
-       mov     x29, #0
-       mov     x30, #0
+
+       init_cpu_task x2, x1, x3
 
 #ifdef CONFIG_ARM64_PTR_AUTH
        ptrauth_keys_init_cpu x2, x3, x4, x5
 #endif
 
-       b       secondary_start_kernel
+       bl      secondary_start_kernel
+       ASM_BUG()
 SYM_FUNC_END(__secondary_switched)
 
 SYM_FUNC_START_LOCAL(__secondary_too_slow)
index 8ccca66..81c0186 100644 (file)
@@ -45,7 +45,7 @@
  * Because this code has to be copied to a 'safe' page, it can't call out to
  * other functions by PC-relative address. Also remember that it may be
  * mid-way through over-writing other functions. For this reason it contains
- * code from flush_icache_range() and uses the copy_page() macro.
+ * code from caches_clean_inval_pou() and uses the copy_page() macro.
  *
  * This 'safe' page is mapped via ttbr0, and executed from there. This function
  * switches to a copy of the linear map in ttbr1, performs the restore, then
@@ -87,11 +87,12 @@ SYM_CODE_START(swsusp_arch_suspend_exit)
        copy_page       x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 
        add     x1, x10, #PAGE_SIZE
-       /* Clean the copied page to PoU - based on flush_icache_range() */
+       /* Clean the copied page to PoU - based on caches_clean_inval_pou() */
        raw_dcache_line_size x2, x3
        sub     x3, x2, #1
        bic     x4, x10, x3
-2:     dc      cvau, x4        /* clean D line / unified line */
+2:     /* clean D line / unified line */
+alternative_insn "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
        add     x4, x4, x2
        cmp     x4, x1
        b.lo    2b
index b1cef37..46a0b4d 100644 (file)
@@ -210,7 +210,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
                return -ENOMEM;
 
        memcpy(page, src_start, length);
-       __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+       caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
        rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
        if (rc)
                return rc;
@@ -240,8 +240,6 @@ static int create_safe_exec_page(void *src_start, size_t length,
        return 0;
 }
 
-#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
-
 #ifdef CONFIG_ARM64_MTE
 
 static DEFINE_XARRAY(mte_pages);
@@ -383,13 +381,18 @@ int swsusp_arch_suspend(void)
                ret = swsusp_save();
        } else {
                /* Clean kernel core startup/idle code to PoC*/
-               dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
-               dcache_clean_range(__idmap_text_start, __idmap_text_end);
+               dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
+                                   (unsigned long)__mmuoff_data_end);
+               dcache_clean_inval_poc((unsigned long)__idmap_text_start,
+                                   (unsigned long)__idmap_text_end);
 
                /* Clean kvm setup code to PoC? */
                if (el2_reset_needed()) {
-                       dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
-                       dcache_clean_range(__hyp_text_start, __hyp_text_end);
+                       dcache_clean_inval_poc(
+                               (unsigned long)__hyp_idmap_text_start,
+                               (unsigned long)__hyp_idmap_text_end);
+                       dcache_clean_inval_poc((unsigned long)__hyp_text_start,
+                                           (unsigned long)__hyp_text_end);
                }
 
                swsusp_mte_restore_tags();
@@ -474,7 +477,8 @@ int swsusp_arch_resume(void)
         * The hibernate exit text contains a set of el2 vectors, that will
         * be executed at el2 with the mmu off in order to reload hyp-stub.
         */
-       __flush_dcache_area(hibernate_exit, exit_size);
+       dcache_clean_inval_poc((unsigned long)hibernate_exit,
+                           (unsigned long)hibernate_exit + exit_size);
 
        /*
         * KASLR will cause the el2 vectors to be in a different location in
index e628c8c..53a381a 100644 (file)
@@ -237,7 +237,8 @@ asmlinkage void __init init_feature_override(void)
 
        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                if (regs[i]->override)
-                       __flush_dcache_area(regs[i]->override,
+                       dcache_clean_inval_poc((unsigned long)regs[i]->override,
+                                           (unsigned long)regs[i]->override +
                                            sizeof(*regs[i]->override));
        }
 }
index bcf3c27..c96a9a0 100644 (file)
@@ -35,7 +35,7 @@ __efistub_strnlen             = __pi_strnlen;
 __efistub_strcmp               = __pi_strcmp;
 __efistub_strncmp              = __pi_strncmp;
 __efistub_strrchr              = __pi_strrchr;
-__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
+__efistub_dcache_clean_poc = __pi_dcache_clean_poc;
 
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 __efistub___memcpy             = __pi_memcpy;
index 6c0de2f..51cb8dc 100644 (file)
@@ -198,7 +198,7 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
 
        ret = aarch64_insn_write(tp, insn);
        if (ret == 0)
-               __flush_icache_range((uintptr_t)tp,
+               caches_clean_inval_pou((uintptr_t)tp,
                                     (uintptr_t)tp + AARCH64_INSN_SIZE);
 
        return ret;
index 341342b..cfa2cfd 100644 (file)
@@ -72,7 +72,9 @@ u64 __init kaslr_early_init(void)
         * we end up running with module randomization disabled.
         */
        module_alloc_base = (u64)_etext - MODULES_VSIZE;
-       __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+                           (unsigned long)&module_alloc_base +
+                                   sizeof(module_alloc_base));
 
        /*
         * Try to map the FDT early. If this fails, we simply bail,
@@ -170,8 +172,12 @@ u64 __init kaslr_early_init(void)
        module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
        module_alloc_base &= PAGE_MASK;
 
-       __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
-       __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+                           (unsigned long)&module_alloc_base +
+                                   sizeof(module_alloc_base));
+       dcache_clean_inval_poc((unsigned long)&memstart_offset_seed,
+                           (unsigned long)&memstart_offset_seed +
+                                   sizeof(memstart_offset_seed));
 
        return offset;
 }
index 90a335c..03ceabe 100644 (file)
@@ -68,10 +68,16 @@ int machine_kexec_post_load(struct kimage *kimage)
        kimage->arch.kern_reloc = __pa(reloc_code);
        kexec_image_info(kimage);
 
-       /* Flush the reloc_code in preparation for its execution. */
-       __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
-       flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
-                          arm64_relocate_new_kernel_size);
+       /*
+        * For execution with the MMU off, reloc_code needs to be cleaned to the
+        * PoC and invalidated from the I-cache.
+        */
+       dcache_clean_inval_poc((unsigned long)reloc_code,
+                           (unsigned long)reloc_code +
+                                   arm64_relocate_new_kernel_size);
+       icache_inval_pou((uintptr_t)reloc_code,
+                               (uintptr_t)reloc_code +
+                                       arm64_relocate_new_kernel_size);
 
        return 0;
 }
@@ -102,16 +108,18 @@ static void kexec_list_flush(struct kimage *kimage)
 
        for (entry = &kimage->head; ; entry++) {
                unsigned int flag;
-               void *addr;
+               unsigned long addr;
 
                /* flush the list entries. */
-               __flush_dcache_area(entry, sizeof(kimage_entry_t));
+               dcache_clean_inval_poc((unsigned long)entry,
+                                   (unsigned long)entry +
+                                           sizeof(kimage_entry_t));
 
                flag = *entry & IND_FLAGS;
                if (flag == IND_DONE)
                        break;
 
-               addr = phys_to_virt(*entry & PAGE_MASK);
+               addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK);
 
                switch (flag) {
                case IND_INDIRECTION:
@@ -120,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage)
                        break;
                case IND_SOURCE:
                        /* flush the source pages. */
-                       __flush_dcache_area(addr, PAGE_SIZE);
+                       dcache_clean_inval_poc(addr, addr + PAGE_SIZE);
                        break;
                case IND_DESTINATION:
                        break;
@@ -147,8 +155,10 @@ static void kexec_segment_flush(const struct kimage *kimage)
                        kimage->segment[i].memsz,
                        kimage->segment[i].memsz /  PAGE_SIZE);
 
-               __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
-                       kimage->segment[i].memsz);
+               dcache_clean_inval_poc(
+                       (unsigned long)phys_to_virt(kimage->segment[i].mem),
+                       (unsigned long)phys_to_virt(kimage->segment[i].mem) +
+                               kimage->segment[i].memsz);
        }
 }
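
The pattern above recurs throughout this series: the old __flush_dcache_area()/flush_icache_range() helpers took a base address and a size, while the new dcache_clean_inval_poc()/icache_inval_pou() helpers take a start and an end virtual address. A minimal sketch of the conversion, assuming the declarations from <asm/cacheflush.h>; the wrapper name is invented purely for illustration:

/* Illustrative wrapper only: adapt an old-style (base, size) call site
 * to the new (start, end) cache maintenance helpers. */
static inline void prepare_for_mmu_off_exec(void *base, size_t size)
{
	unsigned long start = (unsigned long)base;
	unsigned long end = start + size;

	dcache_clean_inval_poc(start, end);	/* clean+invalidate D-side to PoC */
	icache_inval_pou(start, end);		/* drop stale I-cache lines */
}
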
 
index 88ff471..4a72c27 100644 (file)
@@ -116,7 +116,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                tail = (struct frame_tail __user *)regs->regs[29];
 
                while (entry->nr < entry->max_stack &&
-                      tail && !((unsigned long)tail & 0xf))
+                      tail && !((unsigned long)tail & 0x7))
                        tail = user_backtrace(tail, entry);
        } else {
 #ifdef CONFIG_COMPAT
index 2c24763..9be668f 100644 (file)
@@ -21,7 +21,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
        memcpy(dst, src, len);
 
        /* flush caches (dcache/icache) */
-       sync_icache_aliases(dst, len);
+       sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);
 
        kunmap_atomic(xol_page_kaddr);
 }
index b715c6b..3925ec3 100644 (file)
@@ -412,6 +412,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;
+       /*
+        * For the benefit of the unwinder, set up childregs->stackframe
+        * as the final frame for the new task.
+        */
+       p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
 
        ptrace_hw_copy_thread(p);
 
@@ -504,6 +509,15 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
        write_sysreg(val, cntkctl_el1);
 }
 
+static void compat_thread_switch(struct task_struct *next)
+{
+       if (!is_compat_thread(task_thread_info(next)))
+               return;
+
+       if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+               set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
+}
+
 static void update_sctlr_el1(u64 sctlr)
 {
        /*
@@ -545,6 +559,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
        ssbs_thread_switch(next);
        erratum_1418040_thread_switch(prev, next);
        ptrauth_thread_switch_user(next);
+       compat_thread_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
@@ -610,8 +625,15 @@ unsigned long arch_align_stack(unsigned long sp)
  */
 void arch_setup_new_exec(void)
 {
-       current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+       unsigned long mmflags = 0;
+
+       if (is_compat_task()) {
+               mmflags = MMCF_AARCH32;
+               if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+                       set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+       }
 
+       current->mm->context.flags = mmflags;
        ptrauth_thread_init_user();
        mte_thread_init_user();
 
index eb2f739..499b6b2 100644 (file)
@@ -122,7 +122,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 {
        return ((addr & ~(THREAD_SIZE - 1))  ==
                (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
-               on_irq_stack(addr, NULL);
+               on_irq_stack(addr, sizeof(unsigned long), NULL);
 }
 
 /**
index 2c7ca44..c524f96 100644 (file)
@@ -162,31 +162,33 @@ static int init_sdei_scs(void)
        return err;
 }
 
-static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
+                                struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
 }
 
-static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
+                                  struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
 }
 
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
 {
        if (!IS_ENABLED(CONFIG_VMAP_STACK))
                return false;
 
-       if (on_sdei_critical_stack(sp, info))
+       if (on_sdei_critical_stack(sp, size, info))
                return true;
 
-       if (on_sdei_normal_stack(sp, info))
+       if (on_sdei_normal_stack(sp, size, info))
                return true;
 
        return false;
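
The SDEI stack checks above, like the ptrace and stacktrace callers further down, now pass an explicit object size along with the stack pointer. The generic on_stack() helper is not shown in this hunk; the sketch below only illustrates the intended semantics of the extra argument and is not the in-tree implementation.

#include <stdbool.h>

/* Sketch: with a size argument, the whole [sp, sp + size) object must
 * lie within the [low, high) stack bounds, not just its first byte. */
static bool object_on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high)
{
	if (!low)
		return false;
	if (sp < low || sp + size < sp || sp + size > high)
		return false;
	return true;
}
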
index 61845c0..b7a35a0 100644 (file)
@@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        set_cpu_logical_map(0, mpidr);
 
-       /*
-        * clear __my_cpu_offset on boot CPU to avoid hang caused by
-        * using percpu variable early, for example, lockdep will
-        * access percpu variable inside lock_release
-        */
-       set_my_cpu_offset(0);
        pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
                (unsigned long)mpidr, read_cpuid_id());
 }
index 6237486..f8192f4 100644 (file)
@@ -911,6 +911,19 @@ static void do_signal(struct pt_regs *regs)
        restore_saved_sigmask();
 }
 
+static bool cpu_affinity_invalid(struct pt_regs *regs)
+{
+       if (!compat_user_mode(regs))
+               return false;
+
+       /*
+        * We're preemptible, but a reschedule will cause us to check the
+        * affinity again.
+        */
+       return !cpumask_test_cpu(raw_smp_processor_id(),
+                                system_32bit_el0_cpumask());
+}
+
 asmlinkage void do_notify_resume(struct pt_regs *regs,
                                 unsigned long thread_flags)
 {
@@ -938,6 +951,19 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
                        if (thread_flags & _TIF_NOTIFY_RESUME) {
                                tracehook_notify_resume(regs);
                                rseq_handle_notify_resume(NULL, regs);
+
+                               /*
+                                * If we reschedule after checking the affinity
+                                * then we must ensure that TIF_NOTIFY_RESUME
+                                * is set so that we check the affinity again.
+                                * Since tracehook_notify_resume() clears the
+                                * flag, ensure that the compiler doesn't move
+                                * it after the affinity check.
+                                */
+                               barrier();
+
+                               if (cpu_affinity_invalid(regs))
+                                       force_sig(SIGKILL);
                        }
 
                        if (thread_flags & _TIF_FOREIGN_FPSTATE)
index dcd7041..2fe8fab 100644 (file)
@@ -120,9 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
         * page tables.
         */
        secondary_data.task = idle;
-       secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
        update_cpu_boot_status(CPU_MMU_OFF);
-       __flush_dcache_area(&secondary_data, sizeof(secondary_data));
 
        /* Now bring the CPU into our world */
        ret = boot_secondary(cpu, idle);
@@ -142,8 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 
        pr_crit("CPU%u: failed to come online\n", cpu);
        secondary_data.task = NULL;
-       secondary_data.stack = NULL;
-       __flush_dcache_area(&secondary_data, sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (status == CPU_MMU_OFF)
                status = READ_ONCE(__early_cpu_boot_status);
@@ -202,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
        const struct cpu_operations *ops;
-       unsigned int cpu;
-
-       cpu = task_cpu(current);
-       set_my_cpu_offset(per_cpu_offset(cpu));
+       unsigned int cpu = smp_processor_id();
 
        /*
         * All kernel threads share the same mm context; grab a
@@ -452,6 +445,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
+       /*
+        * The runtime per-cpu areas have been allocated by
+        * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+        * freed shortly, so we must move over to the runtime per-cpu area.
+        */
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        cpuinfo_store_boot_cpu();
 
index c45a835..7e1624e 100644 (file)
@@ -36,7 +36,7 @@ static void write_pen_release(u64 val)
        unsigned long size = sizeof(secondary_holding_pen_release);
 
        secondary_holding_pen_release = val;
-       __flush_dcache_area(start, size);
+       dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size);
 }
 
 
@@ -90,8 +90,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
         * the boot protocol.
         */
        writeq_relaxed(pa_holding_pen, release_addr);
-       __flush_dcache_area((__force void *)release_addr,
-                           sizeof(*release_addr));
+       dcache_clean_inval_poc((__force unsigned long)release_addr,
+                           (__force unsigned long)release_addr +
+                                   sizeof(*release_addr));
 
        /*
         * Send an event to wake up the secondary CPU.
index de07147..b189de5 100644 (file)
@@ -68,13 +68,17 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
        unsigned long fp = frame->fp;
        struct stack_info info;
 
-       if (fp & 0xf)
-               return -EINVAL;
-
        if (!tsk)
                tsk = current;
 
-       if (!on_accessible_stack(tsk, fp, &info))
+       /* Final frame; nothing to unwind */
+       if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
+               return -ENOENT;
+
+       if (fp & 0x7)
+               return -EINVAL;
+
+       if (!on_accessible_stack(tsk, fp, 16, &info))
                return -EINVAL;
 
        if (test_bit(info.type, frame->stacks_done))
@@ -128,12 +132,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
        frame->pc = ptrauth_strip_insn_pac(frame->pc);
 
-       /*
-        * This is a terminal record, so we have finished unwinding.
-        */
-       if (!frame->fp && !frame->pc)
-               return -ENOENT;
-
        return 0;
 }
 NOKPROBE_SYMBOL(unwind_frame);
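
The copy_thread() hunk earlier and this check are two halves of one scheme: each task's initial frame record is childregs->stackframe, so the unwinder stops when the frame pointer comes back to that address instead of waiting for a zeroed fp/pc pair. The sketch below is a condensed, illustrative step assuming the AAPCS { fp, lr } frame-record layout; it omits the on_accessible_stack() and stack-recursion checks of the real unwind_frame().

/* Illustrative only: the ordering of the new termination and
 * alignment checks before a frame record is followed. */
static int unwind_step(struct task_struct *tsk, unsigned long *fp,
		       unsigned long *pc)
{
	unsigned long *record;

	/* Final frame, set up by copy_thread(); nothing left to unwind. */
	if (*fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	/* Frame records are now only required to be 8-byte aligned. */
	if (*fp & 0x7)
		return -EINVAL;

	record = (unsigned long *)*fp;
	*fp = record[0];	/* caller's frame pointer */
	*pc = record[1];	/* return address */
	return 0;
}
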
index 265fe3e..db5159a 100644 (file)
@@ -41,7 +41,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
                        dsb(ish);
                }
 
-               ret = __flush_cache_user_range(start, start + chunk);
+               ret = caches_clean_inval_user_pou(start, start + chunk);
                if (ret)
                        return ret;
 
index 1cb39c0..dc2bc55 100644 (file)
@@ -692,6 +692,15 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
        }
 }
 
+static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
+{
+       if (likely(!vcpu_mode_is_32bit(vcpu)))
+               return false;
+
+       return !system_supports_32bit_el0() ||
+               static_branch_unlikely(&arm64_mismatched_32bit_el0);
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:      The VCPU pointer
@@ -875,7 +884,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 * with the asymmetric AArch32 case), return to userspace with
                 * a fatal error.
                 */
-               if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
+               if (vcpu_mode_is_bad_32bit(vcpu)) {
                        /*
                         * As we have caught the guest red-handed, decide that
                         * it isn't fit for purpose anymore by making the vcpu
@@ -1064,7 +1073,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
                if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                        stage2_unmap_vm(vcpu->kvm);
                else
-                       __flush_icache_all();
+                       icache_inval_all_pou();
        }
 
        vcpu_reset_hcr(vcpu);
index 36cef69..958734f 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/assembler.h>
 #include <asm/alternative.h>
 
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
index 7488f53..8143ebd 100644 (file)
@@ -134,7 +134,8 @@ static void update_nvhe_init_params(void)
        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
-               __flush_dcache_area(params, sizeof(*params));
+               dcache_clean_inval_poc((unsigned long)params,
+                                   (unsigned long)params + sizeof(*params));
        }
 }
 
index 83dc3b2..38ed0f6 100644 (file)
@@ -104,7 +104,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
         * you should be running with VHE enabled.
         */
        if (icache_is_vpipt())
-               __flush_icache_all();
+               icache_inval_all_pou();
 
        __tlb_switch_to_host(&cxt);
 }
index c37c1dc..e9ad7fb 100644 (file)
@@ -839,8 +839,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
        stage2_put_pte(ptep, mmu, addr, level, mm_ops);
 
        if (need_flush) {
-               __flush_dcache_area(kvm_pte_follow(pte, mm_ops),
-                                   kvm_granule_size(level));
+               kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
+
+               dcache_clean_inval_poc((unsigned long)pte_follow,
+                                   (unsigned long)pte_follow +
+                                           kvm_granule_size(level));
        }
 
        if (childp)
@@ -988,11 +991,15 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
        struct kvm_pgtable *pgt = arg;
        struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
        kvm_pte_t pte = *ptep;
+       kvm_pte_t *pte_follow;
 
        if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
                return 0;
 
-       __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
+       pte_follow = kvm_pte_follow(pte, mm_ops);
+       dcache_clean_inval_poc((unsigned long)pte_follow,
+                           (unsigned long)pte_follow +
+                                   kvm_granule_size(level));
        return 0;
 }
 
index d31e116..01c596a 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 lib-y          := clear_user.o delay.o copy_from_user.o                \
                   copy_to_user.o copy_in_user.o copy_page.o            \
-                  clear_page.o csum.o memchr.o memcpy.o memmove.o      \
+                  clear_page.o csum.o memchr.o memcpy.o                \
                   memset.o memcmp.o strcmp.o strncmp.o strlen.o        \
                   strnlen.o strchr.o strrchr.o tishift.o
 
index af9afcb..a7efb2a 100644 (file)
@@ -1,12 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Based on arch/arm/lib/clear_user.S
- *
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
  */
-#include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <linux/linkage.h>
 #include <asm/assembler.h>
 
        .text
  *
  * Alignment fixed up by hardware.
  */
+
+       .p2align 4
+       // Alignment is for the loop, but since the prologue (including BTI)
+       // is also 16 bytes we can keep any padding outside the function
 SYM_FUNC_START(__arch_clear_user)
-       mov     x2, x1                  // save the size for fixup return
+       add     x2, x0, x1
        subs    x1, x1, #8
        b.mi    2f
 1:
-user_ldst 9f, sttr, xzr, x0, 8
+USER(9f, sttr  xzr, [x0])
+       add     x0, x0, #8
        subs    x1, x1, #8
-       b.pl    1b
-2:     adds    x1, x1, #4
-       b.mi    3f
-user_ldst 9f, sttr, wzr, x0, 4
-       sub     x1, x1, #4
-3:     adds    x1, x1, #2
-       b.mi    4f
-user_ldst 9f, sttrh, wzr, x0, 2
-       sub     x1, x1, #2
-4:     adds    x1, x1, #1
-       b.mi    5f
-user_ldst 9f, sttrb, wzr, x0, 0
+       b.hi    1b
+USER(9f, sttr  xzr, [x2, #-8])
+       mov     x0, #0
+       ret
+
+2:     tbz     x1, #2, 3f
+USER(9f, sttr  wzr, [x0])
+USER(8f, sttr  wzr, [x2, #-4])
+       mov     x0, #0
+       ret
+
+3:     tbz     x1, #1, 4f
+USER(9f, sttrh wzr, [x0])
+4:     tbz     x1, #0, 5f
+USER(7f, sttrb wzr, [x2, #-1])
 5:     mov     x0, #0
        ret
 SYM_FUNC_END(__arch_clear_user)
@@ -45,6 +50,8 @@ EXPORT_SYMBOL(__arch_clear_user)
 
        .section .fixup,"ax"
        .align  2
-9:     mov     x0, x2                  // return the original size
+7:     sub     x0, x2, #5      // Adjust for faulting on the final byte...
+8:     add     x0, x0, #4      // ...or the second word of the 4-7 byte case
+9:     sub     x0, x2, x0
        ret
        .previous
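
For sizes of eight bytes and up, the rewritten __arch_clear_user() issues 8-byte zero stores from the start of the buffer and finishes with one store placed relative to the end, which may overlap the final store of the loop. Ignoring the unprivileged-store instructions and the fault fixups, the strategy looks roughly like this in C (a sketch, not the kernel routine):

#include <stdint.h>
#include <string.h>

/* Sketch of the >= 8 byte path: bulk 8-byte stores from the start and
 * one final 8-byte store anchored to the end of the buffer. */
static void zero_ge8(unsigned char *p, size_t n)
{
	unsigned char *end = p + n;
	uint64_t zero = 0;

	while (n > 8) {
		memcpy(p, &zero, sizeof(zero));
		p += 8;
		n -= 8;
	}
	/* May overlap the last store of the loop; harmless when zeroing. */
	memcpy(end - 8, &zero, sizeof(zero));
}
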
index edf6b97..7c2276f 100644 (file)
@@ -1,9 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Based on arch/arm/lib/memchr.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
  */
 
 #include <linux/linkage.h>
  * Returns:
  *     x0 - address of first occurrence of 'c' or 0
  */
+
+#define L(label) .L ## label
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+
+#define srcin          x0
+#define chrin          w1
+#define cntin          x2
+
+#define result         x0
+
+#define wordcnt                x3
+#define rep01          x4
+#define repchr         x5
+#define cur_word       x6
+#define cur_byte       w6
+#define tmp            x7
+#define tmp2           x8
+
+       .p2align 4
+       nop
 SYM_FUNC_START_WEAK_PI(memchr)
-       and     w1, w1, #0xff
-1:     subs    x2, x2, #1
-       b.mi    2f
-       ldrb    w3, [x0], #1
-       cmp     w3, w1
-       b.ne    1b
-       sub     x0, x0, #1
+       and     chrin, chrin, #0xff
+       lsr     wordcnt, cntin, #3
+       cbz     wordcnt, L(byte_loop)
+       mov     rep01, #REP8_01
+       mul     repchr, x1, rep01
+       and     cntin, cntin, #7
+L(word_loop):
+       ldr     cur_word, [srcin], #8
+       sub     wordcnt, wordcnt, #1
+       eor     cur_word, cur_word, repchr
+       sub     tmp, cur_word, rep01
+       orr     tmp2, cur_word, #REP8_7f
+       bics    tmp, tmp, tmp2
+       b.ne    L(found_word)
+       cbnz    wordcnt, L(word_loop)
+L(byte_loop):
+       cbz     cntin, L(not_found)
+       ldrb    cur_byte, [srcin], #1
+       sub     cntin, cntin, #1
+       cmp     cur_byte, chrin
+       b.ne    L(byte_loop)
+       sub     srcin, srcin, #1
+       ret
+L(found_word):
+CPU_LE(        rev     tmp, tmp)
+       clz     tmp, tmp
+       sub     tmp, tmp, #64
+       add     result, srcin, tmp, asr #3
        ret
-2:     mov     x0, #0
+L(not_found):
+       mov     result, #0
        ret
 SYM_FUNC_END_PI(memchr)
 EXPORT_SYMBOL_NOKASAN(memchr)
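
The word loop above uses the standard byte-in-word detection trick: XOR the loaded word with the search byte replicated into every lane, then test whether any lane became zero via the (x - 0x01..01) & ~x & 0x80..80 identity (the sub/orr/bics sequence). A small standalone C illustration of that test:

#include <stdbool.h>
#include <stdint.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_7f 0x7f7f7f7f7f7f7f7fULL

/* True if any byte of 'word' equals 'c': the XOR turns matching bytes
 * into zero, and the subtract/and-not test fires only for zero bytes. */
static bool word_has_byte(uint64_t word, unsigned char c)
{
	uint64_t x = word ^ (REP8_01 * c);

	return ((x - REP8_01) & ~(x | REP8_7f)) != 0;
}

The L(found_word) path then byte-reverses the syndrome on little-endian and uses clz to turn the first set 0x80 bit back into the address of the matching byte.
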
index c0671e7..7d95638 100644 (file)
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/memcmp.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-/*
-* compare memory areas(when two memory areas' offset are different,
-* alignment handled by the hardware)
-*
-* Parameters:
-*  x0 - const memory area 1 pointer
-*  x1 - const memory area 2 pointer
-*  x2 - the maximal compare byte length
-* Returns:
-*  x0 - a compare result, maybe less than, equal to, or greater than ZERO
-*/
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
+ */
+
+#define L(label) .L ## label
 
 /* Parameters and result.  */
-src1           .req    x0
-src2           .req    x1
-limit          .req    x2
-result         .req    x0
+#define src1           x0
+#define src2           x1
+#define limit          x2
+#define result         w0
 
 /* Internal variables.  */
-data1          .req    x3
-data1w         .req    w3
-data2          .req    x4
-data2w         .req    w4
-has_nul                .req    x5
-diff           .req    x6
-endloop                .req    x7
-tmp1           .req    x8
-tmp2           .req    x9
-tmp3           .req    x10
-pos            .req    x11
-limit_wd       .req    x12
-mask           .req    x13
+#define data1          x3
+#define data1w         w3
+#define data1h         x4
+#define data2          x5
+#define data2w         w5
+#define data2h         x6
+#define tmp1           x7
+#define tmp2           x8
 
 SYM_FUNC_START_WEAK_PI(memcmp)
-       cbz     limit, .Lret0
-       eor     tmp1, src1, src2
-       tst     tmp1, #7
-       b.ne    .Lmisaligned8
-       ands    tmp1, src1, #7
-       b.ne    .Lmutual_align
-       sub     limit_wd, limit, #1 /* limit != 0, so no underflow.  */
-       lsr     limit_wd, limit_wd, #3 /* Convert to Dwords.  */
-       /*
-       * The input source addresses are at alignment boundary.
-       * Directly compare eight bytes each time.
-       */
-.Lloop_aligned:
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-.Lstart_realigned:
-       subs    limit_wd, limit_wd, #1
-       eor     diff, data1, data2      /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, cs  /* Last Dword or differences.  */
-       cbz     endloop, .Lloop_aligned
-
-       /* Not reached the limit, must have found a diff.  */
-       tbz     limit_wd, #63, .Lnot_limit
-
-       /* Limit % 8 == 0 => the diff is in the last 8 bytes. */
-       ands    limit, limit, #7
-       b.eq    .Lnot_limit
-       /*
-       * The remained bytes less than 8. It is needed to extract valid data
-       * from last eight bytes of the intended memory range.
-       */
-       lsl     limit, limit, #3        /* bytes-> bits.  */
-       mov     mask, #~0
-CPU_BE( lsr    mask, mask, limit )
-CPU_LE( lsl    mask, mask, limit )
-       bic     data1, data1, mask
-       bic     data2, data2, mask
-
-       orr     diff, diff, mask
-       b       .Lnot_limit
-
-.Lmutual_align:
-       /*
-       * Sources are mutually aligned, but are not currently at an
-       * alignment boundary. Round down the addresses and then mask off
-       * the bytes that precede the start point.
-       */
-       bic     src1, src1, #7
-       bic     src2, src2, #7
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-       /*
-       * We can not add limit with alignment offset(tmp1) here. Since the
-       * addition probably make the limit overflown.
-       */
-       sub     limit_wd, limit, #1/*limit != 0, so no underflow.*/
-       and     tmp3, limit_wd, #7
-       lsr     limit_wd, limit_wd, #3
-       add     tmp3, tmp3, tmp1
-       add     limit_wd, limit_wd, tmp3, lsr #3
-       add     limit, limit, tmp1/* Adjust the limit for the extra.  */
-
-       lsl     tmp1, tmp1, #3/* Bytes beyond alignment -> bits.*/
-       neg     tmp1, tmp1/* Bits to alignment -64.  */
-       mov     tmp2, #~0
-       /*mask off the non-intended bytes before the start address.*/
-CPU_BE( lsl    tmp2, tmp2, tmp1 )/*Big-endian.Early bytes are at MSB*/
-       /* Little-endian.  Early bytes are at LSB.  */
-CPU_LE( lsr    tmp2, tmp2, tmp1 )
-
-       orr     data1, data1, tmp2
-       orr     data2, data2, tmp2
-       b       .Lstart_realigned
-
-       /*src1 and src2 have different alignment offset.*/
-.Lmisaligned8:
-       cmp     limit, #8
-       b.lo    .Ltiny8proc /*limit < 8: compare byte by byte*/
-
-       and     tmp1, src1, #7
-       neg     tmp1, tmp1
-       add     tmp1, tmp1, #8/*valid length in the first 8 bytes of src1*/
-       and     tmp2, src2, #7
-       neg     tmp2, tmp2
-       add     tmp2, tmp2, #8/*valid length in the first 8 bytes of src2*/
-       subs    tmp3, tmp1, tmp2
-       csel    pos, tmp1, tmp2, hi /*Choose the maximum.*/
-
-       sub     limit, limit, pos
-       /*compare the proceeding bytes in the first 8 byte segment.*/
-.Ltinycmp:
-       ldrb    data1w, [src1], #1
-       ldrb    data2w, [src2], #1
-       subs    pos, pos, #1
-       ccmp    data1w, data2w, #0, ne  /* NZCV = 0b0000.  */
-       b.eq    .Ltinycmp
-       cbnz    pos, 1f /*diff occurred before the last byte.*/
-       cmp     data1w, data2w
-       b.eq    .Lstart_align
-1:
-       sub     result, data1, data2
+       subs    limit, limit, 8
+       b.lo    L(less8)
+
+       ldr     data1, [src1], 8
+       ldr     data2, [src2], 8
+       cmp     data1, data2
+       b.ne    L(return)
+
+       subs    limit, limit, 8
+       b.gt    L(more16)
+
+       ldr     data1, [src1, limit]
+       ldr     data2, [src2, limit]
+       b       L(return)
+
+L(more16):
+       ldr     data1, [src1], 8
+       ldr     data2, [src2], 8
+       cmp     data1, data2
+       bne     L(return)
+
+       /* Jump directly to comparing the last 16 bytes for 32 byte (or less)
+          strings.  */
+       subs    limit, limit, 16
+       b.ls    L(last_bytes)
+
+       /* We overlap loads between 0-32 bytes at either side of SRC1 when we
+          try to align, so limit it only to strings larger than 128 bytes.  */
+       cmp     limit, 96
+       b.ls    L(loop16)
+
+       /* Align src1 and adjust src2 with bytes not yet done.  */
+       and     tmp1, src1, 15
+       add     limit, limit, tmp1
+       sub     src1, src1, tmp1
+       sub     src2, src2, tmp1
+
+       /* Loop performing 16 bytes per iteration using aligned src1.
+          Limit is pre-decremented by 16 and must be larger than zero.
+          Exit if <= 16 bytes left to do or if the data is not equal.  */
+       .p2align 4
+L(loop16):
+       ldp     data1, data1h, [src1], 16
+       ldp     data2, data2h, [src2], 16
+       subs    limit, limit, 16
+       ccmp    data1, data2, 0, hi
+       ccmp    data1h, data2h, 0, eq
+       b.eq    L(loop16)
+
+       cmp     data1, data2
+       bne     L(return)
+       mov     data1, data1h
+       mov     data2, data2h
+       cmp     data1, data2
+       bne     L(return)
+
+       /* Compare last 1-16 bytes using unaligned access.  */
+L(last_bytes):
+       add     src1, src1, limit
+       add     src2, src2, limit
+       ldp     data1, data1h, [src1]
+       ldp     data2, data2h, [src2]
+       cmp     data1, data2
+       bne     L(return)
+       mov     data1, data1h
+       mov     data2, data2h
+       cmp     data1, data2
+
+       /* Compare data bytes and set return value to 0, -1 or 1.  */
+L(return):
+#ifndef __AARCH64EB__
+       rev     data1, data1
+       rev     data2, data2
+#endif
+       cmp     data1, data2
+L(ret_eq):
+       cset    result, ne
+       cneg    result, result, lo
        ret
 
-.Lstart_align:
-       lsr     limit_wd, limit, #3
-       cbz     limit_wd, .Lremain8
-
-       ands    xzr, src1, #7
-       b.eq    .Lrecal_offset
-       /*process more leading bytes to make src1 aligned...*/
-       add     src1, src1, tmp3 /*backwards src1 to alignment boundary*/
-       add     src2, src2, tmp3
-       sub     limit, limit, tmp3
-       lsr     limit_wd, limit, #3
-       cbz     limit_wd, .Lremain8
-       /*load 8 bytes from aligned SRC1..*/
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-
-       subs    limit_wd, limit_wd, #1
-       eor     diff, data1, data2  /*Non-zero if differences found.*/
-       csinv   endloop, diff, xzr, ne
-       cbnz    endloop, .Lunequal_proc
-       /*How far is the current SRC2 from the alignment boundary...*/
-       and     tmp3, tmp3, #7
-
-.Lrecal_offset:/*src1 is aligned now..*/
-       neg     pos, tmp3
-.Lloopcmp_proc:
-       /*
-       * Divide the eight bytes into two parts. First,backwards the src2
-       * to an alignment boundary,load eight bytes and compare from
-       * the SRC2 alignment boundary. If all 8 bytes are equal,then start
-       * the second part's comparison. Otherwise finish the comparison.
-       * This special handle can garantee all the accesses are in the
-       * thread/task space in avoid to overrange access.
-       */
-       ldr     data1, [src1,pos]
-       ldr     data2, [src2,pos]
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       cbnz    diff, .Lnot_limit
-
-       /*The second part process*/
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       subs    limit_wd, limit_wd, #1
-       csinv   endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
-       cbz     endloop, .Lloopcmp_proc
-.Lunequal_proc:
-       cbz     diff, .Lremain8
-
-/* There is difference occurred in the latest comparison. */
-.Lnot_limit:
-/*
-* For little endian,reverse the low significant equal bits into MSB,then
-* following CLZ can find how many equal bits exist.
-*/
-CPU_LE( rev    diff, diff )
-CPU_LE( rev    data1, data1 )
-CPU_LE( rev    data2, data2 )
-
-       /*
-       * The MS-non-zero bit of DIFF marks either the first bit
-       * that is different, or the end of the significant data.
-       * Shifting left now will bring the critical information into the
-       * top bits.
-       */
-       clz     pos, diff
-       lsl     data1, data1, pos
-       lsl     data2, data2, pos
-       /*
-       * We need to zero-extend (char is unsigned) the value and then
-       * perform a signed subtraction.
-       */
-       lsr     data1, data1, #56
-       sub     result, data1, data2, lsr #56
+       .p2align 4
+       /* Compare up to 8 bytes.  Limit is [-8..-1].  */
+L(less8):
+       adds    limit, limit, 4
+       b.lo    L(less4)
+       ldr     data1w, [src1], 4
+       ldr     data2w, [src2], 4
+       cmp     data1w, data2w
+       b.ne    L(return)
+       sub     limit, limit, 4
+L(less4):
+       adds    limit, limit, 4
+       beq     L(ret_eq)
+L(byte_loop):
+       ldrb    data1w, [src1], 1
+       ldrb    data2w, [src2], 1
+       subs    limit, limit, 1
+       ccmp    data1w, data2w, 0, ne   /* NZCV = 0b0000.  */
+       b.eq    L(byte_loop)
+       sub     result, data1w, data2w
        ret
 
-.Lremain8:
-       /* Limit % 8 == 0 =>. all data are equal.*/
-       ands    limit, limit, #7
-       b.eq    .Lret0
-
-.Ltiny8proc:
-       ldrb    data1w, [src1], #1
-       ldrb    data2w, [src2], #1
-       subs    limit, limit, #1
-
-       ccmp    data1w, data2w, #0, ne  /* NZCV = 0b0000. */
-       b.eq    .Ltiny8proc
-       sub     result, data1, data2
-       ret
-.Lret0:
-       mov     result, #0
-       ret
 SYM_FUNC_END_PI(memcmp)
 EXPORT_SYMBOL_NOKASAN(memcmp)
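
Two details of the new memcmp are easy to miss in the assembly: data words are compared as if they were big-endian integers (hence the rev instructions before the final cmp, so the byte that comes first in memory is the most significant), and the tail is handled by re-reading the last bytes of both buffers with loads that may overlap data already known to be equal. The C rendering below is a sketch of that structure, scaled down to 8-byte steps and without the ldp/ccmp pipelining; it assumes n >= 8.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Load 8 bytes so the byte that comes first in memory is the most
 * significant; comparing such values matches memcmp ordering. */
static uint64_t load_msb_first(const unsigned char *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	v = __builtin_bswap64(v);
#endif
	return v;
}

/* Sketch of the new structure for n >= 8: whole words first, then an
 * overlapping re-read of the last 8 bytes instead of a byte loop. */
static int memcmp_sketch(const unsigned char *s1, const unsigned char *s2,
			 size_t n)
{
	uint64_t a, b;
	size_t i;

	for (i = 0; i + 8 <= n; i += 8) {
		a = load_msb_first(s1 + i);
		b = load_msb_first(s2 + i);
		if (a != b)
			return a < b ? -1 : 1;
	}
	if (i == n)
		return 0;

	a = load_msb_first(s1 + n - 8);	/* overlaps bytes already equal */
	b = load_msb_first(s2 + n - 8);
	return a == b ? 0 : (a < b ? -1 : 1);
}
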
index dc8d2a2..b82fd64 100644 (file)
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/memcpy.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#include <asm/cache.h>
 
-/*
- * Copy a buffer from src to dest (alignment handled by the hardware)
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
  *
- * Parameters:
- *     x0 - dest
- *     x1 - src
- *     x2 - n
- * Returns:
- *     x0 - dest
  */
-       .macro ldrb1 reg, ptr, val
-       ldrb  \reg, [\ptr], \val
-       .endm
-
-       .macro strb1 reg, ptr, val
-       strb \reg, [\ptr], \val
-       .endm
 
-       .macro ldrh1 reg, ptr, val
-       ldrh  \reg, [\ptr], \val
-       .endm
+#define L(label) .L ## label
 
-       .macro strh1 reg, ptr, val
-       strh \reg, [\ptr], \val
-       .endm
+#define dstin  x0
+#define src    x1
+#define count  x2
+#define dst    x3
+#define srcend x4
+#define dstend x5
+#define A_l    x6
+#define A_lw   w6
+#define A_h    x7
+#define B_l    x8
+#define B_lw   w8
+#define B_h    x9
+#define C_l    x10
+#define C_lw   w10
+#define C_h    x11
+#define D_l    x12
+#define D_h    x13
+#define E_l    x14
+#define E_h    x15
+#define F_l    x16
+#define F_h    x17
+#define G_l    count
+#define G_h    dst
+#define H_l    src
+#define H_h    srcend
+#define tmp1   x14
 
-       .macro ldr1 reg, ptr, val
-       ldr \reg, [\ptr], \val
-       .endm
+/* This implementation handles overlaps and supports both memcpy and memmove
+   from a single entry point.  It uses unaligned accesses and branchless
+   sequences to keep the code small and simple and to improve performance.
 
-       .macro str1 reg, ptr, val
-       str \reg, [\ptr], \val
-       .endm
+   Copies are split into 3 main cases: small copies of up to 32 bytes, medium
+   copies of up to 128 bytes, and large copies.  The overhead of the overlap
+   check is negligible since it is only required for large copies.
 
-       .macro ldp1 reg1, reg2, ptr, val
-       ldp \reg1, \reg2, [\ptr], \val
-       .endm
-
-       .macro stp1 reg1, reg2, ptr, val
-       stp \reg1, \reg2, [\ptr], \val
-       .endm
+   Large copies use a software pipelined loop processing 64 bytes per iteration.
+   The destination pointer is 16-byte aligned to minimize unaligned accesses.
+   The loop tail is handled by always copying 64 bytes from the end.
+*/
 
+SYM_FUNC_START_ALIAS(__memmove)
+SYM_FUNC_START_WEAK_ALIAS_PI(memmove)
 SYM_FUNC_START_ALIAS(__memcpy)
 SYM_FUNC_START_WEAK_PI(memcpy)
-#include "copy_template.S"
+       add     srcend, src, count
+       add     dstend, dstin, count
+       cmp     count, 128
+       b.hi    L(copy_long)
+       cmp     count, 32
+       b.hi    L(copy32_128)
+
+       /* Small copies: 0..32 bytes.  */
+       cmp     count, 16
+       b.lo    L(copy16)
+       ldp     A_l, A_h, [src]
+       ldp     D_l, D_h, [srcend, -16]
+       stp     A_l, A_h, [dstin]
+       stp     D_l, D_h, [dstend, -16]
+       ret
+
+       /* Copy 8-15 bytes.  */
+L(copy16):
+       tbz     count, 3, L(copy8)
+       ldr     A_l, [src]
+       ldr     A_h, [srcend, -8]
+       str     A_l, [dstin]
+       str     A_h, [dstend, -8]
+       ret
+
+       .p2align 3
+       /* Copy 4-7 bytes.  */
+L(copy8):
+       tbz     count, 2, L(copy4)
+       ldr     A_lw, [src]
+       ldr     B_lw, [srcend, -4]
+       str     A_lw, [dstin]
+       str     B_lw, [dstend, -4]
+       ret
+
+       /* Copy 0..3 bytes using a branchless sequence.  */
+L(copy4):
+       cbz     count, L(copy0)
+       lsr     tmp1, count, 1
+       ldrb    A_lw, [src]
+       ldrb    C_lw, [srcend, -1]
+       ldrb    B_lw, [src, tmp1]
+       strb    A_lw, [dstin]
+       strb    B_lw, [dstin, tmp1]
+       strb    C_lw, [dstend, -1]
+L(copy0):
+       ret
+
+       .p2align 4
+       /* Medium copies: 33..128 bytes.  */
+L(copy32_128):
+       ldp     A_l, A_h, [src]
+       ldp     B_l, B_h, [src, 16]
+       ldp     C_l, C_h, [srcend, -32]
+       ldp     D_l, D_h, [srcend, -16]
+       cmp     count, 64
+       b.hi    L(copy128)
+       stp     A_l, A_h, [dstin]
+       stp     B_l, B_h, [dstin, 16]
+       stp     C_l, C_h, [dstend, -32]
+       stp     D_l, D_h, [dstend, -16]
        ret
+
+       .p2align 4
+       /* Copy 65..128 bytes.  */
+L(copy128):
+       ldp     E_l, E_h, [src, 32]
+       ldp     F_l, F_h, [src, 48]
+       cmp     count, 96
+       b.ls    L(copy96)
+       ldp     G_l, G_h, [srcend, -64]
+       ldp     H_l, H_h, [srcend, -48]
+       stp     G_l, G_h, [dstend, -64]
+       stp     H_l, H_h, [dstend, -48]
+L(copy96):
+       stp     A_l, A_h, [dstin]
+       stp     B_l, B_h, [dstin, 16]
+       stp     E_l, E_h, [dstin, 32]
+       stp     F_l, F_h, [dstin, 48]
+       stp     C_l, C_h, [dstend, -32]
+       stp     D_l, D_h, [dstend, -16]
+       ret
+
+       .p2align 4
+       /* Copy more than 128 bytes.  */
+L(copy_long):
+       /* Use backwards copy if there is an overlap.  */
+       sub     tmp1, dstin, src
+       cbz     tmp1, L(copy0)
+       cmp     tmp1, count
+       b.lo    L(copy_long_backwards)
+
+       /* Copy 16 bytes and then align dst to 16-byte alignment.  */
+
+       ldp     D_l, D_h, [src]
+       and     tmp1, dstin, 15
+       bic     dst, dstin, 15
+       sub     src, src, tmp1
+       add     count, count, tmp1      /* Count is now 16 too large.  */
+       ldp     A_l, A_h, [src, 16]
+       stp     D_l, D_h, [dstin]
+       ldp     B_l, B_h, [src, 32]
+       ldp     C_l, C_h, [src, 48]
+       ldp     D_l, D_h, [src, 64]!
+       subs    count, count, 128 + 16  /* Test and readjust count.  */
+       b.ls    L(copy64_from_end)
+
+L(loop64):
+       stp     A_l, A_h, [dst, 16]
+       ldp     A_l, A_h, [src, 16]
+       stp     B_l, B_h, [dst, 32]
+       ldp     B_l, B_h, [src, 32]
+       stp     C_l, C_h, [dst, 48]
+       ldp     C_l, C_h, [src, 48]
+       stp     D_l, D_h, [dst, 64]!
+       ldp     D_l, D_h, [src, 64]!
+       subs    count, count, 64
+       b.hi    L(loop64)
+
+       /* Write the last iteration and copy 64 bytes from the end.  */
+L(copy64_from_end):
+       ldp     E_l, E_h, [srcend, -64]
+       stp     A_l, A_h, [dst, 16]
+       ldp     A_l, A_h, [srcend, -48]
+       stp     B_l, B_h, [dst, 32]
+       ldp     B_l, B_h, [srcend, -32]
+       stp     C_l, C_h, [dst, 48]
+       ldp     C_l, C_h, [srcend, -16]
+       stp     D_l, D_h, [dst, 64]
+       stp     E_l, E_h, [dstend, -64]
+       stp     A_l, A_h, [dstend, -48]
+       stp     B_l, B_h, [dstend, -32]
+       stp     C_l, C_h, [dstend, -16]
+       ret
+
+       .p2align 4
+
+       /* Large backwards copy for overlapping copies.
+          Copy 16 bytes and then align dst to 16-byte alignment.  */
+L(copy_long_backwards):
+       ldp     D_l, D_h, [srcend, -16]
+       and     tmp1, dstend, 15
+       sub     srcend, srcend, tmp1
+       sub     count, count, tmp1
+       ldp     A_l, A_h, [srcend, -16]
+       stp     D_l, D_h, [dstend, -16]
+       ldp     B_l, B_h, [srcend, -32]
+       ldp     C_l, C_h, [srcend, -48]
+       ldp     D_l, D_h, [srcend, -64]!
+       sub     dstend, dstend, tmp1
+       subs    count, count, 128
+       b.ls    L(copy64_from_start)
+
+L(loop64_backwards):
+       stp     A_l, A_h, [dstend, -16]
+       ldp     A_l, A_h, [srcend, -16]
+       stp     B_l, B_h, [dstend, -32]
+       ldp     B_l, B_h, [srcend, -32]
+       stp     C_l, C_h, [dstend, -48]
+       ldp     C_l, C_h, [srcend, -48]
+       stp     D_l, D_h, [dstend, -64]!
+       ldp     D_l, D_h, [srcend, -64]!
+       subs    count, count, 64
+       b.hi    L(loop64_backwards)
+
+       /* Write the last iteration and copy 64 bytes from the start.  */
+L(copy64_from_start):
+       ldp     G_l, G_h, [src, 48]
+       stp     A_l, A_h, [dstend, -16]
+       ldp     A_l, A_h, [src, 32]
+       stp     B_l, B_h, [dstend, -32]
+       ldp     B_l, B_h, [src, 16]
+       stp     C_l, C_h, [dstend, -48]
+       ldp     C_l, C_h, [src]
+       stp     D_l, D_h, [dstend, -64]
+       stp     G_l, G_h, [dstin, 48]
+       stp     A_l, A_h, [dstin, 32]
+       stp     B_l, B_h, [dstin, 16]
+       stp     C_l, C_h, [dstin]
+       ret
+
 SYM_FUNC_END_PI(memcpy)
 EXPORT_SYMBOL(memcpy)
 SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(__memcpy)
+SYM_FUNC_END_ALIAS_PI(memmove)
+EXPORT_SYMBOL(memmove)
+SYM_FUNC_END_ALIAS(__memmove)
+EXPORT_SYMBOL(__memmove)
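
None of the small or medium cases above needs a byte loop: for each size bracket, one block of loads anchored at the start plus one anchored at the end covers the whole buffer, with the two blocks overlapping as required. Because every load is issued before any store, the same sequences are also safe for overlapping buffers, which is what lets memmove and memcpy share a single entry point; only copies larger than 128 bytes take the explicit overlap check and the separate forwards/backwards loops. A minimal C illustration of the 8 to 16 byte case:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* "Anchor at both ends" copy for 8 <= n <= 16: both loads happen before
 * either store, so this also gives memmove semantics for overlaps. */
static void copy_8_to_16(void *dst, const void *src, size_t n)
{
	uint64_t head, tail;

	memcpy(&head, src, sizeof(head));
	memcpy(&tail, (const unsigned char *)src + n - 8, sizeof(tail));
	memcpy(dst, &head, sizeof(head));
	memcpy((unsigned char *)dst + n - 8, &tail, sizeof(tail));
}
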
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
deleted file mode 100644 (file)
index 1035dce..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
- *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cache.h>
-
-/*
- * Move a buffer from src to test (alignment handled by the hardware).
- * If dest <= src, call memcpy, otherwise copy in reverse order.
- *
- * Parameters:
- *     x0 - dest
- *     x1 - src
- *     x2 - n
- * Returns:
- *     x0 - dest
- */
-dstin  .req    x0
-src    .req    x1
-count  .req    x2
-tmp1   .req    x3
-tmp1w  .req    w3
-tmp2   .req    x4
-tmp2w  .req    w4
-tmp3   .req    x5
-tmp3w  .req    w5
-dst    .req    x6
-
-A_l    .req    x7
-A_h    .req    x8
-B_l    .req    x9
-B_h    .req    x10
-C_l    .req    x11
-C_h    .req    x12
-D_l    .req    x13
-D_h    .req    x14
-
-SYM_FUNC_START_ALIAS(__memmove)
-SYM_FUNC_START_WEAK_PI(memmove)
-       cmp     dstin, src
-       b.lo    __memcpy
-       add     tmp1, src, count
-       cmp     dstin, tmp1
-       b.hs    __memcpy                /* No overlap.  */
-
-       add     dst, dstin, count
-       add     src, src, count
-       cmp     count, #16
-       b.lo    .Ltail15  /*probably non-alignment accesses.*/
-
-       ands    tmp2, src, #15     /* Bytes to reach alignment.  */
-       b.eq    .LSrcAligned
-       sub     count, count, tmp2
-       /*
-       * process the aligned offset length to make the src aligned firstly.
-       * those extra instructions' cost is acceptable. It also make the
-       * coming accesses are based on aligned address.
-       */
-       tbz     tmp2, #0, 1f
-       ldrb    tmp1w, [src, #-1]!
-       strb    tmp1w, [dst, #-1]!
-1:
-       tbz     tmp2, #1, 2f
-       ldrh    tmp1w, [src, #-2]!
-       strh    tmp1w, [dst, #-2]!
-2:
-       tbz     tmp2, #2, 3f
-       ldr     tmp1w, [src, #-4]!
-       str     tmp1w, [dst, #-4]!
-3:
-       tbz     tmp2, #3, .LSrcAligned
-       ldr     tmp1, [src, #-8]!
-       str     tmp1, [dst, #-8]!
-
-.LSrcAligned:
-       cmp     count, #64
-       b.ge    .Lcpy_over64
-
-       /*
-       * Deal with small copies quickly by dropping straight into the
-       * exit block.
-       */
-.Ltail63:
-       /*
-       * Copy up to 48 bytes of data. At this point we only need the
-       * bottom 6 bits of count to be accurate.
-       */
-       ands    tmp1, count, #0x30
-       b.eq    .Ltail15
-       cmp     tmp1w, #0x20
-       b.eq    1f
-       b.lt    2f
-       ldp     A_l, A_h, [src, #-16]!
-       stp     A_l, A_h, [dst, #-16]!
-1:
-       ldp     A_l, A_h, [src, #-16]!
-       stp     A_l, A_h, [dst, #-16]!
-2:
-       ldp     A_l, A_h, [src, #-16]!
-       stp     A_l, A_h, [dst, #-16]!
-
-.Ltail15:
-       tbz     count, #3, 1f
-       ldr     tmp1, [src, #-8]!
-       str     tmp1, [dst, #-8]!
-1:
-       tbz     count, #2, 2f
-       ldr     tmp1w, [src, #-4]!
-       str     tmp1w, [dst, #-4]!
-2:
-       tbz     count, #1, 3f
-       ldrh    tmp1w, [src, #-2]!
-       strh    tmp1w, [dst, #-2]!
-3:
-       tbz     count, #0, .Lexitfunc
-       ldrb    tmp1w, [src, #-1]
-       strb    tmp1w, [dst, #-1]
-
-.Lexitfunc:
-       ret
-
-.Lcpy_over64:
-       subs    count, count, #128
-       b.ge    .Lcpy_body_large
-       /*
-       * Less than 128 bytes to copy, so handle 64 bytes here and then jump
-       * to the tail.
-       */
-       ldp     A_l, A_h, [src, #-16]
-       stp     A_l, A_h, [dst, #-16]
-       ldp     B_l, B_h, [src, #-32]
-       ldp     C_l, C_h, [src, #-48]
-       stp     B_l, B_h, [dst, #-32]
-       stp     C_l, C_h, [dst, #-48]
-       ldp     D_l, D_h, [src, #-64]!
-       stp     D_l, D_h, [dst, #-64]!
-
-       tst     count, #0x3f
-       b.ne    .Ltail63
-       ret
-
-       /*
-       * Critical loop. Start at a new cache line boundary. Assuming
-       * 64 bytes per line this ensures the entire loop is in one line.
-       */
-       .p2align        L1_CACHE_SHIFT
-.Lcpy_body_large:
-       /* pre-load 64 bytes data. */
-       ldp     A_l, A_h, [src, #-16]
-       ldp     B_l, B_h, [src, #-32]
-       ldp     C_l, C_h, [src, #-48]
-       ldp     D_l, D_h, [src, #-64]!
-1:
-       /*
-       * interlace the load of next 64 bytes data block with store of the last
-       * loaded 64 bytes data.
-       */
-       stp     A_l, A_h, [dst, #-16]
-       ldp     A_l, A_h, [src, #-16]
-       stp     B_l, B_h, [dst, #-32]
-       ldp     B_l, B_h, [src, #-32]
-       stp     C_l, C_h, [dst, #-48]
-       ldp     C_l, C_h, [src, #-48]
-       stp     D_l, D_h, [dst, #-64]!
-       ldp     D_l, D_h, [src, #-64]!
-       subs    count, count, #64
-       b.ge    1b
-       stp     A_l, A_h, [dst, #-16]
-       stp     B_l, B_h, [dst, #-32]
-       stp     C_l, C_h, [dst, #-48]
-       stp     D_l, D_h, [dst, #-64]!
-
-       tst     count, #0x3f
-       b.ne    .Ltail63
-       ret
-SYM_FUNC_END_PI(memmove)
-EXPORT_SYMBOL(memmove)
-SYM_FUNC_END_ALIAS(__memmove)
-EXPORT_SYMBOL(__memmove)
index 4e79566..d7bee21 100644 (file)
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/strcmp.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-/*
- * compare two strings
+/* Assumptions:
  *
- * Parameters:
- *     x0 - const string 1 pointer
- *    x1 - const string 2 pointer
- * Returns:
- * x0 - an integer less than, equal to, or greater than zero
- * if  s1  is  found, respectively, to be less than, to match,
- * or be greater than s2.
+ * ARMv8-a, AArch64
  */
 
+#define L(label) .L ## label
+
 #define REP8_01 0x0101010101010101
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
 /* Parameters and result.  */
-src1           .req    x0
-src2           .req    x1
-result         .req    x0
+#define src1           x0
+#define src2           x1
+#define result         x0
 
 /* Internal variables.  */
-data1          .req    x2
-data1w         .req    w2
-data2          .req    x3
-data2w         .req    w3
-has_nul                .req    x4
-diff           .req    x5
-syndrome       .req    x6
-tmp1           .req    x7
-tmp2           .req    x8
-tmp3           .req    x9
-zeroones       .req    x10
-pos            .req    x11
-
+#define data1          x2
+#define data1w         w2
+#define data2          x3
+#define data2w         w3
+#define has_nul                x4
+#define diff           x5
+#define syndrome       x6
+#define tmp1           x7
+#define tmp2           x8
+#define tmp3           x9
+#define zeroones       x10
+#define pos            x11
+
+       /* Start of performance-critical section  -- one 64B cache line.  */
+       .align 6
 SYM_FUNC_START_WEAK_PI(strcmp)
        eor     tmp1, src1, src2
        mov     zeroones, #REP8_01
        tst     tmp1, #7
-       b.ne    .Lmisaligned8
+       b.ne    L(misaligned8)
        ands    tmp1, src1, #7
-       b.ne    .Lmutual_align
-
-       /*
-       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
-       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
-       * can be done in parallel across the entire word.
-       */
-.Lloop_aligned:
+       b.ne    L(mutual_align)
+       /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+          (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+          can be done in parallel across the entire word.  */
+L(loop_aligned):
        ldr     data1, [src1], #8
        ldr     data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
        sub     tmp1, data1, zeroones
        orr     tmp2, data1, #REP8_7f
        eor     diff, data1, data2      /* Non-zero if differences found.  */
        bic     has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
        orr     syndrome, diff, has_nul
-       cbz     syndrome, .Lloop_aligned
-       b       .Lcal_cmpresult
+       cbz     syndrome, L(loop_aligned)
+       /* End of performance-critical section  -- one 64B cache line.  */
+
+L(end):
+#ifndef        __AARCH64EB__
+       rev     syndrome, syndrome
+       rev     data1, data1
+       /* The MS-non-zero bit of the syndrome marks either the first bit
+          that is different, or the top bit of the first zero byte.
+          Shifting left now will bring the critical information into the
+          top bits.  */
+       clz     pos, syndrome
+       rev     data2, data2
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /* But we need to zero-extend (char is unsigned) the value and then
+          perform a signed 32-bit subtraction.  */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+#else
+       /* For big-endian we cannot use the trick with the syndrome value
+          as carry-propagation can corrupt the upper bits if the trailing
+          bytes in the string contain 0x01.  */
+       /* However, if there is no NUL byte in the dword, we can generate
+          the result directly.  We can't just subtract the bytes as the
+          MSB might be significant.  */
+       cbnz    has_nul, 1f
+       cmp     data1, data2
+       cset    result, ne
+       cneg    result, result, lo
+       ret
+1:
+       /* Re-compute the NUL-byte detection, using a byte-reversed value.  */
+       rev     tmp3, data1
+       sub     tmp1, tmp3, zeroones
+       orr     tmp2, tmp3, #REP8_7f
+       bic     has_nul, tmp1, tmp2
+       rev     has_nul, has_nul
+       orr     syndrome, diff, has_nul
+       clz     pos, syndrome
+       /* The MS-non-zero bit of the syndrome marks either the first bit
+          that is different, or the top bit of the first zero byte.
+          Shifting left now will bring the critical information into the
+          top bits.  */
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /* But we need to zero-extend (char is unsigned) the value and then
+          perform a signed 32-bit subtraction.  */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+#endif
 
-.Lmutual_align:
-       /*
-       * Sources are mutually aligned, but are not currently at an
-       * alignment boundary.  Round down the addresses and then mask off
-       * the bytes that preceed the start point.
-       */
+L(mutual_align):
+       /* Sources are mutually aligned, but are not currently at an
+          alignment boundary.  Round down the addresses and then mask off
+          the bytes that precede the start point.  */
        bic     src1, src1, #7
        bic     src2, src2, #7
        lsl     tmp1, tmp1, #3          /* Bytes beyond alignment -> bits.  */
@@ -86,138 +125,52 @@ SYM_FUNC_START_WEAK_PI(strcmp)
        neg     tmp1, tmp1              /* Bits to alignment -64.  */
        ldr     data2, [src2], #8
        mov     tmp2, #~0
+#ifdef __AARCH64EB__
        /* Big-endian.  Early bytes are at MSB.  */
-CPU_BE( lsl    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+       lsl     tmp2, tmp2, tmp1        /* Shift (tmp1 & 63).  */
+#else
        /* Little-endian.  Early bytes are at LSB.  */
-CPU_LE( lsr    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
-
+       lsr     tmp2, tmp2, tmp1        /* Shift (tmp1 & 63).  */
+#endif
        orr     data1, data1, tmp2
        orr     data2, data2, tmp2
-       b       .Lstart_realigned
-
-.Lmisaligned8:
-       /*
-       * Get the align offset length to compare per byte first.
-       * After this process, one string's address will be aligned.
-       */
-       and     tmp1, src1, #7
-       neg     tmp1, tmp1
-       add     tmp1, tmp1, #8
-       and     tmp2, src2, #7
-       neg     tmp2, tmp2
-       add     tmp2, tmp2, #8
-       subs    tmp3, tmp1, tmp2
-       csel    pos, tmp1, tmp2, hi /*Choose the maximum. */
-.Ltinycmp:
+       b       L(start_realigned)
+
+L(misaligned8):
+       /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always
+          checking to make sure that we don't access beyond a page boundary in
+          SRC2.  */
+       tst     src1, #7
+       b.eq    L(loop_misaligned)
+L(do_misaligned):
        ldrb    data1w, [src1], #1
        ldrb    data2w, [src2], #1
-       subs    pos, pos, #1
-       ccmp    data1w, #1, #0, ne  /* NZCV = 0b0000.  */
-       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
-       b.eq    .Ltinycmp
-       cbnz    pos, 1f /*find the null or unequal...*/
        cmp     data1w, #1
-       ccmp    data1w, data2w, #0, cs
-       b.eq    .Lstart_align /*the last bytes are equal....*/
-1:
-       sub     result, data1, data2
-       ret
-
-.Lstart_align:
-       ands    xzr, src1, #7
-       b.eq    .Lrecal_offset
-       /*process more leading bytes to make str1 aligned...*/
-       add     src1, src1, tmp3
-       add     src2, src2, tmp3
-       /*load 8 bytes from aligned str1 and non-aligned str2..*/
+       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
+       b.ne    L(done)
+       tst     src1, #7
+       b.ne    L(do_misaligned)
+
+L(loop_misaligned):
+       /* Test if we are within the last dword of the end of a 4K page.  If
+          yes then jump back to the misaligned loop to compare a byte at a time.  */
+       and     tmp1, src2, #0xff8
+       eor     tmp1, tmp1, #0xff8
+       cbz     tmp1, L(do_misaligned)
        ldr     data1, [src1], #8
        ldr     data2, [src2], #8
 
        sub     tmp1, data1, zeroones
        orr     tmp2, data1, #REP8_7f
-       bic     has_nul, tmp1, tmp2
-       eor     diff, data1, data2 /* Non-zero if differences found.  */
-       orr     syndrome, diff, has_nul
-       cbnz    syndrome, .Lcal_cmpresult
-       /*How far is the current str2 from the alignment boundary...*/
-       and     tmp3, tmp3, #7
-.Lrecal_offset:
-       neg     pos, tmp3
-.Lloopcmp_proc:
-       /*
-       * Divide the eight bytes into two parts. First,backwards the src2
-       * to an alignment boundary,load eight bytes from the SRC2 alignment
-       * boundary,then compare with the relative bytes from SRC1.
-       * If all 8 bytes are equal,then start the second part's comparison.
-       * Otherwise finish the comparison.
-       * This special handle can garantee all the accesses are in the
-       * thread/task space in avoid to overrange access.
-       */
-       ldr     data1, [src1,pos]
-       ldr     data2, [src2,pos]
-       sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
-       bic     has_nul, tmp1, tmp2
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       orr     syndrome, diff, has_nul
-       cbnz    syndrome, .Lcal_cmpresult
-
-       /*The second part process*/
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
-       sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
-       bic     has_nul, tmp1, tmp2
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       bic     has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
        orr     syndrome, diff, has_nul
-       cbz     syndrome, .Lloopcmp_proc
+       cbz     syndrome, L(loop_misaligned)
+       b       L(end)
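
A hedged C model of the page-boundary test at the top of L(loop_misaligned) above (names are illustrative): it flags addresses within the last dword of a 4K page, where the next 8-byte load from SRC2 could cross into an unmapped page.

#include <stdbool.h>
#include <stdint.h>

static bool near_4k_page_end(uintptr_t src2)
{
	/* Bits [11:3] all set means the address lies in the final 8 bytes
	   of a 4K page, so the next dword load might cross the boundary. */
	return ((src2 & 0xff8) ^ 0xff8) == 0;
}
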
 
-.Lcal_cmpresult:
-       /*
-       * reversed the byte-order as big-endian,then CLZ can find the most
-       * significant zero bits.
-       */
-CPU_LE( rev    syndrome, syndrome )
-CPU_LE( rev    data1, data1 )
-CPU_LE( rev    data2, data2 )
-
-       /*
-       * For big-endian we cannot use the trick with the syndrome value
-       * as carry-propagation can corrupt the upper bits if the trailing
-       * bytes in the string contain 0x01.
-       * However, if there is no NUL byte in the dword, we can generate
-       * the result directly.  We cannot just subtract the bytes as the
-       * MSB might be significant.
-       */
-CPU_BE( cbnz   has_nul, 1f )
-CPU_BE( cmp    data1, data2 )
-CPU_BE( cset   result, ne )
-CPU_BE( cneg   result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
-       /*Re-compute the NUL-byte detection, using a byte-reversed value. */
-CPU_BE(        rev     tmp3, data1 )
-CPU_BE(        sub     tmp1, tmp3, zeroones )
-CPU_BE(        orr     tmp2, tmp3, #REP8_7f )
-CPU_BE(        bic     has_nul, tmp1, tmp2 )
-CPU_BE(        rev     has_nul, has_nul )
-CPU_BE(        orr     syndrome, diff, has_nul )
-
-       clz     pos, syndrome
-       /*
-       * The MS-non-zero bit of the syndrome marks either the first bit
-       * that is different, or the top bit of the first zero byte.
-       * Shifting left now will bring the critical information into the
-       * top bits.
-       */
-       lsl     data1, data1, pos
-       lsl     data2, data2, pos
-       /*
-       * But we need to zero-extend (char is unsigned) the value and then
-       * perform a signed 32-bit subtraction.
-       */
-       lsr     data1, data1, #56
-       sub     result, data1, data2, lsr #56
+L(done):
+       sub     result, data1, data2
        ret
+
 SYM_FUNC_END_PI(strcmp)
 EXPORT_SYMBOL_NOKASAN(strcmp)
index ee3ed88..35fbdb7 100644 (file)
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/98e4d6a5c13c8e54/string/aarch64/strlen.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-/*
- * calculate the length of a string
+/* Assumptions:
  *
- * Parameters:
- *     x0 - const string pointer
- * Returns:
- *     x0 - the return length of specific string
+ * ARMv8-a, AArch64, unaligned accesses, min page size 4k.
  */
 
+#define L(label) .L ## label
+
 /* Arguments and results.  */
-srcin          .req    x0
-len            .req    x0
+#define srcin          x0
+#define len            x0
 
 /* Locals and temporaries.  */
-src            .req    x1
-data1          .req    x2
-data2          .req    x3
-data2a         .req    x4
-has_nul1       .req    x5
-has_nul2       .req    x6
-tmp1           .req    x7
-tmp2           .req    x8
-tmp3           .req    x9
-tmp4           .req    x10
-zeroones       .req    x11
-pos            .req    x12
+#define src            x1
+#define data1          x2
+#define data2          x3
+#define has_nul1       x4
+#define has_nul2       x5
+#define tmp1           x4
+#define tmp2           x5
+#define tmp3           x6
+#define tmp4           x7
+#define zeroones       x8
+
+       /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+          (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+          can be done in parallel across the entire word. A faster check
+          (X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
+          false hits for characters 129..255.  */
 
 #define REP8_01 0x0101010101010101
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
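
A minimal C sketch (illustrative, not part of the patch) of the two checks the comment above describes, using the same constants: the accurate form is non-zero iff the dword contains a zero byte, while the fast form never misses a NUL but may report false hits for bytes 0x81..0xff (and via borrows), which is why the loops below re-check with the accurate form before exiting.

#include <stdbool.h>
#include <stdint.h>

/* Non-zero iff some byte of x is zero (the "accurate" check). */
static bool has_nul_accurate(uint64_t x)
{
	return ((x - 0x0101010101010101ULL) & ~(x | 0x7f7f7f7f7f7f7f7fULL)) != 0;
}

/* Cheaper test used in the hot loop; may give false positives. */
static bool has_nul_fast(uint64_t x)
{
	return ((x - 0x0101010101010101ULL) & 0x8080808080808080ULL) != 0;
}
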
 
+#define MIN_PAGE_SIZE 4096
+
+       /* Since strings are short on average, we check the first 16 bytes
+          of the string for a NUL character.  In order to do an unaligned ldp
+          safely we have to do a page cross check first.  If there is a NUL
+          byte we calculate the length from the 2 8-byte words using
+          conditional select to reduce branch mispredictions (it is unlikely
+          strlen will be repeatedly called on strings with the same length).
+
+          If the string is longer than 16 bytes, we align src so we don't need
+          further page cross checks, and process 32 bytes per iteration
+          using the fast NUL check.  If we encounter non-ASCII characters,
+          fall back to a second loop using the full NUL check.
+
+          If the page cross check fails, we read 16 bytes from an aligned
+          address, remove any characters before the string, and continue
+          in the main loop using aligned loads.  Since strings crossing a
+          page in the first 16 bytes are rare (probability of
+          16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.
+
+          AArch64 systems have a minimum page size of 4k.  We don't bother
+          checking for larger page sizes - the cost of setting up the correct
+          page size is just not worth the extra gain from a small reduction in
+          the cases taking the slow path.  Note that we only care about
+          whether the first fetch, which may be misaligned, crosses a page
+          boundary.  */
+
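
Before the first unaligned 16-byte load, the entry sequence just below checks whether that load could cross a 4K page; a hedged C equivalent of that check (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define MIN_PAGE_SIZE 4096

static bool first_fetch_crosses_page(const char *srcin)
{
	uintptr_t off = (uintptr_t)srcin & (MIN_PAGE_SIZE - 1);

	/* A 16-byte load starting in the last 15 bytes of a page would
	   touch the next page, so take the slow L(page_cross) path. */
	return off > MIN_PAGE_SIZE - 16;
}
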
 SYM_FUNC_START_WEAK_PI(strlen)
-       mov     zeroones, #REP8_01
-       bic     src, srcin, #15
-       ands    tmp1, srcin, #15
-       b.ne    .Lmisaligned
-       /*
-       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
-       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
-       * can be done in parallel across the entire word.
-       */
-       /*
-       * The inner loop deals with two Dwords at a time. This has a
-       * slightly higher start-up cost, but we should win quite quickly,
-       * especially on cores with a high number of issue slots per
-       * cycle, as we get much better parallelism out of the operations.
-       */
-.Lloop:
-       ldp     data1, data2, [src], #16
-.Lrealigned:
+       and     tmp1, srcin, MIN_PAGE_SIZE - 1
+       mov     zeroones, REP8_01
+       cmp     tmp1, MIN_PAGE_SIZE - 16
+       b.gt    L(page_cross)
+       ldp     data1, data2, [srcin]
+#ifdef __AARCH64EB__
+       /* For big-endian, carry propagation (if the final byte in the
+          string is 0x01) means we cannot use has_nul1/2 directly.
+          Since we expect strings to be small and early-exit,
+          byte-swap the data now so has_nul1/2 will be correct.  */
+       rev     data1, data1
+       rev     data2, data2
+#endif
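
A small, hypothetical demo of the carry-propagation hazard the comment above describes (not part of the patch): in big-endian register order, a 0x01 byte immediately before the NUL absorbs the NUL byte's borrow and produces a spurious marker above the real one, so scanning the raw has_nul word with CLZ would pick the wrong byte. Byte-swapping first, as done above, makes borrows run away from the start of the string instead.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Big-endian register view of the bytes 'A', 0x01, '\0', 'a'... */
	uint64_t x = 0x4101006161616161ULL;
	uint64_t has_nul = (x - 0x0101010101010101ULL) &
			   ~(x | 0x7f7f7f7f7f7f7f7fULL);

	/* Prints 0080800000000000: a spurious marker for the 0x01 byte
	   sits above the genuine marker for the NUL byte. */
	printf("%016llx\n", (unsigned long long)has_nul);
	return 0;
}
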
        sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
+       orr     tmp2, data1, REP8_7f
        sub     tmp3, data2, zeroones
-       orr     tmp4, data2, #REP8_7f
-       bic     has_nul1, tmp1, tmp2
-       bics    has_nul2, tmp3, tmp4
-       ccmp    has_nul1, #0, #0, eq    /* NZCV = 0000  */
-       b.eq    .Lloop
+       orr     tmp4, data2, REP8_7f
+       bics    has_nul1, tmp1, tmp2
+       bic     has_nul2, tmp3, tmp4
+       ccmp    has_nul2, 0, 0, eq
+       beq     L(main_loop_entry)
+
+       /* Enter with C = has_nul1 == 0.  */
+       csel    has_nul1, has_nul1, has_nul2, cc
+       mov     len, 8
+       rev     has_nul1, has_nul1
+       clz     tmp1, has_nul1
+       csel    len, xzr, len, cc
+       add     len, len, tmp1, lsr 3
+       ret
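
The rev/clz/"lsr 3" sequence above converts a has_nul word into the byte offset of the first NUL within the dword; a hedged little-endian C model follows (the big-endian path byte-swaps the data first, so the same arithmetic applies):

#include <stdint.h>

static unsigned int first_nul_offset(uint64_t has_nul)
{
	/* On little-endian the first NUL's 0x80 marker is the lowest marked
	   byte; byte-reverse so CLZ can locate it, then bits -> bytes.
	   Assumes has_nul is non-zero. */
	return (unsigned int)(__builtin_clzll(__builtin_bswap64(has_nul)) >> 3);
}
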
 
+       /* The inner loop processes 32 bytes per iteration and uses the fast
+          NUL check.  If we encounter non-ASCII characters, use a second
+          loop with the accurate NUL check.  */
+       .p2align 4
+L(main_loop_entry):
+       bic     src, srcin, 15
+       sub     src, src, 16
+L(main_loop):
+       ldp     data1, data2, [src, 32]!
+L(page_cross_entry):
+       sub     tmp1, data1, zeroones
+       sub     tmp3, data2, zeroones
+       orr     tmp2, tmp1, tmp3
+       tst     tmp2, zeroones, lsl 7
+       bne     1f
+       ldp     data1, data2, [src, 16]
+       sub     tmp1, data1, zeroones
+       sub     tmp3, data2, zeroones
+       orr     tmp2, tmp1, tmp3
+       tst     tmp2, zeroones, lsl 7
+       beq     L(main_loop)
+       add     src, src, 16
+1:
+       /* The fast check failed, so do the slower, accurate NUL check.  */
+       orr     tmp2, data1, REP8_7f
+       orr     tmp4, data2, REP8_7f
+       bics    has_nul1, tmp1, tmp2
+       bic     has_nul2, tmp3, tmp4
+       ccmp    has_nul2, 0, 0, eq
+       beq     L(nonascii_loop)
+
+       /* Enter with C = has_nul1 == 0.  */
+L(tail):
+#ifdef __AARCH64EB__
+       /* For big-endian, carry propagation (if the final byte in the
+          string is 0x01) means we cannot use has_nul1/2 directly.  The
+          easiest way to get the correct byte is to byte-swap the data
+          and calculate the syndrome a second time.  */
+       csel    data1, data1, data2, cc
+       rev     data1, data1
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, REP8_7f
+       bic     has_nul1, tmp1, tmp2
+#else
+       csel    has_nul1, has_nul1, has_nul2, cc
+#endif
        sub     len, src, srcin
-       cbz     has_nul1, .Lnul_in_data2
-CPU_BE(        mov     data2, data1 )  /*prepare data to re-calculate the syndrome*/
-       sub     len, len, #8
-       mov     has_nul2, has_nul1
-.Lnul_in_data2:
-       /*
-       * For big-endian, carry propagation (if the final byte in the
-       * string is 0x01) means we cannot use has_nul directly.  The
-       * easiest way to get the correct byte is to byte-swap the data
-       * and calculate the syndrome a second time.
-       */
-CPU_BE( rev    data2, data2 )
-CPU_BE( sub    tmp1, data2, zeroones )
-CPU_BE( orr    tmp2, data2, #REP8_7f )
-CPU_BE( bic    has_nul2, tmp1, tmp2 )
-
-       sub     len, len, #8
-       rev     has_nul2, has_nul2
-       clz     pos, has_nul2
-       add     len, len, pos, lsr #3           /* Bits to bytes.  */
+       rev     has_nul1, has_nul1
+       add     tmp2, len, 8
+       clz     tmp1, has_nul1
+       csel    len, len, tmp2, cc
+       add     len, len, tmp1, lsr 3
        ret
 
-.Lmisaligned:
-       cmp     tmp1, #8
-       neg     tmp1, tmp1
-       ldp     data1, data2, [src], #16
-       lsl     tmp1, tmp1, #3          /* Bytes beyond alignment -> bits.  */
-       mov     tmp2, #~0
-       /* Big-endian.  Early bytes are at MSB.  */
-CPU_BE( lsl    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+L(nonascii_loop):
+       ldp     data1, data2, [src, 16]!
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, REP8_7f
+       sub     tmp3, data2, zeroones
+       orr     tmp4, data2, REP8_7f
+       bics    has_nul1, tmp1, tmp2
+       bic     has_nul2, tmp3, tmp4
+       ccmp    has_nul2, 0, 0, eq
+       bne     L(tail)
+       ldp     data1, data2, [src, 16]!
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, REP8_7f
+       sub     tmp3, data2, zeroones
+       orr     tmp4, data2, REP8_7f
+       bics    has_nul1, tmp1, tmp2
+       bic     has_nul2, tmp3, tmp4
+       ccmp    has_nul2, 0, 0, eq
+       beq     L(nonascii_loop)
+       b       L(tail)
+
+       /* Load 16 bytes from [srcin & ~15] and force the bytes that precede
+          srcin to 0x7f, so we ignore any NUL bytes before the string.
+          Then continue in the aligned loop.  */
+L(page_cross):
+       bic     src, srcin, 15
+       ldp     data1, data2, [src]
+       lsl     tmp1, srcin, 3
+       mov     tmp4, -1
+#ifdef __AARCH64EB__
+       /* Big-endian.  Early bytes are at MSB.  */
+       lsr     tmp1, tmp4, tmp1        /* Shift (tmp1 & 63).  */
+#else
        /* Little-endian.  Early bytes are at LSB.  */
-CPU_LE( lsr    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+       lsl     tmp1, tmp4, tmp1        /* Shift (tmp1 & 63).  */
+#endif
+       orr     tmp1, tmp1, REP8_80
+       orn     data1, data1, tmp1
+       orn     tmp2, data2, tmp1
+       tst     srcin, 8
+       csel    data1, data1, tmp4, eq
+       csel    data2, data2, tmp2, eq
+       b       L(page_cross_entry)
 
-       orr     data1, data1, tmp2
-       orr     data2a, data2, tmp2
-       csinv   data1, data1, xzr, le
-       csel    data2, data2, data2a, le
-       b       .Lrealigned
 SYM_FUNC_END_PI(strlen)
 EXPORT_SYMBOL_NOKASAN(strlen)
index 2a7ee94..48d44f7 100644 (file)
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
  *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/strncmp.S
  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-/*
- * compare two strings
+/* Assumptions:
  *
- * Parameters:
- *  x0 - const string 1 pointer
- *  x1 - const string 2 pointer
- *  x2 - the maximal length to be compared
- * Returns:
- *  x0 - an integer less than, equal to, or greater than zero if s1 is found,
- *     respectively, to be less than, to match, or be greater than s2.
+ * ARMv8-a, AArch64
  */
 
+#define L(label) .L ## label
+
 #define REP8_01 0x0101010101010101
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
 /* Parameters and result.  */
-src1           .req    x0
-src2           .req    x1
-limit          .req    x2
-result         .req    x0
+#define src1           x0
+#define src2           x1
+#define limit          x2
+#define result         x0
 
 /* Internal variables.  */
-data1          .req    x3
-data1w         .req    w3
-data2          .req    x4
-data2w         .req    w4
-has_nul                .req    x5
-diff           .req    x6
-syndrome       .req    x7
-tmp1           .req    x8
-tmp2           .req    x9
-tmp3           .req    x10
-zeroones       .req    x11
-pos            .req    x12
-limit_wd       .req    x13
-mask           .req    x14
-endloop                .req    x15
+#define data1          x3
+#define data1w         w3
+#define data2          x4
+#define data2w         w4
+#define has_nul                x5
+#define diff           x6
+#define syndrome       x7
+#define tmp1           x8
+#define tmp2           x9
+#define tmp3           x10
+#define zeroones       x11
+#define pos            x12
+#define limit_wd       x13
+#define mask           x14
+#define endloop                x15
+#define count          mask
 
 SYM_FUNC_START_WEAK_PI(strncmp)
-       cbz     limit, .Lret0
+       cbz     limit, L(ret0)
        eor     tmp1, src1, src2
        mov     zeroones, #REP8_01
        tst     tmp1, #7
-       b.ne    .Lmisaligned8
-       ands    tmp1, src1, #7
-       b.ne    .Lmutual_align
+       and     count, src1, #7
+       b.ne    L(misaligned8)
+       cbnz    count, L(mutual_align)
        /* Calculate the number of full and partial words -1.  */
-       /*
-       * when limit is mulitply of 8, if not sub 1,
-       * the judgement of last dword will wrong.
-       */
-       sub     limit_wd, limit, #1 /* limit != 0, so no underflow.  */
-       lsr     limit_wd, limit_wd, #3  /* Convert to Dwords.  */
+       sub     limit_wd, limit, #1     /* limit != 0, so no underflow.  */
+       lsr     limit_wd, limit_wd, #3  /* Convert to Dwords.  */
 
-       /*
-       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
-       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
-       * can be done in parallel across the entire word.
-       */
-.Lloop_aligned:
+       /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+          (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+          can be done in parallel across the entire word.  */
+       .p2align 4
+L(loop_aligned):
        ldr     data1, [src1], #8
        ldr     data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
        subs    limit_wd, limit_wd, #1
        sub     tmp1, data1, zeroones
        orr     tmp2, data1, #REP8_7f
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, pl  /* Last Dword or differences.*/
-       bics    has_nul, tmp1, tmp2 /* Non-zero if NUL terminator.  */
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       csinv   endloop, diff, xzr, pl  /* Last Dword or differences.  */
+       bics    has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
        ccmp    endloop, #0, #0, eq
-       b.eq    .Lloop_aligned
+       b.eq    L(loop_aligned)
+       /* End of main loop */
 
-       /*Not reached the limit, must have found the end or a diff.  */
-       tbz     limit_wd, #63, .Lnot_limit
+       /* Not reached the limit, must have found the end or a diff.  */
+       tbz     limit_wd, #63, L(not_limit)
 
        /* Limit % 8 == 0 => all bytes significant.  */
        ands    limit, limit, #7
-       b.eq    .Lnot_limit
+       b.eq    L(not_limit)
 
-       lsl     limit, limit, #3    /* Bits -> bytes.  */
+       lsl     limit, limit, #3        /* Bytes -> bits.  */
        mov     mask, #~0
-CPU_BE( lsr    mask, mask, limit )
-CPU_LE( lsl    mask, mask, limit )
+#ifdef __AARCH64EB__
+       lsr     mask, mask, limit
+#else
+       lsl     mask, mask, limit
+#endif
        bic     data1, data1, mask
        bic     data2, data2, mask
 
        /* Make sure that the NUL byte is marked in the syndrome.  */
        orr     has_nul, has_nul, mask
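
A hedged little-endian C model of the masking step above (illustrative names): when only limit % 8 bytes of the final dword are significant, the bytes at and beyond the limit are cleared from data1/data2 and forced to look like a NUL in the syndrome, so the comparison stops exactly at the limit.

#include <stdint.h>

static void mask_past_limit(uint64_t *data1, uint64_t *data2,
			    uint64_t *has_nul, unsigned int limit_bytes)
{
	/* limit_bytes = limit & 7, known to be non-zero on this path. */
	uint64_t mask = ~0ULL << (limit_bytes * 8);

	*data1 &= ~mask;	/* bic data1, data1, mask */
	*data2 &= ~mask;	/* bic data2, data2, mask */
	*has_nul |= mask;	/* orr has_nul, has_nul, mask */
}
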
 
-.Lnot_limit:
+L(not_limit):
        orr     syndrome, diff, has_nul
-       b       .Lcal_cmpresult
 
-.Lmutual_align:
-       /*
-       * Sources are mutually aligned, but are not currently at an
-       * alignment boundary.  Round down the addresses and then mask off
-       * the bytes that precede the start point.
-       * We also need to adjust the limit calculations, but without
-       * overflowing if the limit is near ULONG_MAX.
-       */
+#ifndef        __AARCH64EB__
+       rev     syndrome, syndrome
+       rev     data1, data1
+       /* The MS-non-zero bit of the syndrome marks either the first bit
+          that is different, or the top bit of the first zero byte.
+          Shifting left now will bring the critical information into the
+          top bits.  */
+       clz     pos, syndrome
+       rev     data2, data2
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /* But we need to zero-extend (char is unsigned) the value and then
+          perform a signed 32-bit subtraction.  */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+#else
+       /* For big-endian we cannot use the trick with the syndrome value
+          as carry-propagation can corrupt the upper bits if the trailing
+          bytes in the string contain 0x01.  */
+       /* However, if there is no NUL byte in the dword, we can generate
+          the result directly.  We can't just subtract the bytes as the
+          MSB might be significant.  */
+       cbnz    has_nul, 1f
+       cmp     data1, data2
+       cset    result, ne
+       cneg    result, result, lo
+       ret
+1:
+       /* Re-compute the NUL-byte detection, using a byte-reversed value.  */
+       rev     tmp3, data1
+       sub     tmp1, tmp3, zeroones
+       orr     tmp2, tmp3, #REP8_7f
+       bic     has_nul, tmp1, tmp2
+       rev     has_nul, has_nul
+       orr     syndrome, diff, has_nul
+       clz     pos, syndrome
+       /* The MS-non-zero bit of the syndrome marks either the first bit
+          that is different, or the top bit of the first zero byte.
+          Shifting left now will bring the critical information into the
+          top bits.  */
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /* But we need to zero-extend (char is unsigned) the value and then
+          perform a signed 32-bit subtraction.  */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+#endif
+
+L(mutual_align):
+       /* Sources are mutually aligned, but are not currently at an
+          alignment boundary.  Round down the addresses and then mask off
+          the bytes that precede the start point.
+          We also need to adjust the limit calculations, but without
+          overflowing if the limit is near ULONG_MAX.  */
        bic     src1, src1, #7
        bic     src2, src2, #7
        ldr     data1, [src1], #8
-       neg     tmp3, tmp1, lsl #3  /* 64 - bits(bytes beyond align). */
+       neg     tmp3, count, lsl #3     /* 64 - bits(bytes beyond align). */
        ldr     data2, [src2], #8
        mov     tmp2, #~0
-       sub     limit_wd, limit, #1 /* limit != 0, so no underflow.  */
+       sub     limit_wd, limit, #1     /* limit != 0, so no underflow.  */
+#ifdef __AARCH64EB__
        /* Big-endian.  Early bytes are at MSB.  */
-CPU_BE( lsl    tmp2, tmp2, tmp3 )      /* Shift (tmp1 & 63).  */
+       lsl     tmp2, tmp2, tmp3        /* Shift (count & 63).  */
+#else
        /* Little-endian.  Early bytes are at LSB.  */
-CPU_LE( lsr    tmp2, tmp2, tmp3 )      /* Shift (tmp1 & 63).  */
-
+       lsr     tmp2, tmp2, tmp3        /* Shift (count & 63).  */
+#endif
        and     tmp3, limit_wd, #7
        lsr     limit_wd, limit_wd, #3
-       /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.*/
-       add     limit, limit, tmp1
-       add     tmp3, tmp3, tmp1
+       /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.  */
+       add     limit, limit, count
+       add     tmp3, tmp3, count
        orr     data1, data1, tmp2
        orr     data2, data2, tmp2
        add     limit_wd, limit_wd, tmp3, lsr #3
-       b       .Lstart_realigned
+       b       L(start_realigned)
+
+       .p2align 4
+       /* Don't bother with dwords for up to 16 bytes.  */
+L(misaligned8):
+       cmp     limit, #16
+       b.hs    L(try_misaligned_words)
 
-/*when src1 offset is not equal to src2 offset...*/
-.Lmisaligned8:
-       cmp     limit, #8
-       b.lo    .Ltiny8proc /*limit < 8... */
-       /*
-       * Get the align offset length to compare per byte first.
-       * After this process, one string's address will be aligned.*/
-       and     tmp1, src1, #7
-       neg     tmp1, tmp1
-       add     tmp1, tmp1, #8
-       and     tmp2, src2, #7
-       neg     tmp2, tmp2
-       add     tmp2, tmp2, #8
-       subs    tmp3, tmp1, tmp2
-       csel    pos, tmp1, tmp2, hi /*Choose the maximum. */
-       /*
-       * Here, limit is not less than 8, so directly run .Ltinycmp
-       * without checking the limit.*/
-       sub     limit, limit, pos
-.Ltinycmp:
+L(byte_loop):
+       /* Perhaps we can do better than this.  */
        ldrb    data1w, [src1], #1
        ldrb    data2w, [src2], #1
-       subs    pos, pos, #1
-       ccmp    data1w, #1, #0, ne  /* NZCV = 0b0000.  */
-       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
-       b.eq    .Ltinycmp
-       cbnz    pos, 1f /*find the null or unequal...*/
-       cmp     data1w, #1
-       ccmp    data1w, data2w, #0, cs
-       b.eq    .Lstart_align /*the last bytes are equal....*/
-1:
+       subs    limit, limit, #1
+       ccmp    data1w, #1, #0, hi      /* NZCV = 0b0000.  */
+       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
+       b.eq    L(byte_loop)
+L(done):
        sub     result, data1, data2
        ret
-
-.Lstart_align:
+       /* Align the SRC1 to a dword by doing a bytewise compare and then do
+          the dword loop.  */
+L(try_misaligned_words):
        lsr     limit_wd, limit, #3
-       cbz     limit_wd, .Lremain8
-       /*process more leading bytes to make str1 aligned...*/
-       ands    xzr, src1, #7
-       b.eq    .Lrecal_offset
-       add     src1, src1, tmp3        /*tmp3 is positive in this branch.*/
-       add     src2, src2, tmp3
-       ldr     data1, [src1], #8
-       ldr     data2, [src2], #8
+       cbz     count, L(do_misaligned)
 
-       sub     limit, limit, tmp3
+       neg     count, count
+       and     count, count, #7
+       sub     limit, limit, count
        lsr     limit_wd, limit, #3
-       subs    limit_wd, limit_wd, #1
 
-       sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
-       bics    has_nul, tmp1, tmp2
-       ccmp    endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
-       b.ne    .Lunequal_proc
-       /*How far is the current str2 from the alignment boundary...*/
-       and     tmp3, tmp3, #7
-.Lrecal_offset:
-       neg     pos, tmp3
-.Lloopcmp_proc:
-       /*
-       * Divide the eight bytes into two parts. First,backwards the src2
-       * to an alignment boundary,load eight bytes from the SRC2 alignment
-       * boundary,then compare with the relative bytes from SRC1.
-       * If all 8 bytes are equal,then start the second part's comparison.
-       * Otherwise finish the comparison.
-       * This special handle can garantee all the accesses are in the
-       * thread/task space in avoid to overrange access.
-       */
-       ldr     data1, [src1,pos]
-       ldr     data2, [src2,pos]
-       sub     tmp1, data1, zeroones
-       orr     tmp2, data1, #REP8_7f
-       bics    has_nul, tmp1, tmp2 /* Non-zero if NUL terminator.  */
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, eq
-       cbnz    endloop, .Lunequal_proc
+L(page_end_loop):
+       ldrb    data1w, [src1], #1
+       ldrb    data2w, [src2], #1
+       cmp     data1w, #1
+       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
+       b.ne    L(done)
+       subs    count, count, #1
+       b.hi    L(page_end_loop)
+
+L(do_misaligned):
+       /* Prepare ourselves for the next page crossing.  Unlike the aligned
+          loop, we fetch 1 less dword because we risk crossing bounds on
+          SRC2.  */
+       mov     count, #8
+       subs    limit_wd, limit_wd, #1
+       b.lo    L(done_loop)
+L(loop_misaligned):
+       and     tmp2, src2, #0xff8
+       eor     tmp2, tmp2, #0xff8
+       cbz     tmp2, L(page_end_loop)
 
-       /*The second part process*/
        ldr     data1, [src1], #8
        ldr     data2, [src2], #8
-       subs    limit_wd, limit_wd, #1
        sub     tmp1, data1, zeroones
        orr     tmp2, data1, #REP8_7f
-       eor     diff, data1, data2  /* Non-zero if differences found.  */
-       csinv   endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
-       bics    has_nul, tmp1, tmp2
-       ccmp    endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
-       b.eq    .Lloopcmp_proc
-
-.Lunequal_proc:
-       orr     syndrome, diff, has_nul
-       cbz     syndrome, .Lremain8
-.Lcal_cmpresult:
-       /*
-       * reversed the byte-order as big-endian,then CLZ can find the most
-       * significant zero bits.
-       */
-CPU_LE( rev    syndrome, syndrome )
-CPU_LE( rev    data1, data1 )
-CPU_LE( rev    data2, data2 )
-       /*
-       * For big-endian we cannot use the trick with the syndrome value
-       * as carry-propagation can corrupt the upper bits if the trailing
-       * bytes in the string contain 0x01.
-       * However, if there is no NUL byte in the dword, we can generate
-       * the result directly.  We can't just subtract the bytes as the
-       * MSB might be significant.
-       */
-CPU_BE( cbnz   has_nul, 1f )
-CPU_BE( cmp    data1, data2 )
-CPU_BE( cset   result, ne )
-CPU_BE( cneg   result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
-       /* Re-compute the NUL-byte detection, using a byte-reversed value.*/
-CPU_BE( rev    tmp3, data1 )
-CPU_BE( sub    tmp1, tmp3, zeroones )
-CPU_BE( orr    tmp2, tmp3, #REP8_7f )
-CPU_BE( bic    has_nul, tmp1, tmp2 )
-CPU_BE( rev    has_nul, has_nul )
-CPU_BE( orr    syndrome, diff, has_nul )
-       /*
-       * The MS-non-zero bit of the syndrome marks either the first bit
-       * that is different, or the top bit of the first zero byte.
-       * Shifting left now will bring the critical information into the
-       * top bits.
-       */
-       clz     pos, syndrome
-       lsl     data1, data1, pos
-       lsl     data2, data2, pos
-       /*
-       * But we need to zero-extend (char is unsigned) the value and then
-       * perform a signed 32-bit subtraction.
-       */
-       lsr     data1, data1, #56
-       sub     result, data1, data2, lsr #56
-       ret
-
-.Lremain8:
-       /* Limit % 8 == 0 => all bytes significant.  */
-       ands    limit, limit, #7
-       b.eq    .Lret0
-.Ltiny8proc:
-       ldrb    data1w, [src1], #1
-       ldrb    data2w, [src2], #1
-       subs    limit, limit, #1
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       bics    has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
+       ccmp    diff, #0, #0, eq
+       b.ne    L(not_limit)
+       subs    limit_wd, limit_wd, #1
+       b.pl    L(loop_misaligned)
 
-       ccmp    data1w, #1, #0, ne  /* NZCV = 0b0000.  */
-       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
-       b.eq    .Ltiny8proc
-       sub     result, data1, data2
-       ret
+L(done_loop):
+       /* We found a difference or a NUL before the limit was reached.  */
+       and     limit, limit, #7
+       cbz     limit, L(not_limit)
+       /* Read the last word.  */
+       sub     src1, src1, 8
+       sub     src2, src2, 8
+       ldr     data1, [src1, limit]
+       ldr     data2, [src2, limit]
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       bics    has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
+       ccmp    diff, #0, #0, eq
+       b.ne    L(not_limit)
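
A hedged C sketch of the overlapping tail re-read in L(done_loop) above (illustrative names): rather than a byte loop for the last limit % 8 bytes, back up and load the dword that ends exactly at the limit.

#include <stdint.h>
#include <string.h>

static uint64_t load_final_dword(const char *src, unsigned int remaining)
{
	uint64_t word;

	/* remaining = limit & 7, non-zero; src points just past the last
	   full dword already compared.  The load ends exactly at the limit,
	   so no byte beyond it is read, and the overlapping low bytes were
	   already known to be equal and non-NUL. */
	memcpy(&word, src - 8 + remaining, sizeof(word));
	return word;
}
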
 
-.Lret0:
+L(ret0):
        mov     result, #0
        ret
+
 SYM_FUNC_END_PI(strncmp)
 EXPORT_SYMBOL_NOKASAN(strncmp)
index c83bb5a..baee229 100644 (file)
@@ -15,7 +15,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
         * barrier to order the cache maintenance against the memcpy.
         */
        memcpy(dst, src, cnt);
-       __clean_dcache_area_pop(dst, cnt);
+       dcache_clean_pop((unsigned long)dst, (unsigned long)dst + cnt);
 }
 EXPORT_SYMBOL_GPL(memcpy_flushcache);
 
@@ -33,6 +33,6 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
        rc = raw_copy_from_user(to, from, n);
 
        /* See above */
-       __clean_dcache_area_pop(to, n - rc);
+       dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc);
        return rc;
 }
index 2d881f3..5051b3c 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/asm-uaccess.h>
 
 /*
- *     flush_icache_range(start,end)
+ *     caches_clean_inval_pou_macro(start,end) [fixup]
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
  *
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
+ *     - fixup   - optional label to branch to on user fault
  */
-SYM_FUNC_START(__flush_icache_range)
-       /* FALLTHROUGH */
+.macro caches_clean_inval_pou_macro, fixup
+alternative_if ARM64_HAS_CACHE_IDC
+       dsb     ishst
+       b       .Ldc_skip_\@
+alternative_else_nop_endif
+       mov     x2, x0
+       mov     x3, x1
+       dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
+.Ldc_skip_\@:
+alternative_if ARM64_HAS_CACHE_DIC
+       isb
+       b       .Lic_skip_\@
+alternative_else_nop_endif
+       invalidate_icache_by_line x0, x1, x2, x3, \fixup
+.Lic_skip_\@:
+.endm
 
 /*
- *     __flush_cache_user_range(start,end)
+ *     caches_clean_inval_pou(start,end)
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
@@ -37,117 +52,103 @@ SYM_FUNC_START(__flush_icache_range)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__flush_cache_user_range)
+SYM_FUNC_START(caches_clean_inval_pou)
+       caches_clean_inval_pou_macro
+       ret
+SYM_FUNC_END(caches_clean_inval_pou)
+
+/*
+ *     caches_clean_inval_user_pou(start,end)
+ *
+ *     Ensure that the I and D caches are coherent within specified region.
+ *     This is typically used when code has been written to a memory region,
+ *     and will be executed.
+ *
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
+ */
+SYM_FUNC_START(caches_clean_inval_user_pou)
        uaccess_ttbr0_enable x2, x3, x4
-alternative_if ARM64_HAS_CACHE_IDC
-       dsb     ishst
-       b       7f
-alternative_else_nop_endif
-       dcache_line_size x2, x3
-       sub     x3, x2, #1
-       bic     x4, x0, x3
-1:
-user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
-       add     x4, x4, x2
-       cmp     x4, x1
-       b.lo    1b
-       dsb     ish
 
-7:
-alternative_if ARM64_HAS_CACHE_DIC
-       isb
-       b       8f
-alternative_else_nop_endif
-       invalidate_icache_by_line x0, x1, x2, x3, 9f
-8:     mov     x0, #0
+       caches_clean_inval_pou_macro 2f
+       mov     x0, xzr
 1:
        uaccess_ttbr0_disable x1, x2
        ret
-9:
+2:
        mov     x0, #-EFAULT
        b       1b
-SYM_FUNC_END(__flush_icache_range)
-SYM_FUNC_END(__flush_cache_user_range)
+SYM_FUNC_END(caches_clean_inval_user_pou)
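
A hypothetical caller sketch, assuming an int-returning prototype exposed via <asm/cacheflush.h> (not shown in this hunk): unlike the kernel-address helpers, the user variant can fault while walking the user range and reports that through its return value.

#include <linux/errno.h>
#include <asm/cacheflush.h>

static int sync_user_code_range(unsigned long start, unsigned long end)
{
	/* Returns 0 on success, or -EFAULT if the user range faulted. */
	return caches_clean_inval_user_pou(start, end);
}
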
 
 /*
- *     invalidate_icache_range(start,end)
+ *     icache_inval_pou(start,end)
  *
  *     Ensure that the I cache is invalid within specified region.
  *
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(invalidate_icache_range)
+SYM_FUNC_START(icache_inval_pou)
 alternative_if ARM64_HAS_CACHE_DIC
-       mov     x0, xzr
        isb
        ret
 alternative_else_nop_endif
 
-       uaccess_ttbr0_enable x2, x3, x4
-
-       invalidate_icache_by_line x0, x1, x2, x3, 2f
-       mov     x0, xzr
-1:
-       uaccess_ttbr0_disable x1, x2
+       invalidate_icache_by_line x0, x1, x2, x3
        ret
-2:
-       mov     x0, #-EFAULT
-       b       1b
-SYM_FUNC_END(invalidate_icache_range)
+SYM_FUNC_END(icache_inval_pou)
 
 /*
- *     __flush_dcache_area(kaddr, size)
+ *     dcache_clean_inval_poc(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned and invalidated to the PoC.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
  */
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
 
 /*
- *     __clean_dcache_area_pou(kaddr, size)
+ *     dcache_clean_pou(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoU.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__clean_dcache_area_pou)
+SYM_FUNC_START(dcache_clean_pou)
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
        ret
 alternative_else_nop_endif
        dcache_by_line_op cvau, ish, x0, x1, x2, x3
        ret
-SYM_FUNC_END(__clean_dcache_area_pou)
+SYM_FUNC_END(dcache_clean_pou)
 
 /*
- *     __inval_dcache_area(kaddr, size)
+ *     dcache_inval_poc(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are invalidated. Any partial lines at the ends of the interval are
  *     also cleaned to PoC to prevent data loss.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - kernel start address of region
+ *     - end     - kernel end address of region
  */
 SYM_FUNC_START_LOCAL(__dma_inv_area)
-SYM_FUNC_START_PI(__inval_dcache_area)
+SYM_FUNC_START_PI(dcache_inval_poc)
        /* FALLTHROUGH */
 
 /*
- *     __dma_inv_area(start, size)
+ *     __dma_inv_area(start, end)
  *     - start   - virtual start address of region
- *     - size    - size in question
+ *     - end     - virtual end address of region
  */
-       add     x1, x1, x0
        dcache_line_size x2, x3
        sub     x3, x2, #1
        tst     x1, x3                          // end cache line aligned?
@@ -165,48 +166,48 @@ SYM_FUNC_START_PI(__inval_dcache_area)
        b.lo    2b
        dsb     sy
        ret
-SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END_PI(dcache_inval_poc)
 SYM_FUNC_END(__dma_inv_area)
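
A hedged C model of the invalidation policy described in the dcache_inval_poc header above: whole lines inside [start, end) are invalidated, while partial lines at either boundary are cleaned as well so neighbouring data is not lost. dc_ivac()/dc_civac() are illustrative stand-ins for the DC IVAC/CIVAC instructions, and the loop structure is a sketch, not the exact assembly.

#include <stdint.h>

extern void dc_ivac(uintptr_t line);	/* invalidate line to PoC */
extern void dc_civac(uintptr_t line);	/* clean + invalidate line to PoC */

static void dcache_inval_poc_model(uintptr_t start, uintptr_t end,
				   uintptr_t line_size)
{
	uintptr_t mask = line_size - 1;
	uintptr_t p;

	if (end & mask)			/* partial line at the end */
		dc_civac(end & ~mask);
	if (start & mask)		/* partial line at the start */
		dc_civac(start & ~mask);

	/* Plain invalidate for every fully-covered line in between. */
	for (p = (start + mask) & ~mask; p < (end & ~mask); p += line_size)
		dc_ivac(p);
}
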
 
 /*
- *     __clean_dcache_area_poc(kaddr, size)
+ *     dcache_clean_poc(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoC.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
  */
 SYM_FUNC_START_LOCAL(__dma_clean_area)
-SYM_FUNC_START_PI(__clean_dcache_area_poc)
+SYM_FUNC_START_PI(dcache_clean_poc)
        /* FALLTHROUGH */
 
 /*
- *     __dma_clean_area(start, size)
+ *     __dma_clean_area(start, end)
  *     - start   - virtual start address of region
- *     - size    - size in question
+ *     - end     - virtual end address of region
  */
        dcache_by_line_op cvac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END_PI(dcache_clean_poc)
 SYM_FUNC_END(__dma_clean_area)
 
 /*
- *     __clean_dcache_area_pop(kaddr, size)
+ *     dcache_clean_pop(start, end)
  *
- *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoP.
  *
- *     - kaddr   - kernel address
- *     - size    - size in question
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
  */
-SYM_FUNC_START_PI(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(dcache_clean_pop)
        alternative_if_not ARM64_HAS_DCPOP
-       b       __clean_dcache_area_poc
+       b       dcache_clean_poc
        alternative_else_nop_endif
        dcache_by_line_op cvap, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(dcache_clean_pop)
 
 /*
  *     __dma_flush_area(start, size)
@@ -217,6 +218,7 @@ SYM_FUNC_END_PI(__clean_dcache_area_pop)
  *     - size    - size in question
  */
 SYM_FUNC_START_PI(__dma_flush_area)
+       add     x1, x0, x1
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
 SYM_FUNC_END_PI(__dma_flush_area)
@@ -228,6 +230,7 @@ SYM_FUNC_END_PI(__dma_flush_area)
  *     - dir   - DMA direction
  */
 SYM_FUNC_START_PI(__dma_map_area)
+       add     x1, x0, x1
        cmp     w2, #DMA_FROM_DEVICE
        b.eq    __dma_inv_area
        b       __dma_clean_area
@@ -240,6 +243,7 @@ SYM_FUNC_END_PI(__dma_map_area)
  *     - dir   - DMA direction
  */
 SYM_FUNC_START_PI(__dma_unmap_area)
+       add     x1, x0, x1
        cmp     w2, #DMA_TO_DEVICE
        b.ne    __dma_inv_area
        ret
index 6d44c02..2aaf950 100644 (file)
 #include <asm/cache.h>
 #include <asm/tlbflush.h>
 
-void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(unsigned long start, unsigned long end)
 {
-       unsigned long addr = (unsigned long)kaddr;
-
        if (icache_is_aliasing()) {
-               __clean_dcache_area_pou(kaddr, len);
-               __flush_icache_all();
+               dcache_clean_pou(start, end);
+               icache_inval_all_pou();
        } else {
                /*
                 * Don't issue kick_all_cpus_sync() after I-cache invalidation
                 * for user mappings.
                 */
-               __flush_icache_range(addr, addr + len);
+               caches_clean_inval_pou(start, end);
        }
 }
 
-static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-                               unsigned long uaddr, void *kaddr,
-                               unsigned long len)
+static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
+                               unsigned long end)
 {
        if (vma->vm_flags & VM_EXEC)
-               sync_icache_aliases(kaddr, len);
+               sync_icache_aliases(start, end);
 }
 
 /*
@@ -48,7 +45,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long len)
 {
        memcpy(dst, src, len);
-       flush_ptrace_access(vma, page, uaddr, dst, len);
+       flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
 }
 
 void __sync_icache_dcache(pte_t pte)
@@ -56,7 +53,9 @@ void __sync_icache_dcache(pte_t pte)
        struct page *page = pte_page(pte);
 
        if (!test_bit(PG_dcache_clean, &page->flags)) {
-               sync_icache_aliases(page_address(page), page_size(page));
+               sync_icache_aliases((unsigned long)page_address(page),
+                                   (unsigned long)page_address(page) +
+                                           page_size(page));
                set_bit(PG_dcache_clean, &page->flags);
        }
 }
@@ -77,20 +76,20 @@ EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(caches_clean_inval_pou);
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_wb_cache_pmem(void *addr, size_t size)
 {
        /* Ensure order against any prior non-cacheable writes */
        dmb(osh);
-       __clean_dcache_area_pop(addr, size);
+       dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
 void arch_invalidate_pmem(void *addr, size_t size)
 {
-       __inval_dcache_area(addr, size);
+       dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 #endif
index 97d7bcd..bc555cd 100644 (file)
@@ -83,11 +83,7 @@ SYM_FUNC_START(cpu_do_suspend)
        mrs     x9, mdscr_el1
        mrs     x10, oslsr_el1
        mrs     x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       mrs     x12, tpidr_el1
-alternative_else
-       mrs     x12, tpidr_el2
-alternative_endif
+       get_this_cpu_offset x12
        mrs     x13, sp_el0
        stp     x2, x3, [x0]
        stp     x4, x5, [x0, #16]
@@ -145,11 +141,7 @@ SYM_FUNC_START(cpu_do_resume)
        msr     mdscr_el1, x10
 
        msr     sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       msr     tpidr_el1, x13
-alternative_else
-       msr     tpidr_el2, x13
-alternative_endif
+       set_this_cpu_offset x13
        msr     sp_el0, x14
        /*
         * Restore oslsr_el1 by writing oslar_el1
index 21fbdda..49305c2 100644 (file)
@@ -3,7 +3,8 @@
 # Internal CPU capabilities constants, keep this list sorted
 
 BTI
-HAS_32BIT_EL0
+# Unreliable: use system_supports_32bit_el0() instead.
+HAS_32BIT_EL0_DO_NOT_USE
 HAS_32BIT_EL1
 HAS_ADDRESS_AUTH
 HAS_ADDRESS_AUTH_ARCH
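
A hypothetical caller sketch (not part of this patch) of what the new comment is steering code towards: query 32-bit EL0 support through the accessor rather than the renamed capability, since the raw cap presumably cannot express mismatched 32-bit support across CPUs.

#include <linux/types.h>
#include <asm/cpufeature.h>

static bool compat_tasks_supported(void)
{
	/* Preferred over testing the HAS_32BIT_EL0_DO_NOT_USE cap directly. */
	return system_supports_32bit_el0();
}
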
index 45e8aa3..cb55878 100755 (executable)
@@ -7,7 +7,8 @@ trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
 cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
 void *p = &p;
 END
-$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr \
+  --use-android-relr-tags -o $tmp_file
 
 # Despite printing an error message, GNU nm still exits with exit code 0 if it
 # sees a relr section. So we need to check that nothing is printed to stderr.