Merge branch 'for-next/cpus_have_const_cap' into for-next/core
author Catalin Marinas <catalin.marinas@arm.com>
Thu, 26 Oct 2023 16:10:18 +0000 (17:10 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Thu, 26 Oct 2023 16:10:18 +0000 (17:10 +0100)
* for-next/cpus_have_const_cap: (38 commits)
  : cpus_have_const_cap() removal
  arm64: Remove cpus_have_const_cap()
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_REPEAT_TLBI
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_NVIDIA_CARMEL_CNP
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_CAVIUM_23154
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_2645198
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1742098
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1542419
  arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_843419
  arm64: Avoid cpus_have_const_cap() for ARM64_UNMAP_KERNEL_AT_EL0
  arm64: Avoid cpus_have_const_cap() for ARM64_{SVE,SME,SME2,FA64}
  arm64: Avoid cpus_have_const_cap() for ARM64_SPECTRE_V2
  arm64: Avoid cpus_have_const_cap() for ARM64_SSBS
  arm64: Avoid cpus_have_const_cap() for ARM64_MTE
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_TLB_RANGE
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_WFXT
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_RNG
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_EPAN
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_PAN
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_GIC_PRIO_MASKING
  arm64: Avoid cpus_have_const_cap() for ARM64_HAS_DIT
  ...
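
Background for the series above: cpus_have_const_cap() tested a cpucap via a
static key, and could be called before system capabilities were finalized,
silently returning a stale answer. The series converts each caller to a
helper whose timing is explicit. A minimal sketch of the pattern, simplified
from <asm/cpufeature.h>; ARM64_HAS_WFXT is just an example cap and do_thing()
a placeholder for the guarded code:

    /* Old: static-key test, silently usable before caps were finalized. */
    if (cpus_have_const_cap(ARM64_HAS_WFXT))
            do_thing();

    /* New: pick the helper that matches when the check may run. */
    if (cpus_have_cap(ARM64_HAS_WFXT))            /* early boot: bitmap test */
            do_thing();

    if (cpus_have_final_cap(ARM64_HAS_WFXT))      /* post-finalization: patched branch */
            do_thing();

    if (alternative_has_cap_unlikely(ARM64_HAS_WFXT))  /* pure alternative, default false */
            do_thing();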

arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/module-plts.c
arch/arm64/kernel/smp.c
arch/arm64/kvm/guest.c
drivers/clocksource/arm_arch_timer.c
drivers/irqchip/irq-gic-v3.c

Simple merge
@@@ -123,11 -149,13 +149,12 @@@ extern void sme_save_state(void *state
  extern void sme_load_state(void const *state, int zt);
  
  struct arm64_cpu_capabilities;
- extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
- extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
- extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
- extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+ extern void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__unused);
+ extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
+ extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
+ extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
+ extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
  
 -extern u64 read_zcr_features(void);
  extern u64 read_smcr_features(void);
  
  /*
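
The renamed cpu_enable_*() prototypes above follow the convention for cpucap
->cpu_enable callbacks. As a hedged sketch (fields abbreviated; this entry is
not part of the hunk), the SVE entry in the cpufeature.c capability table
plausibly wires the callback like so:

    static const struct arm64_cpu_capabilities arm64_features[] = {
            {
                    .desc = "Scalable Vector Extension",
                    .capability = ARM64_SVE,
                    .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                    .matches = has_cpuid_feature,
                    .cpu_enable = cpu_enable_sve,
                    /* ID register match fields omitted */
            },
            /* ... */
    };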
@@@ -1026,21 -1040,30 +1026,26 @@@ void __init init_cpu_features(struct cp
  
        if (IS_ENABLED(CONFIG_ARM64_SVE) &&
            id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
-               sve_kernel_enable(NULL);
+               unsigned long cpacr = cpacr_save_enable_kernel_sve();
 -              info->reg_zcr = read_zcr_features();
 -              init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
                vec_init_vq_map(ARM64_VEC_SVE);
+               cpacr_restore(cpacr);
        }
  
        if (IS_ENABLED(CONFIG_ARM64_SME) &&
            id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
-               sme_kernel_enable(NULL);
+               unsigned long cpacr = cpacr_save_enable_kernel_sme();
  
 -              info->reg_smcr = read_smcr_features();
                /*
                 * We mask out SMPS since even if the hardware
                 * supports priorities the kernel does not at present
                 * and we block access to them.
                 */
                info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
 -              init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr);
                vec_init_vq_map(ARM64_VEC_SME);
+               cpacr_restore(cpacr);
        }
  
        if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
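
The cpacr_save_enable_kernel_sve()/cpacr_restore() pairing replaces the old
*_kernel_enable(NULL) calls: EL1 access to the vector registers is enabled
only around the probe, then the previous trap configuration is put back. A
sketch of the helpers as added earlier in this series (the SME variant sets
CPACR_EL1_SMEN_EL1EN analogously; exact field names assumed):

    static inline unsigned long cpacr_save_enable_kernel_sve(void)
    {
            unsigned long old = read_sysreg(cpacr_el1);
            unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_ZEN_EL1EN;

            write_sysreg(old | set, cpacr_el1);
            isb();
            return old;
    }

    static inline void cpacr_restore(unsigned long cpacr)
    {
            write_sysreg(cpacr, cpacr_el1);
            isb();
    }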
@@@ -1274,19 -1297,26 +1279,22 @@@ void update_cpu_features(int cpu
        taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
                                      info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);
  
 +      /* Probe vector lengths */
        if (IS_ENABLED(CONFIG_ARM64_SVE) &&
            id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
 -              unsigned long cpacr = cpacr_save_enable_kernel_sve();
 +              if (!system_capabilities_finalized()) {
-                       sve_kernel_enable(NULL);
++                      unsigned long cpacr = cpacr_save_enable_kernel_sve();
 -              info->reg_zcr = read_zcr_features();
 -              taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
 -                                      info->reg_zcr, boot->reg_zcr);
 -
 -              /* Probe vector lengths */
 -              if (!system_capabilities_finalized())
                        vec_update_vq_map(ARM64_VEC_SVE);
 -              cpacr_restore(cpacr);
++                      cpacr_restore(cpacr);
 +              }
        }
  
        if (IS_ENABLED(CONFIG_ARM64_SME) &&
            id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
-               sme_kernel_enable(NULL);
+               unsigned long cpacr = cpacr_save_enable_kernel_sme();
  
 -              info->reg_smcr = read_smcr_features();
                /*
                 * We mask out SMPS since even if the hardware
                 * supports priorities the kernel does not at present
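
The conflict markers in the SVE block above are dense; read net, the resolved
code probes vector lengths only until capabilities are finalized:

    /* Probe vector lengths */
    if (IS_ENABLED(CONFIG_ARM64_SVE) &&
        id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
            if (!system_capabilities_finalized()) {
                    unsigned long cpacr = cpacr_save_enable_kernel_sve();

                    vec_update_vq_map(ARM64_VEC_SVE);
                    cpacr_restore(cpacr);
            }
    }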
@@@ -3115,7 -3182,15 +3138,9 @@@ static void verify_local_elf_hwcaps(voi
  
  static void verify_sve_features(void)
  {
 -      u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
 -      u64 zcr = read_zcr_features();
 -
 -      unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
 -      unsigned int len = zcr & ZCR_ELx_LEN_MASK;
 -
 -      if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SVE)) {
+       unsigned long cpacr = cpacr_save_enable_kernel_sve();
 +      if (vec_verify_vq_map(ARM64_VEC_SVE)) {
                pr_crit("CPU%d: SVE: vector length support mismatch\n",
                        smp_processor_id());
                cpu_die_early();
  
  static void verify_sme_features(void)
  {
 -      u64 safe_smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
 -      u64 smcr = read_smcr_features();
 -
 -      unsigned int safe_len = safe_smcr & SMCR_ELx_LEN_MASK;
 -      unsigned int len = smcr & SMCR_ELx_LEN_MASK;
 -
 -      if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SME)) {
+       unsigned long cpacr = cpacr_save_enable_kernel_sme();
 +      if (vec_verify_vq_map(ARM64_VEC_SME)) {
                pr_crit("CPU%d: SME: vector length support mismatch\n",
                        smp_processor_id());
                cpu_die_early();
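
The excerpts above are truncated before the trap state is put back; in the
full functions the saved CPACR value is restored once the check completes.
The complete shape, with the restore call assumed from the same helper pair:

    static void verify_sve_features(void)
    {
            unsigned long cpacr = cpacr_save_enable_kernel_sve();

            if (vec_verify_vq_map(ARM64_VEC_SVE)) {
                    pr_crit("CPU%d: SVE: vector length support mismatch\n",
                            smp_processor_id());
                    cpu_die_early();
            }

            cpacr_restore(cpacr);
    }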
@@@ -3274,33 -3360,40 +3304,50 @@@ unsigned long cpu_get_elf_hwcap2(void
        return elf_hwcap[1];
  }
  
- static void __init setup_system_capabilities(void)
+ void __init setup_system_features(void)
  {
 +      int i;
        /*
-        * We have finalised the system-wide safe feature
-        * registers, finalise the capabilities that depend
-        * on it. Also enable all the available capabilities,
-        * that are not enabled already.
+        * The system-wide safe feature register values have been
+        * finalized. Finalize and log the available system capabilities.
         */
        update_cpu_capabilities(SCOPE_SYSTEM);
+       if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+           !cpus_have_cap(ARM64_HAS_PAN))
+               pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+       /*
+        * Enable all the available capabilities which have not been enabled
+        * already.
+        */
        enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
  
+       kpti_install_ng_mappings();
+       sve_setup();
+       sme_setup();
+       /*
+        * Check for sane CTR_EL0.CWG value.
+        */
+       if (!cache_type_cwg())
+               pr_warn("No Cache Writeback Granule information, assuming %d\n",
+                       ARCH_DMA_MINALIGN);
++
 +      for (i = 0; i < ARM64_NCAPS; i++) {
 +              const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
 +
 +              if (caps && caps->cpus && caps->desc &&
 +                      cpumask_any(caps->cpus) < nr_cpu_ids)
 +                      pr_info("detected: %s on CPU%*pbl\n",
 +                              caps->desc, cpumask_pr_args(caps->cpus));
 +      }
  }
  
- void __init setup_cpu_features(void)
+ void __init setup_user_features(void)
  {
-       u32 cwg;
+       user_feature_fixup();
  
-       setup_system_capabilities();
        setup_elf_hwcaps(arm64_elf_hwcaps);
  
        if (system_supports_32bit_el0()) {
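
This hunk splits the old setup_cpu_features() into a system half and a user
half. The caller lives in smp.c (shown only as a "Simple merge" below) and is
expected to bracket alternative patching roughly as follows; the exact call
sites are assumed rather than visible in this diff:

    void __init smp_cpus_done(unsigned int max_cpus)
    {
            pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
            setup_system_features();        /* finalize and enable system caps */
            hyp_mode_check();
            apply_alternatives_all();       /* patch alternative branches */
            setup_user_features();          /* ELF hwcaps and compat checks */
            mark_linear_text_alias_ro();
    }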
@@@ -1173,11 -1169,30 +1169,11 @@@ void cpu_enable_sve(const struct arm64_
  void __init sve_setup(void)
  {
        struct vl_info *info = &vl_info[ARM64_VEC_SVE];
 -      u64 zcr;
        DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
        unsigned long b;
 +      int max_bit;
  
-       if (!system_supports_sve())
+       if (!cpus_have_cap(ARM64_SVE))
                return;
  
        /*
@@@ -1307,9 -1329,29 +1301,9 @@@ void cpu_enable_fa64(const struct arm64
  void __init sme_setup(void)
  {
        struct vl_info *info = &vl_info[ARM64_VEC_SME];
 -      u64 smcr;
 -      int min_bit;
 +      int min_bit, max_bit;
  
-       if (!system_supports_sme())
+       if (!cpus_have_cap(ARM64_SME))
                return;
  
        /*
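
sve_setup() and sme_setup() now run from setup_system_features(), before
alternatives are applied, so they test the cpucap bitmap directly via
cpus_have_cap(); the alternative-backed system_supports_sve() would still
report its compile-time default at that point. For contrast, a simplified
sketch of the post-series helper:

    static __always_inline bool system_supports_sve(void)
    {
            return alternative_has_cap_unlikely(ARM64_SVE);
    }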
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge