Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Dec 2019 22:18:01 +0000 (14:18 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Dec 2019 22:18:01 +0000 (14:18 -0800)
Pull arm64 fixes from Catalin Marinas:

 - ZONE_DMA32 initialisation fix when memblocks fall entirely within the
   first GB (used by ZONE_DMA in 5.5 for Raspberry Pi 4).

 - Couple of ftrace fixes following the FTRACE_WITH_REGS patchset.

 - access_ok() fix for the Tagged Address ABI when called from a
   kernel thread (asynchronous I/O): the kthread does not have the TIF
   flags of the mm owner, so untag the user address unconditionally.

 - KVM compute_layout() called before the alternatives code patching.

 - Minor clean-ups.

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: entry: refine comment of stack overflow check
  arm64: ftrace: fix ifdeffery
  arm64: KVM: Invoke compute_layout() before alternatives are applied
  arm64: Validate tagged addresses in access_ok() called from kernel threads
  arm64: mm: Fix column alignment for UXN in kernel_page_tables
  arm64: insn: consistently handle exit text
  arm64: mm: Fix initialisation of DMA zones on non-NUMA systems

1  2 
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/smp.c
arch/arm64/kernel/vmlinux.lds.S

@@@ -62,8 -62,13 +62,13 @@@ static inline unsigned long __range_ok(
  {
        unsigned long ret, limit = current_thread_info()->addr_limit;
  
+       /*
+        * Asynchronous I/O running in a kernel thread does not have the
+        * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
+        * the user address before checking.
+        */
        if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
-           test_thread_flag(TIF_TAGGED_ADDR))
+           (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
                addr = untagged_addr(addr);
  
        __chk_user_ptr(addr);
@@@ -378,34 -383,20 +383,34 @@@ do {                                                                    
  extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
  #define raw_copy_from_user(to, from, n)                                       \
  ({                                                                    \
 -      __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));     \
 +      unsigned long __acfu_ret;                                       \
 +      uaccess_enable_not_uao();                                       \
 +      __acfu_ret = __arch_copy_from_user((to),                        \
 +                                    __uaccess_mask_ptr(from), (n));   \
 +      uaccess_disable_not_uao();                                      \
 +      __acfu_ret;                                                     \
  })
  
  extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
  #define raw_copy_to_user(to, from, n)                                 \
  ({                                                                    \
 -      __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));       \
 +      unsigned long __actu_ret;                                       \
 +      uaccess_enable_not_uao();                                       \
 +      __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),        \
 +                                  (from), (n));                       \
 +      uaccess_disable_not_uao();                                      \
 +      __actu_ret;                                                     \
  })
  
  extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
  #define raw_copy_in_user(to, from, n)                                 \
  ({                                                                    \
 -      __arch_copy_in_user(__uaccess_mask_ptr(to),                     \
 -                          __uaccess_mask_ptr(from), (n));             \
 +      unsigned long __aciu_ret;                                       \
 +      uaccess_enable_not_uao();                                       \
 +      __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),        \
 +                                  __uaccess_mask_ptr(from), (n));     \
 +      uaccess_disable_not_uao();                                      \
 +      __aciu_ret;                                                     \
  })
  
  #define INLINE_COPY_TO_USER
  extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
  static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
  {
 -      if (access_ok(to, n))
 +      if (access_ok(to, n)) {
 +              uaccess_enable_not_uao();
                n = __arch_clear_user(__uaccess_mask_ptr(to), n);
 +              uaccess_disable_not_uao();
 +      }
        return n;
  }
  #define clear_user    __clear_user
diff --combined arch/arm64/kernel/smp.c
@@@ -31,6 -31,7 +31,7 @@@
  #include <linux/of.h>
  #include <linux/irq_work.h>
  #include <linux/kexec.h>
+ #include <linux/kvm_host.h>
  
  #include <asm/alternative.h>
  #include <asm/atomic.h>
@@@ -39,6 -40,7 +40,7 @@@
  #include <asm/cputype.h>
  #include <asm/cpu_ops.h>
  #include <asm/daifflags.h>
+ #include <asm/kvm_mmu.h>
  #include <asm/mmu_context.h>
  #include <asm/numa.h>
  #include <asm/pgtable.h>
@@@ -345,7 -347,8 +347,7 @@@ void __cpu_die(unsigned int cpu
         */
        err = op_cpu_kill(cpu);
        if (err)
 -              pr_warn("CPU%d may not have shut down cleanly: %d\n",
 -                      cpu, err);
 +              pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
  }
  
  /*
@@@ -407,6 -410,8 +409,8 @@@ static void __init hyp_mode_check(void
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
+       if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
+               kvm_compute_layout();
  }
  
  void __init smp_cpus_done(unsigned int max_cpus)
@@@ -975,8 -980,8 +979,8 @@@ void smp_send_stop(void
                udelay(1);
  
        if (num_online_cpus() > 1)
 -              pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
 -                         cpumask_pr_args(cpu_online_mask));
 +              pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
 +                      cpumask_pr_args(cpu_online_mask));
  
        sdei_mask_local_cpu();
  }
@@@ -1016,8 -1021,8 +1020,8 @@@ void crash_smp_send_stop(void
                udelay(1);
  
        if (atomic_read(&waiting_for_crash_ipi) > 0)
 -              pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
 -                         cpumask_pr_args(&mask));
 +              pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
 +                      cpumask_pr_args(&mask));
  
        sdei_mask_local_cpu();
  }
@@@ -5,8 -5,6 +5,8 @@@
   * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
   */
  
 +#define RO_EXCEPTION_TABLE_ALIGN      8
 +
  #include <asm-generic/vmlinux.lds.h>
  #include <asm/cache.h>
  #include <asm/kernel-pgtable.h>
@@@ -134,9 -132,11 +134,9 @@@ SECTION
        . = ALIGN(SEGMENT_ALIGN);
        _etext = .;                     /* End of text section */
  
 -      RO_DATA(PAGE_SIZE)              /* everything from this point to     */
 -      EXCEPTION_TABLE(8)              /* __init_begin will be marked RO NX */
 -      NOTES
 +      /* everything from this point to __init_begin will be marked RO NX */
 +      RO_DATA(PAGE_SIZE)
  
 -      . = ALIGN(PAGE_SIZE);
        idmap_pg_dir = .;
        . += IDMAP_DIR_SIZE;
  
        __inittext_begin = .;
  
        INIT_TEXT_SECTION(8)
+       __exittext_begin = .;
        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }
+       __exittext_end = .;
  
        . = ALIGN(4);
        .altinstructions : {
  
        _data = .;
        _sdata = .;
 -      RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
 +      RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
  
        /*
         * Data written with the MMU off but read with the MMU on requires