Merge remote-tracking branch 'tip/sched/arm64' into for-next/core
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 5464d57..2bd270c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -21,6 +21,7 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
+#include <linux/sched.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -472,15 +473,6 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
        write_sysreg(val, cntkctl_el1);
 }
 
-static void compat_thread_switch(struct task_struct *next)
-{
-       if (!is_compat_thread(task_thread_info(next)))
-               return;
-
-       if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
-               set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
-}
-
 /*
  * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
  * this function must be called with preemption disabled and the update to
@@ -515,7 +507,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
        ssbs_thread_switch(next);
        erratum_1418040_thread_switch(prev, next);
        ptrauth_thread_switch_user(next);
-       compat_thread_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
@@ -576,6 +567,28 @@ unsigned long arch_align_stack(unsigned long sp)
        return sp & ~0xf;
 }
 
+#ifdef CONFIG_COMPAT
+int compat_elf_check_arch(const struct elf32_hdr *hdr)
+{
+       if (!system_supports_32bit_el0())
+               return false;
+
+       if ((hdr)->e_machine != EM_ARM)
+               return false;
+
+       if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
+               return false;
+
+       /*
+        * Prevent execve() of a 32-bit program from a deadline task
+        * if the restricted affinity mask would be inadmissible on an
+        * asymmetric system.
+        */
+       return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+              !dl_task_check_affinity(current, system_32bit_el0_cpumask());
+}
+#endif
+
 /*
  * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
  */
@@ -585,8 +598,20 @@ void arch_setup_new_exec(void)
 
        if (is_compat_task()) {
                mmflags = MMCF_AARCH32;
+
+               /*
+                * Restrict the CPU affinity mask for a 32-bit task so that
+                * it contains only 32-bit-capable CPUs.
+                *
+                * From the perspective of the task, this looks similar to
+                * what would happen if the 64-bit-only CPUs were hot-unplugged
+                * at the point of execve(), although we try a bit harder to
+                * honour the cpuset hierarchy.
+                */
                if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
-                       set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+                       force_compatible_cpus_allowed_ptr(current);
+       } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
+               relax_compatible_cpus_allowed_ptr(current);
        }
 
        current->mm->context.flags = mmflags;
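
Note on the arch_setup_new_exec() hunk above: the restricted affinity is visible from userspace after the execve(). What follows is a minimal sketch, not part of the patch, under these assumptions: the program is built as a 32-bit EL0 binary (e.g. with an arm-linux-gnueabihf toolchain) and run on an asymmetric arm64 system carrying this change, so that by the time main() runs, force_compatible_cpus_allowed_ptr() has already trimmed the mask to the 32-bit-capable CPUs.

/*
 * Sketch only: print the CPUs remaining in the affinity mask after
 * execve() of a compat (32-bit) binary on a mismatched 32-bit EL0 system.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	if (sched_getaffinity(0, sizeof(set), &set)) {
		perror("sched_getaffinity");
		return 1;
	}

	printf("CPUs in affinity mask: %d\n", CPU_COUNT(&set));
	for (int cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &set))
			printf("  cpu%d\n", cpu);

	return 0;
}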