sched: Reject CPU affinity changes based on task_cpu_possible_mask()
author Will Deacon <will@kernel.org>
Fri, 30 Jul 2021 11:24:32 +0000 (12:24 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Fri, 20 Aug 2021 10:32:59 +0000 (12:32 +0200)
Reject explicit requests to change the affinity mask of a task via
set_cpus_allowed_ptr() if the requested mask is not a subset of the
mask returned by task_cpu_possible_mask(). This ensures that the
'cpus_mask' for a given task cannot contain CPUs which are incapable of
executing it, except in cases where the affinity is forced.
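The new check reduces to a subset test on the requested mask. Below is a
minimal userspace sketch of that rule (not kernel code): mask_subset(),
possible_mask and set_affinity() are hypothetical stand-ins for
cpumask_subset(), task_cpu_possible_mask() and __set_cpus_allowed_ptr(),
with cpumasks modelled as plain bitmasks.

/*
 * Sketch of the rejection rule: a user task's affinity request fails
 * with -EINVAL unless the requested mask is a subset of the CPUs the
 * task can actually execute on. Kernel threads are exempt, matching
 * the !kthread guard in the patch.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long cpumask_t;	/* one bit per CPU */

static bool mask_subset(cpumask_t sub, cpumask_t super)
{
	return (sub & ~super) == 0;
}

/*
 * Stand-in for task_cpu_possible_mask(p): e.g. on an asymmetric arm64
 * system, a 32-bit task may only be able to run on CPUs 0-3.
 */
static const cpumask_t possible_mask = 0x0f;

static int set_affinity(cpumask_t new_mask, bool kthread)
{
	/* Mirrors: if (!kthread && !cpumask_subset(new_mask, ...)) */
	if (!kthread && !mask_subset(new_mask, possible_mask))
		return -EINVAL;
	return 0;		/* migration logic elided */
}

int main(void)
{
	printf("user, CPUs 0-1:    %d\n", set_affinity(0x03, false)); /* 0 */
	printf("user, CPUs 4-5:    %d\n", set_affinity(0x30, false)); /* -22 */
	printf("kthread, CPUs 4-5: %d\n", set_affinity(0x30, true));  /* 0 */
	return 0;
}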

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <Valentin.Schneider@arm.com>
Reviewed-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20210730112443.23245-6-will@kernel.org
kernel/sched/core.c

index b9d4bae..8cec0d2 100644
@@ -2709,7 +2709,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                                  const struct cpumask *new_mask,
                                  u32 flags)
 {
+       const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
        const struct cpumask *cpu_valid_mask = cpu_active_mask;
+       bool kthread = p->flags & PF_KTHREAD;
        unsigned int dest_cpu;
        struct rq_flags rf;
        struct rq *rq;
@@ -2718,7 +2720,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
        rq = task_rq_lock(p, &rf);
        update_rq_clock(rq);
 
-       if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
+       if (kthread || is_migration_disabled(p)) {
                /*
                 * Kernel threads are allowed on online && !active CPUs,
                 * however, during cpu-hot-unplug, even these might get pushed
@@ -2732,6 +2734,11 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                cpu_valid_mask = cpu_online_mask;
        }
 
+       if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        /*
         * Must re-check here, to close a race against __kthread_bind(),
         * sched_setaffinity() is not guaranteed to observe the flag.
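
From userspace, the rejection surfaces as sched_setaffinity(2) failing
with EINVAL. A hedged illustration follows, assuming it runs as a task
whose task_cpu_possible_mask() excludes CPU 4 (e.g. a 32-bit task on an
asymmetric arm64 system); on an ordinary SMP machine the call simply
succeeds, and note EINVAL can also mean the mask contains no valid CPUs
at all.

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(4, &set);	/* assume CPU 4 cannot execute this task */

	/* With this patch, a mask outside task_cpu_possible_mask()
	 * is rejected outright rather than silently accepted. */
	if (sched_setaffinity(0, sizeof(set), &set) == -1)
		printf("sched_setaffinity: %s\n", strerror(errno));
	return 0;
}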