sched: Split the guts of sched_setaffinity() into a helper function
Author:     Will Deacon <will@kernel.org>
AuthorDate: Fri, 30 Jul 2021 11:24:34 +0000 (12:24 +0100)
Committer:  Peter Zijlstra <peterz@infradead.org>
CommitDate: Fri, 20 Aug 2021 10:33:00 +0000 (12:33 +0200)
In preparation for replaying user affinity requests using a saved mask,
split sched_setaffinity() up so that the initial task lookup and
security checks are only performed when the request is coming directly
from userspace.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <Valentin.Schneider@arm.com>
Link: https://lore.kernel.org/r/20210730112443.23245-8-will@kernel.org
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 360a3ec..672d0fc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7594,53 +7594,22 @@ out_unlock:
        return retval;
 }
 
-long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 {
-       cpumask_var_t cpus_allowed, new_mask;
-       struct task_struct *p;
        int retval;
+       cpumask_var_t cpus_allowed, new_mask;
 
-       rcu_read_lock();
-
-       p = find_process_by_pid(pid);
-       if (!p) {
-               rcu_read_unlock();
-               return -ESRCH;
-       }
-
-       /* Prevent p going away */
-       get_task_struct(p);
-       rcu_read_unlock();
+       if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
+               return -ENOMEM;
 
-       if (p->flags & PF_NO_SETAFFINITY) {
-               retval = -EINVAL;
-               goto out_put_task;
-       }
-       if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
-               retval = -ENOMEM;
-               goto out_put_task;
-       }
        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
                retval = -ENOMEM;
                goto out_free_cpus_allowed;
        }
-       retval = -EPERM;
-       if (!check_same_owner(p)) {
-               rcu_read_lock();
-               if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
-                       rcu_read_unlock();
-                       goto out_free_new_mask;
-               }
-               rcu_read_unlock();
-       }
-
-       retval = security_task_setscheduler(p);
-       if (retval)
-               goto out_free_new_mask;
-
 
        cpuset_cpus_allowed(p, cpus_allowed);
-       cpumask_and(new_mask, in_mask, cpus_allowed);
+       cpumask_and(new_mask, mask, cpus_allowed);
 
        /*
         * Since bandwidth control happens on root_domain basis,
@@ -7661,23 +7630,63 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 #endif
 again:
        retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
+       if (retval)
+               goto out_free_new_mask;
 
-       if (!retval) {
-               cpuset_cpus_allowed(p, cpus_allowed);
-               if (!cpumask_subset(new_mask, cpus_allowed)) {
-                       /*
-                        * We must have raced with a concurrent cpuset
-                        * update. Just reset the cpus_allowed to the
-                        * cpuset's cpus_allowed
-                        */
-                       cpumask_copy(new_mask, cpus_allowed);
-                       goto again;
-               }
+       cpuset_cpus_allowed(p, cpus_allowed);
+       if (!cpumask_subset(new_mask, cpus_allowed)) {
+               /*
+                * We must have raced with a concurrent cpuset update.
+                * Just reset the cpumask to the cpuset's cpus_allowed.
+                */
+               cpumask_copy(new_mask, cpus_allowed);
+               goto again;
        }
+
 out_free_new_mask:
        free_cpumask_var(new_mask);
 out_free_cpus_allowed:
        free_cpumask_var(cpus_allowed);
+       return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
+       struct task_struct *p;
+       int retval;
+
+       rcu_read_lock();
+
+       p = find_process_by_pid(pid);
+       if (!p) {
+               rcu_read_unlock();
+               return -ESRCH;
+       }
+
+       /* Prevent p going away */
+       get_task_struct(p);
+       rcu_read_unlock();
+
+       if (p->flags & PF_NO_SETAFFINITY) {
+               retval = -EINVAL;
+               goto out_put_task;
+       }
+
+       if (!check_same_owner(p)) {
+               rcu_read_lock();
+               if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
+                       rcu_read_unlock();
+                       retval = -EPERM;
+                       goto out_put_task;
+               }
+               rcu_read_unlock();
+       }
+
+       retval = security_task_setscheduler(p);
+       if (retval)
+               goto out_put_task;
+
+       retval = __sched_setaffinity(p, in_mask);
 out_put_task:
        put_task_struct(p);
        return retval;
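
With the split in place, the kernel-internal path described in the changelog can call __sched_setaffinity() directly, skipping the pid lookup and permission checks that only apply to requests arriving from userspace. A minimal sketch of such a caller follows; restore_saved_affinity() and the saved-mask argument are invented for illustration and are not part of this patch:

	/*
	 * Hypothetical in-kernel caller: replay an affinity request that
	 * was already validated when it originally arrived from userspace.
	 * find_process_by_pid(), check_same_owner() and
	 * security_task_setscheduler() are deliberately skipped here.
	 */
	static int restore_saved_affinity(struct task_struct *p,
					  const struct cpumask *saved_mask)
	{
		return __sched_setaffinity(p, saved_mask);
	}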