sched: Simplify tg_set_cfs_bandwidth()
author	Peter Zijlstra <peterz@infradead.org>
Fri, 9 Jun 2023 18:45:16 +0000 (20:45 +0200)
committer	Ingo Molnar <mingo@kernel.org>
Wed, 13 Sep 2023 13:01:42 +0000 (15:01 +0200)
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
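
As background on the construct: guard() comes from include/linux/cleanup.h
and arranges for the unlock to run automatically when the guard variable
goes out of scope, so error paths can return directly instead of jumping
to an unlock label. A minimal sketch of the shape this enables, with
hypothetical names (example_mutex and example_set_limit are illustrations,
not part of this patch):

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);	/* hypothetical lock */

	static int example_set_limit(int limit)
	{
		guard(mutex)(&example_mutex);	/* mutex_unlock() on every return */

		if (limit < 0)
			return -EINVAL;		/* no goto out_unlock needed */

		/* ... update state while holding example_mutex ... */
		return 0;
	}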
include/linux/cpu.h
kernel/sched/core.c

index 0abd60a..f19f565 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -153,6 +153,8 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
 static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
 #endif /* !CONFIG_HOTPLUG_CPU */
 
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
+
 #ifdef CONFIG_PM_SLEEP_SMP
 extern int freeze_secondary_cpus(int primary);
 extern void thaw_secondary_cpus(void);
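
DEFINE_LOCK_GUARD_0() generates a guard "class" for a lock that is taken
and released with no argument; the definition added above lets callers
hold the CPU-hotplug read lock for the rest of the current scope. A
sketch of the intended usage (example_walk_cpus is hypothetical):

	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static int example_walk_cpus(void)
	{
		int cpu;

		guard(cpus_read_lock)();	/* cpus_read_unlock() at scope exit */

		for_each_online_cpu(cpu) {
			/* ... per-CPU work that must exclude CPU hotplug ... */
		}

		return 0;
	}
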
index a3f4fb8..5d9f363 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10802,11 +10802,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
         * Prevent race between setting of cfs_rq->runtime_enabled and
         * unthrottle_offline_cfs_rqs().
         */
-       cpus_read_lock();
-       mutex_lock(&cfs_constraints_mutex);
+       guard(cpus_read_lock)();
+       guard(mutex)(&cfs_constraints_mutex);
+
        ret = __cfs_schedulable(tg, period, quota);
        if (ret)
-               goto out_unlock;
+               return ret;
 
        runtime_enabled = quota != RUNTIME_INF;
        runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
@@ -10816,39 +10817,38 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
         */
        if (runtime_enabled && !runtime_was_enabled)
                cfs_bandwidth_usage_inc();
-       raw_spin_lock_irq(&cfs_b->lock);
-       cfs_b->period = ns_to_ktime(period);
-       cfs_b->quota = quota;
-       cfs_b->burst = burst;
 
-       __refill_cfs_bandwidth_runtime(cfs_b);
+       scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
+               cfs_b->period = ns_to_ktime(period);
+               cfs_b->quota = quota;
+               cfs_b->burst = burst;
 
-       /* Restart the period timer (if active) to handle new period expiry: */
-       if (runtime_enabled)
-               start_cfs_bandwidth(cfs_b);
+               __refill_cfs_bandwidth_runtime(cfs_b);
 
-       raw_spin_unlock_irq(&cfs_b->lock);
+               /*
+                * Restart the period timer (if active) to handle new
+                * period expiry:
+                */
+               if (runtime_enabled)
+                       start_cfs_bandwidth(cfs_b);
+       }
 
        for_each_online_cpu(i) {
                struct cfs_rq *cfs_rq = tg->cfs_rq[i];
                struct rq *rq = cfs_rq->rq;
-               struct rq_flags rf;
 
-               rq_lock_irq(rq, &rf);
+               guard(rq_lock_irq)(rq);
                cfs_rq->runtime_enabled = runtime_enabled;
                cfs_rq->runtime_remaining = 0;
 
                if (cfs_rq->throttled)
                        unthrottle_cfs_rq(cfs_rq);
-               rq_unlock_irq(rq, &rf);
        }
+
        if (runtime_was_enabled && !runtime_enabled)
                cfs_bandwidth_usage_dec();
-out_unlock:
-       mutex_unlock(&cfs_constraints_mutex);
-       cpus_read_unlock();
 
-       return ret;
+       return 0;
 }
 
 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
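
A closing note on the two forms used in this patch: guard() holds its lock
until the enclosing function returns, which is what tg_set_cfs_bandwidth()
wants for cpus_read_lock and cfs_constraints_mutex, while scoped_guard()
confines the critical section to its braces, mirroring the
raw_spin_lock_irq()/raw_spin_unlock_irq() pair it replaces. A minimal
sketch of the scoped form (struct example_bw and example_update are
hypothetical stand-ins for cfs_bandwidth and the code above):

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct example_bw {
		raw_spinlock_t	lock;
		u64		period;
		u64		quota;
	};

	static void example_update(struct example_bw *b, u64 period, u64 quota)
	{
		scoped_guard (raw_spinlock_irq, &b->lock) {
			b->period = period;
			b->quota = quota;
		}	/* raw_spin_unlock_irq() runs here, not at function exit */

		/* this code runs with b->lock already dropped */
	}

The guard(rq_lock_irq)(rq) used in the per-CPU loop is the same mechanism;
its guard class is defined alongside the other rq-lock guards in
kernel/sched/sched.h via DEFINE_LOCK_GUARD_1().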