kernel/sched/rt.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4  * policies)
5  */
6 #include "sched.h"
7
8 #include "pelt.h"
9
10 int sched_rr_timeslice = RR_TIMESLICE;
11 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
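/*
 * Exposition: sched_rr_timeslice is kept in jiffies, while the sysctl is
 * exposed in milliseconds, hence the (MSEC_PER_SEC / HZ) conversion above.
 * With the usual RR_TIMESLICE default this amounts to roughly a 100ms
 * SCHED_RR quantum (the exact value depends on HZ).
 */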
12
13 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
14
15 struct rt_bandwidth def_rt_bandwidth;
16
17 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
18 {
19         struct rt_bandwidth *rt_b =
20                 container_of(timer, struct rt_bandwidth, rt_period_timer);
21         int idle = 0;
22         int overrun;
23
24         raw_spin_lock(&rt_b->rt_runtime_lock);
25         for (;;) {
26                 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
27                 if (!overrun)
28                         break;
29
30                 raw_spin_unlock(&rt_b->rt_runtime_lock);
31                 idle = do_sched_rt_period_timer(rt_b, overrun);
32                 raw_spin_lock(&rt_b->rt_runtime_lock);
33         }
34         if (idle)
35                 rt_b->rt_period_active = 0;
36         raw_spin_unlock(&rt_b->rt_runtime_lock);
37
38         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
39 }
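/*
 * Exposition: the timer above implements RT bandwidth replenishment. Every
 * rt_period it forwards itself and calls do_sched_rt_period_timer() to
 * refill runtime and unthrottle runqueues, stopping only once all rt_rqs
 * are idle. With the common defaults (sched_rt_period_us = 1000000,
 * sched_rt_runtime_us = 950000) RT tasks may consume at most 0.95s of CPU
 * time per 1s period before being throttled, leaving 5% for everything else.
 */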
40
41 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
42 {
43         rt_b->rt_period = ns_to_ktime(period);
44         rt_b->rt_runtime = runtime;
45
46         raw_spin_lock_init(&rt_b->rt_runtime_lock);
47
48         hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
49                      HRTIMER_MODE_REL_HARD);
50         rt_b->rt_period_timer.function = sched_rt_period_timer;
51 }
52
53 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
54 {
55         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
56                 return;
57
58         raw_spin_lock(&rt_b->rt_runtime_lock);
59         if (!rt_b->rt_period_active) {
60                 rt_b->rt_period_active = 1;
61                 /*
62                  * SCHED_DEADLINE updates the bandwidth, as a runaway
63                  * RT task with a DL task could hog a CPU. But DL does
64                  * not reset the period. If a deadline task was running
65                  * without an RT task running, it can cause RT tasks to
66                  * throttle when they start up. Kick the timer right away
67                  * to update the period.
68                  */
69                 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
70                 hrtimer_start_expires(&rt_b->rt_period_timer,
71                                       HRTIMER_MODE_ABS_PINNED_HARD);
72         }
73         raw_spin_unlock(&rt_b->rt_runtime_lock);
74 }
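/*
 * Exposition: def_rt_bandwidth is initialized during sched_init() in
 * kernel/sched/core.c from the global sysctls, roughly:
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(), global_rt_runtime());
 *
 * so without CONFIG_RT_GROUP_SCHED every rt_rq draws from this one pool.
 */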
75
76 void init_rt_rq(struct rt_rq *rt_rq)
77 {
78         struct rt_prio_array *array;
79         int i;
80
81         array = &rt_rq->active;
82         for (i = 0; i < MAX_RT_PRIO; i++) {
83                 INIT_LIST_HEAD(array->queue + i);
84                 __clear_bit(i, array->bitmap);
85         }
86         /* delimiter for bitsearch: */
87         __set_bit(MAX_RT_PRIO, array->bitmap);
88
89 #if defined CONFIG_SMP
90         rt_rq->highest_prio.curr = MAX_RT_PRIO;
91         rt_rq->highest_prio.next = MAX_RT_PRIO;
92         rt_rq->rt_nr_migratory = 0;
93         rt_rq->overloaded = 0;
94         plist_head_init(&rt_rq->pushable_tasks);
95 #endif /* CONFIG_SMP */
96         /* We start in dequeued state, because no RT tasks are queued */
97         rt_rq->rt_queued = 0;
98
99         rt_rq->rt_time = 0;
100         rt_rq->rt_throttled = 0;
101         rt_rq->rt_runtime = 0;
102         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
103 }
104
105 #ifdef CONFIG_RT_GROUP_SCHED
106 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
107 {
108         hrtimer_cancel(&rt_b->rt_period_timer);
109 }
110
111 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
112
113 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
114 {
115 #ifdef CONFIG_SCHED_DEBUG
116         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
117 #endif
118         return container_of(rt_se, struct task_struct, rt);
119 }
120
121 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
122 {
123         return rt_rq->rq;
124 }
125
126 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
127 {
128         return rt_se->rt_rq;
129 }
130
131 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
132 {
133         struct rt_rq *rt_rq = rt_se->rt_rq;
134
135         return rt_rq->rq;
136 }
137
138 void free_rt_sched_group(struct task_group *tg)
139 {
140         int i;
141
142         if (tg->rt_se)
143                 destroy_rt_bandwidth(&tg->rt_bandwidth);
144
145         for_each_possible_cpu(i) {
146                 if (tg->rt_rq)
147                         kfree(tg->rt_rq[i]);
148                 if (tg->rt_se)
149                         kfree(tg->rt_se[i]);
150         }
151
152         kfree(tg->rt_rq);
153         kfree(tg->rt_se);
154 }
155
156 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
157                 struct sched_rt_entity *rt_se, int cpu,
158                 struct sched_rt_entity *parent)
159 {
160         struct rq *rq = cpu_rq(cpu);
161
162         rt_rq->highest_prio.curr = MAX_RT_PRIO;
163         rt_rq->rt_nr_boosted = 0;
164         rt_rq->rq = rq;
165         rt_rq->tg = tg;
166
167         tg->rt_rq[cpu] = rt_rq;
168         tg->rt_se[cpu] = rt_se;
169
170         if (!rt_se)
171                 return;
172
173         if (!parent)
174                 rt_se->rt_rq = &rq->rt;
175         else
176                 rt_se->rt_rq = parent->my_q;
177
178         rt_se->my_q = rt_rq;
179         rt_se->parent = parent;
180         INIT_LIST_HEAD(&rt_se->run_list);
181 }
182
183 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
184 {
185         struct rt_rq *rt_rq;
186         struct sched_rt_entity *rt_se;
187         int i;
188
189         tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
190         if (!tg->rt_rq)
191                 goto err;
192         tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
193         if (!tg->rt_se)
194                 goto err;
195
196         init_rt_bandwidth(&tg->rt_bandwidth,
197                         ktime_to_ns(def_rt_bandwidth.rt_period), 0);
198
199         for_each_possible_cpu(i) {
200                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
201                                      GFP_KERNEL, cpu_to_node(i));
202                 if (!rt_rq)
203                         goto err;
204
205                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
206                                      GFP_KERNEL, cpu_to_node(i));
207                 if (!rt_se)
208                         goto err_free_rq;
209
210                 init_rt_rq(rt_rq);
211                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
212                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
213         }
214
215         return 1;
216
217 err_free_rq:
218         kfree(rt_rq);
219 err:
220         return 0;
221 }
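/*
 * Exposition: with CONFIG_RT_GROUP_SCHED the per-group bandwidth allocated
 * above is tuned from userspace through the cgroup v1 cpu controller, e.g.
 * (illustrative only):
 *
 *	# mkdir /sys/fs/cgroup/cpu/rtgroup
 *	# echo 1000000 > /sys/fs/cgroup/cpu/rtgroup/cpu.rt_period_us
 *	# echo  300000 > /sys/fs/cgroup/cpu/rtgroup/cpu.rt_runtime_us
 *
 * New groups start with rt_runtime == 0 (see the init_rt_bandwidth() call
 * above), so RT tasks cannot be moved into them until runtime is granted.
 */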
222
223 #else /* CONFIG_RT_GROUP_SCHED */
224
225 #define rt_entity_is_task(rt_se) (1)
226
227 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
228 {
229         return container_of(rt_se, struct task_struct, rt);
230 }
231
232 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
233 {
234         return container_of(rt_rq, struct rq, rt);
235 }
236
237 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
238 {
239         struct task_struct *p = rt_task_of(rt_se);
240
241         return task_rq(p);
242 }
243
244 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
245 {
246         struct rq *rq = rq_of_rt_se(rt_se);
247
248         return &rq->rt;
249 }
250
251 void free_rt_sched_group(struct task_group *tg) { }
252
253 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
254 {
255         return 1;
256 }
257 #endif /* CONFIG_RT_GROUP_SCHED */
258
259 #ifdef CONFIG_SMP
260
261 static void pull_rt_task(struct rq *this_rq);
262
263 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
264 {
265         /* Try to pull RT tasks here if we lower this rq's prio */
266         return rq->rt.highest_prio.curr > prev->prio;
267 }
268
269 static inline int rt_overloaded(struct rq *rq)
270 {
271         return atomic_read(&rq->rd->rto_count);
272 }
273
274 static inline void rt_set_overload(struct rq *rq)
275 {
276         if (!rq->online)
277                 return;
278
279         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
280         /*
281          * Make sure the mask is visible before we set
282          * the overload count. That is checked to determine
283          * if we should look at the mask. It would be a shame
284          * if we looked at the mask, but the mask was not
285          * updated yet.
286          *
287          * Matched by the barrier in pull_rt_task().
288          */
289         smp_wmb();
290         atomic_inc(&rq->rd->rto_count);
291 }
292
293 static inline void rt_clear_overload(struct rq *rq)
294 {
295         if (!rq->online)
296                 return;
297
298         /* the order here really doesn't matter */
299         atomic_dec(&rq->rd->rto_count);
300         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
301 }
302
303 static void update_rt_migration(struct rt_rq *rt_rq)
304 {
305         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
306                 if (!rt_rq->overloaded) {
307                         rt_set_overload(rq_of_rt_rq(rt_rq));
308                         rt_rq->overloaded = 1;
309                 }
310         } else if (rt_rq->overloaded) {
311                 rt_clear_overload(rq_of_rt_rq(rt_rq));
312                 rt_rq->overloaded = 0;
313         }
314 }
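/*
 * Exposition: a runqueue counts as "RT overloaded" when it has more than one
 * RT task queued and at least one of them may migrate (nr_cpus_allowed > 1).
 * Overloaded runqueues advertise themselves in rd->rto_mask so other CPUs
 * can pull the surplus work in pull_rt_task().
 */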
315
316 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
317 {
318         struct task_struct *p;
319
320         if (!rt_entity_is_task(rt_se))
321                 return;
322
323         p = rt_task_of(rt_se);
324         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
325
326         rt_rq->rt_nr_total++;
327         if (p->nr_cpus_allowed > 1)
328                 rt_rq->rt_nr_migratory++;
329
330         update_rt_migration(rt_rq);
331 }
332
333 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
334 {
335         struct task_struct *p;
336
337         if (!rt_entity_is_task(rt_se))
338                 return;
339
340         p = rt_task_of(rt_se);
341         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
342
343         rt_rq->rt_nr_total--;
344         if (p->nr_cpus_allowed > 1)
345                 rt_rq->rt_nr_migratory--;
346
347         update_rt_migration(rt_rq);
348 }
349
350 static inline int has_pushable_tasks(struct rq *rq)
351 {
352         return !plist_head_empty(&rq->rt.pushable_tasks);
353 }
354
355 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
356 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
357
358 static void push_rt_tasks(struct rq *);
359 static void pull_rt_task(struct rq *);
360
361 static inline void rt_queue_push_tasks(struct rq *rq)
362 {
363         if (!has_pushable_tasks(rq))
364                 return;
365
366         queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
367 }
368
369 static inline void rt_queue_pull_task(struct rq *rq)
370 {
371         queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
372 }
373
374 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
375 {
376         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
377         plist_node_init(&p->pushable_tasks, p->prio);
378         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
379
380         /* Update the highest prio pushable task */
381         if (p->prio < rq->rt.highest_prio.next)
382                 rq->rt.highest_prio.next = p->prio;
383 }
384
385 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
386 {
387         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
388
389         /* Update the new highest prio pushable task */
390         if (has_pushable_tasks(rq)) {
391                 p = plist_first_entry(&rq->rt.pushable_tasks,
392                                       struct task_struct, pushable_tasks);
393                 rq->rt.highest_prio.next = p->prio;
394         } else
395                 rq->rt.highest_prio.next = MAX_RT_PRIO;
396 }
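/*
 * Exposition: pushable_tasks is a plist kept sorted by priority, so the
 * first entry is always the best push candidate; highest_prio.next caches
 * its priority so push/pull decisions can be made without walking the list.
 */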
397
398 #else
399
400 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
401 {
402 }
403
404 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
405 {
406 }
407
408 static inline
409 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
410 {
411 }
412
413 static inline
414 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
415 {
416 }
417
418 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
419 {
420         return false;
421 }
422
423 static inline void pull_rt_task(struct rq *this_rq)
424 {
425 }
426
427 static inline void rt_queue_push_tasks(struct rq *rq)
428 {
429 }
430 #endif /* CONFIG_SMP */
431
432 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
433 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
434
435 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
436 {
437         return rt_se->on_rq;
438 }
439
440 #ifdef CONFIG_UCLAMP_TASK
441 /*
442  * Verify the fitness of task @p to run on @cpu taking into account the uclamp
443  * settings.
444  *
445  * This check is only important for heterogeneous systems where the uclamp_min
446  * value is higher than the capacity of a @cpu. For non-heterogeneous systems this
447  * function will always return true.
448  *
449  * The function will return true if the capacity of the @cpu is >= the
450  * uclamp_min and false otherwise.
451  *
452  * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
453  * > uclamp_max.
454  */
455 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
456 {
457         unsigned int min_cap;
458         unsigned int max_cap;
459         unsigned int cpu_cap;
460
461         /* Only heterogeneous systems can benefit from this check */
462         if (!static_branch_unlikely(&sched_asym_cpucapacity))
463                 return true;
464
465         min_cap = uclamp_eff_value(p, UCLAMP_MIN);
466         max_cap = uclamp_eff_value(p, UCLAMP_MAX);
467
468         cpu_cap = capacity_orig_of(cpu);
469
470         return cpu_cap >= min(min_cap, max_cap);
471 }
472 #else
473 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
474 {
475         return true;
476 }
477 #endif
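/*
 * Exposition: on an asymmetric system such as big.LITTLE, a task whose
 * effective uclamp_min is, say, 512 only "fits" CPUs whose original
 * capacity is at least 512, so placement steers it away from little cores.
 * (The value is illustrative; on symmetric systems the check is a no-op.)
 */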
478
479 #ifdef CONFIG_RT_GROUP_SCHED
480
481 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
482 {
483         if (!rt_rq->tg)
484                 return RUNTIME_INF;
485
486         return rt_rq->rt_runtime;
487 }
488
489 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
490 {
491         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
492 }
493
494 typedef struct task_group *rt_rq_iter_t;
495
496 static inline struct task_group *next_task_group(struct task_group *tg)
497 {
498         do {
499                 tg = list_entry_rcu(tg->list.next,
500                         typeof(struct task_group), list);
501         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
502
503         if (&tg->list == &task_groups)
504                 tg = NULL;
505
506         return tg;
507 }
508
509 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
510         for (iter = container_of(&task_groups, typeof(*iter), list);    \
511                 (iter = next_task_group(iter)) &&                       \
512                 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
513
514 #define for_each_sched_rt_entity(rt_se) \
515         for (; rt_se; rt_se = rt_se->parent)
516
517 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
518 {
519         return rt_se->my_q;
520 }
521
522 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
523 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
524
525 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
526 {
527         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
528         struct rq *rq = rq_of_rt_rq(rt_rq);
529         struct sched_rt_entity *rt_se;
530
531         int cpu = cpu_of(rq);
532
533         rt_se = rt_rq->tg->rt_se[cpu];
534
535         if (rt_rq->rt_nr_running) {
536                 if (!rt_se)
537                         enqueue_top_rt_rq(rt_rq);
538                 else if (!on_rt_rq(rt_se))
539                         enqueue_rt_entity(rt_se, 0);
540
541                 if (rt_rq->highest_prio.curr < curr->prio)
542                         resched_curr(rq);
543         }
544 }
545
546 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
547 {
548         struct sched_rt_entity *rt_se;
549         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
550
551         rt_se = rt_rq->tg->rt_se[cpu];
552
553         if (!rt_se) {
554                 dequeue_top_rt_rq(rt_rq);
555                 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
556                 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
557         }
558         else if (on_rt_rq(rt_se))
559                 dequeue_rt_entity(rt_se, 0);
560 }
561
562 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
563 {
564         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
565 }
566
567 static int rt_se_boosted(struct sched_rt_entity *rt_se)
568 {
569         struct rt_rq *rt_rq = group_rt_rq(rt_se);
570         struct task_struct *p;
571
572         if (rt_rq)
573                 return !!rt_rq->rt_nr_boosted;
574
575         p = rt_task_of(rt_se);
576         return p->prio != p->normal_prio;
577 }
578
579 #ifdef CONFIG_SMP
580 static inline const struct cpumask *sched_rt_period_mask(void)
581 {
582         return this_rq()->rd->span;
583 }
584 #else
585 static inline const struct cpumask *sched_rt_period_mask(void)
586 {
587         return cpu_online_mask;
588 }
589 #endif
590
591 static inline
592 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
593 {
594         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
595 }
596
597 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
598 {
599         return &rt_rq->tg->rt_bandwidth;
600 }
601
602 #else /* !CONFIG_RT_GROUP_SCHED */
603
604 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
605 {
606         return rt_rq->rt_runtime;
607 }
608
609 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
610 {
611         return ktime_to_ns(def_rt_bandwidth.rt_period);
612 }
613
614 typedef struct rt_rq *rt_rq_iter_t;
615
616 #define for_each_rt_rq(rt_rq, iter, rq) \
617         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
618
619 #define for_each_sched_rt_entity(rt_se) \
620         for (; rt_se; rt_se = NULL)
621
622 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
623 {
624         return NULL;
625 }
626
627 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
628 {
629         struct rq *rq = rq_of_rt_rq(rt_rq);
630
631         if (!rt_rq->rt_nr_running)
632                 return;
633
634         enqueue_top_rt_rq(rt_rq);
635         resched_curr(rq);
636 }
637
638 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
639 {
640         dequeue_top_rt_rq(rt_rq);
641 }
642
643 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
644 {
645         return rt_rq->rt_throttled;
646 }
647
648 static inline const struct cpumask *sched_rt_period_mask(void)
649 {
650         return cpu_online_mask;
651 }
652
653 static inline
654 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
655 {
656         return &cpu_rq(cpu)->rt;
657 }
658
659 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
660 {
661         return &def_rt_bandwidth;
662 }
663
664 #endif /* CONFIG_RT_GROUP_SCHED */
665
666 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
667 {
668         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
669
670         return (hrtimer_active(&rt_b->rt_period_timer) ||
671                 rt_rq->rt_time < rt_b->rt_runtime);
672 }
673
674 #ifdef CONFIG_SMP
675 /*
676  * We ran out of runtime, see if we can borrow some from our neighbours.
677  */
678 static void do_balance_runtime(struct rt_rq *rt_rq)
679 {
680         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
681         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
682         int i, weight;
683         u64 rt_period;
684
685         weight = cpumask_weight(rd->span);
686
687         raw_spin_lock(&rt_b->rt_runtime_lock);
688         rt_period = ktime_to_ns(rt_b->rt_period);
689         for_each_cpu(i, rd->span) {
690                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
691                 s64 diff;
692
693                 if (iter == rt_rq)
694                         continue;
695
696                 raw_spin_lock(&iter->rt_runtime_lock);
697                 /*
698                  * Either all rqs have inf runtime and there's nothing to steal
699                  * or __disable_runtime() below sets a specific rq to inf to
700                  * indicate it's been disabled and disallow stealing.
701                  */
702                 if (iter->rt_runtime == RUNTIME_INF)
703                         goto next;
704
705                 /*
706                  * From runqueues with spare time, take 1/n part of their
707                  * spare time, but no more than our period.
708                  */
709                 diff = iter->rt_runtime - iter->rt_time;
710                 if (diff > 0) {
711                         diff = div_u64((u64)diff, weight);
712                         if (rt_rq->rt_runtime + diff > rt_period)
713                                 diff = rt_period - rt_rq->rt_runtime;
714                         iter->rt_runtime -= diff;
715                         rt_rq->rt_runtime += diff;
716                         if (rt_rq->rt_runtime == rt_period) {
717                                 raw_spin_unlock(&iter->rt_runtime_lock);
718                                 break;
719                         }
720                 }
721 next:
722                 raw_spin_unlock(&iter->rt_runtime_lock);
723         }
724         raw_spin_unlock(&rt_b->rt_runtime_lock);
725 }
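/*
 * Exposition, a worked example: on a 4-CPU root domain (weight == 4), a
 * neighbouring rt_rq with 40ms of unused runtime (rt_runtime - rt_time)
 * lends us 1/4 of it, i.e. 10ms, capped so our rt_runtime never exceeds
 * rt_period. Borrowing only happens with the RT_RUNTIME_SHARE feature
 * enabled, see balance_runtime() below.
 */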
726
727 /*
728  * Ensure this RQ takes back all the runtime it lent to its neighbours.
729  */
730 static void __disable_runtime(struct rq *rq)
731 {
732         struct root_domain *rd = rq->rd;
733         rt_rq_iter_t iter;
734         struct rt_rq *rt_rq;
735
736         if (unlikely(!scheduler_running))
737                 return;
738
739         for_each_rt_rq(rt_rq, iter, rq) {
740                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
741                 s64 want;
742                 int i;
743
744                 raw_spin_lock(&rt_b->rt_runtime_lock);
745                 raw_spin_lock(&rt_rq->rt_runtime_lock);
746                 /*
747                  * Either we're all inf and nobody needs to borrow, or we're
748                  * already disabled and thus have nothing to do, or we have
749                  * exactly the right amount of runtime to take out.
750                  */
751                 if (rt_rq->rt_runtime == RUNTIME_INF ||
752                                 rt_rq->rt_runtime == rt_b->rt_runtime)
753                         goto balanced;
754                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
755
756                 /*
757                  * Calculate the difference between what we started out with
758                  * and what we currently have; that's the amount of runtime
759                  * we lent and now have to reclaim.
760                  */
761                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
762
763                 /*
764                  * Greedy reclaim, take back as much as we can.
765                  */
766                 for_each_cpu(i, rd->span) {
767                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
768                         s64 diff;
769
770                         /*
771                          * Can't reclaim from ourselves or disabled runqueues.
772                          */
773                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
774                                 continue;
775
776                         raw_spin_lock(&iter->rt_runtime_lock);
777                         if (want > 0) {
778                                 diff = min_t(s64, iter->rt_runtime, want);
779                                 iter->rt_runtime -= diff;
780                                 want -= diff;
781                         } else {
782                                 iter->rt_runtime -= want;
783                                 want -= want;
784                         }
785                         raw_spin_unlock(&iter->rt_runtime_lock);
786
787                         if (!want)
788                                 break;
789                 }
790
791                 raw_spin_lock(&rt_rq->rt_runtime_lock);
792                 /*
793                  * We cannot be left wanting - that would mean some runtime
794                  * leaked out of the system.
795                  */
796                 BUG_ON(want);
797 balanced:
798                 /*
799                  * Disable all the borrow logic by pretending we have inf
800                  * runtime - in which case borrowing doesn't make sense.
801                  */
802                 rt_rq->rt_runtime = RUNTIME_INF;
803                 rt_rq->rt_throttled = 0;
804                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
805                 raw_spin_unlock(&rt_b->rt_runtime_lock);
806
807                 /* Make rt_rq available for pick_next_task() */
808                 sched_rt_rq_enqueue(rt_rq);
809         }
810 }
811
812 static void __enable_runtime(struct rq *rq)
813 {
814         rt_rq_iter_t iter;
815         struct rt_rq *rt_rq;
816
817         if (unlikely(!scheduler_running))
818                 return;
819
820         /*
821          * Reset each runqueue's bandwidth settings
822          */
823         for_each_rt_rq(rt_rq, iter, rq) {
824                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
825
826                 raw_spin_lock(&rt_b->rt_runtime_lock);
827                 raw_spin_lock(&rt_rq->rt_runtime_lock);
828                 rt_rq->rt_runtime = rt_b->rt_runtime;
829                 rt_rq->rt_time = 0;
830                 rt_rq->rt_throttled = 0;
831                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
832                 raw_spin_unlock(&rt_b->rt_runtime_lock);
833         }
834 }
835
836 static void balance_runtime(struct rt_rq *rt_rq)
837 {
838         if (!sched_feat(RT_RUNTIME_SHARE))
839                 return;
840
841         if (rt_rq->rt_time > rt_rq->rt_runtime) {
842                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
843                 do_balance_runtime(rt_rq);
844                 raw_spin_lock(&rt_rq->rt_runtime_lock);
845         }
846 }
847 #else /* !CONFIG_SMP */
848 static inline void balance_runtime(struct rt_rq *rt_rq) {}
849 #endif /* CONFIG_SMP */
850
851 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
852 {
853         int i, idle = 1, throttled = 0;
854         const struct cpumask *span;
855
856         span = sched_rt_period_mask();
857 #ifdef CONFIG_RT_GROUP_SCHED
858         /*
859          * FIXME: isolated CPUs should really leave the root task group,
860          * whether they are isolcpus or were isolated via cpusets, lest
861          * the timer run on a CPU which does not service all runqueues,
862          * potentially leaving other CPUs indefinitely throttled.  If
863          * isolation is really required, the user will turn the throttle
864          * off to kill the perturbations it causes anyway.  Meanwhile,
865          * this maintains functionality for boot and/or troubleshooting.
866          */
867         if (rt_b == &root_task_group.rt_bandwidth)
868                 span = cpu_online_mask;
869 #endif
870         for_each_cpu(i, span) {
871                 int enqueue = 0;
872                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
873                 struct rq *rq = rq_of_rt_rq(rt_rq);
874                 int skip;
875
876                 /*
877                  * When span == cpu_online_mask, taking each rq->lock
878                  * can be time-consuming. Try to avoid it when possible.
879                  */
880                 raw_spin_lock(&rt_rq->rt_runtime_lock);
881                 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
882                         rt_rq->rt_runtime = rt_b->rt_runtime;
883                 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
884                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
885                 if (skip)
886                         continue;
887
888                 raw_spin_lock(&rq->lock);
889                 update_rq_clock(rq);
890
891                 if (rt_rq->rt_time) {
892                         u64 runtime;
893
894                         raw_spin_lock(&rt_rq->rt_runtime_lock);
895                         if (rt_rq->rt_throttled)
896                                 balance_runtime(rt_rq);
897                         runtime = rt_rq->rt_runtime;
898                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
899                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
900                                 rt_rq->rt_throttled = 0;
901                                 enqueue = 1;
902
903                                 /*
904                                  * When we're idle and a woken (rt) task is
905                                  * throttled, check_preempt_curr() will set
906                                  * skip_update and the time between the wakeup
907                                  * and this unthrottle will get accounted as
908                                  * 'runtime'.
909                                  */
910                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
911                                         rq_clock_cancel_skipupdate(rq);
912                         }
913                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
914                                 idle = 0;
915                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
916                 } else if (rt_rq->rt_nr_running) {
917                         idle = 0;
918                         if (!rt_rq_throttled(rt_rq))
919                                 enqueue = 1;
920                 }
921                 if (rt_rq->rt_throttled)
922                         throttled = 1;
923
924                 if (enqueue)
925                         sched_rt_rq_enqueue(rt_rq);
926                 raw_spin_unlock(&rq->lock);
927         }
928
929         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
930                 return 1;
931
932         return idle;
933 }
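/*
 * Exposition: on every period expiry the accrued rt_time of each rt_rq in
 * the span is reduced by at most overrun * rt_runtime (one budget per
 * missed period), and throttled runqueues whose rt_time fell below the
 * budget are unthrottled and re-enqueued. The handler reports idle (and the
 * timer stops) only when no rt_rq has remaining rt_time or queued RT tasks.
 */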
934
935 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
936 {
937 #ifdef CONFIG_RT_GROUP_SCHED
938         struct rt_rq *rt_rq = group_rt_rq(rt_se);
939
940         if (rt_rq)
941                 return rt_rq->highest_prio.curr;
942 #endif
943
944         return rt_task_of(rt_se)->prio;
945 }
946
947 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
948 {
949         u64 runtime = sched_rt_runtime(rt_rq);
950
951         if (rt_rq->rt_throttled)
952                 return rt_rq_throttled(rt_rq);
953
954         if (runtime >= sched_rt_period(rt_rq))
955                 return 0;
956
957         balance_runtime(rt_rq);
958         runtime = sched_rt_runtime(rt_rq);
959         if (runtime == RUNTIME_INF)
960                 return 0;
961
962         if (rt_rq->rt_time > runtime) {
963                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
964
965                 /*
966                  * Don't actually throttle groups that have no runtime assigned
967                  * but accrue some time due to boosting.
968                  */
969                 if (likely(rt_b->rt_runtime)) {
970                         rt_rq->rt_throttled = 1;
971                         printk_deferred_once("sched: RT throttling activated\n");
972                 } else {
973                         /*
974                          * In case we did anyway, make it go away,
975                          * replenishment is a joke, since it will replenish us
976                          * with exactly 0 ns.
977                          */
978                         rt_rq->rt_time = 0;
979                 }
980
981                 if (rt_rq_throttled(rt_rq)) {
982                         sched_rt_rq_dequeue(rt_rq);
983                         return 1;
984                 }
985         }
986
987         return 0;
988 }
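/*
 * Exposition: this is where "sched: RT throttling activated" comes from. A
 * runaway SCHED_FIFO/SCHED_RR task that burns more than rt_runtime within
 * rt_period gets its rt_rq dequeued until the period timer replenishes it.
 * The limit can be inspected or relaxed via the sysctls, e.g. (illustrative):
 *
 *	# cat /proc/sys/kernel/sched_rt_runtime_us		-> 950000
 *	# echo -1 > /proc/sys/kernel/sched_rt_runtime_us	(no limit)
 */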
989
990 /*
991  * Update the current task's runtime statistics. Skip current tasks that
992  * are not in our scheduling class.
993  */
994 static void update_curr_rt(struct rq *rq)
995 {
996         struct task_struct *curr = rq->curr;
997         struct sched_rt_entity *rt_se = &curr->rt;
998         u64 delta_exec;
999         u64 now;
1000
1001         if (curr->sched_class != &rt_sched_class)
1002                 return;
1003
1004         now = rq_clock_task(rq);
1005         delta_exec = now - curr->se.exec_start;
1006         if (unlikely((s64)delta_exec <= 0))
1007                 return;
1008
1009         schedstat_set(curr->se.statistics.exec_max,
1010                       max(curr->se.statistics.exec_max, delta_exec));
1011
1012         curr->se.sum_exec_runtime += delta_exec;
1013         account_group_exec_runtime(curr, delta_exec);
1014
1015         curr->se.exec_start = now;
1016         cgroup_account_cputime(curr, delta_exec);
1017
1018         if (!rt_bandwidth_enabled())
1019                 return;
1020
1021         for_each_sched_rt_entity(rt_se) {
1022                 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1023
1024                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1025                         raw_spin_lock(&rt_rq->rt_runtime_lock);
1026                         rt_rq->rt_time += delta_exec;
1027                         if (sched_rt_runtime_exceeded(rt_rq))
1028                                 resched_curr(rq);
1029                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
1030                 }
1031         }
1032 }
1033
1034 static void
1035 dequeue_top_rt_rq(struct rt_rq *rt_rq)
1036 {
1037         struct rq *rq = rq_of_rt_rq(rt_rq);
1038
1039         BUG_ON(&rq->rt != rt_rq);
1040
1041         if (!rt_rq->rt_queued)
1042                 return;
1043
1044         BUG_ON(!rq->nr_running);
1045
1046         sub_nr_running(rq, rt_rq->rt_nr_running);
1047         rt_rq->rt_queued = 0;
1048
1049 }
1050
1051 static void
1052 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1053 {
1054         struct rq *rq = rq_of_rt_rq(rt_rq);
1055
1056         BUG_ON(&rq->rt != rt_rq);
1057
1058         if (rt_rq->rt_queued)
1059                 return;
1060
1061         if (rt_rq_throttled(rt_rq))
1062                 return;
1063
1064         if (rt_rq->rt_nr_running) {
1065                 add_nr_running(rq, rt_rq->rt_nr_running);
1066                 rt_rq->rt_queued = 1;
1067         }
1068
1069         /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1070         cpufreq_update_util(rq, 0);
1071 }
1072
1073 #if defined CONFIG_SMP
1074
1075 static void
1076 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1077 {
1078         struct rq *rq = rq_of_rt_rq(rt_rq);
1079
1080 #ifdef CONFIG_RT_GROUP_SCHED
1081         /*
1082          * Change rq's cpupri only if rt_rq is the top queue.
1083          */
1084         if (&rq->rt != rt_rq)
1085                 return;
1086 #endif
1087         if (rq->online && prio < prev_prio)
1088                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1089 }
1090
1091 static void
1092 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1093 {
1094         struct rq *rq = rq_of_rt_rq(rt_rq);
1095
1096 #ifdef CONFIG_RT_GROUP_SCHED
1097         /*
1098          * Change rq's cpupri only if rt_rq is the top queue.
1099          */
1100         if (&rq->rt != rt_rq)
1101                 return;
1102 #endif
1103         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1104                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1105 }
1106
1107 #else /* CONFIG_SMP */
1108
1109 static inline
1110 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1111 static inline
1112 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1113
1114 #endif /* CONFIG_SMP */
1115
1116 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1117 static void
1118 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1119 {
1120         int prev_prio = rt_rq->highest_prio.curr;
1121
1122         if (prio < prev_prio)
1123                 rt_rq->highest_prio.curr = prio;
1124
1125         inc_rt_prio_smp(rt_rq, prio, prev_prio);
1126 }
1127
1128 static void
1129 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1130 {
1131         int prev_prio = rt_rq->highest_prio.curr;
1132
1133         if (rt_rq->rt_nr_running) {
1134
1135                 WARN_ON(prio < prev_prio);
1136
1137                 /*
1138                  * This may have been our highest task, and therefore
1139                  * we may have some recomputation to do
1140                  */
1141                 if (prio == prev_prio) {
1142                         struct rt_prio_array *array = &rt_rq->active;
1143
1144                         rt_rq->highest_prio.curr =
1145                                 sched_find_first_bit(array->bitmap);
1146                 }
1147
1148         } else
1149                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1150
1151         dec_rt_prio_smp(rt_rq, prio, prev_prio);
1152 }
1153
1154 #else
1155
1156 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1157 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1158
1159 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1160
1161 #ifdef CONFIG_RT_GROUP_SCHED
1162
1163 static void
1164 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1165 {
1166         if (rt_se_boosted(rt_se))
1167                 rt_rq->rt_nr_boosted++;
1168
1169         if (rt_rq->tg)
1170                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1171 }
1172
1173 static void
1174 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1175 {
1176         if (rt_se_boosted(rt_se))
1177                 rt_rq->rt_nr_boosted--;
1178
1179         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1180 }
1181
1182 #else /* CONFIG_RT_GROUP_SCHED */
1183
1184 static void
1185 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1186 {
1187         start_rt_bandwidth(&def_rt_bandwidth);
1188 }
1189
1190 static inline
1191 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1192
1193 #endif /* CONFIG_RT_GROUP_SCHED */
1194
1195 static inline
1196 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1197 {
1198         struct rt_rq *group_rq = group_rt_rq(rt_se);
1199
1200         if (group_rq)
1201                 return group_rq->rt_nr_running;
1202         else
1203                 return 1;
1204 }
1205
1206 static inline
1207 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1208 {
1209         struct rt_rq *group_rq = group_rt_rq(rt_se);
1210         struct task_struct *tsk;
1211
1212         if (group_rq)
1213                 return group_rq->rr_nr_running;
1214
1215         tsk = rt_task_of(rt_se);
1216
1217         return (tsk->policy == SCHED_RR) ? 1 : 0;
1218 }
1219
1220 static inline
1221 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1222 {
1223         int prio = rt_se_prio(rt_se);
1224
1225         WARN_ON(!rt_prio(prio));
1226         rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1227         rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1228
1229         inc_rt_prio(rt_rq, prio);
1230         inc_rt_migration(rt_se, rt_rq);
1231         inc_rt_group(rt_se, rt_rq);
1232 }
1233
1234 static inline
1235 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1236 {
1237         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1238         WARN_ON(!rt_rq->rt_nr_running);
1239         rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1240         rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1241
1242         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1243         dec_rt_migration(rt_se, rt_rq);
1244         dec_rt_group(rt_se, rt_rq);
1245 }
1246
1247 /*
1248  * Change rt_se->run_list location unless SAVE && !MOVE
1249  *
1250  * assumes ENQUEUE/DEQUEUE flags match
1251  */
1252 static inline bool move_entity(unsigned int flags)
1253 {
1254         if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1255                 return false;
1256
1257         return true;
1258 }
1259
1260 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1261 {
1262         list_del_init(&rt_se->run_list);
1263
1264         if (list_empty(array->queue + rt_se_prio(rt_se)))
1265                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1266
1267         rt_se->on_list = 0;
1268 }
1269
1270 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1271 {
1272         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1273         struct rt_prio_array *array = &rt_rq->active;
1274         struct rt_rq *group_rq = group_rt_rq(rt_se);
1275         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1276
1277         /*
1278          * Don't enqueue the group if it's throttled, or when empty.
1279          * The latter is a consequence of the former when a child group
1280          * gets throttled and the current group doesn't have any other
1281          * active members.
1282          */
1283         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1284                 if (rt_se->on_list)
1285                         __delist_rt_entity(rt_se, array);
1286                 return;
1287         }
1288
1289         if (move_entity(flags)) {
1290                 WARN_ON_ONCE(rt_se->on_list);
1291                 if (flags & ENQUEUE_HEAD)
1292                         list_add(&rt_se->run_list, queue);
1293                 else
1294                         list_add_tail(&rt_se->run_list, queue);
1295
1296                 __set_bit(rt_se_prio(rt_se), array->bitmap);
1297                 rt_se->on_list = 1;
1298         }
1299         rt_se->on_rq = 1;
1300
1301         inc_rt_tasks(rt_se, rt_rq);
1302 }
1303
1304 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1305 {
1306         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1307         struct rt_prio_array *array = &rt_rq->active;
1308
1309         if (move_entity(flags)) {
1310                 WARN_ON_ONCE(!rt_se->on_list);
1311                 __delist_rt_entity(rt_se, array);
1312         }
1313         rt_se->on_rq = 0;
1314
1315         dec_rt_tasks(rt_se, rt_rq);
1316 }
1317
1318 /*
1319  * Because the prio of an upper entry depends on the lower
1320  * entries, we must remove entries top - down.
1321  */
1322 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1323 {
1324         struct sched_rt_entity *back = NULL;
1325
1326         for_each_sched_rt_entity(rt_se) {
1327                 rt_se->back = back;
1328                 back = rt_se;
1329         }
1330
1331         dequeue_top_rt_rq(rt_rq_of_se(back));
1332
1333         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1334                 if (on_rt_rq(rt_se))
1335                         __dequeue_rt_entity(rt_se, flags);
1336         }
1337 }
1338
1339 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1340 {
1341         struct rq *rq = rq_of_rt_se(rt_se);
1342
1343         dequeue_rt_stack(rt_se, flags);
1344         for_each_sched_rt_entity(rt_se)
1345                 __enqueue_rt_entity(rt_se, flags);
1346         enqueue_top_rt_rq(&rq->rt);
1347 }
1348
1349 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1350 {
1351         struct rq *rq = rq_of_rt_se(rt_se);
1352
1353         dequeue_rt_stack(rt_se, flags);
1354
1355         for_each_sched_rt_entity(rt_se) {
1356                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1357
1358                 if (rt_rq && rt_rq->rt_nr_running)
1359                         __enqueue_rt_entity(rt_se, flags);
1360         }
1361         enqueue_top_rt_rq(&rq->rt);
1362 }
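/*
 * Exposition: because a group entity's priority depends on its children,
 * both paths above first dequeue the whole hierarchy top-down via
 * dequeue_rt_stack() and then re-enqueue from the entity up through its
 * parents, keeping every level's prio bitmap and nr_running counts coherent.
 */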
1363
1364 /*
1365  * Adding/removing a task to/from a priority array:
1366  */
1367 static void
1368 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1369 {
1370         struct sched_rt_entity *rt_se = &p->rt;
1371
1372         if (flags & ENQUEUE_WAKEUP)
1373                 rt_se->timeout = 0;
1374
1375         enqueue_rt_entity(rt_se, flags);
1376
1377         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1378                 enqueue_pushable_task(rq, p);
1379 }
1380
1381 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1382 {
1383         struct sched_rt_entity *rt_se = &p->rt;
1384
1385         update_curr_rt(rq);
1386         dequeue_rt_entity(rt_se, flags);
1387
1388         dequeue_pushable_task(rq, p);
1389 }
1390
1391 /*
1392  * Put the task at the head or the end of the run list without the overhead of
1393  * dequeue followed by enqueue.
1394  */
1395 static void
1396 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1397 {
1398         if (on_rt_rq(rt_se)) {
1399                 struct rt_prio_array *array = &rt_rq->active;
1400                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1401
1402                 if (head)
1403                         list_move(&rt_se->run_list, queue);
1404                 else
1405                         list_move_tail(&rt_se->run_list, queue);
1406         }
1407 }
1408
1409 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1410 {
1411         struct sched_rt_entity *rt_se = &p->rt;
1412         struct rt_rq *rt_rq;
1413
1414         for_each_sched_rt_entity(rt_se) {
1415                 rt_rq = rt_rq_of_se(rt_se);
1416                 requeue_rt_entity(rt_rq, rt_se, head);
1417         }
1418 }
1419
1420 static void yield_task_rt(struct rq *rq)
1421 {
1422         requeue_task_rt(rq, rq->curr, 0);
1423 }
1424
1425 #ifdef CONFIG_SMP
1426 static int find_lowest_rq(struct task_struct *task);
1427
1428 static int
1429 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1430 {
1431         struct task_struct *curr;
1432         struct rq *rq;
1433         bool test;
1434
1435         /* For anything but wake ups, just return the task_cpu */
1436         if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1437                 goto out;
1438
1439         rq = cpu_rq(cpu);
1440
1441         rcu_read_lock();
1442         curr = READ_ONCE(rq->curr); /* unlocked access */
1443
1444         /*
1445          * If the current task on @p's runqueue is an RT task, then
1446          * try to see if we can wake this RT task up on another
1447          * runqueue. Otherwise simply start this RT task
1448          * on its current runqueue.
1449          *
1450          * We want to avoid overloading runqueues. If the woken
1451          * task is a higher priority, then it will stay on this CPU
1452          * and the lower prio task should be moved to another CPU.
1453          * Even though this will probably make the lower prio task
1454          * lose its cache, we do not want to bounce a higher task
1455          * around just because it gave up its CPU, perhaps for a
1456          * lock?
1457          *
1458          * For equal prio tasks, we just let the scheduler sort it out.
1459          *
1460          * Otherwise, just let it ride on the affined RQ and the
1461          * post-schedule router will push the preempted task away
1462          *
1463          * This test is optimistic, if we get it wrong the load-balancer
1464          * will have to sort it out.
1465          *
1466          * We take into account the capacity of the CPU to ensure it fits the
1467          * requirement of the task - which is only important on heterogeneous
1468          * systems like big.LITTLE.
1469          */
1470         test = curr &&
1471                unlikely(rt_task(curr)) &&
1472                (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1473
1474         if (test || !rt_task_fits_capacity(p, cpu)) {
1475                 int target = find_lowest_rq(p);
1476
1477                 /*
1478                  * Don't bother moving it if the destination CPU is
1479                  * not running a lower priority task.
1480                  */
1481                 if (target != -1 &&
1482                     p->prio < cpu_rq(target)->rt.highest_prio.curr)
1483                         cpu = target;
1484         }
1485         rcu_read_unlock();
1486
1487 out:
1488         return cpu;
1489 }
1490
1491 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1492 {
1493         /*
1494          * Current can't be migrated, useless to reschedule,
1495          * let's hope p can move out.
1496          */
1497         if (rq->curr->nr_cpus_allowed == 1 ||
1498             !cpupri_find(&rq->rd->cpupri, rq->curr, NULL, NULL))
1499                 return;
1500
1501         /*
1502          * p is migratable, so let's not schedule it and
1503          * see if it is pushed or pulled somewhere else.
1504          */
1505         if (p->nr_cpus_allowed != 1 &&
1506             cpupri_find(&rq->rd->cpupri, p, NULL, NULL))
1507                 return;
1508
1509         /*
1510          * There appear to be other CPUs that can accept
1511          * the current task but none can run 'p', so let's reschedule
1512          * to try and push the current task away:
1513          */
1514         requeue_task_rt(rq, p, 1);
1515         resched_curr(rq);
1516 }
1517
1518 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1519 {
1520         if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1521                 /*
1522                  * This is OK, because current is on_cpu, which avoids it being
1523                  * picked for load-balance and preemption/IRQs are still
1524                  * disabled avoiding further scheduler activity on it and we've
1525                  * not yet started the picking loop.
1526                  */
1527                 rq_unpin_lock(rq, rf);
1528                 pull_rt_task(rq);
1529                 rq_repin_lock(rq, rf);
1530         }
1531
1532         return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1533 }
1534 #endif /* CONFIG_SMP */
1535
1536 /*
1537  * Preempt the current task with a newly woken task if needed:
1538  */
1539 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1540 {
1541         if (p->prio < rq->curr->prio) {
1542                 resched_curr(rq);
1543                 return;
1544         }
1545
1546 #ifdef CONFIG_SMP
1547         /*
1548          * If:
1549          *
1550          * - the newly woken task is of equal priority to the current task
1551          * - the newly woken task is non-migratable while current is migratable
1552          * - current will be preempted on the next reschedule
1553          *
1554          * we should check to see if current can readily move to a different
1555          * cpu.  If so, we will reschedule to allow the push logic to try
1556          * to move current somewhere else, making room for our non-migratable
1557          * task.
1558          */
1559         if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1560                 check_preempt_equal_prio(rq, p);
1561 #endif
1562 }
1563
1564 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1565 {
1566         p->se.exec_start = rq_clock_task(rq);
1567
1568         /* The running task is never eligible for pushing */
1569         dequeue_pushable_task(rq, p);
1570
1571         if (!first)
1572                 return;
1573
1574         /*
1575          * If prev task was rt, put_prev_task() has already updated the
1576          * utilization. We only care of the case where we start to schedule a
1577          * rt task
1578          */
1579         if (rq->curr->sched_class != &rt_sched_class)
1580                 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1581
1582         rt_queue_push_tasks(rq);
1583 }
1584
1585 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1586                                                    struct rt_rq *rt_rq)
1587 {
1588         struct rt_prio_array *array = &rt_rq->active;
1589         struct sched_rt_entity *next = NULL;
1590         struct list_head *queue;
1591         int idx;
1592
1593         idx = sched_find_first_bit(array->bitmap);
1594         BUG_ON(idx >= MAX_RT_PRIO);
1595
1596         queue = array->queue + idx;
1597         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1598
1599         return next;
1600 }
1601
1602 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1603 {
1604         struct sched_rt_entity *rt_se;
1605         struct rt_rq *rt_rq  = &rq->rt;
1606
1607         do {
1608                 rt_se = pick_next_rt_entity(rq, rt_rq);
1609                 BUG_ON(!rt_se);
1610                 rt_rq = group_rt_rq(rt_se);
1611         } while (rt_rq);
1612
1613         return rt_task_of(rt_se);
1614 }
1615
1616 static struct task_struct *pick_next_task_rt(struct rq *rq)
1617 {
1618         struct task_struct *p;
1619
1620         if (!sched_rt_runnable(rq))
1621                 return NULL;
1622
1623         p = _pick_next_task_rt(rq);
1624         set_next_task_rt(rq, p, true);
1625         return p;
1626 }
1627
1628 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1629 {
1630         update_curr_rt(rq);
1631
1632         update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1633
1634         /*
1635          * The previous task needs to be made eligible for pushing
1636          * if it is still active
1637          */
1638         if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1639                 enqueue_pushable_task(rq, p);
1640 }
1641
1642 #ifdef CONFIG_SMP
1643
1644 /* Only try algorithms three times */
1645 #define RT_MAX_TRIES 3
1646
1647 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1648 {
1649         if (!task_running(rq, p) &&
1650             cpumask_test_cpu(cpu, p->cpus_ptr) &&
1651             rt_task_fits_capacity(p, cpu))
1652                 return 1;
1653
1654         return 0;
1655 }
1656
1657 /*
1658  * Return the highest-priority pushable task of the rq that is suitable to be
1659  * executed on the CPU, or NULL otherwise
1660  */
1661 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1662 {
1663         struct plist_head *head = &rq->rt.pushable_tasks;
1664         struct task_struct *p;
1665
1666         if (!has_pushable_tasks(rq))
1667                 return NULL;
1668
1669         plist_for_each_entry(p, head, pushable_tasks) {
1670                 if (pick_rt_task(rq, p, cpu))
1671                         return p;
1672         }
1673
1674         return NULL;
1675 }
1676
1677 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1678
1679 static int find_lowest_rq(struct task_struct *task)
1680 {
1681         struct sched_domain *sd;
1682         struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1683         int this_cpu = smp_processor_id();
1684         int cpu      = task_cpu(task);
1685
1686         /* Make sure the mask is initialized first */
1687         if (unlikely(!lowest_mask))
1688                 return -1;
1689
1690         if (task->nr_cpus_allowed == 1)
1691                 return -1; /* No other targets possible */
1692
1693         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask,
1694                          rt_task_fits_capacity))
1695                 return -1; /* No targets found */
1696
1697         /*
1698          * At this point we have built a mask of CPUs representing the
1699          * lowest priority tasks in the system.  Now we want to elect
1700          * the best one based on our affinity and topology.
1701          *
1702          * We prioritize the last CPU that the task executed on since
1703          * it is most likely cache-hot in that location.
1704          */
1705         if (cpumask_test_cpu(cpu, lowest_mask))
1706                 return cpu;
1707
1708         /*
1709          * Otherwise, we consult the sched_domains span maps to figure
1710          * out which CPU is logically closest to our hot cache data.
1711          */
1712         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1713                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1714
1715         rcu_read_lock();
1716         for_each_domain(cpu, sd) {
1717                 if (sd->flags & SD_WAKE_AFFINE) {
1718                         int best_cpu;
1719
1720                         /*
1721                          * "this_cpu" is cheaper to preempt than a
1722                          * remote processor.
1723                          */
1724                         if (this_cpu != -1 &&
1725                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1726                                 rcu_read_unlock();
1727                                 return this_cpu;
1728                         }
1729
1730                         best_cpu = cpumask_first_and(lowest_mask,
1731                                                      sched_domain_span(sd));
1732                         if (best_cpu < nr_cpu_ids) {
1733                                 rcu_read_unlock();
1734                                 return best_cpu;
1735                         }
1736                 }
1737         }
1738         rcu_read_unlock();
1739
1740         /*
1741          * And finally, if there were no matches within the domains
1742          * just give the caller *something* to work with from the compatible
1743          * locations.
1744          */
1745         if (this_cpu != -1)
1746                 return this_cpu;
1747
1748         cpu = cpumask_any(lowest_mask);
1749         if (cpu < nr_cpu_ids)
1750                 return cpu;
1751
1752         return -1;
1753 }
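
/*
 * Editorial note: a rough worked example of the selection order above,
 * assuming a hypothetical 4-CPU system where cpupri_find() has filled
 * lowest_mask with CPUs {1, 3}:
 *
 *  - if task_cpu(task) is 1 or 3, that CPU is returned (likely cache-hot);
 *  - otherwise, if this_cpu is in the mask and shares an SD_WAKE_AFFINE
 *    domain with the task's old CPU, this_cpu is returned (cheaper to
 *    preempt locally);
 *  - otherwise the first mask CPU found inside such a domain is returned;
 *  - failing all of that: this_cpu if it is in the mask, then any CPU from
 *    the mask, and finally -1.
 */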
1754
1755 /* Will lock the rq it finds */
1756 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1757 {
1758         struct rq *lowest_rq = NULL;
1759         int tries;
1760         int cpu;
1761
1762         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1763                 cpu = find_lowest_rq(task);
1764
1765                 if ((cpu == -1) || (cpu == rq->cpu))
1766                         break;
1767
1768                 lowest_rq = cpu_rq(cpu);
1769
1770                 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1771                         /*
1772                          * Target rq has tasks of equal or higher priority,
1773                          * retrying does not release any lock and is unlikely
1774                          * to yield a different result.
1775                          */
1776                         lowest_rq = NULL;
1777                         break;
1778                 }
1779
1780                 /* if the prio of this runqueue changed, try again */
1781                 if (double_lock_balance(rq, lowest_rq)) {
1782                         /*
1783                          * We had to unlock the run queue. In
1784                          * the meantime, the task could have
1785                          * migrated already or had its affinity changed.
1786                          * Also make sure that it wasn't scheduled on its rq.
1787                          */
1788                         if (unlikely(task_rq(task) != rq ||
1789                                      !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
1790                                      task_running(rq, task) ||
1791                                      !rt_task(task) ||
1792                                      !task_on_rq_queued(task))) {
1793
1794                                 double_unlock_balance(rq, lowest_rq);
1795                                 lowest_rq = NULL;
1796                                 break;
1797                         }
1798                 }
1799
1800                 /* If this rq is still suitable use it. */
1801                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1802                         break;
1803
1804                 /* try again */
1805                 double_unlock_balance(rq, lowest_rq);
1806                 lowest_rq = NULL;
1807         }
1808
1809         return lowest_rq;
1810 }
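
/*
 * Editorial note: double_lock_balance() may have to drop rq->lock in order
 * to take both runqueue locks in the proper order, and it returns nonzero
 * when that happens; that is why the task's placement is re-validated above
 * before the push is allowed to proceed.
 */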
1811
1812 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1813 {
1814         struct task_struct *p;
1815
1816         if (!has_pushable_tasks(rq))
1817                 return NULL;
1818
1819         p = plist_first_entry(&rq->rt.pushable_tasks,
1820                               struct task_struct, pushable_tasks);
1821
1822         BUG_ON(rq->cpu != task_cpu(p));
1823         BUG_ON(task_current(rq, p));
1824         BUG_ON(p->nr_cpus_allowed <= 1);
1825
1826         BUG_ON(!task_on_rq_queued(p));
1827         BUG_ON(!rt_task(p));
1828
1829         return p;
1830 }
1831
1832 /*
1833  * If the current CPU has more than one RT task, see if a non-running
1834  * task can migrate over to a CPU that is running a task of lesser
1835  * priority.
1836  */
1837 static int push_rt_task(struct rq *rq)
1838 {
1839         struct task_struct *next_task;
1840         struct rq *lowest_rq;
1841         int ret = 0;
1842
1843         if (!rq->rt.overloaded)
1844                 return 0;
1845
1846         next_task = pick_next_pushable_task(rq);
1847         if (!next_task)
1848                 return 0;
1849
1850 retry:
1851         if (WARN_ON(next_task == rq->curr))
1852                 return 0;
1853
1854         /*
1855          * It's possible that next_task slipped in with a
1856          * higher priority than current. If that's the case,
1857          * just reschedule current.
1858          */
1859         if (unlikely(next_task->prio < rq->curr->prio)) {
1860                 resched_curr(rq);
1861                 return 0;
1862         }
1863
1864         /* We might release rq lock */
1865         get_task_struct(next_task);
1866
1867         /* find_lock_lowest_rq locks the rq if found */
1868         lowest_rq = find_lock_lowest_rq(next_task, rq);
1869         if (!lowest_rq) {
1870                 struct task_struct *task;
1871                 /*
1872                  * find_lock_lowest_rq releases rq->lock
1873                  * so it is possible that next_task has migrated.
1874                  *
1875                  * We need to make sure that the task is still on the same
1876                  * run-queue and is also still the next task eligible for
1877                  * pushing.
1878                  */
1879                 task = pick_next_pushable_task(rq);
1880                 if (task == next_task) {
1881                         /*
1882                          * The task hasn't migrated, and is still the next
1883                          * eligible task, but we failed to find a run-queue
1884                          * to push it to.  Do not retry in this case, since
1885                          * other CPUs will pull from us when ready.
1886                          */
1887                         goto out;
1888                 }
1889
1890                 if (!task)
1891                         /* No more tasks, just exit */
1892                         goto out;
1893
1894                 /*
1895                  * Something has shifted, try again.
1896                  */
1897                 put_task_struct(next_task);
1898                 next_task = task;
1899                 goto retry;
1900         }
1901
1902         deactivate_task(rq, next_task, 0);
1903         set_task_cpu(next_task, lowest_rq->cpu);
1904         activate_task(lowest_rq, next_task, 0);
1905         ret = 1;
1906
1907         resched_curr(lowest_rq);
1908
1909         double_unlock_balance(rq, lowest_rq);
1910
1911 out:
1912         put_task_struct(next_task);
1913
1914         return ret;
1915 }
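
/*
 * Editorial note: the get_task_struct()/put_task_struct() pair above pins
 * next_task across the window in which find_lock_lowest_rq() may drop
 * rq->lock, so the task cannot be freed while it is being re-validated or
 * migrated to the lower-priority runqueue.
 */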
1916
1917 static void push_rt_tasks(struct rq *rq)
1918 {
1919         /* push_rt_task() will return true if it moved an RT task */
1920         while (push_rt_task(rq))
1921                 ;
1922 }
1923
1924 #ifdef HAVE_RT_PUSH_IPI
1925
1926 /*
1927  * When a high priority task schedules out from a CPU and a lower priority
1928  * task is scheduled in, a check is made to see if there are any RT tasks
1929  * on other CPUs that are waiting to run because a higher priority RT task
1930  * is currently running on its CPU. In this case, the CPU with multiple RT
1931  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1932  * up that may be able to run one of its non-running queued RT tasks.
1933  *
1934  * All CPUs with overloaded RT tasks need to be notified as there is currently
1935  * no way to know which of these CPUs have the highest priority task waiting
1936  * to run. Instead of trying to take a spinlock on each of these CPUs,
1937  * which has been shown to cause large latency when done on machines with
1938  * many CPUs, an IPI is sent to the CPUs to have them push off the
1939  * overloaded RT tasks waiting to run.
1940  *
1941  * Just sending an IPI to each of the CPUs is also an issue, as on machines
1942  * with a large CPU count this can cause an IPI storm on a CPU, especially
1943  * if it's the only CPU with multiple RT tasks queued, and a large number
1944  * of CPUs are scheduling a lower priority task at the same time.
1945  *
1946  * Each root domain has its own irq work function that can iterate over
1947  * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1948  * tasks must be checked regardless of whether one or many CPUs are
1949  * lowering their priority, there is a single irq work iterator that will
1950  * try to push off the RT tasks that are waiting to run.
1951  *
1952  * When a CPU schedules a lower priority task, it will kick off the
1953  * irq work iterator that will jump to each CPU with overloaded RT tasks.
1954  * As it only takes the first CPU that schedules a lower priority task
1955  * to start the process, the rto_loop_start variable is claimed atomically,
1956  * and only the CPU that changes it from zero will take the rto_lock.
1957  * This prevents high contention on the lock as the process handles all
1958  * CPUs scheduling lower priority tasks.
1959  *
1960  * All CPUs that are scheduling a lower priority task will increment the
1961  * rto_loop_next variable. This will make sure that the irq work iterator
1962  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1963  * priority task, even if the iterator is in the middle of a scan.
1964  * Incrementing rto_loop_next will cause the iterator to perform another scan.
1965  *
1966  */
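
/*
 * Editorial sketch (not part of the original source): the interplay between
 * rto_loop and rto_loop_next described above behaves roughly like the
 * following pseudo-code, where rto_loop only changes under rto_lock while
 * rto_loop_next may be bumped by any CPU:
 *
 *   tell_cpu_to_push():  rto_loop_next++;             // keep the scan alive
 *   rto_next_cpu():      cpu = next set bit in rto_mask after rto_cpu;
 *                        if (cpu is valid) return cpu;
 *                        rto_cpu = -1;
 *                        if (rto_loop == rto_loop_next) return -1;  // done
 *                        rto_loop = rto_loop_next;    // restart the scan
 */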
1967 static int rto_next_cpu(struct root_domain *rd)
1968 {
1969         int next;
1970         int cpu;
1971
1972         /*
1973          * When starting the IPI RT pushing, the rto_cpu is set to -1,
1974          * rto_next_cpu() will simply return the first CPU found in
1975          * the rto_mask.
1976          *
1977          * If rto_next_cpu() is called while rto_cpu holds a valid CPU, it
1978          * will return the next CPU found in the rto_mask.
1979          *
1980          * If there are no more CPUs left in the rto_mask, then a check is made
1981          * against rto_loop and rto_loop_next. rto_loop is only updated with
1982          * the rto_lock held, but any CPU may increment the rto_loop_next
1983          * without any locking.
1984          */
1985         for (;;) {
1986
1987                 /* When rto_cpu is -1 this acts like cpumask_first() */
1988                 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
1989
1990                 rd->rto_cpu = cpu;
1991
1992                 if (cpu < nr_cpu_ids)
1993                         return cpu;
1994
1995                 rd->rto_cpu = -1;
1996
1997                 /*
1998                  * ACQUIRE ensures we see the @rto_mask changes
1999                  * made prior to the @next value observed.
2000                  *
2001                  * Matches WMB in rt_set_overload().
2002                  */
2003                 next = atomic_read_acquire(&rd->rto_loop_next);
2004
2005                 if (rd->rto_loop == next)
2006                         break;
2007
2008                 rd->rto_loop = next;
2009         }
2010
2011         return -1;
2012 }
2013
2014 static inline bool rto_start_trylock(atomic_t *v)
2015 {
2016         return !atomic_cmpxchg_acquire(v, 0, 1);
2017 }
2018
2019 static inline void rto_start_unlock(atomic_t *v)
2020 {
2021         atomic_set_release(v, 0);
2022 }
2023
2024 static void tell_cpu_to_push(struct rq *rq)
2025 {
2026         int cpu = -1;
2027
2028         /* Keep the loop going if the IPI is currently active */
2029         atomic_inc(&rq->rd->rto_loop_next);
2030
2031         /* Only one CPU can initiate a loop at a time */
2032         if (!rto_start_trylock(&rq->rd->rto_loop_start))
2033                 return;
2034
2035         raw_spin_lock(&rq->rd->rto_lock);
2036
2037         /*
2038          * The rto_cpu is updated under the lock; if it holds a valid CPU
2039          * then the IPI is still running and will continue due to the
2040          * update to loop_next, and nothing needs to be done here.
2041          * Otherwise it is finishing up and an IPI needs to be sent.
2042          */
2043         if (rq->rd->rto_cpu < 0)
2044                 cpu = rto_next_cpu(rq->rd);
2045
2046         raw_spin_unlock(&rq->rd->rto_lock);
2047
2048         rto_start_unlock(&rq->rd->rto_loop_start);
2049
2050         if (cpu >= 0) {
2051                 /* Make sure the rd does not get freed while pushing */
2052                 sched_get_rd(rq->rd);
2053                 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2054         }
2055 }
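
/*
 * Editorial note: rto_loop_start acts as a "single initiator" latch here;
 * rto_start_trylock() succeeds only for the one CPU that flips it from 0
 * to 1, so at most one CPU at a time starts (or re-arms) the IPI chain
 * while every other CPU merely bumps rto_loop_next above.
 */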
2056
2057 /* Called from hardirq context */
2058 void rto_push_irq_work_func(struct irq_work *work)
2059 {
2060         struct root_domain *rd =
2061                 container_of(work, struct root_domain, rto_push_work);
2062         struct rq *rq;
2063         int cpu;
2064
2065         rq = this_rq();
2066
2067         /*
2068          * We do not need to grab the lock to check for has_pushable_tasks.
2069          * When it gets updated, a check is made to see whether a push is possible.
2070          */
2071         if (has_pushable_tasks(rq)) {
2072                 raw_spin_lock(&rq->lock);
2073                 push_rt_tasks(rq);
2074                 raw_spin_unlock(&rq->lock);
2075         }
2076
2077         raw_spin_lock(&rd->rto_lock);
2078
2079         /* Pass the IPI to the next rt overloaded queue */
2080         cpu = rto_next_cpu(rd);
2081
2082         raw_spin_unlock(&rd->rto_lock);
2083
2084         if (cpu < 0) {
2085                 sched_put_rd(rd);
2086                 return;
2087         }
2088
2089         /* Try the next RT overloaded CPU */
2090         irq_work_queue_on(&rd->rto_push_work, cpu);
2091 }
2092 #endif /* HAVE_RT_PUSH_IPI */
2093
2094 static void pull_rt_task(struct rq *this_rq)
2095 {
2096         int this_cpu = this_rq->cpu, cpu;
2097         bool resched = false;
2098         struct task_struct *p;
2099         struct rq *src_rq;
2100         int rt_overload_count = rt_overloaded(this_rq);
2101
2102         if (likely(!rt_overload_count))
2103                 return;
2104
2105         /*
2106          * Match the barrier from rt_set_overload(); this guarantees that if we
2107          * see overloaded we must also see the rto_mask bit.
2108          */
2109         smp_rmb();
2110
2111         /* If we are the only overloaded CPU do nothing */
2112         if (rt_overload_count == 1 &&
2113             cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2114                 return;
2115
2116 #ifdef HAVE_RT_PUSH_IPI
2117         if (sched_feat(RT_PUSH_IPI)) {
2118                 tell_cpu_to_push(this_rq);
2119                 return;
2120         }
2121 #endif
2122
2123         for_each_cpu(cpu, this_rq->rd->rto_mask) {
2124                 if (this_cpu == cpu)
2125                         continue;
2126
2127                 src_rq = cpu_rq(cpu);
2128
2129                 /*
2130                  * Don't bother taking the src_rq->lock if the next highest
2131                  * task is known to be lower-priority than our current task.
2132                  * This may look racy, but if this value is about to go
2133                  * logically higher, the src_rq will push this task away.
2134                  * And if it's going logically lower, we do not care.
2135                  */
2136                 if (src_rq->rt.highest_prio.next >=
2137                     this_rq->rt.highest_prio.curr)
2138                         continue;
2139
2140                 /*
2141                  * We can potentially drop this_rq's lock in
2142                  * double_lock_balance, and another CPU could
2143                  * alter this_rq
2144                  */
2145                 double_lock_balance(this_rq, src_rq);
2146
2147                 /*
2148                  * We can only pull a task that is pushable
2149                  * on its rq, and no others.
2150                  */
2151                 p = pick_highest_pushable_task(src_rq, this_cpu);
2152
2153                 /*
2154                  * Do we have an RT task that preempts
2155                  * the to-be-scheduled task?
2156                  */
2157                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2158                         WARN_ON(p == src_rq->curr);
2159                         WARN_ON(!task_on_rq_queued(p));
2160
2161                         /*
2162                          * There's a chance that p is higher in priority
2163                          * than what's currently running on its CPU.
2164                          * This is just that p is waking up and hasn't
2165                          * had a chance to schedule. We only pull
2166                          * p if it is lower in priority than the
2167                          * current task on the run queue.
2168                          */
2169                         if (p->prio < src_rq->curr->prio)
2170                                 goto skip;
2171
2172                         resched = true;
2173
2174                         deactivate_task(src_rq, p, 0);
2175                         set_task_cpu(p, this_cpu);
2176                         activate_task(this_rq, p, 0);
2177                         /*
2178                          * We continue with the search, just in
2179                          * case there's an even higher prio task
2180                          * in another runqueue. (low likelihood
2181                          * but possible)
2182                          */
2183                 }
2184 skip:
2185                 double_unlock_balance(this_rq, src_rq);
2186         }
2187
2188         if (resched)
2189                 resched_curr(this_rq);
2190 }
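
/*
 * Editorial note: without the RT_PUSH_IPI feature the loop above iterates
 * every CPU in rd->rto_mask and may take double_lock_balance() against each
 * of them, which is the lock traffic that the IPI-based push mechanism
 * described earlier is meant to avoid on machines with many CPUs.
 */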
2191
2192 /*
2193  * If we are not running and we are not going to reschedule soon, we should
2194  * try to push tasks away now
2195  */
2196 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2197 {
2198         bool need_to_push = !task_running(rq, p) &&
2199                             !test_tsk_need_resched(rq->curr) &&
2200                             p->nr_cpus_allowed > 1 &&
2201                             (dl_task(rq->curr) || rt_task(rq->curr)) &&
2202                             (rq->curr->nr_cpus_allowed < 2 ||
2203                              rq->curr->prio <= p->prio);
2204
2205         if (need_to_push || !rt_task_fits_capacity(p, cpu_of(rq)))
2206                 push_rt_tasks(rq);
2207 }
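
/*
 * Editorial note: spelling out need_to_push above, a push is attempted when
 * the woken task is not already running, the current task has not yet been
 * marked for reschedule, the woken task is allowed to migrate
 * (nr_cpus_allowed > 1), the current task is a DL or RT task, and the
 * current task either cannot migrate itself or is at least as high in
 * priority as the woken task (so the woken task would otherwise wait here).
 */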
2208
2209 /* Assumes rq->lock is held */
2210 static void rq_online_rt(struct rq *rq)
2211 {
2212         if (rq->rt.overloaded)
2213                 rt_set_overload(rq);
2214
2215         __enable_runtime(rq);
2216
2217         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2218 }
2219
2220 /* Assumes rq->lock is held */
2221 static void rq_offline_rt(struct rq *rq)
2222 {
2223         if (rq->rt.overloaded)
2224                 rt_clear_overload(rq);
2225
2226         __disable_runtime(rq);
2227
2228         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2229 }
2230
2231 /*
2232  * When switching from the RT queue, we bring ourselves to a position
2233  * where we might want to pull RT tasks from other runqueues.
2234  */
2235 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2236 {
2237         /*
2238          * If there are other RT tasks then we will reschedule
2239          * and the scheduling of the other RT tasks will handle
2240          * the balancing. But if we are the last RT task
2241          * we may need to handle the pulling of RT tasks
2242          * now.
2243          */
2244         if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2245                 return;
2246
2247         rt_queue_pull_task(rq);
2248 }
2249
2250 void __init init_sched_rt_class(void)
2251 {
2252         unsigned int i;
2253
2254         for_each_possible_cpu(i) {
2255                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2256                                         GFP_KERNEL, cpu_to_node(i));
2257         }
2258 }
2259 #endif /* CONFIG_SMP */
2260
2261 /*
2262  * When switching a task to RT, we may overload the runqueue
2263  * with RT tasks. In this case we try to push them off to
2264  * other runqueues.
2265  */
2266 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2267 {
2268         /*
2269          * If we are already running, then there's nothing
2270          * that needs to be done. But if we are not running
2271          * we may need to preempt the current running task.
2272          * If that current running task is also an RT task
2273          * then see if we can move to another run queue.
2274          */
2275         if (task_on_rq_queued(p) && rq->curr != p) {
2276 #ifdef CONFIG_SMP
2277                 bool need_to_push = rq->rt.overloaded ||
2278                                     !rt_task_fits_capacity(p, cpu_of(rq));
2279
2280                 if (p->nr_cpus_allowed > 1 && need_to_push)
2281                         rt_queue_push_tasks(rq);
2282 #endif /* CONFIG_SMP */
2283                 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2284                         resched_curr(rq);
2285         }
2286 }
2287
2288 /*
2289  * Priority of the task has changed. This may cause
2290  * us to initiate a push or pull.
2291  */
2292 static void
2293 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2294 {
2295         if (!task_on_rq_queued(p))
2296                 return;
2297
2298         if (rq->curr == p) {
2299 #ifdef CONFIG_SMP
2300                 /*
2301                  * If our priority decreases while running, we
2302                  * may need to pull tasks to this runqueue.
2303                  */
2304                 if (oldprio < p->prio)
2305                         rt_queue_pull_task(rq);
2306
2307                 /*
2308                  * If there's a higher priority task waiting to run
2309                  * then reschedule.
2310                  */
2311                 if (p->prio > rq->rt.highest_prio.curr)
2312                         resched_curr(rq);
2313 #else
2314                 /* For UP simply resched on drop of prio */
2315                 if (oldprio < p->prio)
2316                         resched_curr(rq);
2317 #endif /* CONFIG_SMP */
2318         } else {
2319                 /*
2320                  * This task is not running, but if its priority
2321                  * is higher than that of the currently running
2322                  * task, then reschedule.
2323                  */
2324                 if (p->prio < rq->curr->prio)
2325                         resched_curr(rq);
2326         }
2327 }
2328
2329 #ifdef CONFIG_POSIX_TIMERS
2330 static void watchdog(struct rq *rq, struct task_struct *p)
2331 {
2332         unsigned long soft, hard;
2333
2334         /* max may change after cur was read; this will be fixed next tick */
2335         soft = task_rlimit(p, RLIMIT_RTTIME);
2336         hard = task_rlimit_max(p, RLIMIT_RTTIME);
2337
2338         if (soft != RLIM_INFINITY) {
2339                 unsigned long next;
2340
2341                 if (p->rt.watchdog_stamp != jiffies) {
2342                         p->rt.timeout++;
2343                         p->rt.watchdog_stamp = jiffies;
2344                 }
2345
2346                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2347                 if (p->rt.timeout > next) {
2348                         posix_cputimers_rt_watchdog(&p->posix_cputimers,
2349                                                     p->se.sum_exec_runtime);
2350                 }
2351         }
2352 }
2353 #else
2354 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2355 #endif
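
/*
 * Editorial example (assumed numbers): RLIMIT_RTTIME is specified in
 * microseconds while p->rt.timeout counts scheduler ticks, one per jiffy of
 * runtime. Assuming HZ=1000, a tick is USEC_PER_SEC/HZ = 1000us, so a soft
 * limit of 500000us gives next = DIV_ROUND_UP(500000, 1000) = 500 ticks;
 * once the task has run for more than 500 ticks the posix cputimer RT
 * watchdog is kicked.
 */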
2356
2357 /*
2358  * scheduler tick hitting a task of our scheduling class.
2359  *
2360  * NOTE: This function can be called remotely by the tick offload that
2361  * goes along full dynticks. Therefore no local assumption can be made
2362  * and everything must be accessed through the @rq and @curr passed in
2363  * parameters.
2364  */
2365 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2366 {
2367         struct sched_rt_entity *rt_se = &p->rt;
2368
2369         update_curr_rt(rq);
2370         update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2371
2372         watchdog(rq, p);
2373
2374         /*
2375          * RR tasks need a special form of timeslice management.
2376          * FIFO tasks have no timeslices.
2377          */
2378         if (p->policy != SCHED_RR)
2379                 return;
2380
2381         if (--p->rt.time_slice)
2382                 return;
2383
2384         p->rt.time_slice = sched_rr_timeslice;
2385
2386         /*
2387          * Requeue to the end of the queue if we (and all of our ancestors) are
2388          * not the only element on the queue.
2389          */
2390         for_each_sched_rt_entity(rt_se) {
2391                 if (rt_se->run_list.prev != rt_se->run_list.next) {
2392                         requeue_task_rt(rq, p, 0);
2393                         resched_curr(rq);
2394                         return;
2395                 }
2396         }
2397 }
2398
2399 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2400 {
2401         /*
2402          * Time slice is 0 for SCHED_FIFO tasks
2403          */
2404         if (task->policy == SCHED_RR)
2405                 return sched_rr_timeslice;
2406         else
2407                 return 0;
2408 }
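
/*
 * Editorial note: this is the value userspace observes through
 * sched_rr_get_interval(2). It is kept in jiffies internally; with the
 * default RR_TIMESLICE it corresponds to roughly 100ms.
 */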
2409
2410 const struct sched_class rt_sched_class = {
2411         .next                   = &fair_sched_class,
2412         .enqueue_task           = enqueue_task_rt,
2413         .dequeue_task           = dequeue_task_rt,
2414         .yield_task             = yield_task_rt,
2415
2416         .check_preempt_curr     = check_preempt_curr_rt,
2417
2418         .pick_next_task         = pick_next_task_rt,
2419         .put_prev_task          = put_prev_task_rt,
2420         .set_next_task          = set_next_task_rt,
2421
2422 #ifdef CONFIG_SMP
2423         .balance                = balance_rt,
2424         .select_task_rq         = select_task_rq_rt,
2425         .set_cpus_allowed       = set_cpus_allowed_common,
2426         .rq_online              = rq_online_rt,
2427         .rq_offline             = rq_offline_rt,
2428         .task_woken             = task_woken_rt,
2429         .switched_from          = switched_from_rt,
2430 #endif
2431
2432         .task_tick              = task_tick_rt,
2433
2434         .get_rr_interval        = get_rr_interval_rt,
2435
2436         .prio_changed           = prio_changed_rt,
2437         .switched_to            = switched_to_rt,
2438
2439         .update_curr            = update_curr_rt,
2440
2441 #ifdef CONFIG_UCLAMP_TASK
2442         .uclamp_enabled         = 1,
2443 #endif
2444 };
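
/*
 * Editorial note: in this kernel the scheduling classes are chained through
 * their ->next pointers, with the RT class sitting below the deadline class
 * and above the fair (CFS) class, which is why .next points at
 * fair_sched_class here.
 */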
2445
2446 #ifdef CONFIG_RT_GROUP_SCHED
2447 /*
2448  * Ensure that the real-time constraints are schedulable.
2449  */
2450 static DEFINE_MUTEX(rt_constraints_mutex);
2451
2452 /* Must be called with tasklist_lock held */
2453 static inline int tg_has_rt_tasks(struct task_group *tg)
2454 {
2455         struct task_struct *g, *p;
2456
2457         /*
2458          * Autogroups do not have RT tasks; see autogroup_create().
2459          */
2460         if (task_group_is_autogroup(tg))
2461                 return 0;
2462
2463         for_each_process_thread(g, p) {
2464                 if (rt_task(p) && task_group(p) == tg)
2465                         return 1;
2466         }
2467
2468         return 0;
2469 }
2470
2471 struct rt_schedulable_data {
2472         struct task_group *tg;
2473         u64 rt_period;
2474         u64 rt_runtime;
2475 };
2476
2477 static int tg_rt_schedulable(struct task_group *tg, void *data)
2478 {
2479         struct rt_schedulable_data *d = data;
2480         struct task_group *child;
2481         unsigned long total, sum = 0;
2482         u64 period, runtime;
2483
2484         period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2485         runtime = tg->rt_bandwidth.rt_runtime;
2486
2487         if (tg == d->tg) {
2488                 period = d->rt_period;
2489                 runtime = d->rt_runtime;
2490         }
2491
2492         /*
2493          * Cannot have more runtime than the period.
2494          */
2495         if (runtime > period && runtime != RUNTIME_INF)
2496                 return -EINVAL;
2497
2498         /*
2499          * Ensure we don't starve existing RT tasks.
2500          */
2501         if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
2502                 return -EBUSY;
2503
2504         total = to_ratio(period, runtime);
2505
2506         /*
2507          * Nobody can have more than the global setting allows.
2508          */
2509         if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2510                 return -EINVAL;
2511
2512         /*
2513          * The sum of our children's runtime should not exceed our own.
2514          */
2515         list_for_each_entry_rcu(child, &tg->children, siblings) {
2516                 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2517                 runtime = child->rt_bandwidth.rt_runtime;
2518
2519                 if (child == d->tg) {
2520                         period = d->rt_period;
2521                         runtime = d->rt_runtime;
2522                 }
2523
2524                 sum += to_ratio(period, runtime);
2525         }
2526
2527         if (sum > total)
2528                 return -EINVAL;
2529
2530         return 0;
2531 }
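
/*
 * Editorial example (assumed values): with the default global settings of
 * sched_rt_period_us=1000000 and sched_rt_runtime_us=950000, the global
 * ratio is 95% of CPU time. A group asking for runtime=500000 over
 * period=1000000 (50%) passes the global check above, and the sum of its
 * children's ratios must then stay at or below that 50%.
 */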
2532
2533 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2534 {
2535         int ret;
2536
2537         struct rt_schedulable_data data = {
2538                 .tg = tg,
2539                 .rt_period = period,
2540                 .rt_runtime = runtime,
2541         };
2542
2543         rcu_read_lock();
2544         ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2545         rcu_read_unlock();
2546
2547         return ret;
2548 }
2549
2550 static int tg_set_rt_bandwidth(struct task_group *tg,
2551                 u64 rt_period, u64 rt_runtime)
2552 {
2553         int i, err = 0;
2554
2555         /*
2556          * Disallowing the root group RT runtime is BAD; it would prevent the
2557          * kernel from creating (and/or operating) RT threads.
2558          */
2559         if (tg == &root_task_group && rt_runtime == 0)
2560                 return -EINVAL;
2561
2562         /* A zero period doesn't make any sense. */
2563         if (rt_period == 0)
2564                 return -EINVAL;
2565
2566         mutex_lock(&rt_constraints_mutex);
2567         read_lock(&tasklist_lock);
2568         err = __rt_schedulable(tg, rt_period, rt_runtime);
2569         if (err)
2570                 goto unlock;
2571
2572         raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2573         tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2574         tg->rt_bandwidth.rt_runtime = rt_runtime;
2575
2576         for_each_possible_cpu(i) {
2577                 struct rt_rq *rt_rq = tg->rt_rq[i];
2578
2579                 raw_spin_lock(&rt_rq->rt_runtime_lock);
2580                 rt_rq->rt_runtime = rt_runtime;
2581                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2582         }
2583         raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2584 unlock:
2585         read_unlock(&tasklist_lock);
2586         mutex_unlock(&rt_constraints_mutex);
2587
2588         return err;
2589 }
2590
2591 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2592 {
2593         u64 rt_runtime, rt_period;
2594
2595         rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2596         rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2597         if (rt_runtime_us < 0)
2598                 rt_runtime = RUNTIME_INF;
2599         else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2600                 return -EINVAL;
2601
2602         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2603 }
2604
2605 long sched_group_rt_runtime(struct task_group *tg)
2606 {
2607         u64 rt_runtime_us;
2608
2609         if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2610                 return -1;
2611
2612         rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2613         do_div(rt_runtime_us, NSEC_PER_USEC);
2614         return rt_runtime_us;
2615 }
2616
2617 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2618 {
2619         u64 rt_runtime, rt_period;
2620
2621         if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2622                 return -EINVAL;
2623
2624         rt_period = rt_period_us * NSEC_PER_USEC;
2625         rt_runtime = tg->rt_bandwidth.rt_runtime;
2626
2627         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2628 }
2629
2630 long sched_group_rt_period(struct task_group *tg)
2631 {
2632         u64 rt_period_us;
2633
2634         rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2635         do_div(rt_period_us, NSEC_PER_USEC);
2636         return rt_period_us;
2637 }
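
/*
 * Editorial note: these four helpers back the cgroup-v1 cpu controller
 * files cpu.rt_runtime_us and cpu.rt_period_us (CONFIG_RT_GROUP_SCHED).
 * For example, assuming a v1 hierarchy mounted at /sys/fs/cgroup/cpu and a
 * hypothetical group "mygroup":
 *
 *   # allow the group up to 300ms of RT time per 1s period
 *   echo 300000  > /sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us
 *   echo 1000000 > /sys/fs/cgroup/cpu/mygroup/cpu.rt_period_us
 *
 * A negative value written to cpu.rt_runtime_us maps to RUNTIME_INF.
 */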
2638
2639 static int sched_rt_global_constraints(void)
2640 {
2641         int ret = 0;
2642
2643         mutex_lock(&rt_constraints_mutex);
2644         read_lock(&tasklist_lock);
2645         ret = __rt_schedulable(NULL, 0, 0);
2646         read_unlock(&tasklist_lock);
2647         mutex_unlock(&rt_constraints_mutex);
2648
2649         return ret;
2650 }
2651
2652 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2653 {
2654         /* Don't accept realtime tasks when there is no way for them to run */
2655         if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2656                 return 0;
2657
2658         return 1;
2659 }
2660
2661 #else /* !CONFIG_RT_GROUP_SCHED */
2662 static int sched_rt_global_constraints(void)
2663 {
2664         unsigned long flags;
2665         int i;
2666
2667         raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2668         for_each_possible_cpu(i) {
2669                 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2670
2671                 raw_spin_lock(&rt_rq->rt_runtime_lock);
2672                 rt_rq->rt_runtime = global_rt_runtime();
2673                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2674         }
2675         raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2676
2677         return 0;
2678 }
2679 #endif /* CONFIG_RT_GROUP_SCHED */
2680
2681 static int sched_rt_global_validate(void)
2682 {
2683         if (sysctl_sched_rt_period <= 0)
2684                 return -EINVAL;
2685
2686         if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2687                 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
2688                 return -EINVAL;
2689
2690         return 0;
2691 }
2692
2693 static void sched_rt_do_global(void)
2694 {
2695         def_rt_bandwidth.rt_runtime = global_rt_runtime();
2696         def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2697 }
2698
2699 int sched_rt_handler(struct ctl_table *table, int write,
2700                 void __user *buffer, size_t *lenp,
2701                 loff_t *ppos)
2702 {
2703         int old_period, old_runtime;
2704         static DEFINE_MUTEX(mutex);
2705         int ret;
2706
2707         mutex_lock(&mutex);
2708         old_period = sysctl_sched_rt_period;
2709         old_runtime = sysctl_sched_rt_runtime;
2710
2711         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2712
2713         if (!ret && write) {
2714                 ret = sched_rt_global_validate();
2715                 if (ret)
2716                         goto undo;
2717
2718                 ret = sched_dl_global_validate();
2719                 if (ret)
2720                         goto undo;
2721
2722                 ret = sched_rt_global_constraints();
2723                 if (ret)
2724                         goto undo;
2725
2726                 sched_rt_do_global();
2727                 sched_dl_do_global();
2728         }
2729         if (0) {
2730 undo:
2731                 sysctl_sched_rt_period = old_period;
2732                 sysctl_sched_rt_runtime = old_runtime;
2733         }
2734         mutex_unlock(&mutex);
2735
2736         return ret;
2737 }
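
/*
 * Editorial note: this handler serves /proc/sys/kernel/sched_rt_period_us
 * and /proc/sys/kernel/sched_rt_runtime_us (defaults: 1000000 and 950000).
 * For example:
 *
 *   # leave 5% of each period for non-RT tasks (the default)
 *   echo 950000 > /proc/sys/kernel/sched_rt_runtime_us
 *   # -1 means RUNTIME_INF: RT tasks may consume the whole period
 *   echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 */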
2738
2739 int sched_rr_handler(struct ctl_table *table, int write,
2740                 void __user *buffer, size_t *lenp,
2741                 loff_t *ppos)
2742 {
2743         int ret;
2744         static DEFINE_MUTEX(mutex);
2745
2746         mutex_lock(&mutex);
2747         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2748         /*
2749          * Make sure that internally we keep jiffies.
2750          * Also, writing zero resets the timeslice to default:
2751          */
2752         if (!ret && write) {
2753                 sched_rr_timeslice =
2754                         sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2755                         msecs_to_jiffies(sysctl_sched_rr_timeslice);
2756         }
2757         mutex_unlock(&mutex);
2758
2759         return ret;
2760 }
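
/*
 * Editorial note: this handler serves /proc/sys/kernel/sched_rr_timeslice_ms.
 * The value is converted to jiffies for internal use, and writing zero (or a
 * negative value) restores the default RR_TIMESLICE, as the comment above says.
 */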
2761
2762 #ifdef CONFIG_SCHED_DEBUG
2763 void print_rt_stats(struct seq_file *m, int cpu)
2764 {
2765         rt_rq_iter_t iter;
2766         struct rt_rq *rt_rq;
2767
2768         rcu_read_lock();
2769         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2770                 print_rt_rq(m, cpu, rt_rq);
2771         rcu_read_unlock();
2772 }
2773 #endif /* CONFIG_SCHED_DEBUG */