1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9
10 int sched_rr_timeslice = RR_TIMESLICE;
11
12 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13
14 struct rt_bandwidth def_rt_bandwidth;
15
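/*
 * Bandwidth period timer: fires once per rt_period, forwards the timer
 * past any missed periods and replenishes runtime for each overrun via
 * do_sched_rt_period_timer(). It keeps rearming itself until every
 * serviced rt_rq reports idle.
 */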
16 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
17 {
18         struct rt_bandwidth *rt_b =
19                 container_of(timer, struct rt_bandwidth, rt_period_timer);
20         ktime_t now;
21         int overrun;
22         int idle = 0;
23
24         for (;;) {
25                 now = hrtimer_cb_get_time(timer);
26                 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
27
28                 if (!overrun)
29                         break;
30
31                 idle = do_sched_rt_period_timer(rt_b, overrun);
32         }
33
34         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35 }
36
37 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
38 {
39         rt_b->rt_period = ns_to_ktime(period);
40         rt_b->rt_runtime = runtime;
41
42         raw_spin_lock_init(&rt_b->rt_runtime_lock);
43
44         hrtimer_init(&rt_b->rt_period_timer,
45                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
46         rt_b->rt_period_timer.function = sched_rt_period_timer;
47 }
48
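/*
 * Arm the bandwidth period timer so runtime gets replenished every
 * rt_period. A no-op when throttling is disabled, runtime is infinite,
 * or the timer is already active.
 */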
49 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
50 {
51         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52                 return;
53
54         if (hrtimer_active(&rt_b->rt_period_timer))
55                 return;
56
57         raw_spin_lock(&rt_b->rt_runtime_lock);
58         start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
59         raw_spin_unlock(&rt_b->rt_runtime_lock);
60 }
61
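/*
 * Initialise a per-CPU RT runqueue: an empty priority array (with the
 * delimiter bit set for sched_find_first_bit()), the SMP push/pull
 * bookkeeping and the bandwidth accounting fields.
 */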
62 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
63 {
64         struct rt_prio_array *array;
65         int i;
66
67         array = &rt_rq->active;
68         for (i = 0; i < MAX_RT_PRIO; i++) {
69                 INIT_LIST_HEAD(array->queue + i);
70                 __clear_bit(i, array->bitmap);
71         }
72         /* delimiter for bitsearch: */
73         __set_bit(MAX_RT_PRIO, array->bitmap);
74
75 #if defined CONFIG_SMP
76         rt_rq->highest_prio.curr = MAX_RT_PRIO;
77         rt_rq->highest_prio.next = MAX_RT_PRIO;
78         rt_rq->rt_nr_migratory = 0;
79         rt_rq->overloaded = 0;
80         plist_head_init(&rt_rq->pushable_tasks);
81 #endif
82
83         rt_rq->rt_time = 0;
84         rt_rq->rt_throttled = 0;
85         rt_rq->rt_runtime = 0;
86         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
87 }
88
89 #ifdef CONFIG_RT_GROUP_SCHED
90 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
91 {
92         hrtimer_cancel(&rt_b->rt_period_timer);
93 }
94
95 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
96
97 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
98 {
99 #ifdef CONFIG_SCHED_DEBUG
100         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
101 #endif
102         return container_of(rt_se, struct task_struct, rt);
103 }
104
105 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
106 {
107         return rt_rq->rq;
108 }
109
110 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
111 {
112         return rt_se->rt_rq;
113 }
114
115 void free_rt_sched_group(struct task_group *tg)
116 {
117         int i;
118
119         if (tg->rt_se)
120                 destroy_rt_bandwidth(&tg->rt_bandwidth);
121
122         for_each_possible_cpu(i) {
123                 if (tg->rt_rq)
124                         kfree(tg->rt_rq[i]);
125                 if (tg->rt_se)
126                         kfree(tg->rt_se[i]);
127         }
128
129         kfree(tg->rt_rq);
130         kfree(tg->rt_se);
131 }
132
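/*
 * Hook a task group's per-CPU rt_rq and sched_rt_entity into the
 * runqueue hierarchy: the entity is queued on its parent's rt_rq (or
 * the root rt_rq) and owns rt_rq as its group queue (my_q).
 */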
133 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
134                 struct sched_rt_entity *rt_se, int cpu,
135                 struct sched_rt_entity *parent)
136 {
137         struct rq *rq = cpu_rq(cpu);
138
139         rt_rq->highest_prio.curr = MAX_RT_PRIO;
140         rt_rq->rt_nr_boosted = 0;
141         rt_rq->rq = rq;
142         rt_rq->tg = tg;
143
144         tg->rt_rq[cpu] = rt_rq;
145         tg->rt_se[cpu] = rt_se;
146
147         if (!rt_se)
148                 return;
149
150         if (!parent)
151                 rt_se->rt_rq = &rq->rt;
152         else
153                 rt_se->rt_rq = parent->my_q;
154
155         rt_se->my_q = rt_rq;
156         rt_se->parent = parent;
157         INIT_LIST_HEAD(&rt_se->run_list);
158 }
159
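/*
 * Allocate and initialise the per-CPU rt_rq and sched_rt_entity arrays
 * for a new task group, inheriting the default bandwidth period.
 * Returns 1 on success, 0 on allocation failure.
 */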
160 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
161 {
162         struct rt_rq *rt_rq;
163         struct sched_rt_entity *rt_se;
164         int i;
165
166         tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
167         if (!tg->rt_rq)
168                 goto err;
169         tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
170         if (!tg->rt_se)
171                 goto err;
172
173         init_rt_bandwidth(&tg->rt_bandwidth,
174                         ktime_to_ns(def_rt_bandwidth.rt_period), 0);
175
176         for_each_possible_cpu(i) {
177                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
178                                      GFP_KERNEL, cpu_to_node(i));
179                 if (!rt_rq)
180                         goto err;
181
182                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
183                                      GFP_KERNEL, cpu_to_node(i));
184                 if (!rt_se)
185                         goto err_free_rq;
186
187                 init_rt_rq(rt_rq, cpu_rq(i));
188                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
189                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
190         }
191
192         return 1;
193
194 err_free_rq:
195         kfree(rt_rq);
196 err:
197         return 0;
198 }
199
200 #else /* CONFIG_RT_GROUP_SCHED */
201
202 #define rt_entity_is_task(rt_se) (1)
203
204 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
205 {
206         return container_of(rt_se, struct task_struct, rt);
207 }
208
209 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
210 {
211         return container_of(rt_rq, struct rq, rt);
212 }
213
214 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
215 {
216         struct task_struct *p = rt_task_of(rt_se);
217         struct rq *rq = task_rq(p);
218
219         return &rq->rt;
220 }
221
222 void free_rt_sched_group(struct task_group *tg) { }
223
224 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
225 {
226         return 1;
227 }
228 #endif /* CONFIG_RT_GROUP_SCHED */
229
230 #ifdef CONFIG_SMP
231
232 static int pull_rt_task(struct rq *this_rq);
233
234 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
235 {
236         /* Try to pull RT tasks here if we lower this rq's prio */
237         return rq->rt.highest_prio.curr > prev->prio;
238 }
239
240 static inline int rt_overloaded(struct rq *rq)
241 {
242         return atomic_read(&rq->rd->rto_count);
243 }
244
245 static inline void rt_set_overload(struct rq *rq)
246 {
247         if (!rq->online)
248                 return;
249
250         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
251         /*
252          * Make sure the mask is visible before we set
253          * the overload count. That is checked to determine
254          * if we should look at the mask. It would be a shame
255          * if we looked at the mask, but the mask was not
256          * updated yet.
257          *
258          * Matched by the barrier in pull_rt_task().
259          */
260         smp_wmb();
261         atomic_inc(&rq->rd->rto_count);
262 }
263
264 static inline void rt_clear_overload(struct rq *rq)
265 {
266         if (!rq->online)
267                 return;
268
269         /* the order here really doesn't matter */
270         atomic_dec(&rq->rd->rto_count);
271         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
272 }
273
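/*
 * Track RT overload for the push/pull logic: a runqueue is overloaded
 * when it has more than one RT task and at least one of them can
 * migrate, in which case it is flagged in the root domain's rto_mask.
 */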
274 static void update_rt_migration(struct rt_rq *rt_rq)
275 {
276         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
277                 if (!rt_rq->overloaded) {
278                         rt_set_overload(rq_of_rt_rq(rt_rq));
279                         rt_rq->overloaded = 1;
280                 }
281         } else if (rt_rq->overloaded) {
282                 rt_clear_overload(rq_of_rt_rq(rt_rq));
283                 rt_rq->overloaded = 0;
284         }
285 }
286
287 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
288 {
289         struct task_struct *p;
290
291         if (!rt_entity_is_task(rt_se))
292                 return;
293
294         p = rt_task_of(rt_se);
295         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
296
297         rt_rq->rt_nr_total++;
298         if (p->nr_cpus_allowed > 1)
299                 rt_rq->rt_nr_migratory++;
300
301         update_rt_migration(rt_rq);
302 }
303
304 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
305 {
306         struct task_struct *p;
307
308         if (!rt_entity_is_task(rt_se))
309                 return;
310
311         p = rt_task_of(rt_se);
312         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
313
314         rt_rq->rt_nr_total--;
315         if (p->nr_cpus_allowed > 1)
316                 rt_rq->rt_nr_migratory--;
317
318         update_rt_migration(rt_rq);
319 }
320
321 static inline int has_pushable_tasks(struct rq *rq)
322 {
323         return !plist_head_empty(&rq->rt.pushable_tasks);
324 }
325
326 static inline void set_post_schedule(struct rq *rq)
327 {
328         /*
329          * We detect this state here so that we can avoid taking the RQ
330          * lock again later if there is no need to push
331          */
332         rq->post_schedule = has_pushable_tasks(rq);
333 }
334
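/*
 * Maintain the priority-sorted plist of tasks that could be pushed to
 * another CPU, together with the cached priority of the next-highest
 * pushable task (rt.highest_prio.next).
 */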
335 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
336 {
337         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
338         plist_node_init(&p->pushable_tasks, p->prio);
339         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
340
341         /* Update the highest prio pushable task */
342         if (p->prio < rq->rt.highest_prio.next)
343                 rq->rt.highest_prio.next = p->prio;
344 }
345
346 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
347 {
348         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
349
350         /* Update the new highest prio pushable task */
351         if (has_pushable_tasks(rq)) {
352                 p = plist_first_entry(&rq->rt.pushable_tasks,
353                                       struct task_struct, pushable_tasks);
354                 rq->rt.highest_prio.next = p->prio;
355         } else
356                 rq->rt.highest_prio.next = MAX_RT_PRIO;
357 }
358
359 #else
360
361 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
362 {
363 }
364
365 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
366 {
367 }
368
369 static inline
370 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
371 {
372 }
373
374 static inline
375 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
376 {
377 }
378
379 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
380 {
381         return false;
382 }
383
384 static inline int pull_rt_task(struct rq *this_rq)
385 {
386         return 0;
387 }
388
389 static inline void set_post_schedule(struct rq *rq)
390 {
391 }
392 #endif /* CONFIG_SMP */
393
394 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
395 {
396         return !list_empty(&rt_se->run_list);
397 }
398
399 #ifdef CONFIG_RT_GROUP_SCHED
400
401 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
402 {
403         if (!rt_rq->tg)
404                 return RUNTIME_INF;
405
406         return rt_rq->rt_runtime;
407 }
408
409 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
410 {
411         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
412 }
413
414 typedef struct task_group *rt_rq_iter_t;
415
416 static inline struct task_group *next_task_group(struct task_group *tg)
417 {
418         do {
419                 tg = list_entry_rcu(tg->list.next,
420                         typeof(struct task_group), list);
421         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
422
423         if (&tg->list == &task_groups)
424                 tg = NULL;
425
426         return tg;
427 }
428
429 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
430         for (iter = container_of(&task_groups, typeof(*iter), list);    \
431                 (iter = next_task_group(iter)) &&                       \
432                 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
433
434 #define for_each_sched_rt_entity(rt_se) \
435         for (; rt_se; rt_se = rt_se->parent)
436
437 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
438 {
439         return rt_se->my_q;
440 }
441
442 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
443 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
444
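/*
 * (Re)enqueue a group's sched_rt_entity, e.g. after the group is
 * unthrottled. If the group now holds a higher-priority task than the
 * one currently running, ask for a reschedule.
 */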
445 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
446 {
447         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
448         struct sched_rt_entity *rt_se;
449
450         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
451
452         rt_se = rt_rq->tg->rt_se[cpu];
453
454         if (rt_rq->rt_nr_running) {
455                 if (rt_se && !on_rt_rq(rt_se))
456                         enqueue_rt_entity(rt_se, false);
457                 if (rt_rq->highest_prio.curr < curr->prio)
458                         resched_task(curr);
459         }
460 }
461
462 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
463 {
464         struct sched_rt_entity *rt_se;
465         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
466
467         rt_se = rt_rq->tg->rt_se[cpu];
468
469         if (rt_se && on_rt_rq(rt_se))
470                 dequeue_rt_entity(rt_se);
471 }
472
473 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
474 {
475         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
476 }
477
478 static int rt_se_boosted(struct sched_rt_entity *rt_se)
479 {
480         struct rt_rq *rt_rq = group_rt_rq(rt_se);
481         struct task_struct *p;
482
483         if (rt_rq)
484                 return !!rt_rq->rt_nr_boosted;
485
486         p = rt_task_of(rt_se);
487         return p->prio != p->normal_prio;
488 }
489
490 #ifdef CONFIG_SMP
491 static inline const struct cpumask *sched_rt_period_mask(void)
492 {
493         return this_rq()->rd->span;
494 }
495 #else
496 static inline const struct cpumask *sched_rt_period_mask(void)
497 {
498         return cpu_online_mask;
499 }
500 #endif
501
502 static inline
503 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
504 {
505         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
506 }
507
508 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
509 {
510         return &rt_rq->tg->rt_bandwidth;
511 }
512
513 #else /* !CONFIG_RT_GROUP_SCHED */
514
515 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
516 {
517         return rt_rq->rt_runtime;
518 }
519
520 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
521 {
522         return ktime_to_ns(def_rt_bandwidth.rt_period);
523 }
524
525 typedef struct rt_rq *rt_rq_iter_t;
526
527 #define for_each_rt_rq(rt_rq, iter, rq) \
528         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
529
530 #define for_each_sched_rt_entity(rt_se) \
531         for (; rt_se; rt_se = NULL)
532
533 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
534 {
535         return NULL;
536 }
537
538 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
539 {
540         if (rt_rq->rt_nr_running)
541                 resched_task(rq_of_rt_rq(rt_rq)->curr);
542 }
543
544 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
545 {
546 }
547
548 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
549 {
550         return rt_rq->rt_throttled;
551 }
552
553 static inline const struct cpumask *sched_rt_period_mask(void)
554 {
555         return cpu_online_mask;
556 }
557
558 static inline
559 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
560 {
561         return &cpu_rq(cpu)->rt;
562 }
563
564 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
565 {
566         return &def_rt_bandwidth;
567 }
568
569 #endif /* CONFIG_RT_GROUP_SCHED */
570
571 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
572 {
573         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
574
575         return (hrtimer_active(&rt_b->rt_period_timer) ||
576                 rt_rq->rt_time < rt_b->rt_runtime);
577 }
578
579 #ifdef CONFIG_SMP
580 /*
581  * We ran out of runtime; see if we can borrow some from our neighbours.
582  */
583 static int do_balance_runtime(struct rt_rq *rt_rq)
584 {
585         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
586         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
587         int i, weight, more = 0;
588         u64 rt_period;
589
590         weight = cpumask_weight(rd->span);
591
592         raw_spin_lock(&rt_b->rt_runtime_lock);
593         rt_period = ktime_to_ns(rt_b->rt_period);
594         for_each_cpu(i, rd->span) {
595                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
596                 s64 diff;
597
598                 if (iter == rt_rq)
599                         continue;
600
601                 raw_spin_lock(&iter->rt_runtime_lock);
602                 /*
603                  * Either all rqs have inf runtime and there's nothing to steal
604                  * or __disable_runtime() below sets a specific rq to inf to
605                  * indicate it's been disabled and disallow stealing.
606                  */
607                 if (iter->rt_runtime == RUNTIME_INF)
608                         goto next;
609
610                 /*
611                  * From runqueues with spare time, take 1/n part of their
612                  * spare time, but no more than our period.
613                  */
614                 diff = iter->rt_runtime - iter->rt_time;
615                 if (diff > 0) {
616                         diff = div_u64((u64)diff, weight);
617                         if (rt_rq->rt_runtime + diff > rt_period)
618                                 diff = rt_period - rt_rq->rt_runtime;
619                         iter->rt_runtime -= diff;
620                         rt_rq->rt_runtime += diff;
621                         more = 1;
622                         if (rt_rq->rt_runtime == rt_period) {
623                                 raw_spin_unlock(&iter->rt_runtime_lock);
624                                 break;
625                         }
626                 }
627 next:
628                 raw_spin_unlock(&iter->rt_runtime_lock);
629         }
630         raw_spin_unlock(&rt_b->rt_runtime_lock);
631
632         return more;
633 }
634
635 /*
636  * Ensure this RQ takes back all the runtime it lent to its neighbours.
637  */
638 static void __disable_runtime(struct rq *rq)
639 {
640         struct root_domain *rd = rq->rd;
641         rt_rq_iter_t iter;
642         struct rt_rq *rt_rq;
643
644         if (unlikely(!scheduler_running))
645                 return;
646
647         for_each_rt_rq(rt_rq, iter, rq) {
648                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
649                 s64 want;
650                 int i;
651
652                 raw_spin_lock(&rt_b->rt_runtime_lock);
653                 raw_spin_lock(&rt_rq->rt_runtime_lock);
654                 /*
655                  * Either we're all inf and nobody needs to borrow, or we're
656                  * already disabled and thus have nothing to do, or we have
657                  * exactly the right amount of runtime to take out.
658                  */
659                 if (rt_rq->rt_runtime == RUNTIME_INF ||
660                                 rt_rq->rt_runtime == rt_b->rt_runtime)
661                         goto balanced;
662                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
663
664                 /*
665                  * Calculate the difference between what we started out with
666                  * and what we currently have; that's the amount of runtime
667                  * we lent out and now have to reclaim.
668                  */
669                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
670
671                 /*
672                  * Greedy reclaim, take back as much as we can.
673                  */
674                 for_each_cpu(i, rd->span) {
675                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
676                         s64 diff;
677
678                         /*
679                          * Can't reclaim from ourselves or disabled runqueues.
680                          */
681                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
682                                 continue;
683
684                         raw_spin_lock(&iter->rt_runtime_lock);
685                         if (want > 0) {
686                                 diff = min_t(s64, iter->rt_runtime, want);
687                                 iter->rt_runtime -= diff;
688                                 want -= diff;
689                         } else {
690                                 iter->rt_runtime -= want;
691                                 want -= want;
692                         }
693                         raw_spin_unlock(&iter->rt_runtime_lock);
694
695                         if (!want)
696                                 break;
697                 }
698
699                 raw_spin_lock(&rt_rq->rt_runtime_lock);
700                 /*
701                  * We cannot be left wanting - that would mean some runtime
702                  * leaked out of the system.
703                  */
704                 BUG_ON(want);
705 balanced:
706                 /*
707                  * Disable all the borrow logic by pretending we have inf
708                  * runtime - in which case borrowing doesn't make sense.
709                  */
710                 rt_rq->rt_runtime = RUNTIME_INF;
711                 rt_rq->rt_throttled = 0;
712                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
713                 raw_spin_unlock(&rt_b->rt_runtime_lock);
714         }
715 }
716
717 static void __enable_runtime(struct rq *rq)
718 {
719         rt_rq_iter_t iter;
720         struct rt_rq *rt_rq;
721
722         if (unlikely(!scheduler_running))
723                 return;
724
725         /*
726          * Reset each runqueue's bandwidth settings
727          */
728         for_each_rt_rq(rt_rq, iter, rq) {
729                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
730
731                 raw_spin_lock(&rt_b->rt_runtime_lock);
732                 raw_spin_lock(&rt_rq->rt_runtime_lock);
733                 rt_rq->rt_runtime = rt_b->rt_runtime;
734                 rt_rq->rt_time = 0;
735                 rt_rq->rt_throttled = 0;
736                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
737                 raw_spin_unlock(&rt_b->rt_runtime_lock);
738         }
739 }
740
741 static int balance_runtime(struct rt_rq *rt_rq)
742 {
743         int more = 0;
744
745         if (!sched_feat(RT_RUNTIME_SHARE))
746                 return more;
747
748         if (rt_rq->rt_time > rt_rq->rt_runtime) {
749                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
750                 more = do_balance_runtime(rt_rq);
751                 raw_spin_lock(&rt_rq->rt_runtime_lock);
752         }
753
754         return more;
755 }
756 #else /* !CONFIG_SMP */
757 static inline int balance_runtime(struct rt_rq *rt_rq)
758 {
759         return 0;
760 }
761 #endif /* CONFIG_SMP */
762
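/*
 * Runs from the bandwidth period timer: for every rt_rq in the period
 * mask, pay back up to overrun periods worth of runtime, unthrottle and
 * re-enqueue runqueues that are below their limit again, and report
 * whether the timer may stop (everything idle or throttling disabled).
 */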
763 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
764 {
765         int i, idle = 1, throttled = 0;
766         const struct cpumask *span;
767
768         span = sched_rt_period_mask();
769 #ifdef CONFIG_RT_GROUP_SCHED
770         /*
771          * FIXME: isolated CPUs should really leave the root task group,
772          * whether they are isolcpus or were isolated via cpusets, lest
773          * the timer run on a CPU which does not service all runqueues,
774          * potentially leaving other CPUs indefinitely throttled.  If
775          * isolation is really required, the user will turn the throttle
776          * off to kill the perturbations it causes anyway.  Meanwhile,
777          * this maintains functionality for boot and/or troubleshooting.
778          */
779         if (rt_b == &root_task_group.rt_bandwidth)
780                 span = cpu_online_mask;
781 #endif
782         for_each_cpu(i, span) {
783                 int enqueue = 0;
784                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
785                 struct rq *rq = rq_of_rt_rq(rt_rq);
786
787                 raw_spin_lock(&rq->lock);
788                 if (rt_rq->rt_time) {
789                         u64 runtime;
790
791                         raw_spin_lock(&rt_rq->rt_runtime_lock);
792                         if (rt_rq->rt_throttled)
793                                 balance_runtime(rt_rq);
794                         runtime = rt_rq->rt_runtime;
795                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
796                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
797                                 rt_rq->rt_throttled = 0;
798                                 enqueue = 1;
799
800                                 /*
801                                  * Force a clock update if the CPU was idle,
802                                  * lest wakeup -> unthrottle time accumulate.
803                                  */
804                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
805                                         rq->skip_clock_update = -1;
806                         }
807                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
808                                 idle = 0;
809                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
810                 } else if (rt_rq->rt_nr_running) {
811                         idle = 0;
812                         if (!rt_rq_throttled(rt_rq))
813                                 enqueue = 1;
814                 }
815                 if (rt_rq->rt_throttled)
816                         throttled = 1;
817
818                 if (enqueue)
819                         sched_rt_rq_enqueue(rt_rq);
820                 raw_spin_unlock(&rq->lock);
821         }
822
823         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
824                 return 1;
825
826         return idle;
827 }
828
829 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
830 {
831 #ifdef CONFIG_RT_GROUP_SCHED
832         struct rt_rq *rt_rq = group_rt_rq(rt_se);
833
834         if (rt_rq)
835                 return rt_rq->highest_prio.curr;
836 #endif
837
838         return rt_task_of(rt_se)->prio;
839 }
840
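/*
 * Check whether this rt_rq has used up its runtime for the current
 * period, after trying to borrow spare runtime from other CPUs. When it
 * has, mark it throttled and dequeue it; the return value tells
 * update_curr_rt() to reschedule the current task.
 */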
841 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
842 {
843         u64 runtime = sched_rt_runtime(rt_rq);
844
845         if (rt_rq->rt_throttled)
846                 return rt_rq_throttled(rt_rq);
847
848         if (runtime >= sched_rt_period(rt_rq))
849                 return 0;
850
851         balance_runtime(rt_rq);
852         runtime = sched_rt_runtime(rt_rq);
853         if (runtime == RUNTIME_INF)
854                 return 0;
855
856         if (rt_rq->rt_time > runtime) {
857                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
858
859                 /*
860                  * Don't actually throttle groups that have no runtime assigned
861                  * but accrue some time due to boosting.
862                  */
863                 if (likely(rt_b->rt_runtime)) {
864                         static bool once = false;
865
866                         rt_rq->rt_throttled = 1;
867
868                         if (!once) {
869                                 once = true;
870                                 printk_sched("sched: RT throttling activated\n");
871                         }
872                 } else {
873                         /*
874                          * In case we did anyway, make it go away;
875                          * replenishment is a joke, since it will replenish us
876                          * with exactly 0 ns.
877                          */
878                         rt_rq->rt_time = 0;
879                 }
880
881                 if (rt_rq_throttled(rt_rq)) {
882                         sched_rt_rq_dequeue(rt_rq);
883                         return 1;
884                 }
885         }
886
887         return 0;
888 }
889
890 /*
891  * Update the current task's runtime statistics. Skip current tasks that
892  * are not in our scheduling class.
893  */
894 static void update_curr_rt(struct rq *rq)
895 {
896         struct task_struct *curr = rq->curr;
897         struct sched_rt_entity *rt_se = &curr->rt;
898         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
899         u64 delta_exec;
900
901         if (curr->sched_class != &rt_sched_class)
902                 return;
903
904         delta_exec = rq_clock_task(rq) - curr->se.exec_start;
905         if (unlikely((s64)delta_exec <= 0))
906                 return;
907
908         schedstat_set(curr->se.statistics.exec_max,
909                       max(curr->se.statistics.exec_max, delta_exec));
910
911         curr->se.sum_exec_runtime += delta_exec;
912         account_group_exec_runtime(curr, delta_exec);
913
914         curr->se.exec_start = rq_clock_task(rq);
915         cpuacct_charge(curr, delta_exec);
916
917         sched_rt_avg_update(rq, delta_exec);
918
919         if (!rt_bandwidth_enabled())
920                 return;
921
922         for_each_sched_rt_entity(rt_se) {
923                 rt_rq = rt_rq_of_se(rt_se);
924
925                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
926                         raw_spin_lock(&rt_rq->rt_runtime_lock);
927                         rt_rq->rt_time += delta_exec;
928                         if (sched_rt_runtime_exceeded(rt_rq))
929                                 resched_task(curr);
930                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
931                 }
932         }
933 }
934
935 #if defined CONFIG_SMP
936
937 static void
938 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
939 {
940         struct rq *rq = rq_of_rt_rq(rt_rq);
941
942 #ifdef CONFIG_RT_GROUP_SCHED
943         /*
944          * Change rq's cpupri only if rt_rq is the top queue.
945          */
946         if (&rq->rt != rt_rq)
947                 return;
948 #endif
949         if (rq->online && prio < prev_prio)
950                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
951 }
952
953 static void
954 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
955 {
956         struct rq *rq = rq_of_rt_rq(rt_rq);
957
958 #ifdef CONFIG_RT_GROUP_SCHED
959         /*
960          * Change rq's cpupri only if rt_rq is the top queue.
961          */
962         if (&rq->rt != rt_rq)
963                 return;
964 #endif
965         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
966                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
967 }
968
969 #else /* CONFIG_SMP */
970
971 static inline
972 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
973 static inline
974 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
975
976 #endif /* CONFIG_SMP */
977
978 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
979 static void
980 inc_rt_prio(struct rt_rq *rt_rq, int prio)
981 {
982         int prev_prio = rt_rq->highest_prio.curr;
983
984         if (prio < prev_prio)
985                 rt_rq->highest_prio.curr = prio;
986
987         inc_rt_prio_smp(rt_rq, prio, prev_prio);
988 }
989
990 static void
991 dec_rt_prio(struct rt_rq *rt_rq, int prio)
992 {
993         int prev_prio = rt_rq->highest_prio.curr;
994
995         if (rt_rq->rt_nr_running) {
996
997                 WARN_ON(prio < prev_prio);
998
999                 /*
1000                  * This may have been our highest task, and therefore
1001                  * we may have some recomputation to do
1002                  */
1003                 if (prio == prev_prio) {
1004                         struct rt_prio_array *array = &rt_rq->active;
1005
1006                         rt_rq->highest_prio.curr =
1007                                 sched_find_first_bit(array->bitmap);
1008                 }
1009
1010         } else
1011                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1012
1013         dec_rt_prio_smp(rt_rq, prio, prev_prio);
1014 }
1015
1016 #else
1017
1018 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1019 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1020
1021 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1022
1023 #ifdef CONFIG_RT_GROUP_SCHED
1024
1025 static void
1026 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1027 {
1028         if (rt_se_boosted(rt_se))
1029                 rt_rq->rt_nr_boosted++;
1030
1031         if (rt_rq->tg)
1032                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1033 }
1034
1035 static void
1036 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1037 {
1038         if (rt_se_boosted(rt_se))
1039                 rt_rq->rt_nr_boosted--;
1040
1041         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1042 }
1043
1044 #else /* CONFIG_RT_GROUP_SCHED */
1045
1046 static void
1047 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1048 {
1049         start_rt_bandwidth(&def_rt_bandwidth);
1050 }
1051
1052 static inline
1053 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1054
1055 #endif /* CONFIG_RT_GROUP_SCHED */
1056
1057 static inline
1058 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1059 {
1060         int prio = rt_se_prio(rt_se);
1061
1062         WARN_ON(!rt_prio(prio));
1063         rt_rq->rt_nr_running++;
1064
1065         inc_rt_prio(rt_rq, prio);
1066         inc_rt_migration(rt_se, rt_rq);
1067         inc_rt_group(rt_se, rt_rq);
1068 }
1069
1070 static inline
1071 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1072 {
1073         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1074         WARN_ON(!rt_rq->rt_nr_running);
1075         rt_rq->rt_nr_running--;
1076
1077         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1078         dec_rt_migration(rt_se, rt_rq);
1079         dec_rt_group(rt_se, rt_rq);
1080 }
1081
1082 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1083 {
1084         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1085         struct rt_prio_array *array = &rt_rq->active;
1086         struct rt_rq *group_rq = group_rt_rq(rt_se);
1087         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1088
1089         /*
1090          * Don't enqueue the group if it's throttled, or when empty.
1091          * The latter is a consequence of the former when a child group
1092          * gets throttled and the current group doesn't have any other
1093          * active members.
1094          */
1095         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1096                 return;
1097
1098         if (head)
1099                 list_add(&rt_se->run_list, queue);
1100         else
1101                 list_add_tail(&rt_se->run_list, queue);
1102         __set_bit(rt_se_prio(rt_se), array->bitmap);
1103
1104         inc_rt_tasks(rt_se, rt_rq);
1105 }
1106
1107 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1108 {
1109         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1110         struct rt_prio_array *array = &rt_rq->active;
1111
1112         list_del_init(&rt_se->run_list);
1113         if (list_empty(array->queue + rt_se_prio(rt_se)))
1114                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1115
1116         dec_rt_tasks(rt_se, rt_rq);
1117 }
1118
1119 /*
1120  * Because the prio of an upper entry depends on the lower
1121  * entries, we must remove entries top-down.
1122  */
1123 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1124 {
1125         struct sched_rt_entity *back = NULL;
1126
1127         for_each_sched_rt_entity(rt_se) {
1128                 rt_se->back = back;
1129                 back = rt_se;
1130         }
1131
1132         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1133                 if (on_rt_rq(rt_se))
1134                         __dequeue_rt_entity(rt_se);
1135         }
1136 }
1137
1138 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1139 {
1140         dequeue_rt_stack(rt_se);
1141         for_each_sched_rt_entity(rt_se)
1142                 __enqueue_rt_entity(rt_se, head);
1143 }
1144
1145 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1146 {
1147         dequeue_rt_stack(rt_se);
1148
1149         for_each_sched_rt_entity(rt_se) {
1150                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1151
1152                 if (rt_rq && rt_rq->rt_nr_running)
1153                         __enqueue_rt_entity(rt_se, false);
1154         }
1155 }
1156
1157 /*
1158  * Adding/removing a task to/from a priority array:
1159  */
1160 static void
1161 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1162 {
1163         struct sched_rt_entity *rt_se = &p->rt;
1164
1165         if (flags & ENQUEUE_WAKEUP)
1166                 rt_se->timeout = 0;
1167
1168         enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1169
1170         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1171                 enqueue_pushable_task(rq, p);
1172
1173         inc_nr_running(rq);
1174 }
1175
1176 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1177 {
1178         struct sched_rt_entity *rt_se = &p->rt;
1179
1180         update_curr_rt(rq);
1181         dequeue_rt_entity(rt_se);
1182
1183         dequeue_pushable_task(rq, p);
1184
1185         dec_nr_running(rq);
1186 }
1187
1188 /*
1189  * Put the task at the head or the end of the run list without the overhead
1190  * of a dequeue followed by an enqueue.
1191  */
1192 static void
1193 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1194 {
1195         if (on_rt_rq(rt_se)) {
1196                 struct rt_prio_array *array = &rt_rq->active;
1197                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1198
1199                 if (head)
1200                         list_move(&rt_se->run_list, queue);
1201                 else
1202                         list_move_tail(&rt_se->run_list, queue);
1203         }
1204 }
1205
1206 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1207 {
1208         struct sched_rt_entity *rt_se = &p->rt;
1209         struct rt_rq *rt_rq;
1210
1211         for_each_sched_rt_entity(rt_se) {
1212                 rt_rq = rt_rq_of_se(rt_se);
1213                 requeue_rt_entity(rt_rq, rt_se, head);
1214         }
1215 }
1216
1217 static void yield_task_rt(struct rq *rq)
1218 {
1219         requeue_task_rt(rq, rq->curr, 0);
1220 }
1221
1222 #ifdef CONFIG_SMP
1223 static int find_lowest_rq(struct task_struct *task);
1224
1225 static int
1226 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1227 {
1228         struct task_struct *curr;
1229         struct rq *rq;
1230
1231         if (p->nr_cpus_allowed == 1)
1232                 goto out;
1233
1234         /* For anything but wake ups, just return the task_cpu */
1235         if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1236                 goto out;
1237
1238         rq = cpu_rq(cpu);
1239
1240         rcu_read_lock();
1241         curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1242
1243         /*
1244          * If the current task on @p's runqueue is an RT task, then
1245          * try to see if we can wake this RT task up on another
1246          * runqueue. Otherwise simply start this RT task
1247          * on its current runqueue.
1248          *
1249          * We want to avoid overloading runqueues. If the woken
1250          * task is a higher priority, then it will stay on this CPU
1251          * and the lower prio task should be moved to another CPU.
1252          * Even though this will probably make the lower prio task
1253          * lose its cache, we do not want to bounce a higher task
1254          * around just because it gave up its CPU, perhaps for a
1255          * lock?
1256          *
1257          * For equal prio tasks, we just let the scheduler sort it out.
1258          *
1259          * Otherwise, just let it ride on the affined RQ and the
1260          * post-schedule router will push the preempted task away
1261          *
1262          * This test is optimistic, if we get it wrong the load-balancer
1263          * will have to sort it out.
1264          */
1265         if (curr && unlikely(rt_task(curr)) &&
1266             (curr->nr_cpus_allowed < 2 ||
1267              curr->prio <= p->prio)) {
1268                 int target = find_lowest_rq(p);
1269
1270                 if (target != -1)
1271                         cpu = target;
1272         }
1273         rcu_read_unlock();
1274
1275 out:
1276         return cpu;
1277 }
1278
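/*
 * Equal-priority wakeup: if the woken task cannot go anywhere better
 * but current could run on another CPU, requeue the woken task at the
 * head and reschedule so the push logic can move current out of the way.
 */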
1279 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1280 {
1281         if (rq->curr->nr_cpus_allowed == 1)
1282                 return;
1283
1284         if (p->nr_cpus_allowed != 1
1285             && cpupri_find(&rq->rd->cpupri, p, NULL))
1286                 return;
1287
1288         if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1289                 return;
1290
1291         /*
1292          * There appear to be other CPUs that can accept
1293          * current and none to run 'p', so let's reschedule
1294          * to try and push current away:
1295          */
1296         requeue_task_rt(rq, p, 1);
1297         resched_task(rq->curr);
1298 }
1299
1300 #endif /* CONFIG_SMP */
1301
1302 /*
1303  * Preempt the current task with a newly woken task if needed:
1304  */
1305 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1306 {
1307         if (p->prio < rq->curr->prio) {
1308                 resched_task(rq->curr);
1309                 return;
1310         }
1311
1312 #ifdef CONFIG_SMP
1313         /*
1314          * If:
1315          *
1316          * - the newly woken task is of equal priority to the current task
1317          * - the newly woken task is non-migratable while current is migratable
1318          * - current will be preempted on the next reschedule
1319          *
1320          * we should check to see if current can readily move to a different
1321          * cpu.  If so, we will reschedule to allow the push logic to try
1322          * to move current somewhere else, making room for our non-migratable
1323          * task.
1324          */
1325         if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1326                 check_preempt_equal_prio(rq, p);
1327 #endif
1328 }
1329
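/*
 * Pick the first entity on the highest-priority non-empty queue of the
 * rt_rq's priority array.
 */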
1330 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1331                                                    struct rt_rq *rt_rq)
1332 {
1333         struct rt_prio_array *array = &rt_rq->active;
1334         struct sched_rt_entity *next = NULL;
1335         struct list_head *queue;
1336         int idx;
1337
1338         idx = sched_find_first_bit(array->bitmap);
1339         BUG_ON(idx >= MAX_RT_PRIO);
1340
1341         queue = array->queue + idx;
1342         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1343
1344         return next;
1345 }
1346
1347 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1348 {
1349         struct sched_rt_entity *rt_se;
1350         struct task_struct *p;
1351         struct rt_rq *rt_rq  = &rq->rt;
1352
1353         do {
1354                 rt_se = pick_next_rt_entity(rq, rt_rq);
1355                 BUG_ON(!rt_se);
1356                 rt_rq = group_rt_rq(rt_se);
1357         } while (rt_rq);
1358
1359         p = rt_task_of(rt_se);
1360         p->se.exec_start = rq_clock_task(rq);
1361
1362         return p;
1363 }
1364
1365 static struct task_struct *
1366 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1367 {
1368         struct task_struct *p;
1369         struct rt_rq *rt_rq = &rq->rt;
1370
1371         if (need_pull_rt_task(rq, prev)) {
1372                 pull_rt_task(rq);
1373                 /*
1374                  * pull_rt_task() can drop (and re-acquire) rq->lock; this
1375                  * means a dl task can slip in, in which case we need to
1376                  * re-start task selection.
1377                  */
1378                 if (unlikely(rq->dl.dl_nr_running))
1379                         return RETRY_TASK;
1380         }
1381
1382         if (!rt_rq->rt_nr_running)
1383                 return NULL;
1384
1385         if (rt_rq_throttled(rt_rq))
1386                 return NULL;
1387
1388         put_prev_task(rq, prev);
1389
1390         p = _pick_next_task_rt(rq);
1391
1392         /* The running task is never eligible for pushing */
1393         if (p)
1394                 dequeue_pushable_task(rq, p);
1395
1396         set_post_schedule(rq);
1397
1398         return p;
1399 }
1400
1401 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1402 {
1403         update_curr_rt(rq);
1404
1405         /*
1406          * The previous task needs to be made eligible for pushing
1407          * if it is still active
1408          */
1409         if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1410                 enqueue_pushable_task(rq, p);
1411 }
1412
1413 #ifdef CONFIG_SMP
1414
1415 /* Only try algorithms three times */
1416 #define RT_MAX_TRIES 3
1417
1418 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1419 {
1420         if (!task_running(rq, p) &&
1421             cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1422                 return 1;
1423         return 0;
1424 }
1425
1426 /*
1427  * Return the highest-priority pushable task on this rq that can run on
1428  * the given CPU, or NULL if there is none.
1429  */
1430 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1431 {
1432         struct plist_head *head = &rq->rt.pushable_tasks;
1433         struct task_struct *p;
1434
1435         if (!has_pushable_tasks(rq))
1436                 return NULL;
1437
1438         plist_for_each_entry(p, head, pushable_tasks) {
1439                 if (pick_rt_task(rq, p, cpu))
1440                         return p;
1441         }
1442
1443         return NULL;
1444 }
1445
1446 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1447
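/*
 * Find a CPU whose running task has lower priority than @task, using
 * cpupri. Prefer the task's previous CPU (cache-hot), then CPUs close
 * in the sched-domain topology, then anything in the mask.
 */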
1448 static int find_lowest_rq(struct task_struct *task)
1449 {
1450         struct sched_domain *sd;
1451         struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1452         int this_cpu = smp_processor_id();
1453         int cpu      = task_cpu(task);
1454
1455         /* Make sure the mask is initialized first */
1456         if (unlikely(!lowest_mask))
1457                 return -1;
1458
1459         if (task->nr_cpus_allowed == 1)
1460                 return -1; /* No other targets possible */
1461
1462         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1463                 return -1; /* No targets found */
1464
1465         /*
1466          * At this point we have built a mask of cpus representing the
1467          * lowest priority tasks in the system.  Now we want to elect
1468          * the best one based on our affinity and topology.
1469          *
1470          * We prioritize the last cpu that the task executed on since
1471          * it is most likely cache-hot in that location.
1472          */
1473         if (cpumask_test_cpu(cpu, lowest_mask))
1474                 return cpu;
1475
1476         /*
1477          * Otherwise, we consult the sched_domains span maps to figure
1478          * out which cpu is logically closest to our hot cache data.
1479          */
1480         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1481                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1482
1483         rcu_read_lock();
1484         for_each_domain(cpu, sd) {
1485                 if (sd->flags & SD_WAKE_AFFINE) {
1486                         int best_cpu;
1487
1488                         /*
1489                          * "this_cpu" is cheaper to preempt than a
1490                          * remote processor.
1491                          */
1492                         if (this_cpu != -1 &&
1493                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1494                                 rcu_read_unlock();
1495                                 return this_cpu;
1496                         }
1497
1498                         best_cpu = cpumask_first_and(lowest_mask,
1499                                                      sched_domain_span(sd));
1500                         if (best_cpu < nr_cpu_ids) {
1501                                 rcu_read_unlock();
1502                                 return best_cpu;
1503                         }
1504                 }
1505         }
1506         rcu_read_unlock();
1507
1508         /*
1509          * And finally, if there were no matches within the domains
1510          * just give the caller *something* to work with from the compatible
1511          * locations.
1512          */
1513         if (this_cpu != -1)
1514                 return this_cpu;
1515
1516         cpu = cpumask_any(lowest_mask);
1517         if (cpu < nr_cpu_ids)
1518                 return cpu;
1519         return -1;
1520 }
1521
1522 /* Will lock the rq it finds */
1523 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1524 {
1525         struct rq *lowest_rq = NULL;
1526         int tries;
1527         int cpu;
1528
1529         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1530                 cpu = find_lowest_rq(task);
1531
1532                 if ((cpu == -1) || (cpu == rq->cpu))
1533                         break;
1534
1535                 lowest_rq = cpu_rq(cpu);
1536
1537                 /* if the prio of this runqueue changed, try again */
1538                 if (double_lock_balance(rq, lowest_rq)) {
1539                         /*
1540                          * We had to unlock the run queue. In
1541                          * the meantime, the task could have
1542                          * migrated already or had its affinity changed.
1543                          * Also make sure that it wasn't scheduled on its rq.
1544                          */
1545                         if (unlikely(task_rq(task) != rq ||
1546                                      !cpumask_test_cpu(lowest_rq->cpu,
1547                                                        tsk_cpus_allowed(task)) ||
1548                                      task_running(rq, task) ||
1549                                      !task->on_rq)) {
1550
1551                                 double_unlock_balance(rq, lowest_rq);
1552                                 lowest_rq = NULL;
1553                                 break;
1554                         }
1555                 }
1556
1557                 /* If this rq is still suitable use it. */
1558                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1559                         break;
1560
1561                 /* try again */
1562                 double_unlock_balance(rq, lowest_rq);
1563                 lowest_rq = NULL;
1564         }
1565
1566         return lowest_rq;
1567 }
1568
1569 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1570 {
1571         struct task_struct *p;
1572
1573         if (!has_pushable_tasks(rq))
1574                 return NULL;
1575
1576         p = plist_first_entry(&rq->rt.pushable_tasks,
1577                               struct task_struct, pushable_tasks);
1578
1579         BUG_ON(rq->cpu != task_cpu(p));
1580         BUG_ON(task_current(rq, p));
1581         BUG_ON(p->nr_cpus_allowed <= 1);
1582
1583         BUG_ON(!p->on_rq);
1584         BUG_ON(!rt_task(p));
1585
1586         return p;
1587 }
1588
1589 /*
1590  * If the current CPU has more than one RT task, see if the non-
1591  * running task can migrate over to a CPU that is running a task
1592  * of lesser priority.
1593  */
1594 static int push_rt_task(struct rq *rq)
1595 {
1596         struct task_struct *next_task;
1597         struct rq *lowest_rq;
1598         int ret = 0;
1599
1600         if (!rq->rt.overloaded)
1601                 return 0;
1602
1603         next_task = pick_next_pushable_task(rq);
1604         if (!next_task)
1605                 return 0;
1606
1607 retry:
1608         if (unlikely(next_task == rq->curr)) {
1609                 WARN_ON(1);
1610                 return 0;
1611         }
1612
1613         /*
1614          * It's possible that the next_task slipped in at a
1615          * higher priority than current. If that's the case,
1616          * just reschedule current.
1617          */
1618         if (unlikely(next_task->prio < rq->curr->prio)) {
1619                 resched_task(rq->curr);
1620                 return 0;
1621         }
1622
1623         /* We might release rq lock */
1624         get_task_struct(next_task);
1625
1626         /* find_lock_lowest_rq locks the rq if found */
1627         lowest_rq = find_lock_lowest_rq(next_task, rq);
1628         if (!lowest_rq) {
1629                 struct task_struct *task;
1630                 /*
1631                  * find_lock_lowest_rq releases rq->lock
1632                  * so it is possible that next_task has migrated.
1633                  *
1634                  * We need to make sure that the task is still on the same
1635                  * run-queue and is also still the next task eligible for
1636                  * pushing.
1637                  */
1638                 task = pick_next_pushable_task(rq);
1639                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1640                         /*
1641                          * The task hasn't migrated, and is still the next
1642                          * eligible task, but we failed to find a run-queue
1643                          * to push it to.  Do not retry in this case, since
1644                          * other cpus will pull from us when ready.
1645                          */
1646                         goto out;
1647                 }
1648
1649                 if (!task)
1650                         /* No more tasks, just exit */
1651                         goto out;
1652
1653                 /*
1654                  * Something has shifted, try again.
1655                  */
1656                 put_task_struct(next_task);
1657                 next_task = task;
1658                 goto retry;
1659         }
1660
1661         deactivate_task(rq, next_task, 0);
1662         set_task_cpu(next_task, lowest_rq->cpu);
1663         activate_task(lowest_rq, next_task, 0);
1664         ret = 1;
1665
1666         resched_task(lowest_rq->curr);
1667
1668         double_unlock_balance(rq, lowest_rq);
1669
1670 out:
1671         put_task_struct(next_task);
1672
1673         return ret;
1674 }
1675
1676 static void push_rt_tasks(struct rq *rq)
1677 {
1678         /* push_rt_task will return true if it moved an RT task */
1679         while (push_rt_task(rq))
1680                 ;
1681 }
1682
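/*
 * Scan the overloaded runqueues in our root domain and pull over the
 * highest-priority pushable task that would preempt whatever this
 * runqueue is about to run. Returns 1 if a task was pulled.
 */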
1683 static int pull_rt_task(struct rq *this_rq)
1684 {
1685         int this_cpu = this_rq->cpu, ret = 0, cpu;
1686         struct task_struct *p;
1687         struct rq *src_rq;
1688
1689         if (likely(!rt_overloaded(this_rq)))
1690                 return 0;
1691
1692         /*
1693          * Match the barrier from rt_set_overload(); this guarantees that if we
1694          * see overloaded we must also see the rto_mask bit.
1695          */
1696         smp_rmb();
1697
1698         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1699                 if (this_cpu == cpu)
1700                         continue;
1701
1702                 src_rq = cpu_rq(cpu);
1703
1704                 /*
1705                  * Don't bother taking the src_rq->lock if the next highest
1706                  * task is known to be lower-priority than our current task.
1707                  * This may look racy, but if this value is about to go
1708                  * logically higher, the src_rq will push this task away.
1709          * And if it's going logically lower, we do not care.
1710                  */
1711                 if (src_rq->rt.highest_prio.next >=
1712                     this_rq->rt.highest_prio.curr)
1713                         continue;
1714
1715                 /*
1716                  * We can potentially drop this_rq's lock in
1717                  * double_lock_balance, and another CPU could
1718                  * alter this_rq.
1719                  */
1720                 double_lock_balance(this_rq, src_rq);
1721
1722                 /*
1723          * We can only pull a task that is pushable
1724          * on its rq, and no others.
1725                  */
1726                 p = pick_highest_pushable_task(src_rq, this_cpu);
1727
1728                 /*
1729                  * Do we have an RT task that preempts
1730                  * the to-be-scheduled task?
1731                  */
1732                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1733                         WARN_ON(p == src_rq->curr);
1734                         WARN_ON(!p->on_rq);
1735
1736                         /*
1737                          * There's a chance that p is higher in priority
1738                          * than what's currently running on its cpu.
1739                          * This is just that p is waking up and hasn't
1740                          * had a chance to schedule yet. We only pull
1741                          * p if it is lower in priority than the
1742                          * current task on its run queue.
1743                          */
1744                         if (p->prio < src_rq->curr->prio)
1745                                 goto skip;
1746
1747                         ret = 1;
1748
1749                         deactivate_task(src_rq, p, 0);
1750                         set_task_cpu(p, this_cpu);
1751                         activate_task(this_rq, p, 0);
1752                         /*
1753                          * We continue with the search, just in
1754                          * case there's an even higher prio task
1755                          * in another runqueue. (low likelihood
1756                          * but possible)
1757                          */
1758                 }
1759 skip:
1760                 double_unlock_balance(this_rq, src_rq);
1761         }
1762
1763         return ret;
1764 }
1765
1766 static void post_schedule_rt(struct rq *rq)
1767 {
1768         push_rt_tasks(rq);
1769 }
1770
1771 /*
1772  * If we are not running and we are not going to reschedule soon, we should
1773  * try to push tasks away now.
1774  */
1775 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1776 {
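             /*
              * Push p away only if: it isn't running here, the current task
              * hasn't already been asked to reschedule, there is something
              * pushable queued, p itself can migrate, and the current task
              * is an RT/DL task that is either pinned or of equal/higher
              * priority than p (so p is unlikely to run here soon anyway).
              */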
1777         if (!task_running(rq, p) &&
1778             !test_tsk_need_resched(rq->curr) &&
1779             has_pushable_tasks(rq) &&
1780             p->nr_cpus_allowed > 1 &&
1781             (dl_task(rq->curr) || rt_task(rq->curr)) &&
1782             (rq->curr->nr_cpus_allowed < 2 ||
1783              rq->curr->prio <= p->prio))
1784                 push_rt_tasks(rq);
1785 }
1786
1787 static void set_cpus_allowed_rt(struct task_struct *p,
1788                                 const struct cpumask *new_mask)
1789 {
1790         struct rq *rq;
1791         int weight;
1792
1793         BUG_ON(!rt_task(p));
1794
1795         if (!p->on_rq)
1796                 return;
1797
1798         weight = cpumask_weight(new_mask);
1799
1800         /*
1801          * Only update if the process actually changes whether or not
1802          * it can migrate between CPUs.
1803          */
1804         if ((p->nr_cpus_allowed > 1) == (weight > 1))
1805                 return;
1806
1807         rq = task_rq(p);
1808
1809         /*
1810          * The process has either just lost or just gained the ability to migrate.
1811          */
1812         if (weight <= 1) {
1813                 if (!task_current(rq, p))
1814                         dequeue_pushable_task(rq, p);
1815                 BUG_ON(!rq->rt.rt_nr_migratory);
1816                 rq->rt.rt_nr_migratory--;
1817         } else {
1818                 if (!task_current(rq, p))
1819                         enqueue_pushable_task(rq, p);
1820                 rq->rt.rt_nr_migratory++;
1821         }
1822
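             /*
              * Recompute the rq's RT overload state now that the number of
              * migratable RT tasks has changed.
              */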
1823         update_rt_migration(&rq->rt);
1824 }
1825
1826 /* Assumes rq->lock is held */
1827 static void rq_online_rt(struct rq *rq)
1828 {
1829         if (rq->rt.overloaded)
1830                 rt_set_overload(rq);
1831
1832         __enable_runtime(rq);
1833
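             /*
              * Publish this CPU's highest RT priority so that cpupri-based
              * task placement takes it into account again now that the CPU
              * is back online.
              */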
1834         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1835 }
1836
1837 /* Assumes rq->lock is held */
1838 static void rq_offline_rt(struct rq *rq)
1839 {
1840         if (rq->rt.overloaded)
1841                 rt_clear_overload(rq);
1842
1843         __disable_runtime(rq);
1844
1845         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1846 }
1847
1848 /*
1849  * When switching away from the RT class, we may end up in a position
1850  * where we want to pull RT tasks from other runqueues.
1851  */
1852 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1853 {
1854         /*
1855          * If there are other RT tasks then we will reschedule
1856          * and the scheduling of the other RT tasks will handle
1857          * the balancing. But if we are the last RT task
1858          * we may need to handle the pulling of RT tasks
1859          * now.
1860          */
1861         if (!p->on_rq || rq->rt.rt_nr_running)
1862                 return;
1863
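             /*
              * pull_rt_task() returns nonzero if it moved a task over;
              * if so, preempt whoever is running so the pulled task can go.
              */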
1864         if (pull_rt_task(rq))
1865                 resched_task(rq->curr);
1866 }
1867
1868 void __init init_sched_rt_class(void)
1869 {
1870         unsigned int i;
1871
1872         for_each_possible_cpu(i) {
1873                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1874                                         GFP_KERNEL, cpu_to_node(i));
1875         }
1876 }
1877 #endif /* CONFIG_SMP */
1878
1879 /*
1880  * When switching a task to RT, we may overload the runqueue
1881  * with RT tasks. In this case we try to push them off to
1882  * other runqueues.
1883  */
1884 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1885 {
1886         int check_resched = 1;
1887
1888         /*
1889          * If we are already running, then there's nothing
1890          * that needs to be done. But if we are not running
1891          * we may need to preempt the currently running task.
1892          * If that currently running task is also an RT task,
1893          * then see if we can move to another run queue.
1894          */
1895         if (p->on_rq && rq->curr != p) {
1896 #ifdef CONFIG_SMP
1897                 if (rq->rt.overloaded && push_rt_task(rq) &&
1898                     /* Don't resched if we changed runqueues */
1899                     rq != task_rq(p))
1900                         check_resched = 0;
1901 #endif /* CONFIG_SMP */
1902                 if (check_resched && p->prio < rq->curr->prio)
1903                         resched_task(rq->curr);
1904         }
1905 }
1906
1907 /*
1908  * Priority of the task has changed. This may cause
1909  * us to initiate a push or pull.
1910  */
1911 static void
1912 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1913 {
1914         if (!p->on_rq)
1915                 return;
1916
1917         if (rq->curr == p) {
1918 #ifdef CONFIG_SMP
1919                 /*
1920                  * If our priority decreases while running, we
1921                  * may need to pull tasks to this runqueue.
1922                  */
1923                 if (oldprio < p->prio)
1924                         pull_rt_task(rq);
1925                 /*
1926                  * If there's a higher priority task waiting to run
1927                  * then reschedule. Note, the above pull_rt_task
1928                  * can release the rq lock and p could migrate.
1929                  * Only reschedule if p is still on the same runqueue.
1930                  */
1931                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1932                         resched_task(p);
1933 #else
1934                 /* For UP simply resched on drop of prio */
1935                 if (oldprio < p->prio)
1936                         resched_task(p);
1937 #endif /* CONFIG_SMP */
1938         } else {
1939                 /*
1940                  * This task is not running, but if its priority is
1941                  * higher than that of the currently running task,
1942                  * then reschedule.
1943                  */
1944                 if (p->prio < rq->curr->prio)
1945                         resched_task(rq->curr);
1946         }
1947 }
1948
1949 static void watchdog(struct rq *rq, struct task_struct *p)
1950 {
1951         unsigned long soft, hard;
1952
1953         /* max may change after cur was read; this will be fixed on the next tick */
1954         soft = task_rlimit(p, RLIMIT_RTTIME);
1955         hard = task_rlimit_max(p, RLIMIT_RTTIME);
1956
1957         if (soft != RLIM_INFINITY) {
1958                 unsigned long next;
1959
1960                 if (p->rt.watchdog_stamp != jiffies) {
1961                         p->rt.timeout++;
1962                         p->rt.watchdog_stamp = jiffies;
1963                 }
1964
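                     /*
                      * timeout is counted in ticks, while RLIMIT_RTTIME is
                      * in microseconds; convert the limit to ticks and, once
                      * it is exceeded, arm cputime_expires so the cputime
                      * expiry path handles the RLIMIT_RTTIME overrun on the
                      * next accounting pass.
                      */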
1965                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1966                 if (p->rt.timeout > next)
1967                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1968         }
1969 }
1970
1971 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1972 {
1973         struct sched_rt_entity *rt_se = &p->rt;
1974
1975         update_curr_rt(rq);
1976
1977         watchdog(rq, p);
1978
1979         /*
1980          * RR tasks need a special form of timeslice management.
1981          * FIFO tasks have no timeslices.
1982          */
1983         if (p->policy != SCHED_RR)
1984                 return;
1985
1986         if (--p->rt.time_slice)
1987                 return;
1988
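             /*
              * The RR slice is used up: refill it, then requeue behind other
              * tasks of the same priority unless we are alone on the queue.
              */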
1989         p->rt.time_slice = sched_rr_timeslice;
1990
1991         /*
1992          * Requeue to the end of the queue if we (and all of our ancestors) are
1993          * not the only element on the queue.
1994          */
1995         for_each_sched_rt_entity(rt_se) {
1996                 if (rt_se->run_list.prev != rt_se->run_list.next) {
1997                         requeue_task_rt(rq, p, 0);
1998                         set_tsk_need_resched(p);
1999                         return;
2000                 }
2001         }
2002 }
2003
2004 static void set_curr_task_rt(struct rq *rq)
2005 {
2006         struct task_struct *p = rq->curr;
2007
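             /* (Re)start exec time accounting for the task that is now current. */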
2008         p->se.exec_start = rq_clock_task(rq);
2009
2010         /* The running task is never eligible for pushing */
2011         dequeue_pushable_task(rq, p);
2012 }
2013
2014 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2015 {
2016         /*
2017          * Time slice is 0 for SCHED_FIFO tasks
2018          */
2019         if (task->policy == SCHED_RR)
2020                 return sched_rr_timeslice;
2021         else
2022                 return 0;
2023 }
2024
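 /*
  * The RT scheduling class: hooks the SCHED_FIFO/SCHED_RR implementation
  * above into the core scheduler. .next points at the fair class, which the
  * core falls back to when no RT task is runnable.
  */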
2025 const struct sched_class rt_sched_class = {
2026         .next                   = &fair_sched_class,
2027         .enqueue_task           = enqueue_task_rt,
2028         .dequeue_task           = dequeue_task_rt,
2029         .yield_task             = yield_task_rt,
2030
2031         .check_preempt_curr     = check_preempt_curr_rt,
2032
2033         .pick_next_task         = pick_next_task_rt,
2034         .put_prev_task          = put_prev_task_rt,
2035
2036 #ifdef CONFIG_SMP
2037         .select_task_rq         = select_task_rq_rt,
2038
2039         .set_cpus_allowed       = set_cpus_allowed_rt,
2040         .rq_online              = rq_online_rt,
2041         .rq_offline             = rq_offline_rt,
2042         .post_schedule          = post_schedule_rt,
2043         .task_woken             = task_woken_rt,
2044         .switched_from          = switched_from_rt,
2045 #endif
2046
2047         .set_curr_task          = set_curr_task_rt,
2048         .task_tick              = task_tick_rt,
2049
2050         .get_rr_interval        = get_rr_interval_rt,
2051
2052         .prio_changed           = prio_changed_rt,
2053         .switched_to            = switched_to_rt,
2054 };
2055
2056 #ifdef CONFIG_SCHED_DEBUG
2057 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2058
2059 void print_rt_stats(struct seq_file *m, int cpu)
2060 {
2061         rt_rq_iter_t iter;
2062         struct rt_rq *rt_rq;
2063
2064         rcu_read_lock();
2065         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2066                 print_rt_rq(m, cpu, rt_rq);
2067         rcu_read_unlock();
2068 }
2069 #endif /* CONFIG_SCHED_DEBUG */