Merge tag 'pci-v5.11-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaa...
[linux-2.6-microblaze.git] / kernel / sched / stop_task.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * stop-task scheduling class.
4  *
5  * The stop task is the highest priority task in the system, it preempts
6  * everything and will be preempted by nothing.
7  *
8  * See kernel/stop_machine.c
9  */
10 #include "sched.h"
11
12 #ifdef CONFIG_SMP
/*
 * Stop tasks are pinned: always run @p on the CPU it is already on.
 */
static int
select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
18
static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	/* Report whether this rq has a runnable stop task; no pulling needed. */
	return sched_stop_runnable(rq);
}
24 #endif /* CONFIG_SMP */
25
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* We're never preempted: the stop class is the highest priority. */
}
31
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
	/* Stamp the start of this run; put_prev_task_stop() reads it back. */
	stop->se.exec_start = rq_clock_task(rq);
}
36
37 static struct task_struct *pick_next_task_stop(struct rq *rq)
38 {
39         if (!sched_stop_runnable(rq))
40                 return NULL;
41
42         set_next_task_stop(rq, rq->stop, true);
43         return rq->stop;
44 }
45
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* No queue to maintain; only keep the rq's runnable count correct. */
	add_nr_running(rq, 1);
}
51
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* Mirror enqueue_task_stop(): just drop the rq's runnable count. */
	sub_nr_running(rq, 1);
}
57
static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}
62
/*
 * Account the CPU time the stop task consumed since set_next_task_stop()
 * stamped se.exec_start, then restart the clock.
 */
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	/* Guard against a clock going backwards; never account negative time. */
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	/* Record the longest single run (schedstats bookkeeping only). */
	schedstat_set(curr->se.statistics.exec_max,
			max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	/* Restart the clock for the next accounting period. */
	curr->se.exec_start = rq_clock_task(rq);
	cgroup_account_cputime(curr, delta_exec);
}
81
82 /*
83  * scheduler tick hitting a task of our scheduling class.
84  *
85  * NOTE: This function can be called remotely by the tick offload that
86  * goes along full dynticks. Therefore no local assumption can be made
87  * and everything must be accessed through the @rq and @curr passed in
88  * parameters.
89  */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
	/* Nothing to do on a tick: the stop task is never time-sliced out. */
}
93
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}
98
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	/* Stop tasks have a single, fixed (highest) priority. */
	BUG(); /* how!?, what priority? */
}
104
static void update_curr_stop(struct rq *rq)
{
	/* No incremental accounting; runtime is settled in put_prev_task_stop(). */
}
108
109 /*
110  * Simple, special scheduling class for the per-CPU stop tasks:
111  */
DEFINE_SCHED_CLASS(stop) = {

	/* Queue management: only nr_running bookkeeping, no real queue. */
	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	/* Context-switch hooks: pick/put the single per-CPU stop task. */
	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,
	.set_next_task		= set_next_task_stop,

#ifdef CONFIG_SMP
	.balance		= balance_stop,
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_stop,

	/* These hooks BUG(): class/priority changes are impossible here. */
	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
};