1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_H
3 #define _LINUX_SCHED_H
4
5 /*
6  * Define 'struct task_struct' and provide the main scheduler
7  * APIs (schedule(), wakeup variants, etc.)
8  */
9
10 #include <uapi/linux/sched.h>
11
12 #include <asm/current.h>
13
14 #include <linux/pid.h>
15 #include <linux/sem.h>
16 #include <linux/shm.h>
17 #include <linux/mutex.h>
18 #include <linux/plist.h>
19 #include <linux/hrtimer.h>
20 #include <linux/irqflags.h>
21 #include <linux/seccomp.h>
22 #include <linux/nodemask.h>
23 #include <linux/rcupdate.h>
24 #include <linux/refcount.h>
25 #include <linux/resource.h>
26 #include <linux/latencytop.h>
27 #include <linux/sched/prio.h>
28 #include <linux/sched/types.h>
29 #include <linux/signal_types.h>
30 #include <linux/syscall_user_dispatch.h>
31 #include <linux/mm_types_task.h>
32 #include <linux/task_io_accounting.h>
33 #include <linux/posix-timers.h>
34 #include <linux/rseq.h>
35 #include <linux/seqlock.h>
36 #include <linux/kcsan.h>
37 #include <linux/rv.h>
38 #include <asm/kmap_size.h>
39
40 /* task_struct member predeclarations (sorted alphabetically): */
41 struct audit_context;
42 struct backing_dev_info;
43 struct bio_list;
44 struct blk_plug;
45 struct bpf_local_storage;
46 struct bpf_run_ctx;
47 struct capture_control;
48 struct cfs_rq;
49 struct fs_struct;
50 struct futex_pi_state;
51 struct io_context;
52 struct io_uring_task;
53 struct mempolicy;
54 struct nameidata;
55 struct nsproxy;
56 struct perf_event_context;
57 struct pid_namespace;
58 struct pipe_inode_info;
59 struct rcu_node;
60 struct reclaim_state;
61 struct robust_list_head;
62 struct root_domain;
63 struct rq;
64 struct sched_attr;
65 struct sched_param;
66 struct seq_file;
67 struct sighand_struct;
68 struct signal_struct;
69 struct task_delay_info;
70 struct task_group;
71
72 /*
73  * Task state bitmask. NOTE! These bits are also
74  * encoded in fs/proc/array.c: get_task_state().
75  *
76  * We have two separate sets of flags: task->state
77  * is about runnability, while task->exit_state is
78  * about the task exiting. Confusing, but this way
79  * modifying one set can't modify the other one by
80  * mistake.
81  */
82
83 /* Used in tsk->state: */
84 #define TASK_RUNNING                    0x0000
85 #define TASK_INTERRUPTIBLE              0x0001
86 #define TASK_UNINTERRUPTIBLE            0x0002
87 #define __TASK_STOPPED                  0x0004
88 #define __TASK_TRACED                   0x0008
89 /* Used in tsk->exit_state: */
90 #define EXIT_DEAD                       0x0010
91 #define EXIT_ZOMBIE                     0x0020
92 #define EXIT_TRACE                      (EXIT_ZOMBIE | EXIT_DEAD)
93 /* Used in tsk->state again: */
94 #define TASK_PARKED                     0x0040
95 #define TASK_DEAD                       0x0080
96 #define TASK_WAKEKILL                   0x0100
97 #define TASK_WAKING                     0x0200
98 #define TASK_NOLOAD                     0x0400
99 #define TASK_NEW                        0x0800
100 /* RT-specific auxiliary flag to mark RT lock waiters */
101 #define TASK_RTLOCK_WAIT                0x1000
102 #define TASK_STATE_MAX                  0x2000
103
104 /* Convenience macros for the sake of set_current_state: */
105 #define TASK_KILLABLE                   (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
106 #define TASK_STOPPED                    (TASK_WAKEKILL | __TASK_STOPPED)
107 #define TASK_TRACED                     __TASK_TRACED
108
109 #define TASK_IDLE                       (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
110
111 /* Convenience macros for the sake of wake_up(): */
112 #define TASK_NORMAL                     (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
113
114 /* get_task_state(): */
115 #define TASK_REPORT                     (TASK_RUNNING | TASK_INTERRUPTIBLE | \
116                                          TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
117                                          __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
118                                          TASK_PARKED)
119
120 #define task_is_running(task)           (READ_ONCE((task)->__state) == TASK_RUNNING)
121
122 #define task_is_traced(task)            ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
123 #define task_is_stopped(task)           ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
124 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
125
126 /*
127  * Special states are those that do not use the normal wait-loop pattern. See
128  * the comment with set_special_state().
129  */
130 #define is_special_task_state(state)                            \
131         ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
132
133 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
134 # define debug_normal_state_change(state_value)                         \
135         do {                                                            \
136                 WARN_ON_ONCE(is_special_task_state(state_value));       \
137                 current->task_state_change = _THIS_IP_;                 \
138         } while (0)
139
140 # define debug_special_state_change(state_value)                        \
141         do {                                                            \
142                 WARN_ON_ONCE(!is_special_task_state(state_value));      \
143                 current->task_state_change = _THIS_IP_;                 \
144         } while (0)
145
146 # define debug_rtlock_wait_set_state()                                  \
147         do {                                                             \
148                 current->saved_state_change = current->task_state_change;\
149                 current->task_state_change = _THIS_IP_;                  \
150         } while (0)
151
152 # define debug_rtlock_wait_restore_state()                              \
153         do {                                                             \
154                 current->task_state_change = current->saved_state_change;\
155         } while (0)
156
157 #else
158 # define debug_normal_state_change(cond)        do { } while (0)
159 # define debug_special_state_change(cond)       do { } while (0)
160 # define debug_rtlock_wait_set_state()          do { } while (0)
161 # define debug_rtlock_wait_restore_state()      do { } while (0)
162 #endif
163
164 /*
165  * set_current_state() includes a barrier so that the write of current->state
166  * is correctly serialised wrt the caller's subsequent test of whether to
167  * actually sleep:
168  *
169  *   for (;;) {
170  *      set_current_state(TASK_UNINTERRUPTIBLE);
171  *      if (CONDITION)
172  *         break;
173  *
174  *      schedule();
175  *   }
176  *   __set_current_state(TASK_RUNNING);
177  *
178  * If the caller does not need such serialisation (because, for instance, the
179  * CONDITION test and condition change and wakeup are under the same lock) then
180  * use __set_current_state().
181  *
182  * The above is typically ordered against the wakeup, which does:
183  *
184  *   CONDITION = 1;
185  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
186  *
187  * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
188  * accessing p->state.
189  *
190  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
191  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
192  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
193  *
194  * However, with slightly different timing the wakeup TASK_RUNNING store can
195  * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
196  * a problem either because that will result in one extra go around the loop
197  * and our @cond test will save the day.
198  *
199  * Also see the comments of try_to_wake_up().
200  */
201 #define __set_current_state(state_value)                                \
202         do {                                                            \
203                 debug_normal_state_change((state_value));               \
204                 WRITE_ONCE(current->__state, (state_value));            \
205         } while (0)
206
207 #define set_current_state(state_value)                                  \
208         do {                                                            \
209                 debug_normal_state_change((state_value));               \
210                 smp_store_mb(current->__state, (state_value));          \
211         } while (0)
212
213 /*
214  * set_special_state() should be used for those states where the blocking task
215  * cannot use the regular condition-based wait-loop. In that case we must
216  * serialize against wakeups such that any possible in-flight TASK_RUNNING
217  * stores will not collide with our state change.
218  */
219 #define set_special_state(state_value)                                  \
220         do {                                                            \
221                 unsigned long flags; /* may shadow */                   \
222                                                                         \
223                 raw_spin_lock_irqsave(&current->pi_lock, flags);        \
224                 debug_special_state_change((state_value));              \
225                 WRITE_ONCE(current->__state, (state_value));            \
226                 raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
227         } while (0)
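
/*
 * Illustrative sketch: a state that is set once and never re-checked in a
 * wait loop goes through set_special_state(). For example, the final
 * transition of an exiting task (roughly what do_task_dead() does) looks
 * like:
 *
 *      set_special_state(TASK_DEAD);
 *      schedule();
 *
 * whereas condition-based waits use the set_current_state() loop shown above.
 */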
228
229 /*
230  * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
231  *
232  * RT's spin/rwlock substitutions are state preserving. The state of the
233  * task when blocking on the lock is saved in task_struct::saved_state and
234  * restored after the lock has been acquired.  These operations are
235  * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
236  * lock related wakeups while the task is blocked on the lock are
237  * redirected to operate on task_struct::saved_state to ensure that these
238  * are not dropped. On restore task_struct::saved_state is set to
239  * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
240  *
241  * The lock operation looks like this:
242  *
243  *      current_save_and_set_rtlock_wait_state();
244  *      for (;;) {
245  *              if (try_lock())
246  *                      break;
247  *              raw_spin_unlock_irq(&lock->wait_lock);
248  *              schedule_rtlock();
249  *              raw_spin_lock_irq(&lock->wait_lock);
250  *              set_current_state(TASK_RTLOCK_WAIT);
251  *      }
252  *      current_restore_rtlock_saved_state();
253  */
254 #define current_save_and_set_rtlock_wait_state()                        \
255         do {                                                            \
256                 lockdep_assert_irqs_disabled();                         \
257                 raw_spin_lock(&current->pi_lock);                       \
258                 current->saved_state = current->__state;                \
259                 debug_rtlock_wait_set_state();                          \
260                 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);         \
261                 raw_spin_unlock(&current->pi_lock);                     \
262         } while (0);
263
264 #define current_restore_rtlock_saved_state()                            \
265         do {                                                            \
266                 lockdep_assert_irqs_disabled();                         \
267                 raw_spin_lock(&current->pi_lock);                       \
268                 debug_rtlock_wait_restore_state();                      \
269                 WRITE_ONCE(current->__state, current->saved_state);     \
270                 current->saved_state = TASK_RUNNING;                    \
271                 raw_spin_unlock(&current->pi_lock);                     \
272         } while (0);
273
274 #define get_current_state()     READ_ONCE(current->__state)
275
276 /*
277  * Define the task command name length as an enum, so that it is visible to
278  * BPF programs.
279  */
280 enum {
281         TASK_COMM_LEN = 16,
282 };
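
/*
 * Illustrative example: get_task_comm(), defined further down in this header,
 * copies the name into a caller-provided buffer sized with TASK_COMM_LEN:
 *
 *      char comm[TASK_COMM_LEN];
 *
 *      get_task_comm(comm, task);
 */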
283
284 extern void scheduler_tick(void);
285
286 #define MAX_SCHEDULE_TIMEOUT            LONG_MAX
287
288 extern long schedule_timeout(long timeout);
289 extern long schedule_timeout_interruptible(long timeout);
290 extern long schedule_timeout_killable(long timeout);
291 extern long schedule_timeout_uninterruptible(long timeout);
292 extern long schedule_timeout_idle(long timeout);
293 asmlinkage void schedule(void);
294 extern void schedule_preempt_disabled(void);
295 asmlinkage void preempt_schedule_irq(void);
296 #ifdef CONFIG_PREEMPT_RT
297  extern void schedule_rtlock(void);
298 #endif
299
300 extern int __must_check io_schedule_prepare(void);
301 extern void io_schedule_finish(int token);
302 extern long io_schedule_timeout(long timeout);
303 extern void io_schedule(void);
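
/*
 * Illustrative example of a timed sleep built on the helpers above. The
 * _interruptible/_killable/_uninterruptible variants set the task state on
 * behalf of the caller, so sleeping for about one second can be written as:
 *
 *      long remaining = schedule_timeout_interruptible(HZ);
 *
 * or, with an explicit state change:
 *
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      remaining = schedule_timeout(HZ);
 */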
304
305 /**
306  * struct prev_cputime - snapshot of system and user cputime
307  * @utime: time spent in user mode
308  * @stime: time spent in system mode
309  * @lock: protects the above two fields
310  *
311  * Stores previous user/system time values such that we can guarantee
312  * monotonicity.
313  */
314 struct prev_cputime {
315 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
316         u64                             utime;
317         u64                             stime;
318         raw_spinlock_t                  lock;
319 #endif
320 };
321
322 enum vtime_state {
323         /* Task is sleeping or running in a CPU with VTIME inactive: */
324         VTIME_INACTIVE = 0,
325         /* Task is idle */
326         VTIME_IDLE,
327         /* Task runs in kernelspace in a CPU with VTIME active: */
328         VTIME_SYS,
329         /* Task runs in userspace in a CPU with VTIME active: */
330         VTIME_USER,
331         /* Task runs as a guest in a CPU with VTIME active: */
332         VTIME_GUEST,
333 };
334
335 struct vtime {
336         seqcount_t              seqcount;
337         unsigned long long      starttime;
338         enum vtime_state        state;
339         unsigned int            cpu;
340         u64                     utime;
341         u64                     stime;
342         u64                     gtime;
343 };
344
345 /*
346  * Utilization clamp constraints.
347  * @UCLAMP_MIN: Minimum utilization
348  * @UCLAMP_MAX: Maximum utilization
349  * @UCLAMP_CNT: Utilization clamp constraints count
350  */
351 enum uclamp_id {
352         UCLAMP_MIN = 0,
353         UCLAMP_MAX,
354         UCLAMP_CNT
355 };
356
357 #ifdef CONFIG_SMP
358 extern struct root_domain def_root_domain;
359 extern struct mutex sched_domains_mutex;
360 #endif
361
362 struct sched_info {
363 #ifdef CONFIG_SCHED_INFO
364         /* Cumulative counters: */
365
366         /* # of times we have run on this CPU: */
367         unsigned long                   pcount;
368
369         /* Time spent waiting on a runqueue: */
370         unsigned long long              run_delay;
371
372         /* Timestamps: */
373
374         /* When did we last run on a CPU? */
375         unsigned long long              last_arrival;
376
377         /* When were we last queued to run? */
378         unsigned long long              last_queued;
379
380 #endif /* CONFIG_SCHED_INFO */
381 };
382
383 /*
384  * Integer metrics need fixed point arithmetic, e.g., sched/fair
385  * has a few: load, load_avg, util_avg, freq, and capacity.
386  *
387  * We define a basic fixed point arithmetic range, and then formalize
388  * all these metrics based on that basic range.
389  */
390 # define SCHED_FIXEDPOINT_SHIFT         10
391 # define SCHED_FIXEDPOINT_SCALE         (1L << SCHED_FIXEDPOINT_SHIFT)
392
393 /* Increase resolution of cpu_capacity calculations */
394 # define SCHED_CAPACITY_SHIFT           SCHED_FIXEDPOINT_SHIFT
395 # define SCHED_CAPACITY_SCALE           (1L << SCHED_CAPACITY_SHIFT)
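
/*
 * Worked example: with SCHED_FIXEDPOINT_SHIFT == 10, SCHED_CAPACITY_SCALE is
 * 1024, so a CPU with roughly 60% of the biggest CPU's compute capacity is
 * represented as 0.6 * 1024 ~= 614.
 */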
396
397 struct load_weight {
398         unsigned long                   weight;
399         u32                             inv_weight;
400 };
401
402 /**
403  * struct util_est - Estimated utilization of FAIR tasks
404  * @enqueued: instantaneous estimated utilization of a task/cpu
405  * @ewma:     the Exponential Weighted Moving Average (EWMA)
406  *            utilization of a task
407  *
408  * Support data structure to track an Exponential Weighted Moving Average
409  * (EWMA) of a FAIR task's utilization. New samples are added to the moving
410  * average each time a task completes an activation. The sample weight is chosen
411  * so that the EWMA will be relatively insensitive to transient changes to the
412  * task's workload.
413  *
414  * The enqueued attribute has a slightly different meaning for tasks and cpus:
415  * - task:   the task's util_avg at last task dequeue time
416  * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
417  * Thus, the util_est.enqueued of a task represents the contribution to the
418  * estimated utilization of the CPU where that task is currently enqueued.
419  *
420  * Only for tasks do we track a moving average of the past instantaneous
421  * estimated utilization. This makes it possible to absorb sporadic drops in
422  * the utilization of an otherwise almost periodic task.
423  *
424  * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
425  * updates. When a task is dequeued, its util_est should not be updated if its
426  * util_avg has not been updated in the meantime.
427  * This information is mapped into the MSB bit of util_est.enqueued at dequeue
428  * time. Since the maximum value of util_est.enqueued for a task is 1024 (the
429  * PELT util_avg for a task), it is safe to use the MSB.
430  */
431 struct util_est {
432         unsigned int                    enqueued;
433         unsigned int                    ewma;
434 #define UTIL_EST_WEIGHT_SHIFT           2
435 #define UTIL_AVG_UNCHANGED              0x80000000
436 } __attribute__((__aligned__(sizeof(u64))));
437
438 /*
439  * The load/runnable/util_avg accumulates an infinite geometric series
440  * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
441  *
442  * [load_avg definition]
443  *
444  *   load_avg = runnable% * scale_load_down(load)
445  *
446  * [runnable_avg definition]
447  *
448  *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
449  *
450  * [util_avg definition]
451  *
452  *   util_avg = running% * SCHED_CAPACITY_SCALE
453  *
454  * where runnable% is the time ratio that a sched_entity is runnable and
455  * running% the time ratio that a sched_entity is running.
456  *
457  * For cfs_rq, they are the aggregated values of all runnable and blocked
458  * sched_entities.
459  *
460  * The load/runnable/util_avg don't directly factor in frequency scaling and CPU
461  * capacity scaling. The scaling is done through the rq_clock_pelt that is used
462  * for computing those signals (see update_rq_clock_pelt()).
463  *
464  * N.B., the above ratios (runnable% and running%) themselves are in the
465  * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
466  * to as large a range as necessary. This is for example reflected by
467  * util_avg's SCHED_CAPACITY_SCALE.
468  *
469  * [Overflow issue]
470  *
471  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
472  * with the highest load (=88761), always runnable on a single cfs_rq,
473  * and should not overflow as the number already hits PID_MAX_LIMIT.
474  *
475  * For all other cases (including 32-bit kernels), struct load_weight's
476  * weight will overflow first before we do, because:
477  *
478  *    Max(load_avg) <= Max(load.weight)
479  *
480  * Then it is the load_weight's responsibility to consider overflow
481  * issues.
482  */
483 struct sched_avg {
484         u64                             last_update_time;
485         u64                             load_sum;
486         u64                             runnable_sum;
487         u32                             util_sum;
488         u32                             period_contrib;
489         unsigned long                   load_avg;
490         unsigned long                   runnable_avg;
491         unsigned long                   util_avg;
492         struct util_est                 util_est;
493 } ____cacheline_aligned;
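
/*
 * Worked example for the definitions above: a sched_entity that is running
 * for about 25% of wall-clock time converges towards
 * util_avg ~= 0.25 * SCHED_CAPACITY_SCALE = 256 (ignoring the frequency and
 * CPU capacity scaling applied via rq_clock_pelt()).
 */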
494
495 struct sched_statistics {
496 #ifdef CONFIG_SCHEDSTATS
497         u64                             wait_start;
498         u64                             wait_max;
499         u64                             wait_count;
500         u64                             wait_sum;
501         u64                             iowait_count;
502         u64                             iowait_sum;
503
504         u64                             sleep_start;
505         u64                             sleep_max;
506         s64                             sum_sleep_runtime;
507
508         u64                             block_start;
509         u64                             block_max;
510         s64                             sum_block_runtime;
511
512         u64                             exec_max;
513         u64                             slice_max;
514
515         u64                             nr_migrations_cold;
516         u64                             nr_failed_migrations_affine;
517         u64                             nr_failed_migrations_running;
518         u64                             nr_failed_migrations_hot;
519         u64                             nr_forced_migrations;
520
521         u64                             nr_wakeups;
522         u64                             nr_wakeups_sync;
523         u64                             nr_wakeups_migrate;
524         u64                             nr_wakeups_local;
525         u64                             nr_wakeups_remote;
526         u64                             nr_wakeups_affine;
527         u64                             nr_wakeups_affine_attempts;
528         u64                             nr_wakeups_passive;
529         u64                             nr_wakeups_idle;
530
531 #ifdef CONFIG_SCHED_CORE
532         u64                             core_forceidle_sum;
533 #endif
534 #endif /* CONFIG_SCHEDSTATS */
535 } ____cacheline_aligned;
536
537 struct sched_entity {
538         /* For load-balancing: */
539         struct load_weight              load;
540         struct rb_node                  run_node;
541         struct list_head                group_node;
542         unsigned int                    on_rq;
543
544         u64                             exec_start;
545         u64                             sum_exec_runtime;
546         u64                             vruntime;
547         u64                             prev_sum_exec_runtime;
548
549         u64                             nr_migrations;
550
551 #ifdef CONFIG_FAIR_GROUP_SCHED
552         int                             depth;
553         struct sched_entity             *parent;
554         /* rq on which this entity is (to be) queued: */
555         struct cfs_rq                   *cfs_rq;
556         /* rq "owned" by this entity/group: */
557         struct cfs_rq                   *my_q;
558         /* cached value of my_q->h_nr_running */
559         unsigned long                   runnable_weight;
560 #endif
561
562 #ifdef CONFIG_SMP
563         /*
564          * Per entity load average tracking.
565          *
566          * Put into separate cache line so it does not
567          * collide with read-mostly values above.
568          */
569         struct sched_avg                avg;
570 #endif
571 };
572
573 struct sched_rt_entity {
574         struct list_head                run_list;
575         unsigned long                   timeout;
576         unsigned long                   watchdog_stamp;
577         unsigned int                    time_slice;
578         unsigned short                  on_rq;
579         unsigned short                  on_list;
580
581         struct sched_rt_entity          *back;
582 #ifdef CONFIG_RT_GROUP_SCHED
583         struct sched_rt_entity          *parent;
584         /* rq on which this entity is (to be) queued: */
585         struct rt_rq                    *rt_rq;
586         /* rq "owned" by this entity/group: */
587         struct rt_rq                    *my_q;
588 #endif
589 } __randomize_layout;
590
591 struct sched_dl_entity {
592         struct rb_node                  rb_node;
593
594         /*
595          * Original scheduling parameters. Copied here from sched_attr
596          * during sched_setattr(), they will remain the same until
597          * the next sched_setattr().
598          */
599         u64                             dl_runtime;     /* Maximum runtime for each instance    */
600         u64                             dl_deadline;    /* Relative deadline of each instance   */
601         u64                             dl_period;      /* Separation of two instances (period) */
602         u64                             dl_bw;          /* dl_runtime / dl_period               */
603         u64                             dl_density;     /* dl_runtime / dl_deadline             */
604
605         /*
606          * Actual scheduling parameters. Initialized with the values above,
607          * they are continuously updated during task execution. Note that
608          * the remaining runtime could be < 0 in case we are in overrun.
609          */
610         s64                             runtime;        /* Remaining runtime for this instance  */
611         u64                             deadline;       /* Absolute deadline for this instance  */
612         unsigned int                    flags;          /* Specifying the scheduler behaviour   */
613
614         /*
615          * Some bool flags:
616          *
617          * @dl_throttled tells if we exhausted the runtime. If so, the
618          * task has to wait for a replenishment to be performed at the
619          * next firing of dl_timer.
620          *
621          * @dl_yielded tells if task gave up the CPU before consuming
622          * all its available runtime during the last job.
623          *
624          * @dl_non_contending tells if the task is inactive while still
625          * contributing to the active utilization. In other words, it
626          * indicates if the inactive timer has been armed and its handler
627          * has not been executed yet. This flag is useful to avoid race
628          * conditions between the inactive timer handler and the wakeup
629          * code.
630          *
631          * @dl_overrun tells if the task asked to be informed about runtime
632          * overruns.
633          */
634         unsigned int                    dl_throttled      : 1;
635         unsigned int                    dl_yielded        : 1;
636         unsigned int                    dl_non_contending : 1;
637         unsigned int                    dl_overrun        : 1;
638
639         /*
640          * Bandwidth enforcement timer. Each -deadline task has its
641          * own bandwidth to be enforced, thus we need one timer per task.
642          */
643         struct hrtimer                  dl_timer;
644
645         /*
646          * Inactive timer, responsible for decreasing the active utilization
647          * at the "0-lag time". When a -deadline task blocks, it contributes
648          * to GRUB's active utilization until the "0-lag time", hence a
649          * timer is needed to decrease the active utilization at the correct
650          * time.
651          */
652         struct hrtimer inactive_timer;
653
654 #ifdef CONFIG_RT_MUTEXES
655         /*
656          * Priority Inheritance. When a DEADLINE scheduling entity is boosted,
657          * pi_se points to the donor; otherwise it points to the dl_se it belongs
658          * to (the original one/itself).
659          */
660         struct sched_dl_entity *pi_se;
661 #endif
662 };
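
/*
 * Illustrative sketch (userspace view, values are examples only): the original
 * parameters above come from sched_setattr(2); e.g. a task needing 10ms of
 * runtime every 100ms, with results due 50ms into each period, would use
 * (all values in nanoseconds):
 *
 *      attr.sched_policy   = SCHED_DEADLINE;
 *      attr.sched_runtime  =  10 * 1000 * 1000;
 *      attr.sched_deadline =  50 * 1000 * 1000;
 *      attr.sched_period   = 100 * 1000 * 1000;
 *
 * subject to the admission-control requirement runtime <= deadline <= period.
 */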
663
664 #ifdef CONFIG_UCLAMP_TASK
665 /* Number of utilization clamp buckets (shorter alias) */
666 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
667
668 /*
669  * Utilization clamp for a scheduling entity
670  * @value:              clamp value "assigned" to a se
671  * @bucket_id:          bucket index corresponding to the "assigned" value
672  * @active:             the se is currently refcounted in a rq's bucket
673  * @user_defined:       the requested clamp value comes from user-space
674  *
675  * The bucket_id is the index of the clamp bucket matching the clamp value
676  * which is pre-computed and stored to avoid expensive integer divisions from
677  * the fast path.
678  *
679  * The active bit is set whenever a task has an "effective" value assigned,
680  * which can be different from the clamp value "requested" from user-space.
681  * This makes it possible to know that a task is refcounted in the rq's bucket
682  * corresponding to the "effective" bucket_id.
683  *
684  * The user_defined bit is set whenever a task has a task-specific clamp
685  * value requested from userspace, i.e. the system defaults apply to this task
686  * only as a restriction. This allows relaxing the default clamps when a less
687  * restrictive task-specific value has been requested, thus making it possible
688  * to implement a "nice" semantic. For example, a task running with a 20%
689  * default boost can still drop its own boosting to 0%.
690  */
691 struct uclamp_se {
692         unsigned int value              : bits_per(SCHED_CAPACITY_SCALE);
693         unsigned int bucket_id          : bits_per(UCLAMP_BUCKETS);
694         unsigned int active             : 1;
695         unsigned int user_defined       : 1;
696 };
697 #endif /* CONFIG_UCLAMP_TASK */
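
/*
 * Illustrative sketch (userspace view, values are examples only): a
 * task-specific clamp reaches uclamp_req[] via sched_setattr(2), e.g.
 * requesting a minimum utilization of 25% (256 out of SCHED_CAPACITY_SCALE):
 *
 *      attr.sched_flags    |= SCHED_FLAG_UTIL_CLAMP_MIN;
 *      attr.sched_util_min  = 256;
 *
 * which is stored in uclamp_req[UCLAMP_MIN] with user_defined set.
 */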
698
699 union rcu_special {
700         struct {
701                 u8                      blocked;
702                 u8                      need_qs;
703                 u8                      exp_hint; /* Hint for performance. */
704                 u8                      need_mb; /* Readers need smp_mb(). */
705         } b; /* Bits. */
706         u32 s; /* Set of bits. */
707 };
708
709 enum perf_event_task_context {
710         perf_invalid_context = -1,
711         perf_hw_context = 0,
712         perf_sw_context,
713         perf_nr_task_contexts,
714 };
715
716 struct wake_q_node {
717         struct wake_q_node *next;
718 };
719
720 struct kmap_ctrl {
721 #ifdef CONFIG_KMAP_LOCAL
722         int                             idx;
723         pte_t                           pteval[KM_MAX_IDX];
724 #endif
725 };
726
727 struct task_struct {
728 #ifdef CONFIG_THREAD_INFO_IN_TASK
729         /*
730          * For reasons of header soup (see current_thread_info()), this
731          * must be the first element of task_struct.
732          */
733         struct thread_info              thread_info;
734 #endif
735         unsigned int                    __state;
736
737 #ifdef CONFIG_PREEMPT_RT
738         /* saved state for "spinlock sleepers" */
739         unsigned int                    saved_state;
740 #endif
741
742         /*
743          * This begins the randomizable portion of task_struct. Only
744          * scheduling-critical items should be added above here.
745          */
746         randomized_struct_fields_start
747
748         void                            *stack;
749         refcount_t                      usage;
750         /* Per task flags (PF_*), defined further below: */
751         unsigned int                    flags;
752         unsigned int                    ptrace;
753
754 #ifdef CONFIG_SMP
755         int                             on_cpu;
756         struct __call_single_node       wake_entry;
757         unsigned int                    wakee_flips;
758         unsigned long                   wakee_flip_decay_ts;
759         struct task_struct              *last_wakee;
760
761         /*
762          * recent_used_cpu is initially set as the last CPU used by a task
763          * that wakes affine another task. Waker/wakee relationships can
764          * push tasks around a CPU where each wakeup moves to the next one.
765          * Tracking a recently used CPU allows a quick search for a recently
766          * used CPU that may be idle.
767          */
768         int                             recent_used_cpu;
769         int                             wake_cpu;
770 #endif
771         int                             on_rq;
772
773         int                             prio;
774         int                             static_prio;
775         int                             normal_prio;
776         unsigned int                    rt_priority;
777
778         struct sched_entity             se;
779         struct sched_rt_entity          rt;
780         struct sched_dl_entity          dl;
781         const struct sched_class        *sched_class;
782
783 #ifdef CONFIG_SCHED_CORE
784         struct rb_node                  core_node;
785         unsigned long                   core_cookie;
786         unsigned int                    core_occupation;
787 #endif
788
789 #ifdef CONFIG_CGROUP_SCHED
790         struct task_group               *sched_task_group;
791 #endif
792
793 #ifdef CONFIG_UCLAMP_TASK
794         /*
795          * Clamp values requested for a scheduling entity.
796          * Must be updated with task_rq_lock() held.
797          */
798         struct uclamp_se                uclamp_req[UCLAMP_CNT];
799         /*
800          * Effective clamp values used for a scheduling entity.
801          * Must be updated with task_rq_lock() held.
802          */
803         struct uclamp_se                uclamp[UCLAMP_CNT];
804 #endif
805
806         struct sched_statistics         stats;
807
808 #ifdef CONFIG_PREEMPT_NOTIFIERS
809         /* List of struct preempt_notifier: */
810         struct hlist_head               preempt_notifiers;
811 #endif
812
813 #ifdef CONFIG_BLK_DEV_IO_TRACE
814         unsigned int                    btrace_seq;
815 #endif
816
817         unsigned int                    policy;
818         int                             nr_cpus_allowed;
819         const cpumask_t                 *cpus_ptr;
820         cpumask_t                       *user_cpus_ptr;
821         cpumask_t                       cpus_mask;
822         void                            *migration_pending;
823 #ifdef CONFIG_SMP
824         unsigned short                  migration_disabled;
825 #endif
826         unsigned short                  migration_flags;
827
828 #ifdef CONFIG_PREEMPT_RCU
829         int                             rcu_read_lock_nesting;
830         union rcu_special               rcu_read_unlock_special;
831         struct list_head                rcu_node_entry;
832         struct rcu_node                 *rcu_blocked_node;
833 #endif /* #ifdef CONFIG_PREEMPT_RCU */
834
835 #ifdef CONFIG_TASKS_RCU
836         unsigned long                   rcu_tasks_nvcsw;
837         u8                              rcu_tasks_holdout;
838         u8                              rcu_tasks_idx;
839         int                             rcu_tasks_idle_cpu;
840         struct list_head                rcu_tasks_holdout_list;
841 #endif /* #ifdef CONFIG_TASKS_RCU */
842
843 #ifdef CONFIG_TASKS_TRACE_RCU
844         int                             trc_reader_nesting;
845         int                             trc_ipi_to_cpu;
846         union rcu_special               trc_reader_special;
847         struct list_head                trc_holdout_list;
848         struct list_head                trc_blkd_node;
849         int                             trc_blkd_cpu;
850 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
851
852         struct sched_info               sched_info;
853
854         struct list_head                tasks;
855 #ifdef CONFIG_SMP
856         struct plist_node               pushable_tasks;
857         struct rb_node                  pushable_dl_tasks;
858 #endif
859
860         struct mm_struct                *mm;
861         struct mm_struct                *active_mm;
862
863         /* Per-thread vma caching: */
864         struct vmacache                 vmacache;
865
866 #ifdef SPLIT_RSS_COUNTING
867         struct task_rss_stat            rss_stat;
868 #endif
869         int                             exit_state;
870         int                             exit_code;
871         int                             exit_signal;
872         /* The signal sent when the parent dies: */
873         int                             pdeath_signal;
874         /* JOBCTL_*, siglock protected: */
875         unsigned long                   jobctl;
876
877         /* Used for emulating ABI behavior of previous Linux versions: */
878         unsigned int                    personality;
879
880         /* Scheduler bits, serialized by scheduler locks: */
881         unsigned                        sched_reset_on_fork:1;
882         unsigned                        sched_contributes_to_load:1;
883         unsigned                        sched_migrated:1;
884 #ifdef CONFIG_PSI
885         unsigned                        sched_psi_wake_requeue:1;
886 #endif
887
888         /* Force alignment to the next boundary: */
889         unsigned                        :0;
890
891         /* Unserialized, strictly 'current' */
892
893         /*
894          * This field must not be in the scheduler word above due to wakelist
895          * queueing no longer being serialized by p->on_cpu. However:
896          *
897          * p->XXX = X;                  ttwu()
898          * schedule()                     if (p->on_rq && ..) // false
899          *   smp_mb__after_spinlock();    if (smp_load_acquire(&p->on_cpu) && //true
900          *   deactivate_task()                ttwu_queue_wakelist())
901          *     p->on_rq = 0;                    p->sched_remote_wakeup = Y;
902          *
903          * guarantees all stores of 'current' are visible before
904          * ->sched_remote_wakeup gets used, so it can be in this word.
905          */
906         unsigned                        sched_remote_wakeup:1;
907
908         /* Bit to tell LSMs we're in execve(): */
909         unsigned                        in_execve:1;
910         unsigned                        in_iowait:1;
911 #ifndef TIF_RESTORE_SIGMASK
912         unsigned                        restore_sigmask:1;
913 #endif
914 #ifdef CONFIG_MEMCG
915         unsigned                        in_user_fault:1;
916 #endif
917 #ifdef CONFIG_COMPAT_BRK
918         unsigned                        brk_randomized:1;
919 #endif
920 #ifdef CONFIG_CGROUPS
921         /* disallow userland-initiated cgroup migration */
922         unsigned                        no_cgroup_migration:1;
923         /* task is frozen/stopped (used by the cgroup freezer) */
924         unsigned                        frozen:1;
925 #endif
926 #ifdef CONFIG_BLK_CGROUP
927         unsigned                        use_memdelay:1;
928 #endif
929 #ifdef CONFIG_PSI
930         /* Stalled due to lack of memory */
931         unsigned                        in_memstall:1;
932 #endif
933 #ifdef CONFIG_PAGE_OWNER
934         /* Used by page_owner=on to detect recursion in page tracking. */
935         unsigned                        in_page_owner:1;
936 #endif
937 #ifdef CONFIG_EVENTFD
938         /* Recursion prevention for eventfd_signal() */
939         unsigned                        in_eventfd_signal:1;
940 #endif
941 #ifdef CONFIG_IOMMU_SVA
942         unsigned                        pasid_activated:1;
943 #endif
944 #ifdef  CONFIG_CPU_SUP_INTEL
945         unsigned                        reported_split_lock:1;
946 #endif
947
948         unsigned long                   atomic_flags; /* Flags requiring atomic access. */
949
950         struct restart_block            restart_block;
951
952         pid_t                           pid;
953         pid_t                           tgid;
954
955 #ifdef CONFIG_STACKPROTECTOR
956         /* Canary value for the -fstack-protector GCC feature: */
957         unsigned long                   stack_canary;
958 #endif
959         /*
960          * Pointers to the (original) parent process, youngest child, younger sibling,
961          * older sibling, respectively.  (p->father can be replaced with
962          * p->real_parent->pid)
963          */
964
965         /* Real parent process: */
966         struct task_struct __rcu        *real_parent;
967
968         /* Recipient of SIGCHLD, wait4() reports: */
969         struct task_struct __rcu        *parent;
970
971         /*
972          * Children/sibling form the list of natural children:
973          */
974         struct list_head                children;
975         struct list_head                sibling;
976         struct task_struct              *group_leader;
977
978         /*
979          * 'ptraced' is the list of tasks this task is using ptrace() on.
980          *
981          * This includes both natural children and PTRACE_ATTACH targets.
982          * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
983          */
984         struct list_head                ptraced;
985         struct list_head                ptrace_entry;
986
987         /* PID/PID hash table linkage. */
988         struct pid                      *thread_pid;
989         struct hlist_node               pid_links[PIDTYPE_MAX];
990         struct list_head                thread_group;
991         struct list_head                thread_node;
992
993         struct completion               *vfork_done;
994
995         /* CLONE_CHILD_SETTID: */
996         int __user                      *set_child_tid;
997
998         /* CLONE_CHILD_CLEARTID: */
999         int __user                      *clear_child_tid;
1000
1001         /* PF_KTHREAD | PF_IO_WORKER */
1002         void                            *worker_private;
1003
1004         u64                             utime;
1005         u64                             stime;
1006 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1007         u64                             utimescaled;
1008         u64                             stimescaled;
1009 #endif
1010         u64                             gtime;
1011         struct prev_cputime             prev_cputime;
1012 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1013         struct vtime                    vtime;
1014 #endif
1015
1016 #ifdef CONFIG_NO_HZ_FULL
1017         atomic_t                        tick_dep_mask;
1018 #endif
1019         /* Context switch counts: */
1020         unsigned long                   nvcsw;
1021         unsigned long                   nivcsw;
1022
1023         /* Monotonic time in nsecs: */
1024         u64                             start_time;
1025
1026         /* Boot based time in nsecs: */
1027         u64                             start_boottime;
1028
1029         /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1030         unsigned long                   min_flt;
1031         unsigned long                   maj_flt;
1032
1033         /* Empty if CONFIG_POSIX_CPUTIMERS=n */
1034         struct posix_cputimers          posix_cputimers;
1035
1036 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1037         struct posix_cputimers_work     posix_cputimers_work;
1038 #endif
1039
1040         /* Process credentials: */
1041
1042         /* Tracer's credentials at attach: */
1043         const struct cred __rcu         *ptracer_cred;
1044
1045         /* Objective and real subjective task credentials (COW): */
1046         const struct cred __rcu         *real_cred;
1047
1048         /* Effective (overridable) subjective task credentials (COW): */
1049         const struct cred __rcu         *cred;
1050
1051 #ifdef CONFIG_KEYS
1052         /* Cached requested key. */
1053         struct key                      *cached_requested_key;
1054 #endif
1055
1056         /*
1057          * executable name, excluding path.
1058          *
1059          * - normally initialized by setup_new_exec()
1060          * - access it with [gs]et_task_comm()
1061          * - lock it with task_lock()
1062          */
1063         char                            comm[TASK_COMM_LEN];
1064
1065         struct nameidata                *nameidata;
1066
1067 #ifdef CONFIG_SYSVIPC
1068         struct sysv_sem                 sysvsem;
1069         struct sysv_shm                 sysvshm;
1070 #endif
1071 #ifdef CONFIG_DETECT_HUNG_TASK
1072         unsigned long                   last_switch_count;
1073         unsigned long                   last_switch_time;
1074 #endif
1075         /* Filesystem information: */
1076         struct fs_struct                *fs;
1077
1078         /* Open file information: */
1079         struct files_struct             *files;
1080
1081 #ifdef CONFIG_IO_URING
1082         struct io_uring_task            *io_uring;
1083 #endif
1084
1085         /* Namespaces: */
1086         struct nsproxy                  *nsproxy;
1087
1088         /* Signal handlers: */
1089         struct signal_struct            *signal;
1090         struct sighand_struct __rcu             *sighand;
1091         sigset_t                        blocked;
1092         sigset_t                        real_blocked;
1093         /* Restored if set_restore_sigmask() was used: */
1094         sigset_t                        saved_sigmask;
1095         struct sigpending               pending;
1096         unsigned long                   sas_ss_sp;
1097         size_t                          sas_ss_size;
1098         unsigned int                    sas_ss_flags;
1099
1100         struct callback_head            *task_works;
1101
1102 #ifdef CONFIG_AUDIT
1103 #ifdef CONFIG_AUDITSYSCALL
1104         struct audit_context            *audit_context;
1105 #endif
1106         kuid_t                          loginuid;
1107         unsigned int                    sessionid;
1108 #endif
1109         struct seccomp                  seccomp;
1110         struct syscall_user_dispatch    syscall_dispatch;
1111
1112         /* Thread group tracking: */
1113         u64                             parent_exec_id;
1114         u64                             self_exec_id;
1115
1116         /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1117         spinlock_t                      alloc_lock;
1118
1119         /* Protection of the PI data structures: */
1120         raw_spinlock_t                  pi_lock;
1121
1122         struct wake_q_node              wake_q;
1123
1124 #ifdef CONFIG_RT_MUTEXES
1125         /* PI waiters blocked on a rt_mutex held by this task: */
1126         struct rb_root_cached           pi_waiters;
1127         /* Updated under owner's pi_lock and rq lock */
1128         struct task_struct              *pi_top_task;
1129         /* Deadlock detection and priority inheritance handling: */
1130         struct rt_mutex_waiter          *pi_blocked_on;
1131 #endif
1132
1133 #ifdef CONFIG_DEBUG_MUTEXES
1134         /* Mutex deadlock detection: */
1135         struct mutex_waiter             *blocked_on;
1136 #endif
1137
1138 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1139         int                             non_block_count;
1140 #endif
1141
1142 #ifdef CONFIG_TRACE_IRQFLAGS
1143         struct irqtrace_events          irqtrace;
1144         unsigned int                    hardirq_threaded;
1145         u64                             hardirq_chain_key;
1146         int                             softirqs_enabled;
1147         int                             softirq_context;
1148         int                             irq_config;
1149 #endif
1150 #ifdef CONFIG_PREEMPT_RT
1151         int                             softirq_disable_cnt;
1152 #endif
1153
1154 #ifdef CONFIG_LOCKDEP
1155 # define MAX_LOCK_DEPTH                 48UL
1156         u64                             curr_chain_key;
1157         int                             lockdep_depth;
1158         unsigned int                    lockdep_recursion;
1159         struct held_lock                held_locks[MAX_LOCK_DEPTH];
1160 #endif
1161
1162 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1163         unsigned int                    in_ubsan;
1164 #endif
1165
1166         /* Journalling filesystem info: */
1167         void                            *journal_info;
1168
1169         /* Stacked block device info: */
1170         struct bio_list                 *bio_list;
1171
1172         /* Stack plugging: */
1173         struct blk_plug                 *plug;
1174
1175         /* VM state: */
1176         struct reclaim_state            *reclaim_state;
1177
1178         struct backing_dev_info         *backing_dev_info;
1179
1180         struct io_context               *io_context;
1181
1182 #ifdef CONFIG_COMPACTION
1183         struct capture_control          *capture_control;
1184 #endif
1185         /* Ptrace state: */
1186         unsigned long                   ptrace_message;
1187         kernel_siginfo_t                *last_siginfo;
1188
1189         struct task_io_accounting       ioac;
1190 #ifdef CONFIG_PSI
1191         /* Pressure stall state */
1192         unsigned int                    psi_flags;
1193 #endif
1194 #ifdef CONFIG_TASK_XACCT
1195         /* Accumulated RSS usage: */
1196         u64                             acct_rss_mem1;
1197         /* Accumulated virtual memory usage: */
1198         u64                             acct_vm_mem1;
1199         /* stime + utime since last update: */
1200         u64                             acct_timexpd;
1201 #endif
1202 #ifdef CONFIG_CPUSETS
1203         /* Protected by ->alloc_lock: */
1204         nodemask_t                      mems_allowed;
1205         /* Sequence number to catch updates: */
1206         seqcount_spinlock_t             mems_allowed_seq;
1207         int                             cpuset_mem_spread_rotor;
1208         int                             cpuset_slab_spread_rotor;
1209 #endif
1210 #ifdef CONFIG_CGROUPS
1211         /* Control Group info protected by css_set_lock: */
1212         struct css_set __rcu            *cgroups;
1213         /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1214         struct list_head                cg_list;
1215 #endif
1216 #ifdef CONFIG_X86_CPU_RESCTRL
1217         u32                             closid;
1218         u32                             rmid;
1219 #endif
1220 #ifdef CONFIG_FUTEX
1221         struct robust_list_head __user  *robust_list;
1222 #ifdef CONFIG_COMPAT
1223         struct compat_robust_list_head __user *compat_robust_list;
1224 #endif
1225         struct list_head                pi_state_list;
1226         struct futex_pi_state           *pi_state_cache;
1227         struct mutex                    futex_exit_mutex;
1228         unsigned int                    futex_state;
1229 #endif
1230 #ifdef CONFIG_PERF_EVENTS
1231         struct perf_event_context       *perf_event_ctxp[perf_nr_task_contexts];
1232         struct mutex                    perf_event_mutex;
1233         struct list_head                perf_event_list;
1234 #endif
1235 #ifdef CONFIG_DEBUG_PREEMPT
1236         unsigned long                   preempt_disable_ip;
1237 #endif
1238 #ifdef CONFIG_NUMA
1239         /* Protected by alloc_lock: */
1240         struct mempolicy                *mempolicy;
1241         short                           il_prev;
1242         short                           pref_node_fork;
1243 #endif
1244 #ifdef CONFIG_NUMA_BALANCING
1245         int                             numa_scan_seq;
1246         unsigned int                    numa_scan_period;
1247         unsigned int                    numa_scan_period_max;
1248         int                             numa_preferred_nid;
1249         unsigned long                   numa_migrate_retry;
1250         /* Migration stamp: */
1251         u64                             node_stamp;
1252         u64                             last_task_numa_placement;
1253         u64                             last_sum_exec_runtime;
1254         struct callback_head            numa_work;
1255
1256         /*
1257          * This pointer is only modified for current in syscall and
1258          * pagefault context (and for tasks being destroyed), so it can be read
1259          * from any of the following contexts:
1260          *  - RCU read-side critical section
1261          *  - current->numa_group from everywhere
1262          *  - task's runqueue locked, task not running
1263          */
1264         struct numa_group __rcu         *numa_group;
1265
1266         /*
1267          * numa_faults is an array split into four regions:
1268          * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1269          * in this precise order.
1270          *
1271          * faults_memory: Exponential decaying average of faults on a per-node
1272          * basis. Scheduling placement decisions are made based on these
1273          * counts. The values remain static for the duration of a PTE scan.
1274          * faults_cpu: Track the nodes the process was running on when a NUMA
1275          * hinting fault was incurred.
1276          * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1277          * during the current scan window. When the scan completes, the counts
1278          * in faults_memory and faults_cpu decay and these values are copied.
1279          */
1280         unsigned long                   *numa_faults;
1281         unsigned long                   total_numa_faults;
1282
1283         /*
1284          * numa_faults_locality tracks if faults recorded during the last
1285          * scan window were remote/local or failed to migrate. The task scan
1286          * period is adapted based on the locality of the faults with different
1287          * weights depending on whether they were shared or private faults
1288          */
1289         unsigned long                   numa_faults_locality[3];
1290
1291         unsigned long                   numa_pages_migrated;
1292 #endif /* CONFIG_NUMA_BALANCING */
1293
1294 #ifdef CONFIG_RSEQ
1295         struct rseq __user *rseq;
1296         u32 rseq_sig;
1297         /*
1298          * RmW on rseq_event_mask must be performed atomically
1299          * with respect to preemption.
1300          */
1301         unsigned long rseq_event_mask;
1302 #endif
1303
1304         struct tlbflush_unmap_batch     tlb_ubc;
1305
1306         union {
1307                 refcount_t              rcu_users;
1308                 struct rcu_head         rcu;
1309         };
1310
1311         /* Cache last used pipe for splice(): */
1312         struct pipe_inode_info          *splice_pipe;
1313
1314         struct page_frag                task_frag;
1315
1316 #ifdef CONFIG_TASK_DELAY_ACCT
1317         struct task_delay_info          *delays;
1318 #endif
1319
1320 #ifdef CONFIG_FAULT_INJECTION
1321         int                             make_it_fail;
1322         unsigned int                    fail_nth;
1323 #endif
1324         /*
1325          * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1326          * balance_dirty_pages() for a dirty throttling pause:
1327          */
1328         int                             nr_dirtied;
1329         int                             nr_dirtied_pause;
1330         /* Start of a write-and-pause period: */
1331         unsigned long                   dirty_paused_when;
1332
1333 #ifdef CONFIG_LATENCYTOP
1334         int                             latency_record_count;
1335         struct latency_record           latency_record[LT_SAVECOUNT];
1336 #endif
1337         /*
1338          * Time slack values; these are used to round up poll() and
1339          * select() etc timeout values. These are in nanoseconds.
1340          */
1341         u64                             timer_slack_ns;
1342         u64                             default_timer_slack_ns;
1343
1344 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1345         unsigned int                    kasan_depth;
1346 #endif
1347
1348 #ifdef CONFIG_KCSAN
1349         struct kcsan_ctx                kcsan_ctx;
1350 #ifdef CONFIG_TRACE_IRQFLAGS
1351         struct irqtrace_events          kcsan_save_irqtrace;
1352 #endif
1353 #ifdef CONFIG_KCSAN_WEAK_MEMORY
1354         int                             kcsan_stack_depth;
1355 #endif
1356 #endif
1357
1358 #if IS_ENABLED(CONFIG_KUNIT)
1359         struct kunit                    *kunit_test;
1360 #endif
1361
1362 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1363         /* Index of current stored address in ret_stack: */
1364         int                             curr_ret_stack;
1365         int                             curr_ret_depth;
1366
1367         /* Stack of return addresses for return function tracing: */
1368         struct ftrace_ret_stack         *ret_stack;
1369
1370         /* Timestamp for last schedule: */
1371         unsigned long long              ftrace_timestamp;
1372
1373         /*
1374          * Number of functions that haven't been traced
1375          * because of depth overrun:
1376          */
1377         atomic_t                        trace_overrun;
1378
1379         /* Pause tracing: */
1380         atomic_t                        tracing_graph_pause;
1381 #endif
1382
1383 #ifdef CONFIG_TRACING
1384         /* State flags for use by tracers: */
1385         unsigned long                   trace;
1386
1387         /* Bitmask and counter of trace recursion: */
1388         unsigned long                   trace_recursion;
1389 #endif /* CONFIG_TRACING */
1390
1391 #ifdef CONFIG_KCOV
1392         /* See kernel/kcov.c for more details. */
1393
1394         /* Coverage collection mode enabled for this task (0 if disabled): */
1395         unsigned int                    kcov_mode;
1396
1397         /* Size of the kcov_area: */
1398         unsigned int                    kcov_size;
1399
1400         /* Buffer for coverage collection: */
1401         void                            *kcov_area;
1402
1403         /* KCOV descriptor wired to this task, or NULL: */
1404         struct kcov                     *kcov;
1405
1406         /* KCOV common handle for remote coverage collection: */
1407         u64                             kcov_handle;
1408
1409         /* KCOV sequence number: */
1410         int                             kcov_sequence;
1411
1412         /* Collect coverage from softirq context: */
1413         unsigned int                    kcov_softirq;
1414 #endif
1415
1416 #ifdef CONFIG_MEMCG
1417         struct mem_cgroup               *memcg_in_oom;
1418         gfp_t                           memcg_oom_gfp_mask;
1419         int                             memcg_oom_order;
1420
1421         /* Number of pages to reclaim on returning to userland: */
1422         unsigned int                    memcg_nr_pages_over_high;
1423
1424         /* Used by memcontrol for targeted memcg charge: */
1425         struct mem_cgroup               *active_memcg;
1426 #endif
1427
1428 #ifdef CONFIG_BLK_CGROUP
1429         struct request_queue            *throttle_queue;
1430 #endif
1431
1432 #ifdef CONFIG_UPROBES
1433         struct uprobe_task              *utask;
1434 #endif
1435 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1436         unsigned int                    sequential_io;
1437         unsigned int                    sequential_io_avg;
1438 #endif
1439         struct kmap_ctrl                kmap_ctrl;
1440 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1441         unsigned long                   task_state_change;
1442 # ifdef CONFIG_PREEMPT_RT
1443         unsigned long                   saved_state_change;
1444 # endif
1445 #endif
1446         int                             pagefault_disabled;
1447 #ifdef CONFIG_MMU
1448         struct task_struct              *oom_reaper_list;
1449         struct timer_list               oom_reaper_timer;
1450 #endif
1451 #ifdef CONFIG_VMAP_STACK
1452         struct vm_struct                *stack_vm_area;
1453 #endif
1454 #ifdef CONFIG_THREAD_INFO_IN_TASK
1455         /* A live task holds one reference: */
1456         refcount_t                      stack_refcount;
1457 #endif
1458 #ifdef CONFIG_LIVEPATCH
1459         int patch_state;
1460 #endif
1461 #ifdef CONFIG_SECURITY
1462         /* Used by LSM modules for access restriction: */
1463         void                            *security;
1464 #endif
1465 #ifdef CONFIG_BPF_SYSCALL
1466         /* Used by BPF task local storage */
1467         struct bpf_local_storage __rcu  *bpf_storage;
1468         /* Used for BPF run context */
1469         struct bpf_run_ctx              *bpf_ctx;
1470 #endif
1471
1472 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1473         unsigned long                   lowest_stack;
1474         unsigned long                   prev_lowest_stack;
1475 #endif
1476
1477 #ifdef CONFIG_X86_MCE
1478         void __user                     *mce_vaddr;
1479         __u64                           mce_kflags;
1480         u64                             mce_addr;
1481         __u64                           mce_ripv : 1,
1482                                         mce_whole_page : 1,
1483                                         __mce_reserved : 62;
1484         struct callback_head            mce_kill_me;
1485         int                             mce_count;
1486 #endif
1487
1488 #ifdef CONFIG_KRETPROBES
1489         struct llist_head               kretprobe_instances;
1490 #endif
1491 #ifdef CONFIG_RETHOOK
1492         struct llist_head               rethooks;
1493 #endif
1494
1495 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1496         /*
1497          * If L1D flush is supported on mm context switch,
1498          * this callback head is used to queue kill work for
1499          * tasks that are found running on a core that does
1500          * not have SMT disabled.
1501          */
1502         struct callback_head            l1d_flush_kill;
1503 #endif
1504
1505 #ifdef CONFIG_RV
1506         /*
1507          * Per-task RV monitors. The number of monitors is currently
1508          * fixed at RV_PER_TASK_MONITORS. If more monitors ever prove
1509          * justified, more can be added or a dynamic method developed;
1510          * so far, none of this has been needed.
1511          */
1512         union rv_task_monitor           rv[RV_PER_TASK_MONITORS];
1513 #endif
1514
1515         /*
1516          * New fields for task_struct should be added above here, so that
1517          * they are included in the randomized portion of task_struct.
1518          */
1519         randomized_struct_fields_end
1520
1521         /* CPU-specific state of this task: */
1522         struct thread_struct            thread;
1523
1524         /*
1525          * WARNING: on x86, 'thread_struct' contains a variable-sized
1526          * structure.  It *MUST* be at the end of 'task_struct'.
1527          *
1528          * Do not put anything below here!
1529          */
1530 };
1531
1532 static inline struct pid *task_pid(struct task_struct *task)
1533 {
1534         return task->thread_pid;
1535 }
1536
1537 /*
1538  * Helpers to get the task's different pids as they are seen
1539  * from various namespaces:
1540  *
1541  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1542  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1543  *                     current;
1544  * task_xid_nr_ns()  : id seen from the namespace specified.
1545  *
1546  * See also pid_nr() etc. in include/linux/pid.h, and the sketch after task_pid_vnr() below.
1547  */
1548 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1549
1550 static inline pid_t task_pid_nr(struct task_struct *tsk)
1551 {
1552         return tsk->pid;
1553 }
1554
1555 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1556 {
1557         return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1558 }
1559
1560 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1561 {
1562         return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1563 }
1564
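/*
 * Illustrative sketch, not part of the kernel API: the helper name below is
 * hypothetical and only shows how the global and namespace-local views of a
 * task's PID relate.
 */
static inline bool example_task_pid_is_translated(struct task_struct *tsk)
{
        /* Differs whenever @tsk is observed from a nested pid namespace. */
        return task_pid_nr(tsk) != task_pid_vnr(tsk);
}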
1565
1566 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1567 {
1568         return tsk->tgid;
1569 }
1570
1571 /**
1572  * pid_alive - check that a task structure is not stale
1573  * @p: Task structure to be checked.
1574  *
1575  * Test if a process is not yet dead (at most zombie state).
1576  * If pid_alive fails, then pointers within the task structure
1577  * can be stale and must not be dereferenced.
1578  *
1579  * Return: 1 if the process is alive. 0 otherwise.
1580  */
1581 static inline int pid_alive(const struct task_struct *p)
1582 {
1583         return p->thread_pid != NULL;
1584 }
1585
1586 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1587 {
1588         return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1589 }
1590
1591 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1592 {
1593         return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1594 }
1595
1596
1597 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1598 {
1599         return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1600 }
1601
1602 static inline pid_t task_session_vnr(struct task_struct *tsk)
1603 {
1604         return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1605 }
1606
1607 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1608 {
1609         return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1610 }
1611
1612 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1613 {
1614         return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1615 }
1616
1617 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1618 {
1619         pid_t pid = 0;
1620
1621         rcu_read_lock();
1622         if (pid_alive(tsk))
1623                 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1624         rcu_read_unlock();
1625
1626         return pid;
1627 }
1628
1629 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1630 {
1631         return task_ppid_nr_ns(tsk, &init_pid_ns);
1632 }
1633
1634 /* Obsolete, do not use: */
1635 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1636 {
1637         return task_pgrp_nr_ns(tsk, &init_pid_ns);
1638 }
1639
1640 #define TASK_REPORT_IDLE        (TASK_REPORT + 1)
1641 #define TASK_REPORT_MAX         (TASK_REPORT_IDLE << 1)
1642
1643 static inline unsigned int __task_state_index(unsigned int tsk_state,
1644                                               unsigned int tsk_exit_state)
1645 {
1646         unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
1647
1648         BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1649
1650         if (tsk_state == TASK_IDLE)
1651                 state = TASK_REPORT_IDLE;
1652
1653         /*
1654          * We're lying here, but rather than expose a completely new task state
1655          * to userspace, we can make this appear as if the task has gone through
1656          * a regular rt_mutex_lock() call.
1657          */
1658         if (tsk_state == TASK_RTLOCK_WAIT)
1659                 state = TASK_UNINTERRUPTIBLE;
1660
1661         return fls(state);
1662 }
1663
1664 static inline unsigned int task_state_index(struct task_struct *tsk)
1665 {
1666         return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
1667 }
1668
1669 static inline char task_index_to_char(unsigned int state)
1670 {
1671         static const char state_char[] = "RSDTtXZPI";
1672
1673         BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1674
1675         return state_char[state];
1676 }
1677
1678 static inline char task_state_to_char(struct task_struct *tsk)
1679 {
1680         return task_index_to_char(task_state_index(tsk));
1681 }
1682
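/*
 * Worked example, derived from the definitions above: a task sleeping in
 * TASK_INTERRUPTIBLE (0x0001) gives __task_state_index() == fls(0x0001) == 1,
 * and task_index_to_char(1) == 'S', the familiar "sleeping" letter shown by
 * ps(1) and /proc/<pid>/stat.
 */
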
1683 /**
1684  * is_global_init - check if a task structure is init
1685  * @tsk: Task structure to be checked.
1686  *
1687  * Check if a task structure is the first user space task the kernel created.
1688  * Since init is free to have sub-threads, we need to check the tgid.
1689  *
1690  * Return: 1 if the task structure is init. 0 otherwise.
1691  */
1692 static inline int is_global_init(struct task_struct *tsk)
1693 {
1694         return task_tgid_nr(tsk) == 1;
1695 }
1696
1697 extern struct pid *cad_pid;
1698
1699 /*
1700  * Per process flags
1701  */
1702 #define PF_VCPU                 0x00000001      /* I'm a virtual CPU */
1703 #define PF_IDLE                 0x00000002      /* I am an IDLE thread */
1704 #define PF_EXITING              0x00000004      /* Getting shut down */
1705 #define PF_POSTCOREDUMP         0x00000008      /* Coredumps should ignore this task */
1706 #define PF_IO_WORKER            0x00000010      /* Task is an IO worker */
1707 #define PF_WQ_WORKER            0x00000020      /* I'm a workqueue worker */
1708 #define PF_FORKNOEXEC           0x00000040      /* Forked but didn't exec */
1709 #define PF_MCE_PROCESS          0x00000080      /* Process policy on mce errors */
1710 #define PF_SUPERPRIV            0x00000100      /* Used super-user privileges */
1711 #define PF_DUMPCORE             0x00000200      /* Dumped core */
1712 #define PF_SIGNALED             0x00000400      /* Killed by a signal */
1713 #define PF_MEMALLOC             0x00000800      /* Allocating memory */
1714 #define PF_NPROC_EXCEEDED       0x00001000      /* set_user() noticed that RLIMIT_NPROC was exceeded */
1715 #define PF_USED_MATH            0x00002000      /* If unset, the FPU must be initialized before use */
1716 #define PF_NOFREEZE             0x00008000      /* This thread should not be frozen */
1717 #define PF_FROZEN               0x00010000      /* Frozen for system suspend */
1718 #define PF_KSWAPD               0x00020000      /* I am kswapd */
1719 #define PF_MEMALLOC_NOFS        0x00040000      /* All allocation requests will inherit GFP_NOFS */
1720 #define PF_MEMALLOC_NOIO        0x00080000      /* All allocation requests will inherit GFP_NOIO */
1721 #define PF_LOCAL_THROTTLE       0x00100000      /* Throttle writes only against the bdi I write to;
1722                                                  * I am cleaning dirty pages from some other bdi. */
1723 #define PF_KTHREAD              0x00200000      /* I am a kernel thread */
1724 #define PF_RANDOMIZE            0x00400000      /* Randomize virtual address space */
1725 #define PF_NO_SETAFFINITY       0x04000000      /* Userland is not allowed to meddle with cpus_mask */
1726 #define PF_MCE_EARLY            0x08000000      /* Early kill for mce process policy */
1727 #define PF_MEMALLOC_PIN         0x10000000      /* Allocation context constrained to zones which allow long term pinning. */
1728 #define PF_FREEZER_SKIP         0x40000000      /* Freezer should not count it as freezable */
1729 #define PF_SUSPEND_TASK         0x80000000      /* This thread called freeze_processes() and should not be frozen */
1730
1731 /*
1732  * Only the _current_ task can read/write to tsk->flags; other
1733  * tasks may only access tsk->flags in read-only mode, for example
1734  * with tsk_used_math() (as during threaded core dumping).
1735  * There is, however, an exception to this rule during ptrace
1736  * and fork: the ptracer task is allowed to write to the
1737  * child->flags of its traced child (and likewise, on fork, the
1738  * parent can write to the child->flags), because the child is
1739  * guaranteed not to be running and therefore not changing
1740  * child->flags at the same time the parent does it.
1741  */
1742 #define clear_stopped_child_used_math(child)    do { (child)->flags &= ~PF_USED_MATH; } while (0)
1743 #define set_stopped_child_used_math(child)      do { (child)->flags |= PF_USED_MATH; } while (0)
1744 #define clear_used_math()                       clear_stopped_child_used_math(current)
1745 #define set_used_math()                         set_stopped_child_used_math(current)
1746
1747 #define conditional_stopped_child_used_math(condition, child) \
1748         do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1749
1750 #define conditional_used_math(condition)        conditional_stopped_child_used_math(condition, current)
1751
1752 #define copy_to_stopped_child_used_math(child) \
1753         do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1754
1755 /* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
1756 #define tsk_used_math(p)                        ((p)->flags & PF_USED_MATH)
1757 #define used_math()                             tsk_used_math(current)
1758
1759 static __always_inline bool is_percpu_thread(void)
1760 {
1761 #ifdef CONFIG_SMP
1762         return (current->flags & PF_NO_SETAFFINITY) &&
1763                 (current->nr_cpus_allowed  == 1);
1764 #else
1765         return true;
1766 #endif
1767 }
1768
1769 /* Per-process atomic flags. */
1770 #define PFA_NO_NEW_PRIVS                0       /* May not gain new privileges. */
1771 #define PFA_SPREAD_PAGE                 1       /* Spread page cache over cpuset */
1772 #define PFA_SPREAD_SLAB                 2       /* Spread some slab caches over cpuset */
1773 #define PFA_SPEC_SSB_DISABLE            3       /* Speculative Store Bypass disabled */
1774 #define PFA_SPEC_SSB_FORCE_DISABLE      4       /* Speculative Store Bypass force disabled */
1775 #define PFA_SPEC_IB_DISABLE             5       /* Indirect branch speculation restricted */
1776 #define PFA_SPEC_IB_FORCE_DISABLE       6       /* Indirect branch speculation permanently restricted */
1777 #define PFA_SPEC_SSB_NOEXEC             7       /* Speculative Store Bypass clear on execve() */
1778
1779 #define TASK_PFA_TEST(name, func)                                       \
1780         static inline bool task_##func(struct task_struct *p)           \
1781         { return test_bit(PFA_##name, &p->atomic_flags); }
1782
1783 #define TASK_PFA_SET(name, func)                                        \
1784         static inline void task_set_##func(struct task_struct *p)       \
1785         { set_bit(PFA_##name, &p->atomic_flags); }
1786
1787 #define TASK_PFA_CLEAR(name, func)                                      \
1788         static inline void task_clear_##func(struct task_struct *p)     \
1789         { clear_bit(PFA_##name, &p->atomic_flags); }
1790
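/*
 * For illustration, the TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) invocation
 * below expands to:
 *
 *        static inline bool task_no_new_privs(struct task_struct *p)
 *        { return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 */
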
1791 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1792 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1793
1794 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1795 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1796 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1797
1798 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1799 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1800 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1801
1802 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1803 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1804 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1805
1806 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1807 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1808 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1809
1810 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1811 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1812
1813 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1814 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1815 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1816
1817 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1818 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1819
1820 static inline void
1821 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1822 {
1823         current->flags &= ~flags;
1824         current->flags |= orig_flags & flags;
1825 }
1826
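/*
 * Illustrative sketch of the save/set/restore pattern current_restore_flags()
 * is meant for; the helper names below are hypothetical. Real users normally
 * go through wrappers such as memalloc_nofs_save()/memalloc_nofs_restore()
 * from <linux/sched/mm.h>.
 */
static inline unsigned long example_nofs_save(void)
{
        unsigned long orig = current->flags & PF_MEMALLOC_NOFS;

        current->flags |= PF_MEMALLOC_NOFS;
        return orig;
}

static inline void example_nofs_restore(unsigned long orig)
{
        current_restore_flags(orig, PF_MEMALLOC_NOFS);
}
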
1827 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1828 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
1829 #ifdef CONFIG_SMP
1830 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1831 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1832 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1833 extern void release_user_cpus_ptr(struct task_struct *p);
1834 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1835 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1836 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
1837 #else
1838 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1839 {
1840 }
1841 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1842 {
1843         if (!cpumask_test_cpu(0, new_mask))
1844                 return -EINVAL;
1845         return 0;
1846 }
1847 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1848 {
1849         if (src->user_cpus_ptr)
1850                 return -EINVAL;
1851         return 0;
1852 }
1853 static inline void release_user_cpus_ptr(struct task_struct *p)
1854 {
1855         WARN_ON(p->user_cpus_ptr);
1856 }
1857
1858 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1859 {
1860         return 0;
1861 }
1862 #endif
1863
1864 extern int yield_to(struct task_struct *p, bool preempt);
1865 extern void set_user_nice(struct task_struct *p, long nice);
1866 extern int task_prio(const struct task_struct *p);
1867
1868 /**
1869  * task_nice - return the nice value of a given task.
1870  * @p: the task in question.
1871  *
1872  * Return: The nice value [ -20 ... 0 ... 19 ].
1873  */
1874 static inline int task_nice(const struct task_struct *p)
1875 {
1876         return PRIO_TO_NICE((p)->static_prio);
1877 }
1878
1879 extern int can_nice(const struct task_struct *p, const int nice);
1880 extern int task_curr(const struct task_struct *p);
1881 extern int idle_cpu(int cpu);
1882 extern int available_idle_cpu(int cpu);
1883 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1884 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1885 extern void sched_set_fifo(struct task_struct *p);
1886 extern void sched_set_fifo_low(struct task_struct *p);
1887 extern void sched_set_normal(struct task_struct *p, int nice);
1888 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1889 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1890 extern struct task_struct *idle_task(int cpu);
1891
1892 /**
1893  * is_idle_task - is the specified task an idle task?
1894  * @p: the task in question.
1895  *
1896  * Return: 1 if @p is an idle task. 0 otherwise.
1897  */
1898 static __always_inline bool is_idle_task(const struct task_struct *p)
1899 {
1900         return !!(p->flags & PF_IDLE);
1901 }
1902
1903 extern struct task_struct *curr_task(int cpu);
1904 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1905
1906 void yield(void);
1907
1908 union thread_union {
1909 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1910         struct task_struct task;
1911 #endif
1912 #ifndef CONFIG_THREAD_INFO_IN_TASK
1913         struct thread_info thread_info;
1914 #endif
1915         unsigned long stack[THREAD_SIZE/sizeof(long)];
1916 };
1917
1918 #ifndef CONFIG_THREAD_INFO_IN_TASK
1919 extern struct thread_info init_thread_info;
1920 #endif
1921
1922 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1923
1924 #ifdef CONFIG_THREAD_INFO_IN_TASK
1925 # define task_thread_info(task) (&(task)->thread_info)
1926 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1927 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1928 #endif
1929
1930 /*
1931  * find a task by one of its numerical ids
1932  *
1933  * find_task_by_pid_ns():
1934  *      finds a task by its pid in the specified namespace
1935  * find_task_by_vpid():
1936  *      finds a task by its virtual pid
1937  *
1938  * see also find_vpid() etc in include/linux/pid.h
1939  */
1940
1941 extern struct task_struct *find_task_by_vpid(pid_t nr);
1942 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1943
1944 /*
1945  * find a task by its virtual pid and get the task struct
1946  */
1947 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
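/*
 * Note that, unlike the plain lookups above, find_get_task_by_vpid() returns
 * the task with a reference held; callers are expected to drop it with
 * put_task_struct() (declared in <linux/sched/task.h>) when done.
 */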
1948
1949 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1950 extern int wake_up_process(struct task_struct *tsk);
1951 extern void wake_up_new_task(struct task_struct *tsk);
1952
1953 #ifdef CONFIG_SMP
1954 extern void kick_process(struct task_struct *tsk);
1955 #else
1956 static inline void kick_process(struct task_struct *tsk) { }
1957 #endif
1958
1959 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1960
1961 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1962 {
1963         __set_task_comm(tsk, from, false);
1964 }
1965
1966 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1967 #define get_task_comm(buf, tsk) ({                      \
1968         BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);     \
1969         __get_task_comm(buf, sizeof(buf), tsk);         \
1970 })
1971
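/*
 * Illustrative sketch, hypothetical helper: get_task_comm() needs a real
 * array of TASK_COMM_LEN bytes so that sizeof(buf) is meaningful; passing a
 * plain pointer would trip the BUILD_BUG_ON() above.
 */
static inline void example_copy_comm(struct task_struct *tsk)
{
        char comm[TASK_COMM_LEN];

        get_task_comm(comm, tsk);
        /* comm now holds the NUL-terminated thread name of @tsk. */
}
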
1972 #ifdef CONFIG_SMP
1973 static __always_inline void scheduler_ipi(void)
1974 {
1975         /*
1976          * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1977          * TIF_NEED_RESCHED remotely (for the first time) will also send
1978          * this IPI.
1979          */
1980         preempt_fold_need_resched();
1981 }
1982 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
1983 #else
1984 static inline void scheduler_ipi(void) { }
1985 static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
1986 {
1987         return 1;
1988 }
1989 #endif
1990
1991 /*
1992  * Set thread flags in another task's structure.
1993  * See asm/thread_info.h for the available TIF_xxxx flags:
1994  */
1995 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1996 {
1997         set_ti_thread_flag(task_thread_info(tsk), flag);
1998 }
1999
2000 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2001 {
2002         clear_ti_thread_flag(task_thread_info(tsk), flag);
2003 }
2004
2005 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
2006                                           bool value)
2007 {
2008         update_ti_thread_flag(task_thread_info(tsk), flag, value);
2009 }
2010
2011 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2012 {
2013         return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2014 }
2015
2016 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2017 {
2018         return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2019 }
2020
2021 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2022 {
2023         return test_ti_thread_flag(task_thread_info(tsk), flag);
2024 }
2025
2026 static inline void set_tsk_need_resched(struct task_struct *tsk)
2027 {
2028         set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
2029 }
2030
2031 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2032 {
2033         clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
2034 }
2035
2036 static inline int test_tsk_need_resched(struct task_struct *tsk)
2037 {
2038         return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
2039 }
2040
2041 /*
2042  * cond_resched() and cond_resched_lock(): latency reduction via
2043  * explicit rescheduling in places that are safe. The return
2044  * value indicates whether a reschedule was in fact done.
2045  * cond_resched_lock() will drop the spinlock before scheduling.
2046  */
2047 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
2048 extern int __cond_resched(void);
2049
2050 #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
2051
2052 DECLARE_STATIC_CALL(cond_resched, __cond_resched);
2053
2054 static __always_inline int _cond_resched(void)
2055 {
2056         return static_call_mod(cond_resched)();
2057 }
2058
2059 #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
2060 extern int dynamic_cond_resched(void);
2061
2062 static __always_inline int _cond_resched(void)
2063 {
2064         return dynamic_cond_resched();
2065 }
2066
2067 #else
2068
2069 static inline int _cond_resched(void)
2070 {
2071         return __cond_resched();
2072 }
2073
2074 #endif /* CONFIG_PREEMPT_DYNAMIC */
2075
2076 #else
2077
2078 static inline int _cond_resched(void) { return 0; }
2079
2080 #endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
2081
2082 #define cond_resched() ({                       \
2083         __might_resched(__FILE__, __LINE__, 0); \
2084         _cond_resched();                        \
2085 })
2086
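/*
 * Illustrative sketch, hypothetical helper: a long-running loop giving the
 * scheduler a chance to run at safe points via cond_resched().
 */
static inline void example_process_items(unsigned long nr_items)
{
        unsigned long i;

        for (i = 0; i < nr_items; i++) {
                /* ... per-item work that may take a while ... */
                cond_resched();
        }
}
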
2087 extern int __cond_resched_lock(spinlock_t *lock);
2088 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2089 extern int __cond_resched_rwlock_write(rwlock_t *lock);
2090
2091 #define MIGHT_RESCHED_RCU_SHIFT         8
2092 #define MIGHT_RESCHED_PREEMPT_MASK      ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2093
2094 #ifndef CONFIG_PREEMPT_RT
2095 /*
2096  * Non-RT kernels have an elevated preempt count due to the held lock,
2097  * but are not allowed to be inside an RCU read-side critical section.
2098  */
2099 # define PREEMPT_LOCK_RESCHED_OFFSETS   PREEMPT_LOCK_OFFSET
2100 #else
2101 /*
2102  * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
2103  * cond_resched*lock() has to take that into account because it checks for
2104  * preempt_count() and rcu_preempt_depth().
2105  */
2106 # define PREEMPT_LOCK_RESCHED_OFFSETS   \
2107         (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
2108 #endif
2109
2110 #define cond_resched_lock(lock) ({                                              \
2111         __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);      \
2112         __cond_resched_lock(lock);                                              \
2113 })
2114
2115 #define cond_resched_rwlock_read(lock) ({                                       \
2116         __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);      \
2117         __cond_resched_rwlock_read(lock);                                       \
2118 })
2119
2120 #define cond_resched_rwlock_write(lock) ({                                      \
2121         __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);      \
2122         __cond_resched_rwlock_write(lock);                                      \
2123 })
2124
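/*
 * Illustrative sketch, hypothetical helper: rescheduling while holding a
 * spinlock. cond_resched_lock() drops and re-acquires @lock around the
 * reschedule, so any state the lock protects must be revalidated whenever
 * it returns non-zero.
 */
static inline void example_drain_locked(spinlock_t *lock, unsigned long nr)
{
        unsigned long i;

        spin_lock(lock);
        for (i = 0; i < nr; i++) {
                /* ... process one item protected by @lock ... */
                cond_resched_lock(lock);
        }
        spin_unlock(lock);
}
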
2125 static inline void cond_resched_rcu(void)
2126 {
2127 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2128         rcu_read_unlock();
2129         cond_resched();
2130         rcu_read_lock();
2131 #endif
2132 }
2133
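/*
 * Illustrative sketch, hypothetical helper: a long RCU-protected scan that
 * briefly leaves the read-side critical section to reschedule. The caller
 * must hold exactly one level of rcu_read_lock() when calling
 * cond_resched_rcu().
 */
static inline void example_rcu_scan(unsigned long nr)
{
        unsigned long i;

        rcu_read_lock();
        for (i = 0; i < nr; i++) {
                /* ... examine one RCU-protected element ... */
                cond_resched_rcu();
        }
        rcu_read_unlock();
}
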
2134 #ifdef CONFIG_PREEMPT_DYNAMIC
2135
2136 extern bool preempt_model_none(void);
2137 extern bool preempt_model_voluntary(void);
2138 extern bool preempt_model_full(void);
2139
2140 #else
2141
2142 static inline bool preempt_model_none(void)
2143 {
2144         return IS_ENABLED(CONFIG_PREEMPT_NONE);
2145 }
2146 static inline bool preempt_model_voluntary(void)
2147 {
2148         return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
2149 }
2150 static inline bool preempt_model_full(void)
2151 {
2152         return IS_ENABLED(CONFIG_PREEMPT);
2153 }
2154
2155 #endif
2156
2157 static inline bool preempt_model_rt(void)
2158 {
2159         return IS_ENABLED(CONFIG_PREEMPT_RT);
2160 }
2161
2162 /*
2163  * Does the preemption model allow non-cooperative preemption?
2164  *
2165  * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
2166  * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
2167  * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
2168  * PREEMPT_NONE model.
2169  */
2170 static inline bool preempt_model_preemptible(void)
2171 {
2172         return preempt_model_full() || preempt_model_rt();
2173 }
2174
2175 /*
2176  * Does a critical section need to be broken because another
2177  * task is waiting? (Technically this does not depend on CONFIG_PREEMPTION,
2178  * but reflects a general need for low latency.)
2179  */
2180 static inline int spin_needbreak(spinlock_t *lock)
2181 {
2182 #ifdef CONFIG_PREEMPTION
2183         return spin_is_contended(lock);
2184 #else
2185         return 0;
2186 #endif
2187 }
2188
2189 /*
2190  * Check if a rwlock is contended.
2191  * Returns non-zero if there is another task waiting on the rwlock.
2192  * Returns zero if the lock is not contended or the system / underlying
2193  * rwlock implementation does not support contention detection.
2194  * Technically does not depend on CONFIG_PREEMPTION, but a general need
2195  * for low latency.
2196  */
2197 static inline int rwlock_needbreak(rwlock_t *lock)
2198 {
2199 #ifdef CONFIG_PREEMPTION
2200         return rwlock_is_contended(lock);
2201 #else
2202         return 0;
2203 #endif
2204 }
2205
2206 static __always_inline bool need_resched(void)
2207 {
2208         return unlikely(tif_need_resched());
2209 }
2210
2211 /*
2212  * Wrappers for p->thread_info->cpu access. No-op on UP.
2213  */
2214 #ifdef CONFIG_SMP
2215
2216 static inline unsigned int task_cpu(const struct task_struct *p)
2217 {
2218         return READ_ONCE(task_thread_info(p)->cpu);
2219 }
2220
2221 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2222
2223 #else
2224
2225 static inline unsigned int task_cpu(const struct task_struct *p)
2226 {
2227         return 0;
2228 }
2229
2230 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2231 {
2232 }
2233
2234 #endif /* CONFIG_SMP */
2235
2236 extern bool sched_task_on_rq(struct task_struct *p);
2237 extern unsigned long get_wchan(struct task_struct *p);
2238 extern struct task_struct *cpu_curr_snapshot(int cpu);
2239
2240 /*
2241  * In order to reduce various lock holder preemption latencies, provide an
2242  * interface to see if a vCPU is currently running or not.
2243  *
2244  * This allows us to terminate optimistic spin loops and block, analogous to
2245  * the native optimistic spin heuristic of testing if the lock owner task is
2246  * running or not.
2247  */
2248 #ifndef vcpu_is_preempted
2249 static inline bool vcpu_is_preempted(int cpu)
2250 {
2251         return false;
2252 }
2253 #endif
2254
2255 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2256 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2257
2258 #ifndef TASK_SIZE_OF
2259 #define TASK_SIZE_OF(tsk)       TASK_SIZE
2260 #endif
2261
2262 #ifdef CONFIG_SMP
2263 static inline bool owner_on_cpu(struct task_struct *owner)
2264 {
2265         /*
2266          * Due to lock holder preemption, we skip spinning if the
2267          * task is not on a CPU or its CPU is preempted.
2268          */
2269         return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2270 }
2271
2272 /* Returns effective CPU energy utilization, as seen by the scheduler */
2273 unsigned long sched_cpu_util(int cpu);
2274 #endif /* CONFIG_SMP */
2275
2276 #ifdef CONFIG_RSEQ
2277
2278 /*
2279  * Map the event mask onto the user-space ABI enum rseq_cs_flags
2280  * for direct mask checks.
2281  */
2282 enum rseq_event_mask_bits {
2283         RSEQ_EVENT_PREEMPT_BIT  = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2284         RSEQ_EVENT_SIGNAL_BIT   = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2285         RSEQ_EVENT_MIGRATE_BIT  = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2286 };
2287
2288 enum rseq_event_mask {
2289         RSEQ_EVENT_PREEMPT      = (1U << RSEQ_EVENT_PREEMPT_BIT),
2290         RSEQ_EVENT_SIGNAL       = (1U << RSEQ_EVENT_SIGNAL_BIT),
2291         RSEQ_EVENT_MIGRATE      = (1U << RSEQ_EVENT_MIGRATE_BIT),
2292 };
2293
2294 static inline void rseq_set_notify_resume(struct task_struct *t)
2295 {
2296         if (t->rseq)
2297                 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2298 }
2299
2300 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2301
2302 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2303                                              struct pt_regs *regs)
2304 {
2305         if (current->rseq)
2306                 __rseq_handle_notify_resume(ksig, regs);
2307 }
2308
2309 static inline void rseq_signal_deliver(struct ksignal *ksig,
2310                                        struct pt_regs *regs)
2311 {
2312         preempt_disable();
2313         __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2314         preempt_enable();
2315         rseq_handle_notify_resume(ksig, regs);
2316 }
2317
2318 /* rseq_preempt() requires preemption to be disabled. */
2319 static inline void rseq_preempt(struct task_struct *t)
2320 {
2321         __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2322         rseq_set_notify_resume(t);
2323 }
2324
2325 /* rseq_migrate() requires preemption to be disabled. */
2326 static inline void rseq_migrate(struct task_struct *t)
2327 {
2328         __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2329         rseq_set_notify_resume(t);
2330 }
2331
2332 /*
2333  * If the parent process has a registered restartable sequences area, the
2334  * child inherits it. For a clone with CLONE_VM set, rseq is unregistered.
2335  */
2336 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2337 {
2338         if (clone_flags & CLONE_VM) {
2339                 t->rseq = NULL;
2340                 t->rseq_sig = 0;
2341                 t->rseq_event_mask = 0;
2342         } else {
2343                 t->rseq = current->rseq;
2344                 t->rseq_sig = current->rseq_sig;
2345                 t->rseq_event_mask = current->rseq_event_mask;
2346         }
2347 }
2348
2349 static inline void rseq_execve(struct task_struct *t)
2350 {
2351         t->rseq = NULL;
2352         t->rseq_sig = 0;
2353         t->rseq_event_mask = 0;
2354 }
2355
2356 #else
2357
2358 static inline void rseq_set_notify_resume(struct task_struct *t)
2359 {
2360 }
2361 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2362                                              struct pt_regs *regs)
2363 {
2364 }
2365 static inline void rseq_signal_deliver(struct ksignal *ksig,
2366                                        struct pt_regs *regs)
2367 {
2368 }
2369 static inline void rseq_preempt(struct task_struct *t)
2370 {
2371 }
2372 static inline void rseq_migrate(struct task_struct *t)
2373 {
2374 }
2375 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2376 {
2377 }
2378 static inline void rseq_execve(struct task_struct *t)
2379 {
2380 }
2381
2382 #endif
2383
2384 #ifdef CONFIG_DEBUG_RSEQ
2385
2386 void rseq_syscall(struct pt_regs *regs);
2387
2388 #else
2389
2390 static inline void rseq_syscall(struct pt_regs *regs)
2391 {
2392 }
2393
2394 #endif
2395
2396 #ifdef CONFIG_SCHED_CORE
2397 extern void sched_core_free(struct task_struct *tsk);
2398 extern void sched_core_fork(struct task_struct *p);
2399 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2400                                 unsigned long uaddr);
2401 #else
2402 static inline void sched_core_free(struct task_struct *tsk) { }
2403 static inline void sched_core_fork(struct task_struct *p) { }
2404 #endif
2405
2406 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2407
2408 #endif