/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 *			Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * Naturally, the FULL state doesn't exist for the CPU resource at the
 * system level, but it does exist at the cgroup level: it means that
 * all non-idle tasks in a cgroup are delayed on the CPU resource while
 * it is being used by others outside of the cgroup, or throttled by
 * the cgroup's cpu.max configuration.
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
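 *
 * For example, 500ms of accumulated SOME time over a 2s sampling
 * period reads as %SOME = 25%.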
 *
 *			Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1) = 0.4%
 *	   FULL = (256 - min(257, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1) = 25%
 *	   FULL = (4 - min(3, 4)) / 4 = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
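 *
 * [ As an illustrative sketch in plain C (not kernel code), one
 *   update period of this model boils down to:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus);
 *	some = min(100 * nr_delayed_tasks / threads, 100);
 *	full = 100 * (threads - min(nr_running_tasks, threads)) / threads;
 *
 *   e.g. 2 delayed and 6 running tasks on 8 CPUs yield threads = 8,
 *   SOME = 25% and FULL = 25%. ]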
 *
 *			Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	%SOME = tSOME / period
 *	%FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */
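/*
 * A minimal sketch of the aggregation step above, using hypothetical
 * per-CPU sample arrays rather than the kernel's actual data
 * structures (illustrative only):
 *
 *	u64 tNONIDLE = 0, tSOME = 0, tFULL = 0;
 *	int i;
 *
 *	for (i = 0; i < nr_cpus; i++) {
 *		tNONIDLE += nonidle[i];
 *		tSOME += (u64)some[i] * nonidle[i];
 *		tFULL += (u64)full[i] * nonidle[i];
 *	}
 *	if (tNONIDLE) {
 *		tSOME = div64_u64(tSOME, tNONIDLE);
 *		tFULL = div64_u64(tFULL, tNONIDLE);
 *	}
 *
 * collect_percpu_times() below implements this weighting with the
 * real psi_group_cpu buckets.
 */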
#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/psi.h>

#include "sched.h"
static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);
#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif

static int __init setup_psi(char *str)
{
        return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);
/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
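/*
 * A quick sanity check on the constants above, assuming the 11-bit
 * fixed point used by calc_load() (FIXED_1 == 2048):
 *
 *	2048 / exp(2.0 / 10)  = 1676.8 -> 1677
 *	2048 / exp(2.0 / 60)  = 1980.9 -> 1981
 *	2048 / exp(2.0 / 300) = 2034.4 -> 2034
 */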
/* PSI trigger definitions */
#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
#define UPDATES_PER_WINDOW 10	/* 10 updates per window */
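/*
 * E.g. a trigger with a 1s window is re-evaluated every 1s /
 * UPDATES_PER_WINDOW = 100ms while polling is active; see the
 * poll_min_period bookkeeping in psi_trigger_create() below.
 */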
/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
        .pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);
static void group_init(struct psi_group *group)
{
        int cpu;

        for_each_possible_cpu(cpu)
                seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
        group->avg_last_update = sched_clock();
        group->avg_next_update = group->avg_last_update + psi_period;
        INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
        mutex_init(&group->avgs_lock);
        /* Init trigger-related members */
        mutex_init(&group->trigger_lock);
        INIT_LIST_HEAD(&group->triggers);
        memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
        group->poll_states = 0;
        group->poll_min_period = U32_MAX;
        memset(group->polling_total, 0, sizeof(group->polling_total));
        group->polling_next_update = ULLONG_MAX;
        group->polling_until = 0;
        rcu_assign_pointer(group->poll_task, NULL);
}
void __init psi_init(void)
{
        if (!psi_enable) {
                static_branch_enable(&psi_disabled);
                return;
        }

        psi_period = jiffies_to_nsecs(PSI_FREQ);
        group_init(&psi_system);
}
static bool test_state(unsigned int *tasks, enum psi_states state)
{
        switch (state) {
        case PSI_IO_SOME:
                return tasks[NR_IOWAIT];
        case PSI_IO_FULL:
                return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
        case PSI_MEM_SOME:
                return tasks[NR_MEMSTALL];
        case PSI_MEM_FULL:
                return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
        case PSI_CPU_SOME:
                return tasks[NR_RUNNING] > tasks[NR_ONCPU];
        case PSI_CPU_FULL:
                return tasks[NR_RUNNING] && !tasks[NR_ONCPU];
        case PSI_NONIDLE:
                return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
                        tasks[NR_RUNNING];
        default:
                return false;
        }
}
static void get_recent_times(struct psi_group *group, int cpu,
                             enum psi_aggregators aggregator, u32 *times,
                             u32 *pchanged_states)
{
        struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
        u64 now, state_start;
        enum psi_states s;
        unsigned int seq;
        u32 state_mask;

        *pchanged_states = 0;

        /* Snapshot a coherent view of the CPU state */
        do {
                seq = read_seqcount_begin(&groupc->seq);
                now = cpu_clock(cpu);
                memcpy(times, groupc->times, sizeof(groupc->times));
                state_mask = groupc->state_mask;
                state_start = groupc->state_start;
        } while (read_seqcount_retry(&groupc->seq, seq));

        /* Calculate state time deltas against the previous snapshot */
        for (s = 0; s < NR_PSI_STATES; s++) {
                u32 delta;
                /*
                 * In addition to already concluded states, we also
                 * incorporate currently active states on the CPU,
                 * since states may last for many sampling periods.
                 *
                 * This way we keep our delta sampling buckets small
                 * (u32) and our reported pressure close to what's
                 * actually happening.
                 */
                if (state_mask & (1 << s))
                        times[s] += now - state_start;

                delta = times[s] - groupc->times_prev[aggregator][s];
                groupc->times_prev[aggregator][s] = times[s];

                times[s] = delta;
                if (delta)
                        *pchanged_states |= (1 << s);
        }
}
static void calc_avgs(unsigned long avg[3], int missed_periods,
                      u64 time, u64 period)
{
        unsigned long pct;

        /* Fill in zeroes for periods of no activity */
        if (missed_periods) {
                avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
                avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
                avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
        }

        /* Sample the most recent active period */
        pct = div_u64(time * 100, period);
        pct *= FIXED_1;
        avg[0] = calc_load(avg[0], EXP_10s, pct);
        avg[1] = calc_load(avg[1], EXP_60s, pct);
        avg[2] = calc_load(avg[2], EXP_300s, pct);
}
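/*
 * Note: calc_load() is the usual fixed-point exponential moving
 * average, roughly avg' = (avg * exp + pct * (FIXED_1 - exp)) / FIXED_1,
 * so with EXP_10s the distance between avg[0] and a steady-state
 * sample shrinks by a factor of e every 10 seconds.
 */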
static void collect_percpu_times(struct psi_group *group,
                                 enum psi_aggregators aggregator,
                                 u32 *pchanged_states)
{
        u64 deltas[NR_PSI_STATES - 1] = { 0, };
        unsigned long nonidle_total = 0;
        u32 changed_states = 0;
        int cpu;
        int s;

        /*
         * Collect the per-cpu time buckets and average them into a
         * single time sample that is normalized to wallclock time.
         *
         * For averaging, each CPU is weighted by its non-idle time in
         * the sampling period. This eliminates artifacts from uneven
         * loading, or even entirely idle CPUs.
         */
        for_each_possible_cpu(cpu) {
                u32 times[NR_PSI_STATES];
                u32 nonidle;
                u32 cpu_changed_states;

                get_recent_times(group, cpu, aggregator, times,
                                 &cpu_changed_states);
                changed_states |= cpu_changed_states;

                nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
                nonidle_total += nonidle;

                for (s = 0; s < PSI_NONIDLE; s++)
                        deltas[s] += (u64)times[s] * nonidle;
        }

        /*
         * Integrate the sample into the running statistics that are
         * reported to userspace: the cumulative stall times and the
         * running averages.
         *
         * Pressure percentages are sampled at PSI_FREQ. We might be
         * called more often when the user polls more frequently than
         * that; we might be called less often when there is no task
         * activity, thus no data, and clock ticks are sporadic. The
         * below handles both.
         */

        /* total= */
        for (s = 0; s < NR_PSI_STATES - 1; s++)
                group->total[aggregator][s] +=
                                div_u64(deltas[s], max(nonidle_total, 1UL));

        if (pchanged_states)
                *pchanged_states = changed_states;
}
static u64 update_averages(struct psi_group *group, u64 now)
{
        unsigned long missed_periods = 0;
        u64 expires, period;
        u64 avg_next_update;
        int s;

        /* avgX= */
        expires = group->avg_next_update;
        if (now - expires >= psi_period)
                missed_periods = div_u64(now - expires, psi_period);

        /*
         * The periodic clock tick can get delayed for various
         * reasons, especially on loaded systems. To avoid clock
         * drift, we schedule the clock in fixed psi_period intervals.
         * But the deltas we sample out of the per-cpu buckets above
         * are based on the actual time elapsing between clock ticks.
         */
        avg_next_update = expires + ((1 + missed_periods) * psi_period);
        period = now - (group->avg_last_update + (missed_periods * psi_period));
        group->avg_last_update = now;

        for (s = 0; s < NR_PSI_STATES - 1; s++) {
                u32 sample;

                sample = group->total[PSI_AVGS][s] - group->avg_total[s];
                /*
                 * Due to the lockless sampling of the time buckets,
                 * recorded time deltas can slip into the next period,
                 * which under full pressure can result in samples in
                 * excess of the period length.
                 *
                 * We don't want to report non-sensical pressures in
                 * excess of 100%, nor do we want to drop such events
                 * on the floor. Instead we punt any overage into the
                 * future until pressure subsides. By doing this we
                 * don't underreport the occurring pressure curve, we
                 * just report it delayed by one period length.
                 *
                 * The error isn't cumulative. As soon as another
                 * delta slips from a period P to P+1, by definition
                 * it frees up its time T in P.
                 */
                if (sample > period)
                        sample = period;
                group->avg_total[s] += sample;
                calc_avgs(group->avg[s], missed_periods, sample, period);
        }

        return avg_next_update;
}
static void psi_avgs_work(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct psi_group *group;
        u32 changed_states;
        bool nonidle;
        u64 now;

        dwork = to_delayed_work(work);
        group = container_of(dwork, struct psi_group, avgs_work);

        mutex_lock(&group->avgs_lock);

        now = sched_clock();

        collect_percpu_times(group, PSI_AVGS, &changed_states);
        nonidle = changed_states & (1 << PSI_NONIDLE);
        /*
         * If there is task activity, periodically fold the per-cpu
         * times and feed samples into the running averages. If things
         * are idle and there is no data to process, stop the clock.
         * Once restarted, we'll catch up the running averages in one
         * go - see calc_avgs() and missed_periods.
         */
        if (now >= group->avg_next_update)
                group->avg_next_update = update_averages(group, now);

        if (nonidle) {
                schedule_delayed_work(dwork, nsecs_to_jiffies(
                                group->avg_next_update - now) + 1);
        }

        mutex_unlock(&group->avgs_lock);
}
/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
                         u64 prev_growth)
{
        win->start_time = now;
        win->start_value = value;
        win->prev_growth = prev_growth;
}
/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because the PSI signal changes only
 * in the positive direction, and over relatively small window sizes
 * the growth is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
        u64 elapsed;
        u64 growth;

        elapsed = now - win->start_time;
        growth = value - win->start_value;
        /*
         * After each tracking window passes win->start_value and
         * win->start_time get reset and win->prev_growth stores
         * the average per-window growth of the previous window.
         * win->prev_growth is then used to interpolate additional
         * growth from the previous window assuming it was linear.
         */
        if (elapsed > win->size)
                window_reset(win, now, value, growth);
        else {
                u32 remaining;

                remaining = win->size - elapsed;
                growth += div64_u64(win->prev_growth * remaining, win->size);
        }

        return growth;
}
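/*
 * A worked example with hypothetical numbers: for a 1s window whose
 * previous interval grew by 60ms (prev_growth), sampling 700ms into
 * the current window with 50ms of growth observed so far estimates
 * 50ms + 60ms * 300/1000 = 68ms of growth for the full sliding
 * window.
 */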
static void init_triggers(struct psi_group *group, u64 now)
{
        struct psi_trigger *t;

        list_for_each_entry(t, &group->triggers, node)
                window_reset(&t->win, now,
                             group->total[PSI_POLL][t->state], 0);
        memcpy(group->polling_total, group->total[PSI_POLL],
               sizeof(group->polling_total));
        group->polling_next_update = now + group->poll_min_period;
}
static u64 update_triggers(struct psi_group *group, u64 now)
{
        struct psi_trigger *t;
        bool new_stall = false;
        u64 *total = group->total[PSI_POLL];

        /*
         * On subsequent updates, calculate growth deltas and let
         * watchers know when their specified thresholds are exceeded.
         */
        list_for_each_entry(t, &group->triggers, node) {
                u64 growth;

                /* Check for stall activity */
                if (group->polling_total[t->state] == total[t->state])
                        continue;

                /*
                 * Multiple triggers might be looking at the same state;
                 * remember to update group->polling_total[] once we've
                 * been through all of them. Also remember to extend the
                 * polling time if we see new stall activity.
                 */
                new_stall = true;

                /* Calculate growth since last update */
                growth = window_update(&t->win, now, total[t->state]);
                if (growth < t->threshold)
                        continue;

                /* Limit event signaling to once per window */
                if (now < t->last_event_time + t->win.size)
                        continue;

                /* Generate an event */
                if (cmpxchg(&t->event, 0, 1) == 0)
                        wake_up_interruptible(&t->event_wait);
                t->last_event_time = now;
        }

        if (new_stall)
                memcpy(group->polling_total, total,
                       sizeof(group->polling_total));

        return now + group->poll_min_period;
}
/* Schedule polling if it's not already scheduled. */
static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
{
        struct task_struct *task;

        /*
         * Do not reschedule if already scheduled. A possible race with
         * a timer scheduled after this check but before the mod_timer()
         * below can be tolerated because group->polling_next_update
         * will keep updates on schedule.
         */
        if (timer_pending(&group->poll_timer))
                return;

        rcu_read_lock();

        task = rcu_dereference(group->poll_task);
        /*
         * The kworker might be NULL if psi_trigger_destroy() races with
         * psi_task_change() (the hotpath), which can't use locks.
         */
        if (likely(task))
                mod_timer(&group->poll_timer, jiffies + delay);

        rcu_read_unlock();
}
static void psi_poll_work(struct psi_group *group)
{
        u32 changed_states;
        u64 now;

        mutex_lock(&group->trigger_lock);

        now = sched_clock();

        collect_percpu_times(group, PSI_POLL, &changed_states);

        if (changed_states & group->poll_states) {
                /* Initialize trigger windows when entering polling mode */
                if (now > group->polling_until)
                        init_triggers(group, now);

                /*
                 * Keep the monitor active for at least the duration of the
                 * minimum tracking window as long as monitor states are
                 * changing.
                 */
                group->polling_until = now +
                        group->poll_min_period * UPDATES_PER_WINDOW;
        }

        if (now > group->polling_until) {
                group->polling_next_update = ULLONG_MAX;
                goto out;
        }

        if (now >= group->polling_next_update)
                group->polling_next_update = update_triggers(group, now);

        psi_schedule_poll_work(group,
                nsecs_to_jiffies(group->polling_next_update - now) + 1);

out:
        mutex_unlock(&group->trigger_lock);
}
static int psi_poll_worker(void *data)
{
        struct psi_group *group = (struct psi_group *)data;

        sched_set_fifo_low(current);

        while (true) {
                wait_event_interruptible(group->poll_wait,
                                atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
                                kthread_should_stop());
                if (kthread_should_stop())
                        break;

                psi_poll_work(group);
        }
        return 0;
}
static void poll_timer_fn(struct timer_list *t)
{
        struct psi_group *group = from_timer(group, t, poll_timer);

        atomic_set(&group->poll_wakeup, 1);
        wake_up_interruptible(&group->poll_wait);
}
static void record_times(struct psi_group_cpu *groupc, int cpu)
{
        u32 delta;
        u64 now;

        now = cpu_clock(cpu);
        delta = now - groupc->state_start;
        groupc->state_start = now;

        if (groupc->state_mask & (1 << PSI_IO_SOME)) {
                groupc->times[PSI_IO_SOME] += delta;
                if (groupc->state_mask & (1 << PSI_IO_FULL))
                        groupc->times[PSI_IO_FULL] += delta;
        }

        if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
                groupc->times[PSI_MEM_SOME] += delta;
                if (groupc->state_mask & (1 << PSI_MEM_FULL))
                        groupc->times[PSI_MEM_FULL] += delta;
        }

        if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
                groupc->times[PSI_CPU_SOME] += delta;
                if (groupc->state_mask & (1 << PSI_CPU_FULL))
                        groupc->times[PSI_CPU_FULL] += delta;
        }

        if (groupc->state_mask & (1 << PSI_NONIDLE))
                groupc->times[PSI_NONIDLE] += delta;
}
static void psi_group_change(struct psi_group *group, int cpu,
                             unsigned int clear, unsigned int set,
                             bool wake_clock)
{
        struct psi_group_cpu *groupc;
        u32 state_mask = 0;
        unsigned int t, m;
        enum psi_states s;

        groupc = per_cpu_ptr(group->pcpu, cpu);

        /*
         * First we assess the aggregate resource states this CPU's
         * tasks have been in since the last change, and account any
         * SOME and FULL time these may have resulted in.
         *
         * Then we update the task counts according to the state
         * change requested through the @clear and @set bits.
         */
        write_seqcount_begin(&groupc->seq);

        record_times(groupc, cpu);

        for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
                if (!(m & (1 << t)))
                        continue;
                if (groupc->tasks[t] == 0 && !psi_bug) {
                        printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
                                        cpu, t, groupc->tasks[0],
                                        groupc->tasks[1], groupc->tasks[2],
                                        groupc->tasks[3], clear, set);
                        psi_bug = 1;
                }
                groupc->tasks[t]--;
        }

        for (t = 0; set; set &= ~(1 << t), t++)
                if (set & (1 << t))
                        groupc->tasks[t]++;

        /* Calculate state mask representing active states */
        for (s = 0; s < NR_PSI_STATES; s++) {
                if (test_state(groupc->tasks, s))
                        state_mask |= (1 << s);
        }

        /*
         * Since we care about lost potential, a memstall is FULL
         * when there are no other working tasks, but also when
         * the CPU is actively reclaiming and nothing productive
         * could run even if it were runnable. So when the current
         * task in a cgroup is in_memstall, the corresponding groupc
         * on that cpu is in PSI_MEM_FULL state.
         */
        if (groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall)
                state_mask |= (1 << PSI_MEM_FULL);

        groupc->state_mask = state_mask;

        write_seqcount_end(&groupc->seq);

        if (state_mask & group->poll_states)
                psi_schedule_poll_work(group, 1);

        if (wake_clock && !delayed_work_pending(&group->avgs_work))
                schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}
static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
        struct cgroup *cgroup = NULL;

        if (!*iter)
                cgroup = task->cgroups->dfl_cgrp;
        else if (*iter == &psi_system)
                return NULL;
        else
                cgroup = cgroup_parent(*iter);

        if (cgroup && cgroup_parent(cgroup)) {
                *iter = cgroup;
                return cgroup_psi(cgroup);
        }
#else
        if (*iter)
                return NULL;
#endif
        *iter = &psi_system;
        return &psi_system;
}
static void psi_flags_change(struct task_struct *task, int clear, int set)
{
        if (((task->psi_flags & set) ||
             (task->psi_flags & clear) != clear) &&
            !psi_bug) {
                printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
                                task->pid, task->comm, task_cpu(task),
                                task->psi_flags, clear, set);
                psi_bug = 1;
        }

        task->psi_flags &= ~clear;
        task->psi_flags |= set;
}
void psi_task_change(struct task_struct *task, int clear, int set)
{
        int cpu = task_cpu(task);
        struct psi_group *group;
        bool wake_clock = true;
        void *iter = NULL;

        if (!task->pid)
                return;

        psi_flags_change(task, clear, set);

        /*
         * Periodic aggregation shuts off if there is a period of no
         * task changes, so we wake it back up if necessary. However,
         * don't do this if the task change is the aggregation worker
         * itself going to sleep, or we'll ping-pong forever.
         */
        if (unlikely((clear & TSK_RUNNING) &&
                     (task->flags & PF_WQ_WORKER) &&
                     wq_worker_last_func(task) == psi_avgs_work))
                wake_clock = false;

        while ((group = iterate_groups(task, &iter)))
                psi_group_change(group, cpu, clear, set, wake_clock);
}
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
                     bool sleep)
{
        struct psi_group *group, *common = NULL;
        int cpu = task_cpu(prev);
        void *iter;

        if (next->pid) {
                bool identical_state;

                psi_flags_change(next, 0, TSK_ONCPU);
                /*
                 * When switching between tasks that have an identical
                 * runtime state, the cgroup that contains both tasks
                 * does not change: we can stop updating the tree once
                 * we reach the first common ancestor. Iterate @next's
                 * ancestors only until we encounter @prev's ONCPU.
                 */
                identical_state = prev->psi_flags == next->psi_flags;
                iter = NULL;
                while ((group = iterate_groups(next, &iter))) {
                        if (identical_state &&
                            per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
                                common = group;
                                break;
                        }

                        psi_group_change(group, cpu, 0, TSK_ONCPU, true);
                }
        }

        /*
         * If this is a voluntary sleep, dequeue will have taken care
         * of the outgoing TSK_ONCPU alongside TSK_RUNNING already. We
         * only need to deal with it during preemption.
         */
        if (sleep)
                return;

        psi_flags_change(prev, TSK_ONCPU, 0);

        iter = NULL;
        while ((group = iterate_groups(prev, &iter)) && group != common)
                psi_group_change(group, cpu, TSK_ONCPU, 0, true);
}
/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        *flags = current->in_memstall;
        if (*flags)
                return;
        /*
         * in_memstall setting & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we can
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->in_memstall = 1;
        psi_task_change(current, 0, TSK_MEMSTALL);

        rq_unlock_irq(rq, &rf);
}
/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        if (*flags)
                return;
        /*
         * in_memstall clearing & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we could
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->in_memstall = 0;
        psi_task_change(current, TSK_MEMSTALL, 0);

        rq_unlock_irq(rq, &rf);
}
#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return 0;

        cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
        if (!cgroup->psi.pcpu)
                return -ENOMEM;
        group_init(&cgroup->psi);
        return 0;
}
void psi_cgroup_free(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return;

        cancel_delayed_work_sync(&cgroup->psi.avgs_work);
        free_percpu(cgroup->psi.pcpu);
        /* All triggers must be removed by now */
        WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
}
/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
        unsigned int task_flags = 0;
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled)) {
                /*
                 * Lame to do this here, but the scheduler cannot be locked
                 * from the outside, so we move cgroups from inside sched/.
                 */
                rcu_assign_pointer(task->cgroups, to);
                return;
        }

        rq = task_rq_lock(task, &rf);

        if (task_on_rq_queued(task)) {
                task_flags = TSK_RUNNING;
                if (task_current(rq, task))
                        task_flags |= TSK_ONCPU;
        } else if (task->in_iowait)
                task_flags = TSK_IOWAIT;

        if (task->in_memstall)
                task_flags |= TSK_MEMSTALL;

        if (task_flags)
                psi_task_change(task, task_flags, 0);

        /* See comment above */
        rcu_assign_pointer(task->cgroups, to);

        if (task_flags)
                psi_task_change(task, 0, task_flags);

        task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
        int full;
        u64 now;

        if (static_branch_likely(&psi_disabled))
                return -EOPNOTSUPP;

        /* Update averages before reporting them */
        mutex_lock(&group->avgs_lock);
        now = sched_clock();
        collect_percpu_times(group, PSI_AVGS, NULL);
        if (now >= group->avg_next_update)
                group->avg_next_update = update_averages(group, now);
        mutex_unlock(&group->avgs_lock);

        for (full = 0; full < 2; full++) {
                unsigned long avg[3];
                u64 total;
                int w;

                for (w = 0; w < 3; w++)
                        avg[w] = group->avg[res * 2 + full][w];
                total = div_u64(group->total[PSI_AVGS][res * 2 + full],
                                NSEC_PER_USEC);

                seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
                           full ? "full" : "some",
                           LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
                           LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
                           LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
                           total);
        }

        return 0;
}
static int psi_io_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_cpu_show, NULL);
}
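/*
 * Trigger format, as documented in Documentation/accounting/psi.rst:
 * writing "some 150000 1000000" into a pressure file arms a trigger
 * that fires when more than 150ms of SOME stall time accumulates
 * within any 1s window.
 */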
struct psi_trigger *psi_trigger_create(struct psi_group *group,
                        char *buf, size_t nbytes, enum psi_res res)
{
        struct psi_trigger *t;
        enum psi_states state;
        u32 threshold_us;
        u32 window_us;

        if (static_branch_likely(&psi_disabled))
                return ERR_PTR(-EOPNOTSUPP);

        if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
                state = PSI_IO_SOME + res * 2;
        else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
                state = PSI_IO_FULL + res * 2;
        else
                return ERR_PTR(-EINVAL);

        if (state >= PSI_NONIDLE)
                return ERR_PTR(-EINVAL);

        if (window_us < WINDOW_MIN_US ||
            window_us > WINDOW_MAX_US)
                return ERR_PTR(-EINVAL);

        /* Check threshold */
        if (threshold_us == 0 || threshold_us > window_us)
                return ERR_PTR(-EINVAL);

        t = kmalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return ERR_PTR(-ENOMEM);

        t->group = group;
        t->state = state;
        t->threshold = threshold_us * NSEC_PER_USEC;
        t->win.size = window_us * NSEC_PER_USEC;
        window_reset(&t->win, 0, 0, 0);

        t->event = 0;
        t->last_event_time = 0;
        init_waitqueue_head(&t->event_wait);
        kref_init(&t->refcount);

        mutex_lock(&group->trigger_lock);

        if (!rcu_access_pointer(group->poll_task)) {
                struct task_struct *task;

                task = kthread_create(psi_poll_worker, group, "psimon");
                if (IS_ERR(task)) {
                        kfree(t);
                        mutex_unlock(&group->trigger_lock);
                        return ERR_CAST(task);
                }
                atomic_set(&group->poll_wakeup, 0);
                init_waitqueue_head(&group->poll_wait);
                wake_up_process(task);
                timer_setup(&group->poll_timer, poll_timer_fn, 0);
                rcu_assign_pointer(group->poll_task, task);
        }

        list_add(&t->node, &group->triggers);
        group->poll_min_period = min(group->poll_min_period,
                div_u64(t->win.size, UPDATES_PER_WINDOW));
        group->nr_triggers[t->state]++;
        group->poll_states |= (1 << t->state);

        mutex_unlock(&group->trigger_lock);

        return t;
}
static void psi_trigger_destroy(struct kref *ref)
{
        struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
        struct psi_group *group = t->group;
        struct task_struct *task_to_destroy = NULL;

        if (static_branch_likely(&psi_disabled))
                return;

        /*
         * Wake up waiters to stop polling. This can happen if a cgroup
         * is deleted from under a polling process.
         */
        wake_up_interruptible(&t->event_wait);

        mutex_lock(&group->trigger_lock);

        if (!list_empty(&t->node)) {
                struct psi_trigger *tmp;
                u64 period = ULLONG_MAX;

                list_del(&t->node);
                group->nr_triggers[t->state]--;
                if (!group->nr_triggers[t->state])
                        group->poll_states &= ~(1 << t->state);
                /* reset min update period for the remaining triggers */
                list_for_each_entry(tmp, &group->triggers, node)
                        period = min(period, div_u64(tmp->win.size,
                                        UPDATES_PER_WINDOW));
                group->poll_min_period = period;
                /* Destroy poll_task when the last trigger is destroyed */
                if (group->poll_states == 0) {
                        group->polling_until = 0;
                        task_to_destroy = rcu_dereference_protected(
                                        group->poll_task,
                                        lockdep_is_held(&group->trigger_lock));
                        rcu_assign_pointer(group->poll_task, NULL);
                }
        }

        mutex_unlock(&group->trigger_lock);

        /*
         * Wait for both *trigger_ptr from psi_trigger_replace and
         * poll_task RCUs to complete their read-side critical sections
         * before destroying the trigger and optionally the poll_task.
         */
        synchronize_rcu();
        /*
         * Destroy the kworker after releasing trigger_lock to prevent a
         * deadlock while waiting for psi_poll_work to acquire trigger_lock.
         */
        if (task_to_destroy) {
                /*
                 * After the RCU grace period has expired, the worker
                 * can no longer be found through group->poll_task.
                 * But it might have been already scheduled before
                 * that - deschedule it cleanly before destroying it.
                 */
                del_timer_sync(&group->poll_timer);
                kthread_stop(task_to_destroy);
        }
        kfree(t);
}
void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
{
        struct psi_trigger *old = *trigger_ptr;

        if (static_branch_likely(&psi_disabled))
                return;

        rcu_assign_pointer(*trigger_ptr, new);
        if (old)
                kref_put(&old->refcount, psi_trigger_destroy);
}
__poll_t psi_trigger_poll(void **trigger_ptr,
                          struct file *file, poll_table *wait)
{
        __poll_t ret = DEFAULT_POLLMASK;
        struct psi_trigger *t;

        if (static_branch_likely(&psi_disabled))
                return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

        rcu_read_lock();

        t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
        if (!t) {
                rcu_read_unlock();
                return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
        }
        kref_get(&t->refcount);

        rcu_read_unlock();

        poll_wait(file, &t->event_wait, wait);

        if (cmpxchg(&t->event, 1, 0) == 1)
                ret |= EPOLLPRI;

        kref_put(&t->refcount, psi_trigger_destroy);

        return ret;
}
static ssize_t psi_write(struct file *file, const char __user *user_buf,
                         size_t nbytes, enum psi_res res)
{
        char buf[32];
        size_t buf_size;
        struct seq_file *seq;
        struct psi_trigger *new;

        if (static_branch_likely(&psi_disabled))
                return -EOPNOTSUPP;

        if (!nbytes)
                return -EINVAL;

        buf_size = min(nbytes, sizeof(buf));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size - 1] = '\0';

        new = psi_trigger_create(&psi_system, buf, nbytes, res);
        if (IS_ERR(new))
                return PTR_ERR(new);

        seq = file->private_data;
        /* Take seq->lock to protect seq->private from concurrent writes */
        mutex_lock(&seq->lock);
        psi_trigger_replace(&seq->private, new);
        mutex_unlock(&seq->lock);

        return nbytes;
}
static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
                            size_t nbytes, loff_t *ppos)
{
        return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
                                size_t nbytes, loff_t *ppos)
{
        return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
                             size_t nbytes, loff_t *ppos)
{
        return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
        struct seq_file *seq = file->private_data;

        return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;

        psi_trigger_replace(&seq->private, NULL);
        return single_release(inode, file);
}
static const struct proc_ops psi_io_proc_ops = {
        .proc_open	= psi_io_open,
        .proc_read	= seq_read,
        .proc_lseek	= seq_lseek,
        .proc_write	= psi_io_write,
        .proc_poll	= psi_fop_poll,
        .proc_release	= psi_fop_release,
};

static const struct proc_ops psi_memory_proc_ops = {
        .proc_open	= psi_memory_open,
        .proc_read	= seq_read,
        .proc_lseek	= seq_lseek,
        .proc_write	= psi_memory_write,
        .proc_poll	= psi_fop_poll,
        .proc_release	= psi_fop_release,
};

static const struct proc_ops psi_cpu_proc_ops = {
        .proc_open	= psi_cpu_open,
        .proc_read	= seq_read,
        .proc_lseek	= seq_lseek,
        .proc_write	= psi_cpu_write,
        .proc_poll	= psi_fop_poll,
        .proc_release	= psi_fop_release,
};
static int __init psi_proc_init(void)
{
        if (psi_enable) {
                proc_mkdir("pressure", NULL);
                proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
                proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
                proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
        }
        return 0;
}
module_init(psi_proc_init);