/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 *			Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * (Naturally, the FULL state doesn't exist for the CPU resource.)
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
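 *
 * For example, a workload whose tasks are cumulatively delayed on IO
 * for 200ms of a 2s period has %SOME(io) = 10% over that period.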
 *
 *			Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             =  0.4%
 *	   FULL = (256 - min(257, 256)) / 256 =  0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 *			Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */

#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif

static int __init setup_psi(char *str)
{
	return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
static struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);
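
/*
 * Initialize a psi group: the per-CPU seqcounts used for lockless
 * sampling, the first average-update deadline, and the delayed work
 * that periodically folds per-CPU times into the running averages.
 */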
static void group_init(struct psi_group *group)
{
	int cpu;

	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->avg_next_update = sched_clock() + psi_period;
	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
	mutex_init(&group->avgs_lock);
}

void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		return;
	}

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}
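
/*
 * Translate a CPU's task counts into the pressure states it is in:
 * e.g. PSI_IO_SOME means at least one task is blocked on IO, while
 * PSI_IO_FULL additionally requires that no task is runnable.
 */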
static bool test_state(unsigned int *tasks, enum psi_states state)
{
	switch (state) {
	case PSI_IO_SOME:
		return tasks[NR_IOWAIT];
	case PSI_IO_FULL:
		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
	case PSI_MEM_SOME:
		return tasks[NR_MEMSTALL];
	case PSI_MEM_FULL:
		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
	case PSI_CPU_SOME:
		return tasks[NR_RUNNING] > 1;
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}
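
/*
 * Take a coherent snapshot of one CPU's state-time buckets and return
 * the per-state deltas since the previous snapshot, flagging states
 * with a non-zero delta in *pchanged_states.
 */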
static void get_recent_times(struct psi_group *group, int cpu, u32 *times,
			     u32 *pchanged_states)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	u64 now, state_start;
	enum psi_states s;
	unsigned int seq;
	u32 state_mask;

	*pchanged_states = 0;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask = groupc->state_mask;
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[s];
		groupc->times_prev[s] = times[s];

		times[s] = delta;
		if (delta)
			*pchanged_states |= (1 << s);
	}
}
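
/*
 * Fold one period's sample into the 10s/60s/300s running averages,
 * reusing the fixed-point exponential-decay math of the load average.
 */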
static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}

static void collect_percpu_times(struct psi_group *group, u32 *pchanged_states)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long nonidle_total = 0;
	u32 changed_states = 0;
	int cpu;
	int s;

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;
		u32 cpu_changed_states;

		get_recent_times(group, cpu, times,
				 &cpu_changed_states);
		changed_states |= cpu_changed_states;

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));

	if (pchanged_states)
		*pchanged_states = changed_states;
}
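
/*
 * Fold everything sampled since the last update into the running
 * averages, clamping each sample to the period length, and return
 * the time at which the next update is due.
 */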
static u64 update_averages(struct psi_group *group, u64 now)
{
	unsigned long missed_periods = 0;
	u64 expires, period;
	u64 avg_next_update;
	int s;

	expires = group->avg_next_update;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	avg_next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->avg_last_update + (missed_periods * psi_period));
	group->avg_last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[s] - group->avg_total[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->avg_total[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}

	return avg_next_update;
}
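
/*
 * Periodic aggregation worker: folds the per-CPU times into the
 * group's running averages and re-arms itself for the next period
 * as long as there is task activity to report on.
 */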
static void psi_avgs_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	u32 changed_states;
	bool nonidle;
	u64 now;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, avgs_work);

	mutex_lock(&group->avgs_lock);

	now = sched_clock();

	collect_percpu_times(group, &changed_states);
	nonidle = changed_states & (1 << PSI_NONIDLE);
	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);

	if (nonidle) {
		schedule_delayed_work(dwork, nsecs_to_jiffies(
				group->avg_next_update - now) + 1);
	}

	mutex_unlock(&group->avgs_lock);
}

static void record_times(struct psi_group_cpu *groupc, int cpu,
			 bool memstall_tick)
{
	u32 delta;
	u64 now;

	now = cpu_clock(cpu);
	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
		else if (memstall_tick) {
			u32 sample;
			/*
			 * Since we care about lost potential, a
			 * memstall is FULL when there are no other
			 * working tasks, but also when the CPU is
			 * actively reclaiming and nothing productive
			 * could run even if it were runnable.
			 *
			 * When the timer tick sees a reclaiming CPU,
			 * regardless of runnable tasks, sample a FULL
			 * tick (or less if it hasn't been a full tick
			 * since the last state change).
			 */
			sample = min(delta, (u32)jiffies_to_nsecs(1));
			groupc->times[PSI_MEM_FULL] += sample;
		}
	}

	if (groupc->state_mask & (1 << PSI_CPU_SOME))
		groupc->times[PSI_CPU_SOME] += delta;

	if (groupc->state_mask & (1 << PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;
	enum psi_states s;
	u32 state_mask = 0;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we assess the aggregate resource states this CPU's
	 * tasks have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 *
	 * Then we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 */
	write_seqcount_begin(&groupc->seq);

	record_times(groupc, cpu, false);

	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t] == 0 && !psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					clear, set);
			psi_bug = 1;
		}
		groupc->tasks[t]--;
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	/* Calculate state mask representing active states */
	for (s = 0; s < NR_PSI_STATES; s++) {
		if (test_state(groupc->tasks, s))
			state_mask |= (1 << s);
	}
	groupc->state_mask = state_mask;

	write_seqcount_end(&groupc->seq);
}
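
/*
 * Walk the psi groups that cover a task: under CONFIG_CGROUPS, the
 * task's cgroup and its ancestors on the default hierarchy, followed
 * by the system-wide group; otherwise just the system-wide group.
 */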
static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
	struct cgroup *cgroup = NULL;

	if (!*iter)
		cgroup = task->cgroups->dfl_cgrp;
	else if (*iter == &psi_system)
		return NULL;
	else
		cgroup = cgroup_parent(*iter);

	if (cgroup && cgroup_parent(cgroup)) {
		*iter = cgroup;
		return cgroup_psi(cgroup);
	}
#else
	if (*iter)
		return NULL;
#endif
	*iter = &psi_system;
	return &psi_system;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	bool wake_clock = true;
	void *iter = NULL;

	if (!task->pid)
		return;

	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, cpu,
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;

	/*
	 * Periodic aggregation shuts off if there is a period of no
	 * task changes, so we wake it back up if necessary. However,
	 * don't do this if the task change is the aggregation worker
	 * itself going to sleep, or we'll ping-pong forever.
	 */
	if (unlikely((clear & TSK_RUNNING) &&
		     (task->flags & PF_WQ_WORKER) &&
		     wq_worker_last_func(task) == psi_avgs_work))
		wake_clock = false;

	while ((group = iterate_groups(task, &iter))) {
		psi_group_change(group, cpu, clear, set);
		if (wake_clock && !delayed_work_pending(&group->avgs_work))
			schedule_delayed_work(&group->avgs_work, PSI_FREQ);
	}
}
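
/*
 * Called from the scheduler tick for a task in a memory stall:
 * samples up to one tick of MEM FULL time even though the reclaiming
 * task itself remains runnable - see record_times().
 */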
void psi_memstall_tick(struct task_struct *task, int cpu)
{
	struct psi_group *group;
	void *iter = NULL;

	while ((group = iterate_groups(task, &iter))) {
		struct psi_group_cpu *groupc;

		groupc = per_cpu_ptr(group->pcpu, cpu);
		write_seqcount_begin(&groupc->seq);
		record_times(groupc, cpu, true);
		write_seqcount_end(&groupc->seq);
	}
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->flags & PF_MEMSTALL;
	if (*flags)
		return;
	/*
	 * PF_MEMSTALL setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags |= PF_MEMSTALL;
	psi_task_change(current, 0, TSK_MEMSTALL);

	rq_unlock_irq(rq, &rf);
}

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * PF_MEMSTALL clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags &= ~PF_MEMSTALL;
	psi_task_change(current, TSK_MEMSTALL, 0);

	rq_unlock_irq(rq, &rf);
}

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return 0;

	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi.pcpu)
		return -ENOMEM;
	group_init(&cgroup->psi);
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return;

	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
	free_percpu(cgroup->psi.pcpu);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags = 0;
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	if (task_on_rq_queued(task))
		task_flags = TSK_RUNNING;
	else if (task->in_iowait)
		task_flags = TSK_IOWAIT;

	if (task->flags & PF_MEMSTALL)
		task_flags |= TSK_MEMSTALL;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */
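
/*
 * Print the pressure averages and cumulative totals for a resource,
 * one line per contention state; for memory, for example:
 *
 *	some avg10=0.22 avg60=0.17 avg300=1.11 total=927586
 *	full avg10=0.00 avg60=0.13 avg300=0.96 total=382743
 *
 * (The numbers above are illustrative, not measured values.)
 */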
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	int full;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	/* Update averages before reporting them */
	mutex_lock(&group->avgs_lock);
	now = sched_clock();
	collect_percpu_times(group, NULL);
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);
	mutex_unlock(&group->avgs_lock);

	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
		unsigned long avg[3];
		u64 total;
		int w;

		for (w = 0; w < 3; w++)
			avg[w] = group->avg[res * 2 + full][w];
		total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}

static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_cpu_show, NULL);
}

static const struct file_operations psi_io_fops = {
	.open		= psi_io_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations psi_memory_fops = {
	.open		= psi_memory_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations psi_cpu_fops = {
	.open		= psi_cpu_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
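
/*
 * Expose the system-wide pressure metrics as
 * /proc/pressure/{io,memory,cpu}.
 */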
static int __init psi_proc_init(void)
{
	proc_mkdir("pressure", NULL);
	proc_create("pressure/io", 0, NULL, &psi_io_fops);
	proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
	proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
	return 0;
}
module_init(psi_proc_init);