// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple CPU accounting cgroup controller
 */
#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in other CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to wrong
 * task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
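/*
 * Note: struct irqtime (declared in kernel/sched/sched.h) carries the fields
 * used below: a running total, a per-tick delta consumed by
 * irqtime_tick_accounted(), the timestamp of the last irq entry and a
 * u64_stats_sync for tear-free readers.
 */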
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 0;
}

static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
                                  enum cpu_usage_stat idx)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        u64_stats_update_begin(&irqtime->sync);
        cpustat[idx] += delta;
        irqtime->total += delta;
        irqtime->tick_delta += delta;
        u64_stats_update_end(&irqtime->sync);
}
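/*
 * u64_stats_update_begin()/end() make the 64-bit counters above readable
 * without tearing on 32-bit machines (they wrap a seqcount there and compile
 * away on 64-bit); readers use u64_stats_fetch_begin()/retry().
 */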
/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
        struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
        s64 delta;
        int cpu;

        if (!sched_clock_irqtime)
                return;

        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
        irqtime->irq_start_time += delta;

        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to the ksoftirqd thread
         * in that case, so as not to confuse the scheduler with a special task
         * that does not consume any time but still wants to run.
         */
        if (hardirq_count())
                irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
static u64 irqtime_tick_accounted(u64 maxtime)
{
        struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
        u64 delta;

        delta = min(irqtime->tick_delta, maxtime);
        irqtime->tick_delta -= delta;

        return delta;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime     (0)

static u64 irqtime_tick_accounted(u64 dummy)
{
        return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
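/*
 * Without CONFIG_IRQ_TIME_ACCOUNTING the stub above contributes nothing, so
 * account_other_time() below only ever subtracts steal time.
 */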
static inline void task_group_account_field(struct task_struct *p, int index,
                                            u64 tmp)
{
        /*
         * Since all updates are sure to touch the root cgroup, we
         * get ourselves ahead and touch it first. If the root cgroup
         * is the only cgroup, then nothing else should be necessary.
         */
        __this_cpu_add(kernel_cpustat.cpustat[index], tmp);

        cgroup_account_cputime_field(p, index, tmp);
}
/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
        int index;

        /* Add user time to process. */
        p->utime += cputime;
        account_group_user_time(p, cputime);

        index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

        /* Add user time to cpustat. */
        task_group_account_field(p, index, cputime);

        /* Account for user time used */
        acct_account_cputime(p);
}

/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        /* Add guest time to process. */
        p->utime += cputime;
        account_group_user_time(p, cputime);
        p->gtime += cputime;

        /* Add guest time to cpustat. */
        if (task_nice(p) > 0) {
                cpustat[CPUTIME_NICE] += cputime;
                cpustat[CPUTIME_GUEST_NICE] += cputime;
        } else {
                cpustat[CPUTIME_USER] += cputime;
                cpustat[CPUTIME_GUEST] += cputime;
        }
}
/*
 * Account system CPU time to a process and desired cpustat field
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @index: index of the cpustat field that has to be updated
 */
void account_system_index_time(struct task_struct *p,
                               u64 cputime, enum cpu_usage_stat index)
{
        /* Add system time to process. */
        p->stime += cputime;
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        task_group_account_field(p, index, cputime);

        /* Account for system time used */
        acct_account_cputime(p);
}

/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
        int index;

        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime);
                return;
        }

        if (hardirq_count() - hardirq_offset)
                index = CPUTIME_IRQ;
        else if (in_serving_softirq())
                index = CPUTIME_SOFTIRQ;
        else
                index = CPUTIME_SYSTEM;

        account_system_index_time(p, cputime, index);
}
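/*
 * Example: the tick path below calls account_system_time(p, HARDIRQ_OFFSET, ...)
 * so that the tick interrupt itself is not misclassified as irq time; only a
 * nested hardirq then makes hardirq_count() - hardirq_offset non-zero and
 * selects CPUTIME_IRQ.
 */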
/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        cpustat[CPUTIME_STEAL] += cputime;
}

/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        struct rq *rq = this_rq();

        if (atomic_read(&rq->nr_iowait) > 0)
                cpustat[CPUTIME_IOWAIT] += cputime;
        else
                cpustat[CPUTIME_IDLE] += cputime;
}
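/*
 * Idle time is charged to CPUTIME_IOWAIT rather than CPUTIME_IDLE while at
 * least one task queued on this runqueue is blocked waiting for I/O.
 */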
/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                u64 steal;

                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;
                steal = min(steal, maxtime);
                account_steal_time(steal);
                this_rq()->prev_steal_time += steal;

                return steal;
        }
#endif
        return 0;
}

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
        u64 accounted;

        lockdep_assert_irqs_disabled();

        accounted = steal_account_process_time(max);

        if (accounted < max)
                accounted += irqtime_tick_accounted(max - accounted);

        return accounted;
}
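/*
 * Example: if 3 ticks' worth of @max is passed in but 2 ticks' worth was
 * stolen by the hypervisor, steal_account_process_time() consumes those 2
 * ticks first and irqtime_tick_accounted() may only claim what remains.
 */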
#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
        return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
        u64 ns;
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(t, &rf);
        ns = t->se.sum_exec_runtime;
        task_rq_unlock(rq, t, &rf);

        return ns;
}
#endif
/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct signal_struct *sig = tsk->signal;
        u64 utime, stime;
        struct task_struct *t;
        unsigned int seq, nextseq;
        unsigned long flags;

        /*
         * Update current task runtime to account pending time since last
         * scheduler action or thread_group_cputime() call. This thread group
         * might have other running tasks on different CPUs, but updating
         * their runtime can affect syscall performance, so we skip accounting
         * those pending times and rely only on values updated on tick or
         * other scheduler action.
         */
        if (same_thread_group(current, tsk))
                (void) task_sched_runtime(current);

        rcu_read_lock();
        /* Attempt a lockless read on the first round. */
        nextseq = 0;
        do {
                seq = nextseq;
                flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
                times->utime = sig->utime;
                times->stime = sig->stime;
                times->sum_exec_runtime = sig->sum_sched_runtime;

                for_each_thread(tsk, t) {
                        task_cputime(t, &utime, &stime);
                        times->utime += utime;
                        times->stime += stime;
                        times->sum_exec_runtime += read_sum_exec_runtime(t);
                }
                /* If lockless access failed, take the lock. */
                nextseq = 1;
        } while (need_seqretry(&sig->stats_lock, seq));
        done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
        rcu_read_unlock();
}
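/*
 * The loop above first does a lockless pass under the stats_lock seqcount;
 * only if a concurrent writer is detected does the retry take the lock
 * (read_seqbegin_or_lock_irqsave() with an odd sequence number).
 */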
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the CPU time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there is
 * no timer going off while we are in hardirq context and hence we may never
 * get an opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq/softirq
 * time, as those no longer count in the task's exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         int ticks)
{
        u64 other, cputime = TICK_NSEC * ticks;

        /*
         * When returning from idle, many ticks can get accounted at
         * once, including some ticks of steal, irq, and softirq time.
         * Subtract those ticks from the amount of time accounted to
         * idle, or potentially user or system time. Due to rounding,
         * other time can exceed ticks occasionally.
         */
        other = account_other_time(ULONG_MAX);
        if (other >= cputime)
                return;

        cputime -= other;

        if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time does not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
                account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime);
        } else if (p == this_rq()->idle) {
                account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime);
        } else {
                account_system_index_time(p, cputime, CPUTIME_SYSTEM);
        }
}

static void irqtime_account_idle_ticks(int ticks)
{
        irqtime_account_process_tick(current, 0, ticks);
}
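/*
 * Example: returning from a long idle period with ticks == 5 while 2 ticks'
 * worth of steal/irq time is pending leaves 3 ticks * TICK_NSEC to be charged
 * to idle (or to the current task) by the demultiplexing above.
 */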
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                vtime_account_kernel(prev);

        vtime_flush(prev);
        arch_vtime_task_switch(prev);
}
# endif

void vtime_account_irq(struct task_struct *tsk)
{
        if (hardirq_count()) {
                vtime_account_hardirq(tsk);
        } else if (in_serving_softirq()) {
                vtime_account_softirq(tsk);
        } else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
                   is_idle_task(tsk)) {
                vtime_account_idle(tsk);
        } else {
                vtime_account_kernel(tsk);
        }
}

void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
                    u64 *ut, u64 *st)
{
        *ut = curr->utime;
        *st = curr->stime;
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        *ut = p->utime;
        *st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);

        *ut = cputime.utime;
        *st = cputime.stime;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
        u64 cputime, steal;

        if (vtime_accounting_enabled_this_cpu())
                return;

        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, 1);
                return;
        }

        cputime = TICK_NSEC;
        steal = steal_account_process_time(ULONG_MAX);

        if (steal >= cputime)
                return;

        cputime -= steal;

        if (user_tick)
                account_user_time(p, cputime);
        else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, cputime);
        else
                account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
        u64 cputime, steal;

        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }

        cputime = ticks * TICK_NSEC;
        steal = steal_account_process_time(ULONG_MAX);

        if (steal >= cputime)
                return;

        cputime -= steal;
        account_idle_time(cputime);
}
/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer. Depending on these
 * circumstances, the number of these interrupts may be over- or
 * under-optimistic, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
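/*
 * Worked example: with rtime = 10ms of scheduler runtime and tick samples of
 * stime = 2 ticks, utime = 3 ticks, stime is scaled to
 * 10ms * 2 / (2 + 3) = 4ms and utime becomes rtime - stime = 6ms.
 */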
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
                    u64 *ut, u64 *st)
{
        u64 rtime, stime, utime;
        unsigned long flags;

        /* Serialize concurrent callers such that we can honour our guarantees */
        raw_spin_lock_irqsave(&prev->lock, flags);
        rtime = curr->sum_exec_runtime;

        /*
         * This is possible under two circumstances:
         *  - rtime isn't monotonic after all (a bug);
         *  - we got reordered by the lock.
         *
         * In both cases this acts as a filter such that the rest of the code
         * can assume it is monotonic regardless of anything else.
         */
        if (prev->stime + prev->utime >= rtime)
                goto out;

        stime = curr->stime;
        utime = curr->utime;

        /*
         * If either stime or utime are 0, assume all runtime is userspace.
         * Once a task gets some ticks, the monotonicity code at 'update:'
         * will ensure things converge to the observed ratio.
         */
        if (stime == 0) {
                utime = rtime;
                goto update;
        }

        if (utime == 0) {
                stime = rtime;
                goto update;
        }

        stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);

update:
        /*
         * Make sure stime doesn't go backwards; this preserves monotonicity
         * for utime because rtime is monotonic.
         *
         *  utime_i+1 = rtime_i+1 - stime_i
         *            = rtime_i+1 - (rtime_i - utime_i)
         *            = (rtime_i+1 - rtime_i) + utime_i
         *            >= utime_i
         */
        if (stime < prev->stime)
                stime = prev->stime;
        utime = rtime - stime;

        /*
         * Make sure utime doesn't go backwards; this still preserves
         * monotonicity for stime, analogous argument to above.
         */
        if (utime < prev->utime) {
                utime = prev->utime;
                stime = rtime - utime;
        }

        prev->stime = stime;
        prev->utime = utime;
out:
        *ut = prev->utime;
        *st = prev->stime;
        raw_spin_unlock_irqrestore(&prev->lock, flags);
}
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };

        task_cputime(p, &cputime.utime, &cputime.stime);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}

#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
        unsigned long long clock;

        clock = sched_clock();
        if (clock < vtime->starttime)
                return 0;

        return clock - vtime->starttime;
}

static u64 get_vtime_delta(struct vtime *vtime)
{
        u64 delta = vtime_delta(vtime);
        u64 other;

        /*
         * Unlike tick based timing, vtime based timing never has lost
         * ticks, and there is no need for steal time accounting to make
         * up for lost ticks. Vtime accounts a rounded version of actual
         * elapsed time. Limit account_other_time to prevent rounding
         * errors from causing elapsed vtime to go negative.
         */
        other = account_other_time(delta);
        WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
        vtime->starttime += delta;

        return delta - other;
}
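/*
 * Example: if 4ms of vtime elapsed but 1ms of it was spent in irq/steal
 * context, account_other_time() claims that 1ms and only 3ms is charged to
 * the current state (system, user or guest) by the callers below.
 */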
static void vtime_account_system(struct task_struct *tsk,
                                 struct vtime *vtime)
{
        vtime->stime += get_vtime_delta(vtime);
        if (vtime->stime >= TICK_NSEC) {
                account_system_time(tsk, irq_count(), vtime->stime);
                vtime->stime = 0;
        }
}

static void vtime_account_guest(struct task_struct *tsk,
                                struct vtime *vtime)
{
        vtime->gtime += get_vtime_delta(vtime);
        if (vtime->gtime >= TICK_NSEC) {
                account_guest_time(tsk, vtime->gtime);
                vtime->gtime = 0;
        }
}

static void __vtime_account_kernel(struct task_struct *tsk,
                                   struct vtime *vtime)
{
        /* We might have scheduled out from guest path */
        if (vtime->state == VTIME_GUEST)
                vtime_account_guest(tsk, vtime);
        else
                vtime_account_system(tsk, vtime);
}

void vtime_account_kernel(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        if (!vtime_delta(vtime))
                return;

        write_seqcount_begin(&vtime->seqcount);
        __vtime_account_kernel(tsk, vtime);
        write_seqcount_end(&vtime->seqcount);
}
void vtime_user_enter(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime_account_system(tsk, vtime);
        vtime->state = VTIME_USER;
        write_seqcount_end(&vtime->seqcount);
}

void vtime_user_exit(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime->utime += get_vtime_delta(vtime);
        if (vtime->utime >= TICK_NSEC) {
                account_user_time(tsk, vtime->utime);
                vtime->utime = 0;
        }
        vtime->state = VTIME_SYS;
        write_seqcount_end(&vtime->seqcount);
}
void vtime_guest_enter(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;
        /*
         * The flags must be updated under the lock with
         * the vtime_starttime flush and update.
         * That enforces the right ordering and update sequence
         * synchronization against the reader (task_gtime())
         * that can thus safely catch up with a tickless delta.
         */
        write_seqcount_begin(&vtime->seqcount);
        vtime_account_system(tsk, vtime);
        tsk->flags |= PF_VCPU;
        vtime->state = VTIME_GUEST;
        write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
        struct vtime *vtime = &tsk->vtime;

        write_seqcount_begin(&vtime->seqcount);
        vtime_account_guest(tsk, vtime);
        tsk->flags &= ~PF_VCPU;
        vtime->state = VTIME_SYS;
        write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
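/*
 * Note: the guest enter/exit hooks are expected to be called from the
 * virtualization guest entry/exit path (an assumption about callers outside
 * this file); what matters here is that PF_VCPU and vtime->state flip inside
 * the same seqcount write section so task_gtime() sees a consistent pair.
 */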
void vtime_account_idle(struct task_struct *tsk)
{
        account_idle_time(get_vtime_delta(&tsk->vtime));
}

void vtime_task_switch_generic(struct task_struct *prev)
{
        struct vtime *vtime = &prev->vtime;

        write_seqcount_begin(&vtime->seqcount);
        if (vtime->state == VTIME_IDLE)
                vtime_account_idle(prev);
        else
                __vtime_account_kernel(prev, vtime);
        vtime->state = VTIME_INACTIVE;
        vtime->cpu = -1;
        write_seqcount_end(&vtime->seqcount);

        vtime = &current->vtime;

        write_seqcount_begin(&vtime->seqcount);
        if (is_idle_task(current))
                vtime->state = VTIME_IDLE;
        else if (current->flags & PF_VCPU)
                vtime->state = VTIME_GUEST;
        else
                vtime->state = VTIME_SYS;
        vtime->starttime = sched_clock();
        vtime->cpu = smp_processor_id();
        write_seqcount_end(&vtime->seqcount);
}
void vtime_init_idle(struct task_struct *t, int cpu)
{
        struct vtime *vtime = &t->vtime;
        unsigned long flags;

        local_irq_save(flags);
        write_seqcount_begin(&vtime->seqcount);
        vtime->state = VTIME_IDLE;
        vtime->starttime = sched_clock();
        vtime->cpu = cpu;
        write_seqcount_end(&vtime->seqcount);
        local_irq_restore(flags);
}
u64 task_gtime(struct task_struct *t)
{
        struct vtime *vtime = &t->vtime;
        unsigned int seq;
        u64 gtime;

        if (!vtime_accounting_enabled())
                return t->gtime;

        do {
                seq = read_seqcount_begin(&vtime->seqcount);

                gtime = t->gtime;
                if (vtime->state == VTIME_GUEST)
                        gtime += vtime->gtime + vtime_delta(vtime);

        } while (read_seqcount_retry(&vtime->seqcount, seq));

        return gtime;
}
/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
        struct vtime *vtime = &t->vtime;
        unsigned int seq;
        u64 delta;

        if (!vtime_accounting_enabled()) {
                *utime = t->utime;
                *stime = t->stime;
                return;
        }

        do {
                seq = read_seqcount_begin(&vtime->seqcount);

                *utime = t->utime;
                *stime = t->stime;

                /* Task is sleeping or idle, nothing to add */
                if (vtime->state < VTIME_SYS)
                        continue;

                delta = vtime_delta(vtime);

                /*
                 * Task runs either in user (including guest) or kernel space,
                 * add pending nohz time to the right place.
                 */
                if (vtime->state == VTIME_SYS)
                        *stime += vtime->stime + delta;
                else
                        *utime += vtime->utime + delta;
        } while (read_seqcount_retry(&vtime->seqcount, seq));
}
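/*
 * Note: any non-VTIME_SYS running state (VTIME_USER or VTIME_GUEST) folds its
 * pending delta into *utime above, matching the tick path where guest time is
 * also accounted as user time.
 */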
static int vtime_state_fetch(struct vtime *vtime, int cpu)
{
        int state = READ_ONCE(vtime->state);

        /*
         * We raced against a context switch, fetch the
         * kcpustat task again.
         */
        if (vtime->cpu != cpu && vtime->cpu != -1)
                return -EAGAIN;

        /*
         * Two possible things here:
         * 1) We are seeing the scheduling out task (prev) or any past one.
         * 2) We are seeing the scheduling in task (next) but it hasn't
         *    passed through vtime_task_switch() yet so the pending
         *    cputime of the prev task may not be flushed yet.
         *
         * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
         */
        if (state == VTIME_INACTIVE)
                return -EAGAIN;

        return state;
}
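/*
 * Callers treat a negative return as "try again": both kcpustat_field() and
 * kcpustat_cpu_fetch() below spin with cpu_relax() until the target's vtime
 * state is safe to sample.
 */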
static u64 kcpustat_user_vtime(struct vtime *vtime)
{
        if (vtime->state == VTIME_USER)
                return vtime->utime + vtime_delta(vtime);
        else if (vtime->state == VTIME_GUEST)
                return vtime->gtime + vtime_delta(vtime);
        return 0;
}

static int kcpustat_field_vtime(u64 *cpustat,
                                struct task_struct *tsk,
                                enum cpu_usage_stat usage,
                                int cpu, u64 *val)
{
        struct vtime *vtime = &tsk->vtime;
        unsigned int seq;

        do {
                int state;

                seq = read_seqcount_begin(&vtime->seqcount);

                state = vtime_state_fetch(vtime, cpu);
                if (state < 0)
                        return state;

                *val = cpustat[usage];

                /*
                 * Nice vs unnice cputime accounting may be inaccurate if
                 * the nice value has changed since the last vtime update.
                 * But a proper fix would involve interrupting the target on
                 * nice updates, which is a no go on nohz_full (although the
                 * scheduler may still interrupt the target if rescheduling
                 * is needed...)
                 */
                switch (usage) {
                case CPUTIME_SYSTEM:
                        if (state == VTIME_SYS)
                                *val += vtime->stime + vtime_delta(vtime);
                        break;
                case CPUTIME_USER:
                        if (task_nice(tsk) <= 0)
                                *val += kcpustat_user_vtime(vtime);
                        break;
                case CPUTIME_NICE:
                        if (task_nice(tsk) > 0)
                                *val += kcpustat_user_vtime(vtime);
                        break;
                case CPUTIME_GUEST:
                        if (state == VTIME_GUEST && task_nice(tsk) <= 0)
                                *val += vtime->gtime + vtime_delta(vtime);
                        break;
                case CPUTIME_GUEST_NICE:
                        if (state == VTIME_GUEST && task_nice(tsk) > 0)
                                *val += vtime->gtime + vtime_delta(vtime);
                        break;
                default:
                        break;
                }
        } while (read_seqcount_retry(&vtime->seqcount, seq));

        return 0;
}
u64 kcpustat_field(struct kernel_cpustat *kcpustat,
                   enum cpu_usage_stat usage, int cpu)
{
        u64 *cpustat = kcpustat->cpustat;
        u64 val = cpustat[usage];
        struct rq *rq;
        int err;

        if (!vtime_accounting_enabled_cpu(cpu))
                return val;

        rq = cpu_rq(cpu);

        for (;;) {
                struct task_struct *curr;

                rcu_read_lock();
                curr = rcu_dereference(rq->curr);
                if (WARN_ON_ONCE(!curr)) {
                        rcu_read_unlock();
                        return cpustat[usage];
                }

                err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
                rcu_read_unlock();

                if (!err)
                        return val;

                cpu_relax();
        }
}
EXPORT_SYMBOL_GPL(kcpustat_field);
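/*
 * kcpustat_field() is the single-field counterpart of kcpustat_cpu_fetch()
 * below; the assumption is that nohz_full-aware readers of kernel_cpustat
 * (e.g. /proc/stat helpers) go through these instead of reading the per-CPU
 * array directly.
 */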
static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
                                    const struct kernel_cpustat *src,
                                    struct task_struct *tsk, int cpu)
{
        struct vtime *vtime = &tsk->vtime;
        unsigned int seq;

        do {
                u64 *cpustat;
                u64 delta;
                int state;

                seq = read_seqcount_begin(&vtime->seqcount);

                state = vtime_state_fetch(vtime, cpu);
                if (state < 0)
                        return state;

                *dst = *src;
                cpustat = dst->cpustat;

                /* Task is sleeping, dead or idle, nothing to add */
                if (state < VTIME_SYS)
                        continue;

                delta = vtime_delta(vtime);

                /*
                 * Task runs either in user (including guest) or kernel space,
                 * add pending nohz time to the right place.
                 */
                if (state == VTIME_SYS) {
                        cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
                } else if (state == VTIME_USER) {
                        if (task_nice(tsk) > 0)
                                cpustat[CPUTIME_NICE] += vtime->utime + delta;
                        else
                                cpustat[CPUTIME_USER] += vtime->utime + delta;
                } else {
                        WARN_ON_ONCE(state != VTIME_GUEST);
                        if (task_nice(tsk) > 0) {
                                cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
                                cpustat[CPUTIME_NICE] += vtime->gtime + delta;
                        } else {
                                cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
                                cpustat[CPUTIME_USER] += vtime->gtime + delta;
                        }
                }
        } while (read_seqcount_retry(&vtime->seqcount, seq));

        return 0;
}
void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
        const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
        struct rq *rq;
        int err;

        if (!vtime_accounting_enabled_cpu(cpu)) {
                *dst = *src;
                return;
        }

        rq = cpu_rq(cpu);

        for (;;) {
                struct task_struct *curr;

                rcu_read_lock();
                curr = rcu_dereference(rq->curr);
                if (WARN_ON_ONCE(!curr)) {
                        rcu_read_unlock();
                        *dst = *src;
                        return;
                }

                err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
                rcu_read_unlock();

                if (!err)
                        return;

                cpu_relax();
        }
}
EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */