arch/s390/kernel/idle.c (linux-2.6-microblaze.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * Idle functions for s390.
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/sched/cputime.h>
#include <trace/events/power.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"

static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

void enabled_wait(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long long idle_time;
	unsigned long psw_mask, flags;

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

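	/*
	 * Use the _rcuidle tracepoint variants because RCU is not watching
	 * the CPU on the idle path.
	 */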
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	local_irq_save(flags);
	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);
	local_irq_restore(flags);
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());

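	/*
	 * The seqcount lets the sysfs readers below and arch_cpu_idle_time()
	 * sample a consistent snapshot of the idle data without taking a lock.
	 */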
	/* Account time spent with enabled wait psw loaded as idle time. */
	write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(cputime_to_nsecs(idle_time));
	write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);

static ssize_t show_idle_count(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int seq;

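	/*
	 * Retry until a consistent snapshot was read. A non-zero
	 * clock_idle_enter means the CPU is currently in an enabled wait,
	 * so count that in-progress idle period as well.
	 */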
	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_count = READ_ONCE(idle->idle_count);
		if (READ_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while (read_seqcount_retry(&idle->seqcount, seq));
	return sprintf(buf, "%llu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_time = READ_ONCE(idle->idle_time);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
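	/*
	 * If the CPU is idle right now (clock_idle_enter is set), add the
	 * time of the still in-progress idle period.
	 */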
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	idle_time += in_idle;
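	/*
	 * Bit 51 of the TOD clock increments every microsecond, so shifting
	 * the accumulated TOD delta right by 12 converts it to microseconds.
	 */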
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

u64 arch_cpu_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit, in_idle;
	unsigned int seq;

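	/*
	 * Sample the idle data under the seqcount and return the nanoseconds
	 * spent in a currently in-progress idle period, or zero if the CPU
	 * is not idle.
	 */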
	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	return cputime_to_nsecs(in_idle);
}

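/*
 * The generic idle loop's enter/exit hooks need no architecture specific
 * work on s390, so they are left empty.
 */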
void arch_cpu_idle_enter(void)
{
}

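/*
 * The generic idle loop calls arch_cpu_idle() with interrupts disabled;
 * interrupts are enabled again after the enabled wait completes.
 */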
void arch_cpu_idle(void)
{
	enabled_wait();
	local_irq_enable();
}

void arch_cpu_idle_exit(void)
{
}

void arch_cpu_idle_dead(void)
{
	cpu_die();
}