// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for s390x - CPU-measurement Counter Sets
 *
 * Copyright IBM Corp. 2019, 2021
 * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
 *	      Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT	"cpum_cf_diag"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/processor.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mcf.h>
#include <asm/timex.h>
#include <asm/debug.h>

#include <asm/hwctrset.h>
#define	CF_DIAG_CTRSET_DEF	0xfeef	/* Counter set header mark */
					/* interval in seconds */
static unsigned int cf_diag_cpu_speed;
static debug_info_t *cf_diag_dbg;
struct cf_diag_csd {			/* Counter set data per CPU */
	size_t used;			/* Bytes used in data/start */
	unsigned char start[PAGE_SIZE];	/* Counter set at event start */
	unsigned char data[PAGE_SIZE];	/* Counter set at event delete */
	unsigned int sets;		/* # Counter set saved in data */
};
static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
/* Counter sets are stored as data stream in a page sized memory buffer and
 * exported to user space via raw data attached to the event sample data.
 * Each counter set starts with an eight byte header consisting of:
 * - a two byte eye catcher (0xfeef)
 * - a one byte counter set number
 * - a two byte counter set size (indicates the number of counters in this set)
 * - a three byte reserved value (must be zero) to make the header the same
 *   size as a counter value.
 * All counter values are eight bytes in size.
 *
 * All counter sets are followed by a 64 byte trailer.
 * The trailer consists of a:
 * - flag field indicating valid fields when the corresponding bit is set
 * - the counter facility first and second version number
 * - the CPU speed if nonzero
 * - the time stamp the counter sets have been collected
 * - the time of day (TOD) base value
 * - the machine type.
 *
 * The counter sets are saved when the process is prepared to be executed on a
 * CPU and saved again when the process is going to be removed from a CPU.
 * The difference of both counter sets is calculated and stored in the event
 * sample data area.
 */
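
/*
 * Illustrative sketch (not part of the driver, compiled out): how a
 * user-space consumer might walk this stream once it arrives as
 * PERF_SAMPLE_RAW data.  The layouts mirror struct cf_ctrset_entry and
 * struct cf_trailer_entry below; the ex_ name is hypothetical.
 */
#if 0	/* example only */
static void ex_walk_ctrset_stream(const unsigned char *buf, size_t len)
{
	size_t offset = 0;

	while (offset + sizeof(struct cf_ctrset_entry) <= len) {
		const struct cf_ctrset_entry *hdr = (const void *)(buf + offset);

		if (hdr->def != CF_DIAG_CTRSET_DEF)
			break;		/* No eye catcher: trailer reached */
		printf("set %u: %u counters\n", hdr->set, hdr->ctr);
		offset += sizeof(*hdr) + hdr->ctr * sizeof(__u64);
	}
	/* The final 64 bytes at offset are the struct cf_trailer_entry. */
}
#endif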
struct cf_ctrset_entry {	/* CPU-M CF counter set entry (8 byte) */
	unsigned int def:16;	/* 0-15  Data Entry Format */
	unsigned int set:16;	/* 16-31 Counter set identifier */
	unsigned int ctr:16;	/* 32-47 Number of stored counters */
	unsigned int res1:16;	/* 48-63 Reserved */
};
struct cf_trailer_entry {	/* CPU-M CF_DIAG trailer (64 byte) */
	/* 0 - 7 */
	union {
		struct {
			unsigned int clock_base:1;	/* TOD clock base set */
			unsigned int speed:1;		/* CPU speed set */
			/* Measurement alerts */
			unsigned int mtda:1;	/* Loss of MT ctr. data alert */
			unsigned int caca:1;	/* Counter auth. change alert */
			unsigned int lcda:1;	/* Loss of counter data alert */
		};
		unsigned long flags;	/* 0-63 All indicators */
	};
	/* 8 - 15 */
	unsigned int cfvn:16;			/* 64-79 Ctr First Version */
	unsigned int csvn:16;			/* 80-95 Ctr Second Version */
	unsigned int cpu_speed:32;		/* 96-127 CPU speed */
	/* 16 - 23 */
	unsigned long timestamp;		/* 128-191 Timestamp (TOD) */
	/* 24 - 55 */
	union {
		struct {
			unsigned long progusage1;
			unsigned long progusage2;
			unsigned long progusage3;
			unsigned long tod_base;
		};
		unsigned long progusage[4];
	};
	/* 56 - 63 */
	unsigned int mach_type:16;		/* Machine type */
	unsigned int res1:16;			/* Reserved */
	unsigned int res2:32;			/* Reserved */
};
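
/*
 * A minimal sketch (compiled out): the layouts above are only sound if
 * the header is exactly one counter value (8 bytes) and the trailer is
 * the advertised 64 bytes.  A build-time check could look like this.
 */
#if 0	/* example only */
static inline void ex_check_layout(void)
{
	BUILD_BUG_ON(sizeof(struct cf_ctrset_entry) != sizeof(u64));
	BUILD_BUG_ON(sizeof(struct cf_trailer_entry) != 64);
}
#endif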
/* Create the trailer data at the end of a page. */
static void cf_diag_trailer(struct cf_trailer_entry *te)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cpuid cpuid;

	te->cfvn = cpuhw->info.cfvn;		/* Counter version numbers */
	te->csvn = cpuhw->info.csvn;

	get_cpu_id(&cpuid);			/* Machine type */
	te->mach_type = cpuid.machine;
	te->cpu_speed = cf_diag_cpu_speed;
	if (te->cpu_speed)
		te->speed = 1;			/* CPU speed valid */
	te->clock_base = 1;			/* Save clock base */
	te->tod_base = tod_clock_base.tod;
	te->timestamp = get_tod_clock_fast();
}
/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cf_diag_enable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s pmu %p cpu %d flags %#x state %#llx\n",
			    __func__, pmu, smp_processor_id(), cpuhw->flags,
			    cpuhw->state);
	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}
	cpuhw->flags |= PMU_F_ENABLED;
}
/*
 * Change the CPUMF state to inactive.
 * Disable and enable (inactive) the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cf_diag_disable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	u64 inactive;
	int err;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s pmu %p cpu %d flags %#x state %#llx\n",
			    __func__, pmu, smp_processor_id(), cpuhw->flags,
			    cpuhw->state);
	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	err = lcctl(inactive);
	if (err) {
		pr_err("Disabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}
	cpuhw->flags &= ~PMU_F_ENABLED;
}
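
/*
 * A worked sketch (compiled out) of the state word handed to lcctl():
 * bits at CPUMF_LCCTL_ENABLE_SHIFT and above carry the enable controls,
 * the low bits carry the activation controls.  cf_diag_disable() above
 * keeps the enable half and clears the activation half.
 */
#if 0	/* example only */
static inline u64 ex_inactive_state(u64 state)
{
	/* Keep enable bits, drop activation bits (low part of the word). */
	return state & ~((1UL << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
}
#endif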
/* Number of perf events counting hardware events */
static atomic_t cf_diag_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(cf_diag_reserve_mutex);

/* Release the PMU if event is the last perf event */
static void cf_diag_perf_event_destroy(struct perf_event *event)
{
	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d cf_diag_events %d\n",
			    __func__, event, smp_processor_id(),
			    atomic_read(&cf_diag_events));
	if (atomic_dec_return(&cf_diag_events) == 0)
		__kernel_cpumcf_end();
}
static int get_authctrsets(void)
{
	struct cpu_cf_events *cpuhw;
	unsigned long auth = 0;
	enum cpumf_ctr_set i;

	cpuhw = &get_cpu_var(cpu_cf_events);
	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
			auth |= cpumf_ctr_ctl[i];
	}
	put_cpu_var(cpu_cf_events);
	return auth;
}
/* Setup the event. Test for authorized counter sets and only include counter
 * sets which are authorized at the time of the setup. Including unauthorized
 * counter sets results in a specification exception (and panic).
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	int err = 0;

	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
			    event, event->cpu);

	event->hw.config = attr->config;

	/* Add all authorized counter sets to config_base. The
	 * hardware init function is either called per-cpu or just once
	 * for all CPUs (event->cpu == -1). This depends on whether
	 * counting is started for all CPUs or on a per-workload basis where
	 * the perf event moves from one CPU to another CPU.
	 * Checking the authorization on any CPU is fine as the hardware
	 * applies the same authorization settings to all CPUs.
	 */
	event->hw.config_base = get_authctrsets();

	/* No authorized counter sets, nothing to count/sample */
	if (!event->hw.config_base) {
		err = -EINVAL;
		goto out;
	}

	/* Set sample_period to indicate sampling */
	event->hw.sample_period = attr->sample_period;
	local64_set(&event->hw.period_left, event->hw.sample_period);
	event->hw.last_period = event->hw.sample_period;
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d config_base %#lx\n",
			    __func__, err, event->hw.config_base);
	return err;
}
/* Return 0 if the CPU-measurement counter facility is currently free
 * and an error otherwise.
 */
static int cf_diag_perf_event_inuse(void)
{
	int err = 0;

	if (!atomic_inc_not_zero(&cf_diag_events)) {
		mutex_lock(&cf_diag_reserve_mutex);
		if (atomic_read(&cf_diag_events) == 0 &&
		    __kernel_cpumcf_begin())
			err = -EBUSY;
		else
			err = atomic_inc_return(&cf_diag_events);
		mutex_unlock(&cf_diag_reserve_mutex);
	}
	return err;
}
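
/*
 * Usage sketch (compiled out): cf_diag_perf_event_inuse() takes the
 * reference without the mutex when the facility is already in use (fast
 * path) and only serializes the first 0 -> 1 transition against
 * cf_diag_open().  A caller, as in cf_diag_event_init() below, treats a
 * negative value as "reserved by the ioctl interface":
 */
#if 0	/* example only */
	err = cf_diag_perf_event_inuse();
	if (err < 0)		/* -EBUSY: /dev/hwctr owns the CPUMF */
		goto out;
	/* positive: reference taken, dropped via cf_diag_perf_event_destroy() */
#endif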
static int cf_diag_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	int err = -ENOENT;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d config %#llx type:%u "
			    "sample_type %#llx cf_diag_events %d\n", __func__,
			    event, event->cpu, attr->config, event->pmu->type,
			    attr->sample_type, atomic_read(&cf_diag_events));

	if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
	    event->attr.type != event->pmu->type)
		goto out;

	/* Raw events are used to access counters directly,
	 * hence do not permit excludes.
	 * This event is useless without PERF_SAMPLE_RAW to return counter set
	 * values as raw data.
	 */
	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv ||
	    !(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Initialize for using the CPU-measurement counter facility */
	err = cf_diag_perf_event_inuse();
	if (err < 0)
		goto out;
	event->destroy = cf_diag_perf_event_destroy;

	err = __hw_perf_event_init(event);
	if (unlikely(err))
		event->destroy(event);
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
	return err;
}
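
/*
 * Illustrative user-space sketch (not part of the driver, compiled out):
 * opening this PMU via perf_event_open().  The dynamic PMU type must be
 * read from /sys/bus/event_source/devices/cpum_cf_diag/type, and the
 * PERF_EVENT_CPUM_CF_DIAG config value is assumed to be known to the
 * caller; the ex_ name is hypothetical.
 */
#if 0	/* example only */
static int ex_open_cf_diag(int cpu, int pmu_type)
{
	struct perf_event_attr attr = {
		.type = pmu_type,	/* from sysfs, see above */
		.size = sizeof(attr),
		.config = PERF_EVENT_CPUM_CF_DIAG,
		.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CPU,
		.sample_period = 1,
	};

	/* pid -1, one event per CPU, no group leader, no flags */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}
#endif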
static void cf_diag_read(struct perf_event *event)
{
	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event);
}
/* Return the maximum possible counter set size (in number of 8 byte counters)
 * depending on type and model number.
 */
static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset,
				  struct cpumf_ctr_info *info)
{
	size_t ctrset_size = 0;

	switch (ctrset) {
	case CPUMF_CTR_SET_BASIC:
		if (info->cfvn >= 1)
			ctrset_size = 6;
		break;
	case CPUMF_CTR_SET_USER:
		if (info->cfvn == 1)
			ctrset_size = 6;
		else if (info->cfvn >= 3)
			ctrset_size = 2;
		break;
	case CPUMF_CTR_SET_CRYPTO:
		if (info->csvn >= 1 && info->csvn <= 5)
			ctrset_size = 16;
		else if (info->csvn == 6)
			ctrset_size = 20;
		break;
	case CPUMF_CTR_SET_EXT:
		if (info->csvn == 1)
			ctrset_size = 32;
		else if (info->csvn == 2)
			ctrset_size = 48;
		else if (info->csvn >= 3 && info->csvn <= 5)
			ctrset_size = 128;
		else if (info->csvn == 6)
			ctrset_size = 160;
		break;
	case CPUMF_CTR_SET_MT_DIAG:
		if (info->csvn > 3)
			ctrset_size = 48;
		break;
	case CPUMF_CTR_SET_MAX:
		break;
	}

	return ctrset_size;
}
/* Calculate memory needed to store all counter sets together with header and
 * trailer data. This is independent of the counter set authorization which
 * can vary depending on the configuration.
 */
static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info)
{
	size_t max_size = sizeof(struct cf_trailer_entry);
	enum cpumf_ctr_set i;

	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		size_t size = cf_diag_ctrset_size(i, info);

		if (size)
			max_size += size * sizeof(u64) +
				    sizeof(struct cf_ctrset_entry);
	}
	debug_sprintf_event(cf_diag_dbg, 5, "%s max_size %zu\n", __func__,
			    max_size);

	return max_size;
}
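
/*
 * Worked example (assuming cfvn == 3 and csvn == 6, see
 * cf_diag_ctrset_size() above): 6 + 2 + 20 + 160 + 48 = 236 counters of
 * 8 bytes each, plus five 8-byte set headers and the 64-byte trailer,
 * gives 236 * 8 + 5 * 8 + 64 = 1992 bytes, comfortably below the
 * PAGE_SIZE buffers in struct cf_diag_csd.
 */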
/* Read a counter set. The counter set number determines which counter set and
 * the CPUM-CF first and second version number determine the number of
 * available counters in this counter set.
 * Each counter set starts with a header containing the counter set number and
 * the number of 8 byte counters.
 *
 * The function returns the number of bytes occupied by this counter set
 * including the header.
 * If there is no counter in the counter set, this counter set is useless and
 * zero is returned in that case.
 */
static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
				size_t room)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	size_t ctrset_size, need = 0;
	int rc = 3;				/* Assume write failure */

	ctrdata->def = CF_DIAG_CTRSET_DEF;
	ctrdata->set = ctrset;
	ctrdata->res1 = 0;
	ctrset_size = cf_diag_ctrset_size(ctrset, &cpuhw->info);

	if (ctrset_size) {			/* Save data */
		need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
		if (need <= room)
			rc = ctr_stcctm(ctrset, ctrset_size,
					(u64 *)(ctrdata + 1));
		if (rc != 3)
			ctrdata->ctr = ctrset_size;
		else
			need = 0;
	}

	debug_sprintf_event(cf_diag_dbg, 6,
			    "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
			    " need %zd rc %d\n",
			    __func__, ctrset, ctrset_size, cpuhw->info.cfvn,
			    cpuhw->info.csvn, need, rc);
	return need;
}
/* Read out all counter sets and save them in the provided data buffer.
 * The last 64 bytes host an artificial trailer entry.
 */
static size_t cf_diag_getctr(void *data, size_t sz, unsigned long auth)
{
	struct cf_trailer_entry *trailer;
	size_t offset = 0, done;
	int i;

	memset(data, 0, sz);
	sz -= sizeof(*trailer);		/* Always room for trailer */
	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		struct cf_ctrset_entry *ctrdata = data + offset;

		if (!(auth & cpumf_ctr_ctl[i]))
			continue;	/* Counter set not authorized */

		done = cf_diag_getctrset(ctrdata, i, sz - offset);
		offset += done;
		debug_sprintf_event(cf_diag_dbg, 6,
				    "%s ctrset %d offset %zu done %zu\n",
				    __func__, i, offset, done);
	}
	trailer = data + offset;
	cf_diag_trailer(trailer);
	return offset + sizeof(*trailer);
}
/* Calculate the difference for each counter in a counter set. */
static void cf_diag_diffctrset(u64 *pstart, u64 *pstop, int counters)
{
	for (; --counters >= 0; ++pstart, ++pstop) {
		if (*pstop >= *pstart)
			*pstop -= *pstart;
		else
			*pstop = *pstart - *pstop;
	}
}
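
/*
 * Worked example: with a start value of 100 and a stop value of 115 the
 * stored increment is 15.  When stop < start the code above stores
 * start - stop, i.e. the absolute difference, rather than a wrap-around
 * corrected delta.
 */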
/* Scan the counter sets and calculate the difference of each counter
 * in each set. The result is the increment of each counter during the
 * period the counter set has been activated.
 *
 * Return true on success.
 */
static int cf_diag_diffctr(struct cf_diag_csd *csd, unsigned long auth)
{
	struct cf_trailer_entry *trailer_start, *trailer_stop;
	struct cf_ctrset_entry *ctrstart, *ctrstop;
	size_t offset = 0;

	auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
	do {
		ctrstart = (struct cf_ctrset_entry *)(csd->start + offset);
		ctrstop = (struct cf_ctrset_entry *)(csd->data + offset);

		if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
			pr_err("cpum_cf_diag counter set compare error "
			       "in set %i\n", ctrstart->set);
			return 0;
		}
		auth &= ~cpumf_ctr_ctl[ctrstart->set];
		if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
			cf_diag_diffctrset((u64 *)(ctrstart + 1),
					   (u64 *)(ctrstop + 1), ctrstart->ctr);
			offset += ctrstart->ctr * sizeof(u64) +
				  sizeof(*ctrstart);
		}
		debug_sprintf_event(cf_diag_dbg, 6,
				    "%s set %d ctr %d offset %zu auth %lx\n",
				    __func__, ctrstart->set, ctrstart->ctr,
				    offset, auth);
	} while (ctrstart->def && auth);

	/* Save time_stamp from start of event in stop's trailer */
	trailer_start = (struct cf_trailer_entry *)(csd->start + offset);
	trailer_stop = (struct cf_trailer_entry *)(csd->data + offset);
	trailer_stop->progusage[0] = trailer_start->timestamp;

	return 1;
}
/* Create perf event sample with the counter sets as raw data. The sample
 * is then pushed to the event subsystem and the function checks for
 * possible event overflows. If an event overflow occurs, the PMU is
 * stopped.
 *
 * Return non-zero if an event overflow occurred.
 */
static int cf_diag_push_sample(struct perf_event *event,
			       struct cf_diag_csd *csd)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	perf_sample_data_init(&data, 0, event->hw.last_period);
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));

	if (event->attr.sample_type & PERF_SAMPLE_CPU)
		data.cpu_entry.cpu = event->cpu;
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = csd->used;
		raw.frag.data = csd->data;
		raw.size = csd->used;
		data.raw = &raw;
	}

	overflow = perf_event_overflow(event, &data, &regs);
	debug_sprintf_event(cf_diag_dbg, 6,
			    "%s event %p cpu %d sample_type %#llx raw %d "
			    "ov %d\n", __func__, event, event->cpu,
			    event->attr.sample_type, raw.size, overflow);
	if (overflow)
		event->pmu->stop(event, 0);

	perf_event_update_userpage(event);
	return overflow;
}
static void cf_diag_start(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
	struct hw_perf_event *hwc = &event->hw;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x hwc-state %#x\n",
			    __func__, event, event->cpu, flags, hwc->state);
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	/* (Re-)enable and activate all counter sets */
	lcctl(0);		/* Reset counter sets */
	hwc->state = 0;
	ctr_set_multiple_enable(&cpuhw->state, hwc->config_base);
	lcctl(cpuhw->state);	/* Enable counter sets */
	csd->used = cf_diag_getctr(csd->start, sizeof(csd->start),
				   event->hw.config_base);
	ctr_set_multiple_start(&cpuhw->state, hwc->config_base);
	/* Function cf_diag_enable() starts the counter sets. */
}
static void cf_diag_stop(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
	struct hw_perf_event *hwc = &event->hw;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x hwc-state %#x\n",
			    __func__, event, event->cpu, flags, hwc->state);

	/* Deactivate all counter sets */
	ctr_set_multiple_stop(&cpuhw->state, hwc->config_base);
	local64_inc(&event->count);
	csd->used = cf_diag_getctr(csd->data, sizeof(csd->data),
				   event->hw.config_base);
	if (cf_diag_diffctr(csd, event->hw.config_base))
		cf_diag_push_sample(event, csd);
	hwc->state |= PERF_HES_STOPPED;
}
static int cf_diag_add(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err = 0;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x cpuhw %p\n",
			    __func__, event, event->cpu, flags, cpuhw);

	if (cpuhw->flags & PMU_F_IN_USE) {
		err = -EAGAIN;
		goto out;
	}

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	cpuhw->flags |= PMU_F_IN_USE;
	if (flags & PERF_EF_START)
		cf_diag_start(event, PERF_EF_RELOAD);
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
	return err;
}
static void cf_diag_del(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x\n",
			    __func__, event, event->cpu, flags);

	cf_diag_stop(event, PERF_EF_UPDATE);
	ctr_set_multiple_stop(&cpuhw->state, event->hw.config_base);
	ctr_set_multiple_disable(&cpuhw->state, event->hw.config_base);
	cpuhw->flags &= ~PMU_F_IN_USE;
}
/* Default counter set events and format attribute groups */

CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);

static struct attribute *cf_diag_events_attr[] = {
	CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG),
	NULL,
};

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *cf_diag_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group cf_diag_events_group = {
	.name = "events",
	.attrs = cf_diag_events_attr,
};
static struct attribute_group cf_diag_format_group = {
	.name = "format",
	.attrs = cf_diag_format_attr,
};
static const struct attribute_group *cf_diag_attr_groups[] = {
	&cf_diag_events_group,
	&cf_diag_format_group,
	NULL,
};
/* Performance monitoring unit for s390x */
static struct pmu cf_diag = {
	.task_ctx_nr  = perf_sw_context,
	.pmu_enable   = cf_diag_enable,
	.pmu_disable  = cf_diag_disable,
	.event_init   = cf_diag_event_init,
	.add	      = cf_diag_add,
	.del	      = cf_diag_del,
	.start	      = cf_diag_start,
	.stop	      = cf_diag_stop,
	.read	      = cf_diag_read,
	.attr_groups  = cf_diag_attr_groups
};
/* Get the CPU speed, try sampling facility first and CPU attributes second. */
static void cf_diag_get_cpu_speed(void)
{
	if (cpum_sf_avail()) {			/* Sampling facility first */
		struct hws_qsi_info_block si;

		memset(&si, 0, sizeof(si));
		if (!qsi(&si)) {
			cf_diag_cpu_speed = si.cpu_speed;
			return;
		}
	}

	if (test_facility(34)) {	/* CPU speed extract static part */
		unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);

		if (mhz != -1UL)
			cf_diag_cpu_speed = mhz & 0xffffffff;
	}
}
/* Code to create device and file I/O operations */
static atomic_t ctrset_opencnt = ATOMIC_INIT(0);	/* Excl. access */

static int cf_diag_open(struct inode *inode, struct file *file)
{
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (atomic_xchg(&ctrset_opencnt, 1))
		return -EBUSY;

	/* Avoid concurrent access with perf_event_open() system call */
	mutex_lock(&cf_diag_reserve_mutex);
	if (atomic_read(&cf_diag_events) || __kernel_cpumcf_begin())
		err = -EBUSY;
	mutex_unlock(&cf_diag_reserve_mutex);
	if (err) {
		atomic_set(&ctrset_opencnt, 0);
		return err;
	}
	file->private_data = NULL;
	debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
	/* nonseekable_open() never fails */
	return nonseekable_open(inode, file);
}
/* Variables for ioctl() interface support */
static DEFINE_MUTEX(cf_diag_ctrset_mutex);
static struct cf_diag_ctrset {
	unsigned long ctrset;	/* Bit mask of counter set to read */
	cpumask_t mask;		/* CPU mask to read from */
} cf_diag_ctrset;

static void cf_diag_ctrset_clear(void)
{
	cpumask_clear(&cf_diag_ctrset.mask);
	cf_diag_ctrset.ctrset = 0;
}
static void cf_diag_release_cpu(void *p)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	debug_sprintf_event(cf_diag_dbg, 3, "%s cpu %d\n", __func__,
			    smp_processor_id());
	lcctl(0);		/* Reset counter sets */
	cpuhw->state = 0;	/* Save state in CPU hardware state */
}
/* Release function is also called when application gets terminated without
 * doing a proper ioctl(..., S390_HWCTR_STOP, ...) command.
 * Since only one application is allowed to open the device, simply stop all
 * counter sets.
 */
static int cf_diag_release(struct inode *inode, struct file *file)
{
	on_each_cpu(cf_diag_release_cpu, NULL, 1);
	cf_diag_ctrset_clear();
	atomic_set(&ctrset_opencnt, 0);
	__kernel_cpumcf_end();
	debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
	return 0;
}
struct cf_diag_call_on_cpu_parm {	/* Parm struct for smp_call_on_cpu */
	unsigned int sets;		/* Counter set bit mask */
	atomic_t cpus_ack;		/* # CPUs successfully executed func */
};
static int cf_diag_all_copy(unsigned long arg, cpumask_t *mask)
{
	struct s390_ctrset_read __user *ctrset_read;
	unsigned int cpu, cpus, rc;
	void __user *uptr;

	ctrset_read = (struct s390_ctrset_read __user *)arg;
	uptr = ctrset_read->data;
	for_each_cpu(cpu, mask) {
		struct cf_diag_csd *csd = per_cpu_ptr(&cf_diag_csd, cpu);
		struct s390_ctrset_cpudata __user *ctrset_cpudata;

		ctrset_cpudata = uptr;
		debug_sprintf_event(cf_diag_dbg, 5, "%s cpu %d used %zd\n",
				    __func__, cpu, csd->used);
		rc = put_user(cpu, &ctrset_cpudata->cpu_nr);
		rc |= put_user(csd->sets, &ctrset_cpudata->no_sets);
		rc |= copy_to_user(ctrset_cpudata->data, csd->data, csd->used);
		if (rc)
			return -EFAULT;
		uptr += sizeof(struct s390_ctrset_cpudata) + csd->used;
		cond_resched();
	}
	cpus = cpumask_weight(mask);
	if (put_user(cpus, &ctrset_read->no_cpus))
		return -EFAULT;
	debug_sprintf_event(cf_diag_dbg, 5, "%s copied %ld\n",
			    __func__, uptr - (void __user *)ctrset_read->data);
	return 0;
}
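
/*
 * Illustrative user-space sketch (not part of the driver, compiled out):
 * unpacking the buffer that cf_diag_all_copy() fills in.  It relies only
 * on the s390_ctrset_read/_cpudata/_setdata layouts from asm/hwctrset.h;
 * the ex_ name is hypothetical.
 */
#if 0	/* example only */
static void ex_parse_read_buf(struct s390_ctrset_read *rd)
{
	unsigned char *p = (unsigned char *)rd->data;
	unsigned int i, s;

	for (i = 0; i < rd->no_cpus; ++i) {
		struct s390_ctrset_cpudata *cd = (void *)p;

		p = (unsigned char *)cd->data;
		for (s = 0; s < cd->no_sets; ++s) {
			struct s390_ctrset_setdata *sd = (void *)p;

			printf("cpu %u set %#x counters %u\n",
			       cd->cpu_nr, sd->set, sd->no_cnts);
			/* 8 bytes of set/no_cnts, then the counter values */
			p += 8 + sd->no_cnts * sizeof(__u64);
		}
	}
}
#endif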
static size_t cf_diag_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
				  int ctrset_size, size_t room)
{
	size_t need = 0;
	int rc = 3;				/* Assume write failure */

	need = sizeof(*p) + sizeof(u64) * ctrset_size;
	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s room %zd need %zd set %#x set_size %d\n",
			    __func__, room, need, ctrset, ctrset_size);
	if (need > room)
		return 0;
	p->set = cpumf_ctr_ctl[ctrset];
	p->no_cnts = ctrset_size;
	rc = ctr_stcctm(ctrset, ctrset_size, (u64 *)p->cv);
	if (rc == 3)				/* Nothing stored */
		need = 0;
	debug_sprintf_event(cf_diag_dbg, 5, "%s need %zd rc %d\n", __func__,
			    need, rc);
	return need;
}
/* Read all counter sets. Since the perf_event_open() system call with
 * event cpum_cf_diag/.../ is blocked when this interface is active, reuse
 * the perf_event_open() data buffer to store the counter sets.
 */
static void cf_diag_cpu_read(void *parm)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
	struct cf_diag_call_on_cpu_parm *p = parm;
	int set, set_size;
	size_t space;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s new %#x flags %#x state %#llx\n",
			    __func__, p->sets, cpuhw->flags,
			    cpuhw->state);
	/* No data saved yet */
	csd->used = 0;
	csd->sets = 0;
	memset(csd->data, 0, sizeof(csd->data));

	/* Scan the counter sets */
	for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set) {
		struct s390_ctrset_setdata *sp = (void *)csd->data + csd->used;

		if (!(p->sets & cpumf_ctr_ctl[set]))
			continue;	/* Counter set not in list */

		set_size = cf_diag_ctrset_size(set, &cpuhw->info);
		space = sizeof(csd->data) - csd->used;
		space = cf_diag_cpuset_read(sp, set, set_size, space);
		if (space) {
			csd->used += space;
			csd->sets += 1;
		}
		debug_sprintf_event(cf_diag_dbg, 5, "%s sp %px space %zd\n",
				    __func__, sp, space);
	}
	debug_sprintf_event(cf_diag_dbg, 5, "%s sets %d used %zd\n", __func__,
			    csd->sets, csd->used);
}
static int cf_diag_all_read(unsigned long arg)
{
	struct cf_diag_call_on_cpu_parm p;
	cpumask_var_t mask;
	int rc;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	p.sets = cf_diag_ctrset.ctrset;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
	rc = cf_diag_all_copy(arg, mask);
	free_cpumask_var(mask);
	debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
	return rc;
}
/* Stop all counter sets via ioctl interface */
static void cf_diag_ioctl_off(void *parm)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_call_on_cpu_parm *p = parm;
	int rc;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s new %#x flags %#x state %#llx\n",
			    __func__, p->sets, cpuhw->flags,
			    cpuhw->state);

	ctr_set_multiple_disable(&cpuhw->state, p->sets);
	ctr_set_multiple_stop(&cpuhw->state, p->sets);
	rc = lcctl(cpuhw->state);	/* Stop counter sets */
	if (!cpuhw->state)
		cpuhw->flags &= ~PMU_F_IN_USE;
	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s rc %d flags %#x state %#llx\n", __func__,
			    rc, cpuhw->flags, cpuhw->state);
}
/* Start counter sets on particular CPU */
static void cf_diag_ioctl_on(void *parm)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_call_on_cpu_parm *p = parm;
	int rc;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s new %#x flags %#x state %#llx\n",
			    __func__, p->sets, cpuhw->flags,
			    cpuhw->state);

	if (!(cpuhw->flags & PMU_F_IN_USE))
		cpuhw->state = 0;
	cpuhw->flags |= PMU_F_IN_USE;
	rc = lcctl(cpuhw->state);	/* Reset unused counter sets */
	ctr_set_multiple_enable(&cpuhw->state, p->sets);
	ctr_set_multiple_start(&cpuhw->state, p->sets);
	rc |= lcctl(cpuhw->state);	/* Start counter sets */
	if (!rc)
		atomic_inc(&p->cpus_ack);
	debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d state %#llx\n",
			    __func__, rc, cpuhw->state);
}
static int cf_diag_all_stop(void)
{
	struct cf_diag_call_on_cpu_parm p = {
		.sets = cf_diag_ctrset.ctrset,
	};
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
	free_cpumask_var(mask);
	return 0;
}
static int cf_diag_all_start(void)
{
	struct cf_diag_call_on_cpu_parm p = {
		.sets = cf_diag_ctrset.ctrset,
		.cpus_ack = ATOMIC_INIT(0),
	};
	cpumask_var_t mask;
	int rc = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_ioctl_on, &p, 1);
	if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
		on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
		rc = -EIO;
	}
	free_cpumask_var(mask);
	return rc;
}
/* Return the maximum required space for all possible CPUs in case one
 * CPU will be onlined during the START, READ, STOP cycles.
 * To find out the size of the counter sets, any one CPU will do. They
 * all have the same counter sets.
 */
static size_t cf_diag_needspace(unsigned int sets)
{
	struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
	size_t bytes = 0;
	int i;

	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		if (!(sets & cpumf_ctr_ctl[i]))
			continue;
		bytes += cf_diag_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
			 sizeof(((struct s390_ctrset_setdata *)0)->set) +
			 sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
	}
	bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
		(bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
		 sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
	debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
			    bytes);
	put_cpu_ptr(&cpu_cf_events);
	return bytes;
}
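
/*
 * Worked example (hypothetical numbers): with only the basic counter set
 * selected (6 counters, see cf_diag_ctrset_size()), each set needs
 * 8 bytes of set/no_cnts plus 6 * 8 = 48 bytes of counters, and each CPU
 * adds 8 bytes of cpu_nr/no_sets, i.e. 64 bytes per CPU.  With
 * nr_cpu_ids == 8 the formula above yields 4 + 8 * 64 = 516 bytes.
 */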
static long cf_diag_ioctl_read(unsigned long arg)
{
	struct s390_ctrset_read read;
	int ret = 0;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
		return -EFAULT;
	ret = cf_diag_all_read(arg);
	debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
	return ret;
}
static long cf_diag_ioctl_stop(void)
{
	int ret;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	ret = cf_diag_all_stop();
	cf_diag_ctrset_clear();
	debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
	return ret;
}
static long cf_diag_ioctl_start(unsigned long arg)
{
	struct s390_ctrset_start __user *ustart;
	struct s390_ctrset_start start;
	void __user *umask;
	unsigned int len;
	int ret = 0;
	size_t need;

	if (cf_diag_ctrset.ctrset)
		return -EBUSY;
	ustart = (struct s390_ctrset_start __user *)arg;
	if (copy_from_user(&start, ustart, sizeof(start)))
		return -EFAULT;
	if (start.version != S390_HWCTR_START_VERSION)
		return -EINVAL;
	if (start.counter_sets & ~(cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_USER] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_CRYPTO] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_EXT] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]))
		return -EINVAL;		/* Invalid counter set */
	if (!start.counter_sets)
		return -EINVAL;		/* No counter set at all? */
	cpumask_clear(&cf_diag_ctrset.mask);
	len = min_t(u64, start.cpumask_len, cpumask_size());
	umask = (void __user *)start.cpumask;
	if (copy_from_user(&cf_diag_ctrset.mask, umask, len))
		return -EFAULT;
	if (cpumask_empty(&cf_diag_ctrset.mask))
		return -EINVAL;
	need = cf_diag_needspace(start.counter_sets);
	if (put_user(need, &ustart->data_bytes))
		ret = -EFAULT;
	if (ret)
		goto out;
	cf_diag_ctrset.ctrset = start.counter_sets;
	ret = cf_diag_all_start();
out:
	if (ret)
		cf_diag_ctrset_clear();
	debug_sprintf_event(cf_diag_dbg, 2, "%s sets %#lx need %ld ret %d\n",
			    __func__, cf_diag_ctrset.ctrset, need, ret);
	return ret;
}
static long cf_diag_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	int ret;

	debug_sprintf_event(cf_diag_dbg, 2, "%s cmd %#x arg %lx\n", __func__,
			    cmd, arg);

	mutex_lock(&cf_diag_ctrset_mutex);
	switch (cmd) {
	case S390_HWCTR_START:
		ret = cf_diag_ioctl_start(arg);
		break;
	case S390_HWCTR_STOP:
		ret = cf_diag_ioctl_stop();
		break;
	case S390_HWCTR_READ:
		ret = cf_diag_ioctl_read(arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&cf_diag_ctrset_mutex);

	debug_sprintf_event(cf_diag_dbg, 2, "%s ret %d\n", __func__, ret);
	return ret;
}
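
/*
 * Illustrative user-space sketch (not part of the driver, compiled out):
 * one START/READ/STOP cycle against /dev/hwctr using the ioctls handled
 * above.  Field usage follows asm/hwctrset.h; the 0x02 counter-set bit
 * (assumed: basic set) and the ex_ names are assumptions.
 */
#if 0	/* example only */
static int ex_hwctr_cycle(int fd, __u64 *cpumask, __u64 cpumask_len)
{
	struct s390_ctrset_start start = {
		.version = S390_HWCTR_START_VERSION,
		.counter_sets = 0x02,		/* assumed: basic set */
		.cpumask_len = cpumask_len,	/* in bytes */
		.cpumask = cpumask,
	};
	struct s390_ctrset_read *buf;
	int rc;

	if (ioctl(fd, S390_HWCTR_START, &start))
		return -1;
	buf = malloc(start.data_bytes);	/* driver wrote back the size */
	rc = buf ? ioctl(fd, S390_HWCTR_READ, buf) : -1;
	if (!rc)
		ex_parse_read_buf(buf);	/* see earlier sketch */
	free(buf);
	return ioctl(fd, S390_HWCTR_STOP) ? -1 : rc;
}
#endif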
static const struct file_operations cf_diag_fops = {
	.owner = THIS_MODULE,
	.open = cf_diag_open,
	.release = cf_diag_release,
	.unlocked_ioctl	= cf_diag_ioctl,
	.compat_ioctl = cf_diag_ioctl,
	.llseek = no_llseek
};
static struct miscdevice cf_diag_dev = {
	.name	= S390_HWCTR_DEVICE,
	.minor	= MISC_DYNAMIC_MINOR,
	.fops	= &cf_diag_fops,
};
static int cf_diag_online_cpu(unsigned int cpu)
{
	struct cf_diag_call_on_cpu_parm p;

	mutex_lock(&cf_diag_ctrset_mutex);
	if (!cf_diag_ctrset.ctrset)
		goto out;
	p.sets = cf_diag_ctrset.ctrset;
	cf_diag_ioctl_on(&p);
out:
	mutex_unlock(&cf_diag_ctrset_mutex);
	return 0;
}

static int cf_diag_offline_cpu(unsigned int cpu)
{
	struct cf_diag_call_on_cpu_parm p;

	mutex_lock(&cf_diag_ctrset_mutex);
	if (!cf_diag_ctrset.ctrset)
		goto out;
	p.sets = cf_diag_ctrset.ctrset;
	cf_diag_ioctl_off(&p);
out:
	mutex_unlock(&cf_diag_ctrset_mutex);
	return 0;
}
/* Initialize the counter set PMU to generate complete counter set data as
 * event raw data. This relies on the CPU Measurement Counter Facility device
 * already being loaded and initialized.
 */
static int __init cf_diag_init(void)
{
	struct cpumf_ctr_info info;
	size_t need;
	int rc;

	if (!kernel_cpumcf_avail() || !stccm_avail() || qctri(&info))
		return -ENODEV;
	cf_diag_get_cpu_speed();

	/* Make sure the counter set data fits into predefined buffer. */
	need = cf_diag_ctrset_maxsize(&info);
	if (need > sizeof(((struct cf_diag_csd *)0)->start)) {
		pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
		       need);
		return -ENOMEM;
	}

	rc = misc_register(&cf_diag_dev);
	if (rc) {
		pr_err("Registration of /dev/" S390_HWCTR_DEVICE
		       " failed rc=%d\n", rc);
		goto out;
	}

	/* Setup s390dbf facility */
	cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
	if (!cf_diag_dbg) {
		pr_err("Registration of s390dbf(cpum_cf_diag) failed\n");
		rc = -ENOMEM;
		goto out_dbf;
	}
	debug_register_view(cf_diag_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
	if (rc) {
		pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
		       rc);
		goto out_perf;
	}
	rc = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_S390_CFD_ONLINE,
				       "perf/s390/cfd:online",
				       cf_diag_online_cpu, cf_diag_offline_cpu);
	if (!rc)
		goto out;

	pr_err("Registration of CPUHP_AP_PERF_S390_CFD_ONLINE failed rc=%i\n",
	       rc);
	perf_pmu_unregister(&cf_diag);
out_perf:
	debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
	debug_unregister(cf_diag_dbg);
out_dbf:
	misc_deregister(&cf_diag_dev);
out:
	return rc;
}
device_initcall(cf_diag_init);