// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for s390x - CPU-measurement Counter Facility
 *
 * Copyright IBM Corp. 2012, 2019
 * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
 */
#define KMSG_COMPONENT	"cpum_cf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/cpu_mcf.h>

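/* Map a counter number to the CPU-measurement counter set that contains it.
 * Returns CPUMF_CTR_SET_MAX if the counter belongs to no known set.
 */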
static enum cpumf_ctr_set get_counter_set(u64 event)
{
	int set = CPUMF_CTR_SET_MAX;

	if (event < 32)
		set = CPUMF_CTR_SET_BASIC;
	else if (event < 64)
		set = CPUMF_CTR_SET_USER;
	else if (event < 128)
		set = CPUMF_CTR_SET_CRYPTO;
	else if (event < 288)
		set = CPUMF_CTR_SET_EXT;
	else if (event >= 448 && event < 496)
		set = CPUMF_CTR_SET_MT_DIAG;

	return set;
}

static int validate_ctr_version(const struct hw_perf_event *hwc)
{
	struct cpu_cf_events *cpuhw;
	int err = 0;
	u16 mtdiag_ctl;

	cpuhw = &get_cpu_var(cpu_cf_events);

	/* check required version for counter sets */
	switch (hwc->config_base) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
		if (cpuhw->info.cfvn < 1)
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_CRYPTO:
		if ((cpuhw->info.csvn >= 1 && cpuhw->info.csvn <= 5 &&
		     hwc->config > 79) ||
		    (cpuhw->info.csvn >= 6 && hwc->config > 83))
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_EXT:
		if (cpuhw->info.csvn < 1)
			err = -EOPNOTSUPP;
		if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
		    (cpuhw->info.csvn == 2 && hwc->config > 175) ||
		    (cpuhw->info.csvn >= 3 && cpuhw->info.csvn <= 5
		     && hwc->config > 255) ||
		    (cpuhw->info.csvn >= 6 && hwc->config > 287))
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_MT_DIAG:
		if (cpuhw->info.csvn <= 3)
			err = -EOPNOTSUPP;
		/*
		 * MT-diagnostic counters are read-only. The counter set
		 * is automatically enabled and activated on all CPUs with
		 * multithreading (SMT). Deactivation of multithreading
		 * also disables the counter set. State changes are ignored
		 * by lcctl(). Because Linux controls SMT enablement through
		 * a kernel parameter only, the counter set is either disabled
		 * or enabled and active.
		 *
		 * Thus, the counters can only be used if SMT is on and the
		 * counter set is enabled and active.
		 */
		mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG];
		if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
		      (cpuhw->info.enable_ctl & mtdiag_ctl) &&
		      (cpuhw->info.act_ctl & mtdiag_ctl)))
			err = -EOPNOTSUPP;
		break;
	}

	put_cpu_var(cpu_cf_events);
	return err;
}

static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
	struct cpu_cf_events *cpuhw;
	u64 ctrs_state;
	int err = 0;

	cpuhw = &get_cpu_var(cpu_cf_events);

	/* Check authorization for cpu counter sets.
	 * If the particular CPU counter set is not authorized,
	 * return with -ENOENT in order to fall back to other
	 * PMUs that might suffice the event request.
	 */
	ctrs_state = cpumf_ctr_ctl[hwc->config_base];
	if (!(ctrs_state & cpuhw->info.auth_ctl))
		err = -ENOENT;

	put_cpu_var(cpu_cf_events);
	return err;
}

/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_enable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err;

	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags |= PMU_F_ENABLED;
}

/*
 * Change the CPUMF state to inactive.
 * Disable and enable (inactive) the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_disable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err;
	u64 inactive;

	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	err = lcctl(inactive);
	if (err) {
		pr_err("Disabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags &= ~PMU_F_ENABLED;
}

/* Number of perf events counting hardware events */
static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			__kernel_cpumcf_end();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
static const int cpumf_generic_events_basic[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 0,
	[PERF_COUNT_HW_INSTRUCTIONS] = 1,
	[PERF_COUNT_HW_CACHE_REFERENCES] = -1,
	[PERF_COUNT_HW_CACHE_MISSES] = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES] = -1,
	[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
/* CPUMF <-> perf event mappings for userspace (problem-state set) */
static const int cpumf_generic_events_user[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 32,
	[PERF_COUNT_HW_INSTRUCTIONS] = 33,
	[PERF_COUNT_HW_CACHE_REFERENCES] = -1,
	[PERF_COUNT_HW_CACHE_MISSES] = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES] = -1,
	[PERF_COUNT_HW_BUS_CYCLES] = -1,
};

static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	enum cpumf_ctr_set set;
	int err = 0;
	u64 ev;

	switch (type) {
	case PERF_TYPE_RAW:
		/* Raw events are used to access counters directly,
		 * hence do not permit excludes */
		if (attr->exclude_kernel || attr->exclude_user ||
		    attr->exclude_hv)
			return -EOPNOTSUPP;
		ev = attr->config;
		break;

	case PERF_TYPE_HARDWARE:
		if (is_sampling_event(event))	/* No sampling support */
			return -ENOENT;
		ev = attr->config;
		/* Count user space (problem-state) only */
		if (!attr->exclude_user && attr->exclude_kernel) {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_user[ev];

		/* No support for kernel space counters only */
		} else if (!attr->exclude_kernel && attr->exclude_user) {
			return -EOPNOTSUPP;
		} else {	/* Count user and kernel space */
			if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_basic[ev];
		}
		break;

	default:
		return -ENOENT;
	}

	if (ev == -1)
		return -ENOENT;

	if (ev > PERF_CPUM_CF_MAX_CTR)
		return -ENOENT;

	/* Obtain the counter set to which the specified counter belongs */
	set = get_counter_set(ev);
	switch (set) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
	case CPUMF_CTR_SET_MT_DIAG:
		/*
		 * Use the hardware perf event structure to store the
		 * counter number in the 'config' member and the counter
		 * set number in the 'config_base'. The counter set number
		 * is then later used to enable/disable the counter(s).
		 */
		hwc->config = ev;
		hwc->config_base = set;
		break;
	case CPUMF_CTR_SET_MAX:
		/* The counter could not be associated to a counter set */
		return -EINVAL;
	}

	/* Initialize for using the CPU-measurement counter facility */
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;
	event->destroy = hw_perf_event_destroy;

	/* Finally, validate version and authorization of the counter set */
	err = validate_ctr_auth(hwc);
	if (!err)
		err = validate_ctr_version(hwc);

	return err;
}

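/* PMU event_init callback: hardware events, raw events, and events created
 * with this PMU's dynamic type are accepted; any other type is rejected with
 * -ENOENT so that perf can fall back to another PMU.
 */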
static int cpumf_pmu_event_init(struct perf_event *event)
{
	unsigned int type = event->attr.type;
	int err;

	if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
		err = __hw_perf_event_init(event, type);
	else if (event->pmu->type == type)
		/* Registered as unknown PMU */
		err = __hw_perf_event_init(event, PERF_TYPE_RAW);
	else
		return -ENOENT;

	if (unlikely(err) && event->destroy)
		event->destroy(event);

	return err;
}

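/* Re-read the counter and set event->hw.prev_count to the current hardware
 * value, establishing a fresh baseline for subsequent delta calculations.
 */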
static int hw_perf_event_reset(struct perf_event *event)
{
	u64 prev, new;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err) {
			if (err != 3)
				break;
			/* The counter is not (yet) available. This
			 * might happen if the counter set to which
			 * this counter belongs is in the disabled
			 * state.
			 */
			new = 0;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	return err;
}

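/* Read the counter and add the delta since the previous read to the event
 * count, accounting for a single wrap of the 64-bit counter value.
 */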
static void hw_perf_event_update(struct perf_event *event)
{
	u64 prev, new, delta;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	 /* overflow */
	local64_add(delta, &event->count);
}

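/* PMU read callback: update the event count unless the event is stopped. */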
static void cpumf_pmu_read(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return;

	hw_perf_event_update(event);
}

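/* PMU start callback: enable and activate the counter set of this event,
 * resynchronize the event baseline, and take a reference on the set.
 */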
static void cpumf_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(hwc->config == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* (Re-)enable and activate the counter set */
	ctr_set_enable(&cpuhw->state, hwc->config_base);
	ctr_set_start(&cpuhw->state, hwc->config_base);

	/* The counter set to which this counter belongs can be already active.
	 * Because all counters in a set are active, the event->hw.prev_count
	 * needs to be synchronized. At this point, the counter set can be in
	 * the inactive or disabled state.
	 */
	hw_perf_event_reset(event);

	/* increment refcount for this counter set */
	atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
}

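/* PMU stop callback: drop the reference on the counter set, deactivate the
 * set if this was its last user, and update the event count if requested.
 */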
static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* Decrement reference count for this counter set and if this
		 * is the last used counter in the set, clear activation
		 * control and set the counter set state to inactive.
		 */
		if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
			ctr_set_stop(&cpuhw->state, hwc->config_base);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		hw_perf_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

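/* PMU add callback: schedule the event on this CPU in stopped state and
 * optionally start it right away.
 */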
static int cpumf_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	/* Check authorization for the counter set to which this
	 * counter belongs.
	 * For group events transaction, the authorization check is
	 * done in cpumf_pmu_commit_txn().
	 */
	if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
		if (validate_ctr_auth(&event->hw))
			return -ENOENT;

	ctr_set_enable(&cpuhw->state, event->hw.config_base);
	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		cpumf_pmu_start(event, PERF_EF_RELOAD);

	return 0;
}

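/* PMU del callback: stop the event and disable its counter set when no
 * other event on this CPU still uses the set.
 */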
static void cpumf_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	cpumf_pmu_stop(event, PERF_EF_UPDATE);

	/* Check if any counter in the counter set is still used. If not used,
	 * change the counter set to the disabled state. This also clears the
	 * content of all counters in the set.
	 *
	 * When a new perf event has been added but not yet started, this can
	 * clear enable control and reset all counters in a set. Therefore,
	 * cpumf_pmu_start() always has to reenable a counter set.
	 */
	if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
		ctr_set_disable(&cpuhw->state, event->hw.config_base);
}

/*
 * Start group events scheduling transaction.
 * Set flags to perform a single test at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	WARN_ON_ONCE(cpuhw->txn_flags);		/* txn already in flight */

	cpuhw->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	cpuhw->tx_state = cpuhw->state;
}

/*
 * Stop and cancel a group events scheduling transaction.
 * Assumes cpumf_pmu_del() is called for each successfully added
 * cpumf_pmu_add() during the transaction.
 */
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
	unsigned int txn_flags;
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	txn_flags = cpuhw->txn_flags;
	cpuhw->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	WARN_ON(cpuhw->tx_state != cpuhw->state);

	perf_pmu_enable(pmu);
}

/*
 * Commit the group events scheduling transaction. On success, the
 * transaction is closed. On error, the transaction is kept open
 * until cpumf_pmu_cancel_txn() is called.
 */
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	u64 state;

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuhw->txn_flags = 0;
		return 0;
	}

	/* check if the updated state can be scheduled */
	state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	state >>= CPUMF_LCCTL_ENABLE_SHIFT;
	if ((state & cpuhw->info.auth_ctl) != state)
		return -ENOENT;

	cpuhw->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}

/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
	.task_ctx_nr  = perf_sw_context,
	.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
	.pmu_enable   = cpumf_pmu_enable,
	.pmu_disable  = cpumf_pmu_disable,
	.event_init   = cpumf_pmu_event_init,
	.add          = cpumf_pmu_add,
	.del          = cpumf_pmu_del,
	.start        = cpumf_pmu_start,
	.stop         = cpumf_pmu_stop,
	.read         = cpumf_pmu_read,
	.start_txn    = cpumf_pmu_start_txn,
	.commit_txn   = cpumf_pmu_commit_txn,
	.cancel_txn   = cpumf_pmu_cancel_txn,
};

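/* Initialization: register the PMU only if the CPU-measurement counter
 * facility is available on this machine.
 */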
static int __init cpumf_pmu_init(void)
{
	int rc;

	if (!kernel_cpumcf_avail())
		return -ENODEV;

	cpumf_pmu.attr_groups = cpumf_cf_event_group();
	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
	if (rc)
		pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
	return rc;
}
subsys_initcall(cpumf_pmu_init);