// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Steven Kinney <Steven.Kinney@amd.com>
 * Author: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
 *
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 */
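/*
 * Usage sketch (illustrative; the instance name depends on the running
 * system, assuming one IOMMU registered as "amd_iommu_0"):
 *
 *   perf stat -e 'amd_iommu_0/cmd_processed/' -a -- sleep 1
 *
 * counts IOMMU commands processed system-wide for one second.
 */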
#define pr_fmt(fmt)	"perf/amd_iommu: " fmt
#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>

#include "../perf_event.h"
#include "iommu.h"
#define COUNTER_SHIFT		16
/* iommu pmu conf masks */
#define GET_CSOURCE(x)     ((x)->conf & 0xFFULL)
#define GET_DEVID(x)       (((x)->conf >> 8)  & 0xFFFFULL)
#define GET_DOMID(x)       (((x)->conf >> 24) & 0xFFFFULL)
#define GET_PASID(x)       (((x)->conf >> 40) & 0xFFFFFULL)
/* iommu pmu conf1 masks */
#define GET_DEVID_MASK(x)  ((x)->conf1  & 0xFFFFULL)
#define GET_DOMID_MASK(x)  (((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x)  (((x)->conf1 >> 32) & 0xFFFFFULL)
#define IOMMU_NAME_SIZE 16
struct perf_amd_iommu {
	struct list_head list;
	struct pmu pmu;
	struct amd_iommu *iommu;
	char name[IOMMU_NAME_SIZE];
	u8 max_banks;
	u8 max_counters;
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
};

static LIST_HEAD(perf_amd_iommu_list);
/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,	"config:0-7");
PMU_FORMAT_ATTR(devid,		"config:8-23");
PMU_FORMAT_ATTR(domid,		"config:24-39");
PMU_FORMAT_ATTR(pasid,		"config:40-59");
PMU_FORMAT_ATTR(devid_mask,	"config1:0-15");
PMU_FORMAT_ATTR(domid_mask,	"config1:16-31");
PMU_FORMAT_ATTR(pasid_mask,	"config1:32-51");
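/*
 * Worked example of the layout above (illustrative values): counting
 * csource 0x05 (mem_trans_total) filtered to device id 0x0100 packs as
 * config = (0x0100ULL << 8) | 0x05, with the devid mask in config1
 * bits 0-15, which perf expresses as:
 *
 *   perf stat -e 'amd_iommu_0/csource=0x05,devid=0x0100,devid_mask=0xffff/' -a
 */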
static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};
static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};
/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
static struct attribute_group amd_iommu_events_group = {
	.name = "events",
};
struct amd_iommu_event_desc {
	struct device_attribute attr;
	const char *event;
};
static ssize_t _iommu_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}
#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}
static struct amd_iommu_event_desc amd_iommu_v2_event_descs[] = {
	AMD_IOMMU_EVENT_DESC(mem_pass_untrans,		"csource=0x01"),
	AMD_IOMMU_EVENT_DESC(mem_pass_pretrans,		"csource=0x02"),
	AMD_IOMMU_EVENT_DESC(mem_pass_excl,		"csource=0x03"),
	AMD_IOMMU_EVENT_DESC(mem_target_abort,		"csource=0x04"),
	AMD_IOMMU_EVENT_DESC(mem_trans_total,		"csource=0x05"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_hit,	"csource=0x06"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pte_mis,	"csource=0x07"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_hit,	"csource=0x08"),
	AMD_IOMMU_EVENT_DESC(mem_iommu_tlb_pde_mis,	"csource=0x09"),
	AMD_IOMMU_EVENT_DESC(mem_dte_hit,		"csource=0x0a"),
	AMD_IOMMU_EVENT_DESC(mem_dte_mis,		"csource=0x0b"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_tot,		"csource=0x0c"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_nst,		"csource=0x0d"),
	AMD_IOMMU_EVENT_DESC(page_tbl_read_gst,		"csource=0x0e"),
	AMD_IOMMU_EVENT_DESC(int_dte_hit,		"csource=0x0f"),
	AMD_IOMMU_EVENT_DESC(int_dte_mis,		"csource=0x10"),
	AMD_IOMMU_EVENT_DESC(cmd_processed,		"csource=0x11"),
	AMD_IOMMU_EVENT_DESC(cmd_processed_inv,		"csource=0x12"),
	AMD_IOMMU_EVENT_DESC(tlb_inv,			"csource=0x13"),
	AMD_IOMMU_EVENT_DESC(ign_rd_wr_mmio_1ff8h,	"csource=0x14"),
	AMD_IOMMU_EVENT_DESC(vapic_int_non_guest,	"csource=0x15"),
	AMD_IOMMU_EVENT_DESC(vapic_int_guest,		"csource=0x16"),
	AMD_IOMMU_EVENT_DESC(smi_recv,			"csource=0x17"),
	AMD_IOMMU_EVENT_DESC(smi_blk,			"csource=0x18"),
	{ /* end: all zeroes */ },
};
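/*
 * These descriptors are exported through the events attribute group, so
 * after registration each event name is visible under
 * /sys/bus/event_source/devices/<pmu>/events/ and usable by name from
 * perf (e.g. amd_iommu_0/mem_trans_total/ on a single-IOMMU system).
 */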
/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;
static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);
static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};
/*---------------------------------------------*/
static int get_next_avail_iommu_bnk_cntr(struct perf_event *event)
{
	struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
	int max_cntrs = piommu->max_counters;
	int max_banks = piommu->max_banks;
	u32 shift, bank, cntr;
	unsigned long flags;
	int retval;

	raw_spin_lock_irqsave(&piommu->lock, flags);

	for (bank = 0, shift = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
			shift = bank + (bank*3) + cntr;
			if (piommu->cntr_assign_mask & BIT_ULL(shift)) {
				continue;
			} else {
				/* claim this bank/counter pair for the event */
				piommu->cntr_assign_mask |= BIT_ULL(shift);
				event->hw.iommu_bank = bank;
				event->hw.iommu_cntr = cntr;
				retval = 0;
				goto out;
			}
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&piommu->lock, flags);
	return retval;
}
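/*
 * Note on the mapping above: bank + (bank*3) + cntr == bank*4 + cntr, so
 * each bank owns four consecutive bits of cntr_assign_mask (bank 0 uses
 * bits 0-3, bank 1 bits 4-7, and so on). For example, bank=1/cntr=2
 * claims bit 6.
 */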
static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
				      u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift = 0;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	if ((bank > max_banks) || (cntr > max_cntrs))
		return -EINVAL;

	shift = bank + cntr + (bank*3);

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~(1ULL<<shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}
static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* test the event attr type check for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores.
	 * Therefore, per-process mode and event sampling mode
	 * are not supported.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	/* update the hw_perf_event struct with the iommu config data */
	hwc->conf  = event->attr.config;
	hwc->conf1 = event->attr.config1;

	return 0;
}
static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
{
	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
}
static void perf_iommu_enable_event(struct perf_event *ev)
{
	struct amd_iommu *iommu = perf_event_2_iommu(ev);
	struct hw_perf_event *hwc = &ev->hw;
	u8 bank = hwc->iommu_bank;
	u8 cntr = hwc->iommu_cntr;
	u64 reg = 0ULL;

	reg = GET_CSOURCE(hwc);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);

	/* a non-zero filter also sets bit 31 to enable matching */
	reg = GET_DEVID_MASK(hwc);
	reg = GET_DEVID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);

	reg = GET_PASID_MASK(hwc);
	reg = GET_PASID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);

	reg = GET_DOMID_MASK(hwc);
	reg = GET_DOMID(hwc) | (reg << 32);
	if (reg)
		reg |= BIT(31);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
}
static void perf_iommu_disable_event(struct perf_event *event)
{
	struct amd_iommu *iommu = perf_event_2_iommu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 reg = 0ULL;

	amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
			     IOMMU_PC_COUNTER_SRC_REG, &reg);
}
static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);
		struct amd_iommu *iommu = perf_event_2_iommu(event);

		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
	}

	perf_iommu_enable_event(event);
	perf_event_update_userpage(event);
}
static void perf_iommu_read(struct perf_event *event)
{
	u64 count, prev, delta;
	struct hw_perf_event *hwc = &event->hw;
	struct amd_iommu *iommu = perf_event_2_iommu(event);

	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				 IOMMU_PC_COUNTER_REG, &count))
		return;

	/* IOMMU pc counter register is only 48 bits */
	count &= GENMASK_ULL(47, 0);

	prev = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
		return;

	/* Handle 48-bit counter overflow */
	delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}
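/*
 * Worked example of the shift trick above: with COUNTER_SHIFT == 16, a
 * 48-bit wrap from prev = 0xFFFFFFFFFFFF to count = 0x1 yields
 * (count << 16) - (prev << 16) == 0x20000 as a 64-bit subtraction, and
 * shifting back down by 16 recovers the true delta of 2.
 */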
static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}
static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(event);
	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}
static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu =
			container_of(event->pmu, struct perf_amd_iommu, pmu);

	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				   hwc->iommu_bank, hwc->iommu_cntr);

	perf_event_update_userpage(event);
}
static __init int _init_events_attrs(void)
{
	int i = 0, j;
	struct attribute **attrs;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	amd_iommu_events_group.attrs = attrs;
	return 0;
}
static const struct attribute_group *amd_iommu_attr_groups[] = {
	&amd_iommu_format_group,
	&amd_iommu_cpumask_group,
	&amd_iommu_events_group,
	NULL,
};
static const struct pmu iommu_pmu __initconst = {
	.event_init	= perf_iommu_event_init,
	.add		= perf_iommu_add,
	.del		= perf_iommu_del,
	.start		= perf_iommu_start,
	.stop		= perf_iommu_stop,
	.read		= perf_iommu_read,
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_iommu_attr_groups,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};
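/*
 * iommu_pmu is an __initconst template: init_one_iommu() copies it into
 * each per-IOMMU perf_amd_iommu before registration, so every IOMMU on
 * the system gets its own independently named PMU instance
 * (amd_iommu_0, amd_iommu_1, ...).
 */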
static __init int init_one_iommu(unsigned int idx)
{
	struct perf_amd_iommu *perf_iommu;
	int ret;

	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
	if (!perf_iommu)
		return -ENOMEM;

	raw_spin_lock_init(&perf_iommu->lock);

	perf_iommu->pmu          = iommu_pmu;
	perf_iommu->iommu        = get_amd_iommu(idx);
	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);

	if (!perf_iommu->iommu ||
	    !perf_iommu->max_banks ||
	    !perf_iommu->max_counters) {
		kfree(perf_iommu);
		return -EINVAL;
	}

	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);

	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
	if (!ret) {
		pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n",
			idx, perf_iommu->max_banks, perf_iommu->max_counters);
		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
	} else {
		pr_warn("Error initializing IOMMU %d.\n", idx);
		kfree(perf_iommu);
	}

	return ret;
}
static __init int amd_iommu_pc_init(void)
{
	unsigned int i, cnt = 0;
	int ret;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	ret = _init_events_attrs();
	if (ret)
		return ret;

	/*
	 * An IOMMU PMU is specific to an IOMMU, and can function
	 * independently. So we go through all IOMMUs and ignore any
	 * that fail init, unless all IOMMUs fail.
	 */
	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
		ret = init_one_iommu(i);
		if (!ret)
			cnt++;
	}

	if (!cnt) {
		kfree(amd_iommu_events_group.attrs);
		return -ENODEV;
	}

	/* Init cpumask attributes to only core 0 */
	cpumask_set_cpu(0, &iommu_cpumask);
	return 0;
}

device_initcall(amd_iommu_pc_init);