/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Support Intel uncore PerfMon discovery mechanism.
 * Copyright(c) 2021 Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "uncore.h"
#include "uncore_discovery.h"

static struct rb_root discovery_tables = RB_ROOT;
static int num_discovered_types[UNCORE_ACCESS_MAX];

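/*
 * Check whether the platform exposes the dedicated discovery table PCI
 * device. A device matching UNCORE_DISCOVERY_TABLE_DEVICE only counts if
 * it also carries the discovery DVSEC extended capability.
 */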
static bool has_generic_discovery_table(void)
{
	struct pci_dev *dev;
	int dvsec;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL);
	if (!dev)
		return false;

	/* A discovery table device has the unique capability ID. */
	dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY);
	pci_dev_put(dev);
	if (dvsec)
		return true;

	return false;
}

static int logical_die_id;

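/*
 * Map a discovery table device to the logical die ID of the package it
 * belongs to, via the NUMA node of its bus. Returns -1 when no online
 * CPU of that node can provide the mapping.
 */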
static int get_device_die_id(struct pci_dev *dev)
{
	int cpu, node = pcibus_to_node(dev->bus);

	/*
	 * If the NUMA info is not available, assume that the logical die id is
	 * contiguous in the order in which the discovery table devices are
	 * detected.
	 */
	if (node < 0)
		return logical_die_id++;

	for_each_cpu(cpu, cpumask_of_node(node)) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && cpu_to_node(cpu) == node)
			return c->logical_die_id;
	}

	/*
	 * All CPUs of a node may be offlined. For this case,
	 * the PCI and MMIO type of uncore blocks which are
	 * enumerated by the device will be unavailable.
	 */
	return -1;
}

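/*
 * Discovered types live in the discovery_tables rb-tree, keyed by box
 * type. __type_cmp() and __type_less() are the comparison callbacks
 * required by rb_find() and rb_add() below.
 */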
#define __node_2_type(cur)	\
	rb_entry((cur), struct intel_uncore_discovery_type, node)

static inline int __type_cmp(const void *key, const struct rb_node *b)
{
	struct intel_uncore_discovery_type *type_b = __node_2_type(b);
	const u16 *type_id = key;

	if (type_b->type > *type_id)
		return -1;
	else if (type_b->type < *type_id)
		return 1;

	return 0;
}

static inline struct intel_uncore_discovery_type *
search_uncore_discovery_type(u16 type_id)
{
	struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);

	return (node) ? __node_2_type(node) : NULL;
}

static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
{
	return (__node_2_type(a)->type < __node_2_type(b)->type);
}

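/*
 * Allocate and insert a type node for a box type seen for the first
 * time. The per-die box control array is sized by __uncore_max_dies.
 */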
static struct intel_uncore_discovery_type *
add_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	if (unit->access_type >= UNCORE_ACCESS_MAX) {
		pr_warn("Unsupported access type %d\n", unit->access_type);
		return NULL;
	}

	type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
	if (!type)
		return NULL;

	type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
	if (!type->box_ctrl_die)
		goto free_type;

	type->access_type = unit->access_type;
	num_discovered_types[type->access_type]++;
	type->type = unit->box_type;

	rb_add(&type->node, &discovery_tables, __type_less);

	return type;

free_type:
	kfree(type);

	return NULL;
}

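/* Look up a box type, creating it on first sight. */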
static struct intel_uncore_discovery_type *
get_uncore_discovery_type(struct uncore_unit_discovery *unit)
{
	struct intel_uncore_discovery_type *type;

	type = search_uncore_discovery_type(unit->box_type);
	if (type)
		return type;

	return add_uncore_discovery_type(unit);
}

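/*
 * Record one unit from a discovery table. On the first pass (!parsed),
 * accumulate per-type generic information (counter geometry, register
 * offsets) plus per-box IDs and control offsets. Once a table has
 * already been parsed, only the first box control address of each
 * additional die still needs to be stored.
 */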
static void
uncore_insert_box_info(struct uncore_unit_discovery *unit,
		       int die, bool parsed)
{
	struct intel_uncore_discovery_type *type;
	unsigned int *box_offset, *ids;
	int i;

	if (WARN_ON_ONCE(!unit->ctl || !unit->ctl_offset || !unit->ctr_offset))
		return;

	if (parsed) {
		type = search_uncore_discovery_type(unit->box_type);
		if (WARN_ON_ONCE(!type))
			return;

		/* Store the first box of each die */
		if (!type->box_ctrl_die[die])
			type->box_ctrl_die[die] = unit->ctl;
		return;
	}

	type = get_uncore_discovery_type(unit);
	if (!type)
		return;

	box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!box_offset)
		return;

	ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
	if (!ids)
		goto free_box_offset;

	/* Store generic information for the first box */
	if (!type->num_boxes) {
		type->box_ctrl = unit->ctl;
		type->box_ctrl_die[die] = unit->ctl;
		type->num_counters = unit->num_regs;
		type->counter_width = unit->bit_width;
		type->ctl_offset = unit->ctl_offset;
		type->ctr_offset = unit->ctr_offset;
		*ids = unit->box_id;
		goto end;
	}

	for (i = 0; i < type->num_boxes; i++) {
		ids[i] = type->ids[i];
		box_offset[i] = type->box_offset[i];

		/* A duplicate box ID within a type indicates a malformed table. */
		if (WARN_ON_ONCE(unit->box_id == ids[i]))
			goto free_ids;
	}
	ids[i] = unit->box_id;
	box_offset[i] = unit->ctl - type->box_ctrl;
	kfree(type->ids);
	kfree(type->box_offset);
end:
	type->ids = ids;
	type->box_offset = box_offset;
	type->num_boxes++;

	return;

free_ids:
	kfree(ids);

free_box_offset:
	kfree(box_offset);
}

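/*
 * Parse one discovery table. The region pointed to by bar_offset holds
 * the global discovery state at offset 0, followed by one unit entry
 * every global.stride * 8 bytes. For example, with a stride of 2
 * (16 bytes), unit 0 is read at offset 16, unit 1 at offset 32, and so
 * on, which is the (i + 1) * (global.stride * 8) term in the loop below.
 */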
static int parse_discovery_table(struct pci_dev *dev, int die,
				 u32 bar_offset, bool *parsed)
{
	struct uncore_global_discovery global;
	struct uncore_unit_discovery unit;
	void __iomem *io_addr;
	resource_size_t addr;
	unsigned long size;
	u32 val;
	int i;

	pci_read_config_dword(dev, bar_offset, &val);

	if (val & UNCORE_DISCOVERY_MASK)
		return -EINVAL;

	addr = (resource_size_t)(val & ~UNCORE_DISCOVERY_MASK);
	size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Read Global Discovery State */
	memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
	if (uncore_discovery_invalid_unit(global)) {
		pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
			global.table1, global.ctl, global.table3);
		iounmap(io_addr);
		return -EINVAL;
	}
	iounmap(io_addr);

	size = (1 + global.max_units) * global.stride * 8;
	io_addr = ioremap(addr, size);
	if (!io_addr)
		return -ENOMEM;

	/* Parsing Unit Discovery State */
	for (i = 0; i < global.max_units; i++) {
		memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
			      sizeof(struct uncore_unit_discovery));

		if (uncore_discovery_invalid_unit(unit))
			continue;

		if (unit.access_type >= UNCORE_ACCESS_MAX)
			continue;

		uncore_insert_box_info(&unit, die, *parsed);
	}

	*parsed = true;
	iounmap(io_addr);

	return 0;
}

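/*
 * Walk all candidate Intel PCI devices, locate PMON discovery DVSEC
 * entries, derive the table's BAR offset from the BIR field, and parse
 * each table. Returns false when none of the discovery tables are
 * available.
 */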
bool intel_uncore_has_discovery_tables(void)
{
	u32 device, val, entry_id, bar_offset;
	int die, dvsec = 0, ret = true;
	struct pci_dev *dev = NULL;
	bool parsed = false;

	if (has_generic_discovery_table())
		device = UNCORE_DISCOVERY_TABLE_DEVICE;
	else
		device = PCI_ANY_ID;

	/*
	 * Start a new search and iterate through the list of
	 * the discovery table devices.
	 */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
		while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
			entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
			if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
				continue;

			pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);

			if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
				ret = false;
				goto err;
			}

			bar_offset = UNCORE_DISCOVERY_BIR_BASE +
				     (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;

			die = get_device_die_id(dev);
			if (die < 0)
				continue;

			parse_discovery_table(dev, die, bar_offset, &parsed);
		}
	}

	/* None of the discovery tables are available */
	if (!parsed)
		ret = false;

err:
	pci_dev_put(dev);

	return ret;
}

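/* Free everything accumulated by intel_uncore_has_discovery_tables(). */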
void intel_uncore_clear_discovery_tables(void)
{
	struct intel_uncore_discovery_type *type, *next;

	rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
		kfree(type->box_ctrl_die);
		kfree(type);
	}
}
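
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * caller such as the uncore driver's init path would be expected to pair
 * the two exported helpers roughly like this:
 *
 *	if (!intel_uncore_has_discovery_tables())
 *		return -ENODEV;
 *	...register uncore PMUs from the discovered types...
 *	intel_uncore_clear_discovery_tables();
 */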