1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC DDRC uncore Hardware event counters support
 *
 * Copyright (C) 2017 Hisilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
11 #include <linux/acpi.h>
12 #include <linux/bug.h>
13 #include <linux/cpuhotplug.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/list.h>
17 #include <linux/platform_device.h>
18 #include <linux/smp.h>
20 #include "hisi_uncore_pmu.h"
22 /* DDRC register definition */
23 #define DDRC_PERF_CTRL 0x010
24 #define DDRC_FLUX_WR 0x380
25 #define DDRC_FLUX_RD 0x384
26 #define DDRC_FLUX_WCMD 0x388
27 #define DDRC_FLUX_RCMD 0x38c
28 #define DDRC_PRE_CMD 0x3c0
29 #define DDRC_ACT_CMD 0x3c4
30 #define DDRC_RNK_CHG 0x3cc
31 #define DDRC_RW_CHG 0x3d0
32 #define DDRC_EVENT_CTRL 0x6C0
33 #define DDRC_INT_MASK 0x6c8
34 #define DDRC_INT_STATUS 0x6cc
35 #define DDRC_INT_CLEAR 0x6d0
37 /* DDRC has 8-counters */
38 #define DDRC_NR_COUNTERS 0x8
39 #define DDRC_PERF_CTRL_EN 0x2
/*
 * For DDRC PMU, there are eight-events and every event has been mapped
 * to fixed-purpose counters which register offset is not consistent.
 * Therefore there is no write event type and we assume that event
 * code (0 to 7) is equal to counter index in PMU driver.
 */
/* Parenthesize the macro argument so expressions like GET_DDRC_EVENTID(&e->hw) expand safely */
#define GET_DDRC_EVENTID(hwc)	((hwc)->config_base & 0x7)
49 static const u32 ddrc_reg_off[] = {
50 DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
51 DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
55 * Select the counter register offset using the counter index.
56 * In DDRC there are no programmable counter, the count
57 * is readed form the statistics counter register itself.
59 static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
61 return ddrc_reg_off[cntr_idx];
64 static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
65 struct hw_perf_event *hwc)
67 /* Use event code as counter index */
68 u32 idx = GET_DDRC_EVENTID(hwc);
70 if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
71 dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
75 return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
78 static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
79 struct hw_perf_event *hwc, u64 val)
81 u32 idx = GET_DDRC_EVENTID(hwc);
83 if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
84 dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
89 ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
93 * For DDRC PMU, event has been mapped to fixed-purpose counter by hardware,
94 * so there is no need to write event type.
96 static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
101 static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
105 /* Set perf_enable in DDRC_PERF_CTRL to start event counting */
106 val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
107 val |= DDRC_PERF_CTRL_EN;
108 writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
111 static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
115 /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
116 val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
117 val &= ~DDRC_PERF_CTRL_EN;
118 writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
121 static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
122 struct hw_perf_event *hwc)
126 /* Set counter index(event code) in DDRC_EVENT_CTRL register */
127 val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
128 val |= (1 << GET_DDRC_EVENTID(hwc));
129 writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
132 static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
133 struct hw_perf_event *hwc)
137 /* Clear counter index(event code) in DDRC_EVENT_CTRL register */
138 val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
139 val &= ~(1 << GET_DDRC_EVENTID(hwc));
140 writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
143 static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
145 struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
146 unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
147 struct hw_perf_event *hwc = &event->hw;
148 /* For DDRC PMU, we use event code as counter index */
149 int idx = GET_DDRC_EVENTID(hwc);
151 if (test_bit(idx, used_mask))
154 set_bit(idx, used_mask);
159 static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
160 struct hw_perf_event *hwc)
164 /* Write 0 to enable interrupt */
165 val = readl(ddrc_pmu->base + DDRC_INT_MASK);
166 val &= ~(1 << GET_DDRC_EVENTID(hwc));
167 writel(val, ddrc_pmu->base + DDRC_INT_MASK);
170 static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
171 struct hw_perf_event *hwc)
175 /* Write 1 to mask interrupt */
176 val = readl(ddrc_pmu->base + DDRC_INT_MASK);
177 val |= (1 << GET_DDRC_EVENTID(hwc));
178 writel(val, ddrc_pmu->base + DDRC_INT_MASK);
181 static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
183 struct hisi_pmu *ddrc_pmu = dev_id;
184 struct perf_event *event;
185 unsigned long overflown;
188 /* Read the DDRC_INT_STATUS register */
189 overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
194 * Find the counter index which overflowed if the bit was set
197 for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
198 /* Write 1 to clear the IRQ status flag */
199 writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);
201 /* Get the corresponding event struct */
202 event = ddrc_pmu->pmu_events.hw_events[idx];
206 hisi_uncore_pmu_event_update(event);
207 hisi_uncore_pmu_set_event_period(event);
213 static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
214 struct platform_device *pdev)
218 /* Read and init IRQ */
219 irq = platform_get_irq(pdev, 0);
221 dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
225 ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
226 IRQF_NOBALANCING | IRQF_NO_THREAD,
227 dev_name(&pdev->dev), ddrc_pmu);
230 "Fail to request IRQ:%d ret:%d\n", irq, ret);
239 static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
243 MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
245 static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
246 struct hisi_pmu *ddrc_pmu)
248 struct resource *res;
251 * Use the SCCL_ID and DDRC channel ID to identify the
252 * DDRC PMU, while SCCL_ID is in MPIDR[aff2].
254 if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
255 &ddrc_pmu->index_id)) {
256 dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
260 if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
261 &ddrc_pmu->sccl_id)) {
262 dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
265 /* DDRC PMUs only share the same SCCL */
266 ddrc_pmu->ccl_id = -1;
268 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
269 ddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res);
270 if (IS_ERR(ddrc_pmu->base)) {
271 dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
272 return PTR_ERR(ddrc_pmu->base);
278 static struct attribute *hisi_ddrc_pmu_format_attr[] = {
279 HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
283 static const struct attribute_group hisi_ddrc_pmu_format_group = {
285 .attrs = hisi_ddrc_pmu_format_attr,
288 static struct attribute *hisi_ddrc_pmu_events_attr[] = {
289 HISI_PMU_EVENT_ATTR(flux_wr, 0x00),
290 HISI_PMU_EVENT_ATTR(flux_rd, 0x01),
291 HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02),
292 HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03),
293 HISI_PMU_EVENT_ATTR(pre_cmd, 0x04),
294 HISI_PMU_EVENT_ATTR(act_cmd, 0x05),
295 HISI_PMU_EVENT_ATTR(rnk_chg, 0x06),
296 HISI_PMU_EVENT_ATTR(rw_chg, 0x07),
300 static const struct attribute_group hisi_ddrc_pmu_events_group = {
302 .attrs = hisi_ddrc_pmu_events_attr,
305 static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
307 static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
308 &dev_attr_cpumask.attr,
312 static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
313 .attrs = hisi_ddrc_pmu_cpumask_attrs,
316 static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
317 &hisi_ddrc_pmu_format_group,
318 &hisi_ddrc_pmu_events_group,
319 &hisi_ddrc_pmu_cpumask_attr_group,
323 static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
324 .write_evtype = hisi_ddrc_pmu_write_evtype,
325 .get_event_idx = hisi_ddrc_pmu_get_event_idx,
326 .start_counters = hisi_ddrc_pmu_start_counters,
327 .stop_counters = hisi_ddrc_pmu_stop_counters,
328 .enable_counter = hisi_ddrc_pmu_enable_counter,
329 .disable_counter = hisi_ddrc_pmu_disable_counter,
330 .enable_counter_int = hisi_ddrc_pmu_enable_counter_int,
331 .disable_counter_int = hisi_ddrc_pmu_disable_counter_int,
332 .write_counter = hisi_ddrc_pmu_write_counter,
333 .read_counter = hisi_ddrc_pmu_read_counter,
336 static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
337 struct hisi_pmu *ddrc_pmu)
341 ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
345 ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
349 ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
350 ddrc_pmu->counter_bits = 32;
351 ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
352 ddrc_pmu->dev = &pdev->dev;
353 ddrc_pmu->on_cpu = -1;
354 ddrc_pmu->check_event = 7;
359 static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
361 struct hisi_pmu *ddrc_pmu;
365 ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
369 platform_set_drvdata(pdev, ddrc_pmu);
371 ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
375 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
378 dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
382 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
383 ddrc_pmu->sccl_id, ddrc_pmu->index_id);
384 ddrc_pmu->pmu = (struct pmu) {
386 .task_ctx_nr = perf_invalid_context,
387 .event_init = hisi_uncore_pmu_event_init,
388 .pmu_enable = hisi_uncore_pmu_enable,
389 .pmu_disable = hisi_uncore_pmu_disable,
390 .add = hisi_uncore_pmu_add,
391 .del = hisi_uncore_pmu_del,
392 .start = hisi_uncore_pmu_start,
393 .stop = hisi_uncore_pmu_stop,
394 .read = hisi_uncore_pmu_read,
395 .attr_groups = hisi_ddrc_pmu_attr_groups,
396 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
399 ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
401 dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
402 cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
409 static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
411 struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
413 perf_pmu_unregister(&ddrc_pmu->pmu);
414 cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
420 static struct platform_driver hisi_ddrc_pmu_driver = {
422 .name = "hisi_ddrc_pmu",
423 .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
425 .probe = hisi_ddrc_pmu_probe,
426 .remove = hisi_ddrc_pmu_remove,
429 static int __init hisi_ddrc_pmu_module_init(void)
433 ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
434 "AP_PERF_ARM_HISI_DDRC_ONLINE",
435 hisi_uncore_pmu_online_cpu,
436 hisi_uncore_pmu_offline_cpu);
438 pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
442 ret = platform_driver_register(&hisi_ddrc_pmu_driver);
444 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
448 module_init(hisi_ddrc_pmu_module_init);
450 static void __exit hisi_ddrc_pmu_module_exit(void)
452 platform_driver_unregister(&hisi_ddrc_pmu_driver);
453 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
456 module_exit(hisi_ddrc_pmu_module_exit);
458 MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
459 MODULE_LICENSE("GPL v2");
460 MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
461 MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");