Merge branch 'for-next/perf' into for-next/core
author      Will Deacon <will@kernel.org>
            Mon, 25 Jul 2022 09:57:14 +0000 (10:57 +0100)
committer   Will Deacon <will@kernel.org>
            Mon, 25 Jul 2022 09:57:14 +0000 (10:57 +0100)
* for-next/perf:
  drivers/perf: arm_spe: Fix consistency of SYS_PMSCR_EL1.CX
  perf: RISC-V: Add of_node_put() when breaking out of for_each_of_cpu_node()
  docs: perf: Include hns3-pmu.rst in toctree to fix 'htmldocs' WARNING
  drivers/perf: hisi: add driver for HNS3 PMU
  drivers/perf: hisi: Add description for HNS3 PMU driver
  drivers/perf: riscv_pmu_sbi: perf format
  perf/arm-cci: Use the bitmap API to allocate bitmaps
  drivers/perf: riscv_pmu: Add riscv pmu pm notifier
  perf: hisi: Extract hisi_pmu_init
  perf/marvell_cn10k: Fix TAD PMU register offset
  perf/marvell_cn10k: Remove useless license text when SPDX-License-Identifier is already used
  arm64: cpufeature: Allow different PMU versions in ID_DFR0_EL1
  perf/arm-cci: fix typo in comment
  drivers/perf:Directly use ida_alloc()/free()
  drivers/perf: Directly use ida_alloc()/free()

23 files changed:
Documentation/admin-guide/perf/hns3-pmu.rst [new file with mode: 0644]
Documentation/admin-guide/perf/index.rst
MAINTAINERS
arch/arm64/kernel/cpufeature.c
drivers/perf/arm-cci.c
drivers/perf/arm-ccn.c
drivers/perf/arm_spe_pmu.c
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/hisilicon/Kconfig
drivers/perf/hisilicon/Makefile
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
drivers/perf/hisilicon/hisi_uncore_pmu.c
drivers/perf/hisilicon/hisi_uncore_pmu.h
drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
drivers/perf/hisilicon/hns3_pmu.c [new file with mode: 0644]
drivers/perf/marvell_cn10k_tad_pmu.c
drivers/perf/riscv_pmu.c
drivers/perf/riscv_pmu_sbi.c
include/linux/cpuhotplug.h
include/linux/perf/riscv_pmu.h

diff --git a/Documentation/admin-guide/perf/hns3-pmu.rst b/Documentation/admin-guide/perf/hns3-pmu.rst
new file mode 100644 (file)
index 0000000..578407e
--- /dev/null
@@ -0,0 +1,136 @@
+======================================
+HNS3 Performance Monitoring Unit (PMU)
+======================================
+
+HNS3 (HiSilicon Network System 3) Performance Monitoring Unit (PMU) is an
+End Point device used to collect performance statistics of the HiSilicon SoC
+NIC. On Hip09, each SICL (Super I/O Cluster) has one PMU device.
+
+HNS3 PMU supports collection of performance statistics such as bandwidth,
+latency, packet rate and interrupt rate.
+
+Each HNS3 PMU supports 8 hardware events.
+
+HNS3 PMU driver
+===============
+
+The HNS3 PMU driver registers a perf PMU with the name of its SICL id::
+
+  /sys/devices/hns3_pmu_sicl_<sicl_id>
+
+The PMU driver provides descriptions of the available events, filter modes,
+format, identifier and cpumask in sysfs.
+
+The "events" directory describes the event code of all supported events
+shown in perf list.
+
+The "filtermode" directory describes the supported filter modes of each
+event.
+
+The "format" directory describes all formats of the config (events) and
+config1 (filter options) fields of the perf_event_attr structure.
+
+The "identifier" file shows version of PMU hardware device.
+
+The "bdf_min" and "bdf_max" files show the supported bdf range of each
+pmu device.
+
+The "hw_clk_freq" file shows the hardware clock frequency of each pmu
+device.
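+
+Example usage of checking the identifier, BDF range and clock frequency (the
+values shown here are purely illustrative)::
+
+  $# cat /sys/devices/hns3_pmu_sicl_0/identifier
+  0x101
+  $# cat /sys/devices/hns3_pmu_sicl_0/bdf_min
+  35:00.0
+  $# cat /sys/devices/hns3_pmu_sicl_0/bdf_max
+  35:1f.7
+  $# cat /sys/devices/hns3_pmu_sicl_0/hw_clk_freq
+  300000000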
+
+Example usage of checking event code and subevent code::
+
+  $# cat /sys/devices/hns3_pmu_sicl_0/events/dly_tx_normal_to_mac_time
+  config=0x00204
+  $# cat /sys/devices/hns3_pmu_sicl_0/events/dly_tx_normal_to_mac_packet_num
+  config=0x10204
+
+Each performance statistic has a pair of events whose two values are used to
+calculate the real performance data in userspace.
+
+Bits 0~15 of config (here 0x0204) are the true hardware event code. If two
+events have the same value in bits 0~15 of config, they form an event pair.
+Bit 16 of config selects whether counter 0 or counter 1 of the hardware
+event is read.
+
+After reading the two values of an event pair in userspace, the real
+performance data is calculated as::
+
+  counter 0 / counter 1
+
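+For example, for the bw_ssu_rpu event pair, counter 0 is a byte count and
+counter 1 is a time expressed in hardware clock cycles, so a rough bandwidth
+estimate in bytes per second can be derived as (a sketch only, using the
+"hw_clk_freq" value described above)::
+
+  bandwidth = (counter 0 / counter 1) * hw_clk_freq
+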
+Example usage of checking supported filter mode::
+
+  $# cat /sys/devices/hns3_pmu_sicl_0/filtermode/bw_ssu_rpu_byte_num
+  filter mode supported: global/port/port-tc/func/func-queue/
+
+Example usage of perf::
+
+  $# perf list
+  hns3_pmu_sicl_0/bw_ssu_rpu_byte_num/ [kernel PMU event]
+  hns3_pmu_sicl_0/bw_ssu_rpu_time/     [kernel PMU event]
+  ------------------------------------------
+
+  $# perf stat -g -e hns3_pmu_sicl_0/bw_ssu_rpu_byte_num,global=1/ -e hns3_pmu_sicl_0/bw_ssu_rpu_time,global=1/ -I 1000
+  or
+  $# perf stat -g -e hns3_pmu_sicl_0/config=0x00002,global=1/ -e hns3_pmu_sicl_0/config=0x10002,global=1/ -I 1000
+
+
+Filter modes
+--------------
+
+1. global mode
+The PMU collects performance statistics for all HNS3 PCIe functions of an IO
+DIE. Setting the "global" filter option to 1 enables this mode.
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,global=1/ -I 1000
+
+2. port mode
+The PMU collects performance statistics of one whole physical port. The port
+id is the same as the mac id. The "tc" filter option must be set to 0xF in
+this mode; here tc stands for traffic class.
+
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,port=0,tc=0xF/ -I 1000
+
+3. port-tc mode
+The PMU collects performance statistics of one tc (traffic class) of a
+physical port. The port id is the same as the mac id. The "tc" filter option
+must be set to 0 ~ 7 in this mode.
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,port=0,tc=0/ -I 1000
+
+4. func mode
+The PMU collects performance statistics of one PF/VF. The function id is the
+BDF of the PF/VF; its conversion formula is::
+
+  func = (bus << 8) + (device << 3) + (function)
+
+For example::
+
+  BDF         func
+  35:00.0    0x3500
+  35:00.1    0x3501
+  35:01.0    0x3508
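+
+A minimal shell sketch of the same conversion, using the 35:00.1 entry from
+the table above::
+
+  $# printf "0x%x\n" $(( (0x35 << 8) | (0x00 << 3) | 0x1 ))
+  0x3501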
+
+In this mode, the "queue" filter option must be set to 0xFFFF.
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,bdf=0x3500,queue=0xFFFF/ -I 1000
+
+5. func-queue mode
+The PMU collects performance statistics of one queue of a PF/VF. The
+function id is the BDF of the PF/VF; the "queue" filter option must be set
+to the exact queue id of the function.
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x1020F,bdf=0x3500,queue=0/ -I 1000
+
+6. func-intr mode
+The PMU collects performance statistics of one interrupt of a PF/VF. The
+function id is the BDF of the PF/VF; the "intr" filter option must be set to
+the exact interrupt id of the function.
+Example usage of perf::
+
+  $# perf stat -a -e hns3_pmu_sicl_0/config=0x00301,bdf=0x3500,intr=0/ -I 1000
index 69b23f0..9c9ece8 100644 (file)
@@ -9,6 +9,7 @@ Performance monitor support
 
    hisi-pmu
    hisi-pcie-pmu
+   hns3-pmu
    imx-ddr
    qcom_l2_pmu
    qcom_l3_pmu
index 3cf9842..6fc770e 100644 (file)
@@ -8944,6 +8944,12 @@ F:       Documentation/admin-guide/perf/hisi-pcie-pmu.rst
 F:     Documentation/admin-guide/perf/hisi-pmu.rst
 F:     drivers/perf/hisilicon
 
+HISILICON HNS3 PMU DRIVER
+M:     Guangbin Huang <huangguangbin2@huawei.com>
+S:     Supported
+F:     Documentation/admin-guide/perf/hns3-pmu.rst
+F:     drivers/perf/hisilicon/hns3_pmu.c
+
 HISILICON QM AND ZIP Controller DRIVER
 M:     Zhou Wang <wangzhou1@hisilicon.com>
 L:     linux-crypto@vger.kernel.org
index 8c300fa..abef010 100644 (file)
@@ -562,7 +562,7 @@ static const struct arm64_ftr_bits ftr_id_pfr2[] = {
 
 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
        /* [31:28] TraceFilt */
-       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_PERFMON_SHIFT, 4, 0xf),
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
index 96e09fa..03b1309 100644 (file)
@@ -1139,7 +1139,7 @@ static void cci_pmu_start(struct perf_event *event, int pmu_flags)
 
        /*
         * To handle interrupt latency, we always reprogram the period
-        * regardlesss of PERF_EF_RELOAD.
+        * regardless of PERF_EF_RELOAD.
         */
        if (pmu_flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
@@ -1261,7 +1261,7 @@ static int validate_group(struct perf_event *event)
                 */
                .used_mask = mask,
        };
-       memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
+       bitmap_zero(mask, cci_pmu->num_cntrs);
 
        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;
@@ -1629,10 +1629,9 @@ static struct cci_pmu *cci_pmu_alloc(struct device *dev)
                                             GFP_KERNEL);
        if (!cci_pmu->hw_events.events)
                return ERR_PTR(-ENOMEM);
-       cci_pmu->hw_events.used_mask = devm_kcalloc(dev,
-                                               BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
-                                               sizeof(*cci_pmu->hw_events.used_mask),
-                                               GFP_KERNEL);
+       cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev,
+                                                         CCI_PMU_MAX_HW_CNTRS(model),
+                                                         GFP_KERNEL);
        if (!cci_pmu->hw_events.used_mask)
                return ERR_PTR(-ENOMEM);
 
index 40b352e..728d13d 100644 (file)
@@ -1250,7 +1250,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
        ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
 
        /* Get a convenient /sys/event_source/devices/ name */
-       ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
+       ccn->dt.id = ida_alloc(&arm_ccn_pmu_ida, GFP_KERNEL);
        if (ccn->dt.id == 0) {
                name = "ccn";
        } else {
@@ -1312,7 +1312,7 @@ error_pmu_register:
                                            &ccn->dt.node);
 error_set_affinity:
 error_choose_name:
-       ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+       ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
        for (i = 0; i < ccn->num_xps; i++)
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
        writel(0, ccn->dt.base + CCN_DT_PMCR);
@@ -1329,7 +1329,7 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
                writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
        writel(0, ccn->dt.base + CCN_DT_PMCR);
        perf_pmu_unregister(&ccn->dt.pmu);
-       ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+       ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
 }
 
 static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
index db670b2..b65a7d9 100644 (file)
 #include <asm/mmu.h>
 #include <asm/sysreg.h>
 
+/*
+ * Cache if the event is allowed to trace Context information.
+ * This allows us to perform the check, i.e., perfmon_capable(),
+ * in the context of the event owner, once, during the event_init().
+ */
+#define SPE_PMU_HW_FLAGS_CX                    BIT(0)
+
+static void set_spe_event_has_cx(struct perf_event *event)
+{
+       if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+               event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
+}
+
+static bool get_spe_event_has_cx(struct perf_event *event)
+{
+       return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
+}
+
 #define ARM_SPE_BUF_PAD_BYTE                   0
 
 struct arm_spe_pmu_buf {
@@ -272,7 +290,7 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event)
        if (!attr->exclude_kernel)
                reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
 
-       if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+       if (get_spe_event_has_cx(event))
                reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
 
        return reg;
@@ -709,10 +727,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
            !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
                return -EOPNOTSUPP;
 
+       set_spe_event_has_cx(event);
        reg = arm_spe_event_to_pmscr(event);
        if (!perfmon_capable() &&
            (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
-                   BIT(SYS_PMSCR_EL1_CX_SHIFT) |
                    BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
                return -EACCES;
 
index b1b2a55..8e058e0 100644 (file)
@@ -611,7 +611,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
                .dev = dev,
        };
 
-       pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+       pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
        return pmu->id;
 }
 
@@ -765,7 +765,7 @@ ddr_perf_err:
 cpuhp_instance_err:
        cpuhp_remove_multi_state(pmu->cpuhp_state);
 cpuhp_state_err:
-       ida_simple_remove(&ddr_ida, pmu->id);
+       ida_free(&ddr_ida, pmu->id);
        dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
        return ret;
 }
@@ -779,7 +779,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
 
        perf_pmu_unregister(&pmu->pmu);
 
-       ida_simple_remove(&ddr_ida, pmu->id);
+       ida_free(&ddr_ida, pmu->id);
        return 0;
 }
 
index 5546218..171bfc1 100644 (file)
@@ -14,3 +14,13 @@ config HISI_PCIE_PMU
          RCiEP devices.
          Adds the PCIe PMU into perf events system for monitoring latency,
          bandwidth etc.
+
+config HNS3_PMU
+       tristate "HNS3 PERF PMU"
+       depends on ARM64 || COMPILE_TEST
+       depends on PCI
+       help
+         Provide support for HNS3 performance monitoring unit (PMU) RCiEP
+         devices.
+         Adds the HNS3 PMU into perf events system for monitoring latency,
+         bandwidth etc.
index 6be8351..4d2c9ab 100644 (file)
@@ -4,3 +4,4 @@ obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
                          hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o
 
 obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
+obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o
index 62299ab..50d0c0a 100644 (file)
@@ -516,21 +516,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
                                      "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
                                      ddrc_pmu->index_id);
 
-       ddrc_pmu->pmu = (struct pmu) {
-               .name           = name,
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = ddrc_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
+       hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups, THIS_MODULE);
 
        ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
        if (ret) {
index 3935131..13017b3 100644 (file)
@@ -519,21 +519,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
 
        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
                              hha_pmu->sccl_id, hha_pmu->index_id);
-       hha_pmu->pmu = (struct pmu) {
-               .name           = name,
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = hha_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
+       hisi_pmu_init(&hha_pmu->pmu, name, hha_pmu->pmu_events.attr_groups, THIS_MODULE);
 
        ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
        if (ret) {
index 560ab96..2995f36 100644 (file)
@@ -557,21 +557,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
         */
        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
                              l3c_pmu->sccl_id, l3c_pmu->ccl_id);
-       l3c_pmu->pmu = (struct pmu) {
-               .name           = name,
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = l3c_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
+       hisi_pmu_init(&l3c_pmu->pmu, name, l3c_pmu->pmu_events.attr_groups, THIS_MODULE);
 
        ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
        if (ret) {
index a0ee84d..47d3cc9 100644 (file)
@@ -412,21 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
                return ret;
        }
 
-       pa_pmu->pmu = (struct pmu) {
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = pa_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
-
+       hisi_pmu_init(&pa_pmu->pmu, name, pa_pmu->pmu_events.attr_groups, THIS_MODULE);
        ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
        if (ret) {
                dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
index 980b9ee..fbc8a93 100644 (file)
@@ -531,4 +531,22 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 }
 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
 
+void hisi_pmu_init(struct pmu *pmu, const char *name,
+               const struct attribute_group **attr_groups, struct module *module)
+{
+       pmu->name               = name;
+       pmu->module             = module;
+       pmu->task_ctx_nr        = perf_invalid_context;
+       pmu->event_init         = hisi_uncore_pmu_event_init;
+       pmu->pmu_enable         = hisi_uncore_pmu_enable;
+       pmu->pmu_disable        = hisi_uncore_pmu_disable;
+       pmu->add                = hisi_uncore_pmu_add;
+       pmu->del                = hisi_uncore_pmu_del;
+       pmu->start              = hisi_uncore_pmu_start;
+       pmu->stop               = hisi_uncore_pmu_stop;
+       pmu->read               = hisi_uncore_pmu_read;
+       pmu->attr_groups        = attr_groups;
+}
+EXPORT_SYMBOL_GPL(hisi_pmu_init);
+
 MODULE_LICENSE("GPL v2");
index 96eedda..b59de33 100644 (file)
@@ -121,4 +121,6 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
 int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
                             struct platform_device *pdev);
 
+void hisi_pmu_init(struct pmu *pmu, const char *name,
+               const struct attribute_group **attr_groups, struct module *module);
 #endif /* __HISI_UNCORE_PMU_H__ */
index 6aedc30..b9c79f1 100644 (file)
@@ -445,20 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
                return ret;
        }
 
-       sllc_pmu->pmu = (struct pmu) {
-               .module         = THIS_MODULE,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = hisi_uncore_pmu_event_init,
-               .pmu_enable     = hisi_uncore_pmu_enable,
-               .pmu_disable    = hisi_uncore_pmu_disable,
-               .add            = hisi_uncore_pmu_add,
-               .del            = hisi_uncore_pmu_del,
-               .start          = hisi_uncore_pmu_start,
-               .stop           = hisi_uncore_pmu_stop,
-               .read           = hisi_uncore_pmu_read,
-               .attr_groups    = sllc_pmu->pmu_events.attr_groups,
-               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
-       };
+       hisi_pmu_init(&sllc_pmu->pmu, name, sllc_pmu->pmu_events.attr_groups, THIS_MODULE);
 
        ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
        if (ret) {
diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
new file mode 100644 (file)
index 0000000..e0457d8
--- /dev/null
@@ -0,0 +1,1671 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This driver adds support for the HNS3 PMU iEP device. Related perf events
+ * are bandwidth, latency, packet rate, interrupt rate etc.
+ *
+ * Copyright (C) 2022 HiSilicon Limited
+ */
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-epf.h>
+#include <linux/perf_event.h>
+#include <linux/smp.h>
+
+/* registers offset address */
+#define HNS3_PMU_REG_GLOBAL_CTRL               0x0000
+#define HNS3_PMU_REG_CLOCK_FREQ                        0x0020
+#define HNS3_PMU_REG_BDF                       0x0fe0
+#define HNS3_PMU_REG_VERSION                   0x0fe4
+#define HNS3_PMU_REG_DEVICE_ID                 0x0fe8
+
+#define HNS3_PMU_REG_EVENT_OFFSET              0x1000
+#define HNS3_PMU_REG_EVENT_SIZE                        0x1000
+#define HNS3_PMU_REG_EVENT_CTRL_LOW            0x00
+#define HNS3_PMU_REG_EVENT_CTRL_HIGH           0x04
+#define HNS3_PMU_REG_EVENT_INTR_STATUS         0x08
+#define HNS3_PMU_REG_EVENT_INTR_MASK           0x0c
+#define HNS3_PMU_REG_EVENT_COUNTER             0x10
+#define HNS3_PMU_REG_EVENT_EXT_COUNTER         0x18
+#define HNS3_PMU_REG_EVENT_QID_CTRL            0x28
+#define HNS3_PMU_REG_EVENT_QID_PARA            0x2c
+
+#define HNS3_PMU_FILTER_SUPPORT_GLOBAL         BIT(0)
+#define HNS3_PMU_FILTER_SUPPORT_PORT           BIT(1)
+#define HNS3_PMU_FILTER_SUPPORT_PORT_TC                BIT(2)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC           BIT(3)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE     BIT(4)
+#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR      BIT(5)
+
+#define HNS3_PMU_FILTER_ALL_TC                 0xf
+#define HNS3_PMU_FILTER_ALL_QUEUE              0xffff
+
+#define HNS3_PMU_CTRL_SUBEVENT_S               4
+#define HNS3_PMU_CTRL_FILTER_MODE_S            24
+
+#define HNS3_PMU_GLOBAL_START                  BIT(0)
+
+#define HNS3_PMU_EVENT_STATUS_RESET            BIT(11)
+#define HNS3_PMU_EVENT_EN                      BIT(12)
+#define HNS3_PMU_EVENT_OVERFLOW_RESTART                BIT(15)
+
+#define HNS3_PMU_QID_PARA_FUNC_S               0
+#define HNS3_PMU_QID_PARA_QUEUE_S              16
+
+#define HNS3_PMU_QID_CTRL_REQ_ENABLE           BIT(0)
+#define HNS3_PMU_QID_CTRL_DONE                 BIT(1)
+#define HNS3_PMU_QID_CTRL_MISS                 BIT(2)
+
+#define HNS3_PMU_INTR_MASK_OVERFLOW            BIT(1)
+
+#define HNS3_PMU_MAX_HW_EVENTS                 8
+
+/*
+ * Each hardware event contains two registers (counter and ext_counter) for
+ * bandwidth, packet rate, latency and interrupt rate. These two registers are
+ * triggered to run at the same time when a hardware event is enabled. The
+ * meanings of counter and ext_counter differ between event types, as shown
+ * below:
+ *
+ * +----------------+------------------+---------------+
+ * |   event type   |     counter      |  ext_counter  |
+ * +----------------+------------------+---------------+
+ * | bandwidth      | byte number      | cycle number  |
+ * +----------------+------------------+---------------+
+ * | packet rate    | packet number    | cycle number  |
+ * +----------------+------------------+---------------+
+ * | latency        | cycle number     | packet number |
+ * +----------------+------------------+---------------+
+ * | interrupt rate | interrupt number | cycle number  |
+ * +----------------+------------------+---------------+
+ *
+ * The cycle number indicates the increment of the hardware timer counter; the
+ * frequency of the hardware timer can be read from the hw_clk_freq file.
+ *
+ * Performance of each hardware event is calculated by: counter / ext_counter.
+ *
+ * Since data processing is preferably done in userspace, we expose
+ * ext_counter as a separate event for userspace, using bit 16 to indicate it.
+ * For example, events 0x00001 and 0x10001 are actually one event for hardware
+ * because bits 0-15 are the same. If bit 16 of an event is 0, the counter
+ * register is read; otherwise, the ext_counter register is read.
+ */
+/* bandwidth events */
+#define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM               0x00001
+#define HNS3_PMU_EVT_BW_SSU_EGU_TIME                   0x10001
+#define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM               0x00002
+#define HNS3_PMU_EVT_BW_SSU_RPU_TIME                   0x10002
+#define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM              0x00003
+#define HNS3_PMU_EVT_BW_SSU_ROCE_TIME                  0x10003
+#define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM              0x00004
+#define HNS3_PMU_EVT_BW_ROCE_SSU_TIME                  0x10004
+#define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM               0x00005
+#define HNS3_PMU_EVT_BW_TPU_SSU_TIME                   0x10005
+#define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM             0x00006
+#define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME                 0x10006
+#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM           0x00008
+#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME               0x10008
+#define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM                        0x00009
+#define HNS3_PMU_EVT_BW_WR_FBD_TIME                    0x10009
+#define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM                        0x0000a
+#define HNS3_PMU_EVT_BW_WR_EBD_TIME                    0x1000a
+#define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM                        0x0000b
+#define HNS3_PMU_EVT_BW_RD_FBD_TIME                    0x1000b
+#define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM                        0x0000c
+#define HNS3_PMU_EVT_BW_RD_EBD_TIME                    0x1000c
+#define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM             0x0000d
+#define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME                 0x1000d
+#define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM             0x0000e
+#define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME                 0x1000e
+#define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM             0x0000f
+#define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME                 0x1000f
+#define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM             0x00010
+#define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME                 0x10010
+
+/* packet rate events */
+#define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM            0x00100
+#define HNS3_PMU_EVT_PPS_IGU_SSU_TIME                  0x10100
+#define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM            0x00101
+#define HNS3_PMU_EVT_PPS_SSU_EGU_TIME                  0x10101
+#define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM            0x00102
+#define HNS3_PMU_EVT_PPS_SSU_RPU_TIME                  0x10102
+#define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM           0x00103
+#define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME                 0x10103
+#define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM           0x00104
+#define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME                 0x10104
+#define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM            0x00105
+#define HNS3_PMU_EVT_PPS_TPU_SSU_TIME                  0x10105
+#define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM          0x00106
+#define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME                        0x10106
+#define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM          0x00107
+#define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME                        0x10107
+#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM                0x00108
+#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME              0x10108
+#define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM             0x00109
+#define HNS3_PMU_EVT_PPS_WR_FBD_TIME                   0x10109
+#define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM             0x0010a
+#define HNS3_PMU_EVT_PPS_WR_EBD_TIME                   0x1010a
+#define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM             0x0010b
+#define HNS3_PMU_EVT_PPS_RD_FBD_TIME                   0x1010b
+#define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM             0x0010c
+#define HNS3_PMU_EVT_PPS_RD_EBD_TIME                   0x1010c
+#define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM          0x0010d
+#define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME                        0x1010d
+#define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM          0x0010e
+#define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME                        0x1010e
+#define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM          0x0010f
+#define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME                        0x1010f
+#define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM          0x00110
+#define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME                        0x10110
+#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM      0x00111
+#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME            0x10111
+#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM      0x00112
+#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME            0x10112
+
+/* latency events */
+#define HNS3_PMU_EVT_DLY_TX_PUSH_TIME                  0x00202
+#define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM            0x10202
+#define HNS3_PMU_EVT_DLY_TX_TIME                       0x00204
+#define HNS3_PMU_EVT_DLY_TX_PACKET_NUM                 0x10204
+#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME               0x00206
+#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM         0x10206
+#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME              0x00207
+#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM                0x10207
+#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME               0x00208
+#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM         0x10208
+#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME              0x00209
+#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM                0x10209
+#define HNS3_PMU_EVT_DLY_RPU_TIME                      0x0020e
+#define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM                        0x1020e
+#define HNS3_PMU_EVT_DLY_TPU_TIME                      0x0020f
+#define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM                        0x1020f
+#define HNS3_PMU_EVT_DLY_RPE_TIME                      0x00210
+#define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM                        0x10210
+#define HNS3_PMU_EVT_DLY_TPE_TIME                      0x00211
+#define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM                        0x10211
+#define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME                 0x00212
+#define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM           0x10212
+#define HNS3_PMU_EVT_DLY_WR_FBD_TIME                   0x00213
+#define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM             0x10213
+#define HNS3_PMU_EVT_DLY_WR_EBD_TIME                   0x00214
+#define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM             0x10214
+#define HNS3_PMU_EVT_DLY_RD_FBD_TIME                   0x00215
+#define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM             0x10215
+#define HNS3_PMU_EVT_DLY_RD_EBD_TIME                   0x00216
+#define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM             0x10216
+#define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME                        0x00217
+#define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM          0x10217
+#define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME                        0x00218
+#define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM          0x10218
+#define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME                        0x00219
+#define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM          0x10219
+#define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME                        0x0021a
+#define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM          0x1021a
+#define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME               0x0021c
+#define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM         0x1021c
+
+/* interrupt rate events */
+#define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM             0x00300
+#define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME                 0x10300
+
+/* filter mode supported by each bandwidth event */
+#define HNS3_PMU_FILTER_BW_SSU_EGU             0x07
+#define HNS3_PMU_FILTER_BW_SSU_RPU             0x1f
+#define HNS3_PMU_FILTER_BW_SSU_ROCE            0x0f
+#define HNS3_PMU_FILTER_BW_ROCE_SSU            0x0f
+#define HNS3_PMU_FILTER_BW_TPU_SSU             0x1f
+#define HNS3_PMU_FILTER_BW_RPU_RCBRX           0x11
+#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH         0x11
+#define HNS3_PMU_FILTER_BW_WR_FBD              0x1b
+#define HNS3_PMU_FILTER_BW_WR_EBD              0x11
+#define HNS3_PMU_FILTER_BW_RD_FBD              0x01
+#define HNS3_PMU_FILTER_BW_RD_EBD              0x1b
+#define HNS3_PMU_FILTER_BW_RD_PAY_M0           0x01
+#define HNS3_PMU_FILTER_BW_RD_PAY_M1           0x01
+#define HNS3_PMU_FILTER_BW_WR_PAY_M0           0x01
+#define HNS3_PMU_FILTER_BW_WR_PAY_M1           0x01
+
+/* filter mode supported by each packet rate event */
+#define HNS3_PMU_FILTER_PPS_IGU_SSU            0x07
+#define HNS3_PMU_FILTER_PPS_SSU_EGU            0x07
+#define HNS3_PMU_FILTER_PPS_SSU_RPU            0x1f
+#define HNS3_PMU_FILTER_PPS_SSU_ROCE           0x0f
+#define HNS3_PMU_FILTER_PPS_ROCE_SSU           0x0f
+#define HNS3_PMU_FILTER_PPS_TPU_SSU            0x1f
+#define HNS3_PMU_FILTER_PPS_RPU_RCBRX          0x11
+#define HNS3_PMU_FILTER_PPS_RCBTX_TPU          0x1f
+#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH                0x11
+#define HNS3_PMU_FILTER_PPS_WR_FBD             0x1b
+#define HNS3_PMU_FILTER_PPS_WR_EBD             0x11
+#define HNS3_PMU_FILTER_PPS_RD_FBD             0x01
+#define HNS3_PMU_FILTER_PPS_RD_EBD             0x1b
+#define HNS3_PMU_FILTER_PPS_RD_PAY_M0          0x01
+#define HNS3_PMU_FILTER_PPS_RD_PAY_M1          0x01
+#define HNS3_PMU_FILTER_PPS_WR_PAY_M0          0x01
+#define HNS3_PMU_FILTER_PPS_WR_PAY_M1          0x01
+#define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE      0x01
+#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE      0x01
+
+/* filter mode supported by each latency event */
+#define HNS3_PMU_FILTER_DLY_TX_PUSH            0x01
+#define HNS3_PMU_FILTER_DLY_TX                 0x01
+#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC         0x07
+#define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE                0x07
+#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC         0x07
+#define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE                0x07
+#define HNS3_PMU_FILTER_DLY_RPU                        0x11
+#define HNS3_PMU_FILTER_DLY_TPU                        0x1f
+#define HNS3_PMU_FILTER_DLY_RPE                        0x01
+#define HNS3_PMU_FILTER_DLY_TPE                        0x0b
+#define HNS3_PMU_FILTER_DLY_TPE_PUSH           0x1b
+#define HNS3_PMU_FILTER_DLY_WR_FBD             0x1b
+#define HNS3_PMU_FILTER_DLY_WR_EBD             0x11
+#define HNS3_PMU_FILTER_DLY_RD_FBD             0x01
+#define HNS3_PMU_FILTER_DLY_RD_EBD             0x1b
+#define HNS3_PMU_FILTER_DLY_RD_PAY_M0          0x01
+#define HNS3_PMU_FILTER_DLY_RD_PAY_M1          0x01
+#define HNS3_PMU_FILTER_DLY_WR_PAY_M0          0x01
+#define HNS3_PMU_FILTER_DLY_WR_PAY_M1          0x01
+#define HNS3_PMU_FILTER_DLY_MSIX_WRITE         0x01
+
+/* filter mode supported by each interrupt rate event */
+#define HNS3_PMU_FILTER_INTR_MSIX_NIC          0x01
+
+enum hns3_pmu_hw_filter_mode {
+       HNS3_PMU_HW_FILTER_GLOBAL,
+       HNS3_PMU_HW_FILTER_PORT,
+       HNS3_PMU_HW_FILTER_PORT_TC,
+       HNS3_PMU_HW_FILTER_FUNC,
+       HNS3_PMU_HW_FILTER_FUNC_QUEUE,
+       HNS3_PMU_HW_FILTER_FUNC_INTR,
+};
+
+struct hns3_pmu_event_attr {
+       u32 event;
+       u16 filter_support;
+};
+
+struct hns3_pmu {
+       struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS];
+       struct hlist_node node;
+       struct pci_dev *pdev;
+       struct pmu pmu;
+       void __iomem *base;
+       int irq;
+       int on_cpu;
+       u32 identifier;
+       u32 hw_clk_freq; /* hardware clock frequency of PMU */
+       /* maximum and minimum bdf allowed by PMU */
+       u16 bdf_min;
+       u16 bdf_max;
+};
+
+#define to_hns3_pmu(p)  (container_of((p), struct hns3_pmu, pmu))
+
+#define GET_PCI_DEVFN(bdf)  ((bdf) & 0xff)
+
+#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff)
+#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07))
+#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func))
+
+#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end)               \
+       static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \
+       {                                                                \
+               return FIELD_GET(GENMASK_ULL(_end, _start),              \
+                                event->attr._config);                   \
+       }
+
+HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7);
+HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15);
+HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16);
+HNS3_PMU_FILTER_ATTR(port, config1, 0, 3);
+HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7);
+HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23);
+HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39);
+HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51);
+HNS3_PMU_FILTER_ATTR(global, config1, 52, 52);
+
+#define HNS3_BW_EVT_BYTE_NUM(_name)    (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_BW_##_name##_BYTE_NUM,                             \
+       HNS3_PMU_FILTER_BW_##_name})
+#define HNS3_BW_EVT_TIME(_name)                (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_BW_##_name##_TIME,                                 \
+       HNS3_PMU_FILTER_BW_##_name})
+#define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM,                          \
+       HNS3_PMU_FILTER_PPS_##_name})
+#define HNS3_PPS_EVT_TIME(_name)       (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_PPS_##_name##_TIME,                                \
+       HNS3_PMU_FILTER_PPS_##_name})
+#define HNS3_DLY_EVT_TIME(_name)       (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_DLY_##_name##_TIME,                                \
+       HNS3_PMU_FILTER_DLY_##_name})
+#define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM,                          \
+       HNS3_PMU_FILTER_DLY_##_name})
+#define HNS3_INTR_EVT_INTR_NUM(_name)  (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_PPS_##_name##_INTR_NUM,                            \
+       HNS3_PMU_FILTER_INTR_##_name})
+#define HNS3_INTR_EVT_TIME(_name)      (&(struct hns3_pmu_event_attr) {\
+       HNS3_PMU_EVT_PPS_##_name##_TIME,                                \
+       HNS3_PMU_FILTER_INTR_##_name})
+
+static ssize_t hns3_pmu_format_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct dev_ext_attribute *eattr;
+
+       eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+       return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t hns3_pmu_event_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct hns3_pmu_event_attr *event;
+       struct dev_ext_attribute *eattr;
+
+       eattr = container_of(attr, struct dev_ext_attribute, attr);
+       event = eattr->var;
+
+       return sysfs_emit(buf, "config=0x%x\n", event->event);
+}
+
+static ssize_t hns3_pmu_filter_mode_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct hns3_pmu_event_attr *event;
+       struct dev_ext_attribute *eattr;
+       int len;
+
+       eattr = container_of(attr, struct dev_ext_attribute, attr);
+       event = eattr->var;
+
+       len = sysfs_emit_at(buf, 0, "filter mode supported: ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)
+               len += sysfs_emit_at(buf, len, "global ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)
+               len += sysfs_emit_at(buf, len, "port ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)
+               len += sysfs_emit_at(buf, len, "port-tc ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)
+               len += sysfs_emit_at(buf, len, "func ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)
+               len += sysfs_emit_at(buf, len, "func-queue ");
+       if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)
+               len += sysfs_emit_at(buf, len, "func-intr ");
+
+       len += sysfs_emit_at(buf, len, "\n");
+
+       return len;
+}
+
+#define HNS3_PMU_ATTR(_name, _func, _config)                           \
+       (&((struct dev_ext_attribute[]) {                               \
+               { __ATTR(_name, 0444, _func, NULL), (void *)_config }   \
+       })[0].attr.attr)
+
+#define HNS3_PMU_FORMAT_ATTR(_name, _format) \
+       HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format)
+#define HNS3_PMU_EVENT_ATTR(_name, _event) \
+       HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event)
+#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \
+       HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event)
+
+#define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \
+       HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
+       HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
+#define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \
+       HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
+       HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
+#define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \
+       HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
+       HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
+#define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \
+       HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
+       HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
+
+#define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
+#define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
+#define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
+#define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
+       HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
+
+static u8 hns3_pmu_hw_filter_modes[] = {
+       HNS3_PMU_HW_FILTER_GLOBAL,
+       HNS3_PMU_HW_FILTER_PORT,
+       HNS3_PMU_HW_FILTER_PORT_TC,
+       HNS3_PMU_HW_FILTER_FUNC,
+       HNS3_PMU_HW_FILTER_FUNC_QUEUE,
+       HNS3_PMU_HW_FILTER_FUNC_INTR,
+};
+
+#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \
+       ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)])
+
+static ssize_t identifier_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier);
+}
+static DEVICE_ATTR_RO(identifier);
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu);
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+       u16 bdf = hns3_pmu->bdf_min;
+
+       return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
+                         PCI_SLOT(bdf), PCI_FUNC(bdf));
+}
+static DEVICE_ATTR_RO(bdf_min);
+
+static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+       u16 bdf = hns3_pmu->bdf_max;
+
+       return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
+                         PCI_SLOT(bdf), PCI_FUNC(bdf));
+}
+static DEVICE_ATTR_RO(bdf_max);
+
+static ssize_t hw_clk_freq_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
+
+       return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq);
+}
+static DEVICE_ATTR_RO(hw_clk_freq);
+
+static struct attribute *hns3_pmu_events_attr[] = {
+       /* bandwidth events */
+       HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU),
+       HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU),
+       HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE),
+       HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU),
+       HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU),
+       HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
+       HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
+       HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD),
+       HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD),
+       HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD),
+       HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD),
+       HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1),
+
+       /* packet rate events */
+       HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE),
+       HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
+       HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD),
+       HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1),
+       HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
+       HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
+
+       /* latency events */
+       HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH),
+       HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX),
+       HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
+       HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
+       HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
+       HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU),
+       HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE),
+       HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE),
+       HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH),
+       HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD),
+       HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1),
+       HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE),
+
+       /* interrupt rate events */
+       HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC),
+
+       NULL
+};
+
+static struct attribute *hns3_pmu_filter_mode_attr[] = {
+       /* bandwidth events */
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1),
+
+       /* packet rate events */
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
+       HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
+
+       /* latency events */
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1),
+       HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE),
+
+       /* interrupt rate events */
+       HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC),
+
+       NULL
+};
+
+static struct attribute_group hns3_pmu_events_group = {
+       .name = "events",
+       .attrs = hns3_pmu_events_attr,
+};
+
+static struct attribute_group hns3_pmu_filter_mode_group = {
+       .name = "filtermode",
+       .attrs = hns3_pmu_filter_mode_attr,
+};
+
+static struct attribute *hns3_pmu_format_attr[] = {
+       HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"),
+       HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"),
+       HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"),
+       HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"),
+       HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"),
+       HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"),
+       HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"),
+       HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"),
+       HNS3_PMU_FORMAT_ATTR(global, "config1:52"),
+       NULL
+};
+
+static struct attribute_group hns3_pmu_format_group = {
+       .name = "format",
+       .attrs = hns3_pmu_format_attr,
+};
+
+static struct attribute *hns3_pmu_cpumask_attrs[] = {
+       &dev_attr_cpumask.attr,
+       NULL
+};
+
+static struct attribute_group hns3_pmu_cpumask_attr_group = {
+       .attrs = hns3_pmu_cpumask_attrs,
+};
+
+static struct attribute *hns3_pmu_identifier_attrs[] = {
+       &dev_attr_identifier.attr,
+       NULL
+};
+
+static struct attribute_group hns3_pmu_identifier_attr_group = {
+       .attrs = hns3_pmu_identifier_attrs,
+};
+
+static struct attribute *hns3_pmu_bdf_range_attrs[] = {
+       &dev_attr_bdf_min.attr,
+       &dev_attr_bdf_max.attr,
+       NULL
+};
+
+static struct attribute_group hns3_pmu_bdf_range_attr_group = {
+       .attrs = hns3_pmu_bdf_range_attrs,
+};
+
+static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = {
+       &dev_attr_hw_clk_freq.attr,
+       NULL
+};
+
+static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = {
+       .attrs = hns3_pmu_hw_clk_freq_attrs,
+};
+
+static const struct attribute_group *hns3_pmu_attr_groups[] = {
+       &hns3_pmu_events_group,
+       &hns3_pmu_filter_mode_group,
+       &hns3_pmu_format_group,
+       &hns3_pmu_cpumask_attr_group,
+       &hns3_pmu_identifier_attr_group,
+       &hns3_pmu_bdf_range_attr_group,
+       &hns3_pmu_hw_clk_freq_attr_group,
+       NULL
+};
+
+static u32 hns3_pmu_get_event(struct perf_event *event)
+{
+       return hns3_pmu_get_ext_counter_used(event) << 16 |
+              hns3_pmu_get_event_type(event) << 8 |
+              hns3_pmu_get_subevent(event);
+}
+
+static u32 hns3_pmu_get_real_event(struct perf_event *event)
+{
+       return hns3_pmu_get_event_type(event) << 8 |
+              hns3_pmu_get_subevent(event);
+}
+
+static u32 hns3_pmu_get_offset(u32 offset, u32 idx)
+{
+       return offset + HNS3_PMU_REG_EVENT_OFFSET +
+              HNS3_PMU_REG_EVENT_SIZE * idx;
+}
+
+static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
+{
+       u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+       return readl(hns3_pmu->base + offset);
+}
+
+static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
+                           u32 val)
+{
+       u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+       writel(val, hns3_pmu->base + offset);
+}
+
+static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
+{
+       u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+       return readq(hns3_pmu->base + offset);
+}
+
+static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
+                           u64 val)
+{
+       u32 offset = hns3_pmu_get_offset(reg_offset, idx);
+
+       writeq(val, hns3_pmu->base + offset);
+}
+
+static bool hns3_pmu_cmp_event(struct perf_event *target,
+                              struct perf_event *event)
+{
+       return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event);
+}
+
+static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu,
+                                          struct perf_event *event)
+{
+       struct perf_event *sibling;
+       int hw_event_used = 0;
+       int idx;
+
+       for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
+               sibling = hns3_pmu->hw_events[idx];
+               if (!sibling)
+                       continue;
+
+               hw_event_used++;
+
+               if (!hns3_pmu_cmp_event(sibling, event))
+                       continue;
+
+               /* The related event must be used in the same group */
+               if (sibling->group_leader == event->group_leader)
+                       return idx;
+       }
+
+       /* No related event and all hardware events are used up */
+       if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS)
+               return -EBUSY;
+
+       /* No related event, and there are spare hardware events available */
+       return -ENOENT;
+}
+
+static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu)
+{
+       int idx;
+
+       for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
+               if (!hns3_pmu->hw_events[idx])
+                       return idx;
+       }
+
+       return -EBUSY;
+}
+
+static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf)
+{
+       struct pci_dev *pdev;
+
+       if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) {
+               pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf);
+               return false;
+       }
+
+       pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus),
+                                          PCI_BUS_NUM(bdf),
+                                          GET_PCI_DEVFN(bdf));
+       if (!pdev) {
+               pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf);
+               return false;
+       }
+
+       pci_dev_put(pdev);
+       return true;
+}
+
+static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
+                                 u16 queue)
+{
+       u32 val;
+
+       val = GET_PCI_DEVFN(bdf);
+       val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val);
+}
+
+static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx)
+{
+       bool queue_id_valid = false;
+       u32 reg_qid_ctrl, val;
+       int err;
+
+       /* enable queue id request */
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx,
+                       HNS3_PMU_QID_CTRL_REQ_ENABLE);
+
+       reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx);
+       err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val,
+                                val & HNS3_PMU_QID_CTRL_DONE, 1, 1000);
+       if (err == -ETIMEDOUT) {
+               pci_err(hns3_pmu->pdev, "QID request timeout!\n");
+               goto out;
+       }
+
+       queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS);
+
+out:
+       /* disable qid request and clear status */
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0);
+
+       return queue_id_valid;
+}
+
+static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
+                                u16 queue)
+{
+       hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue);
+
+       return hns3_pmu_qid_req_start(hns3_pmu, idx);
+}
+
+static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event)
+{
+       struct hns3_pmu_event_attr *pmu_event;
+       struct dev_ext_attribute *eattr;
+       struct device_attribute *dattr;
+       struct attribute *attr;
+       u32 i;
+
+       for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) {
+               attr = hns3_pmu_events_attr[i];
+               dattr = container_of(attr, struct device_attribute, attr);
+               eattr = container_of(dattr, struct dev_ext_attribute, attr);
+               pmu_event = eattr->var;
+
+               if (event == pmu_event->event)
+                       return pmu_event;
+       }
+
+       return NULL;
+}
+
+static int hns3_pmu_set_func_mode(struct perf_event *event,
+                                 struct hns3_pmu *hns3_pmu)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
+               return -ENOENT;
+
+       HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC);
+
+       return 0;
+}
+
+static int hns3_pmu_set_func_queue_mode(struct perf_event *event,
+                                       struct hns3_pmu *hns3_pmu)
+{
+       u16 queue_id = hns3_pmu_get_queue(event);
+       struct hw_perf_event *hwc = &event->hw;
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
+               return -ENOENT;
+
+       if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) {
+               pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id);
+               return -ENOENT;
+       }
+
+       HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE);
+
+       return 0;
+}
+
+static bool
+hns3_pmu_is_enabled_global_mode(struct perf_event *event,
+                               struct hns3_pmu_event_attr *pmu_event)
+{
+       u8 global = hns3_pmu_get_global(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL))
+               return false;
+
+       return global;
+}
+
+static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event,
+                                         struct hns3_pmu_event_attr *pmu_event)
+{
+       u16 queue_id = hns3_pmu_get_queue(event);
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC))
+               return false;
+       else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE)
+               return false;
+
+       return bdf;
+}
+
+static bool
+hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event,
+                                   struct hns3_pmu_event_attr *pmu_event)
+{
+       u16 queue_id = hns3_pmu_get_queue(event);
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE))
+               return false;
+       else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE)
+               return false;
+
+       return bdf;
+}
+
+static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event,
+                                         struct hns3_pmu_event_attr *pmu_event)
+{
+       u8 tc_id = hns3_pmu_get_tc(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT))
+               return false;
+
+       return tc_id == HNS3_PMU_FILTER_ALL_TC;
+}
+
+static bool
+hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event,
+                                struct hns3_pmu_event_attr *pmu_event)
+{
+       u8 tc_id = hns3_pmu_get_tc(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC))
+               return false;
+
+       return tc_id != HNS3_PMU_FILTER_ALL_TC;
+}
+
+static bool
+hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event,
+                                  struct hns3_pmu *hns3_pmu,
+                                  struct hns3_pmu_event_attr *pmu_event)
+{
+       u16 bdf = hns3_pmu_get_bdf(event);
+
+       if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR))
+               return false;
+
+       return hns3_pmu_valid_bdf(hns3_pmu, bdf);
+}
+
+static int hns3_pmu_select_filter_mode(struct perf_event *event,
+                                      struct hns3_pmu *hns3_pmu)
+{
+       u32 event_id = hns3_pmu_get_event(event);
+       struct hw_perf_event *hwc = &event->hw;
+       struct hns3_pmu_event_attr *pmu_event;
+
+       pmu_event = hns3_pmu_get_pmu_event(event_id);
+       if (!pmu_event) {
+               pci_err(hns3_pmu->pdev, "Invalid pmu event\n");
+               return -ENOENT;
+       }
+
+       if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) {
+               HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL);
+               return 0;
+       }
+
+       if (hns3_pmu_is_enabled_func_mode(event, pmu_event))
+               return hns3_pmu_set_func_mode(event, hns3_pmu);
+
+       if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event))
+               return hns3_pmu_set_func_queue_mode(event, hns3_pmu);
+
+       if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) {
+               HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT);
+               return 0;
+       }
+
+       if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) {
+               HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC);
+               return 0;
+       }
+
+       if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) {
+               HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR);
+               return 0;
+       }
+
+       return -ENOENT;
+}
+
+static bool hns3_pmu_validate_event_group(struct perf_event *event)
+{
+       struct perf_event *sibling, *leader = event->group_leader;
+       struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS];
+       int counters = 1;
+       int num;
+
+       event_group[0] = leader;
+       if (!is_software_event(leader)) {
+               if (leader->pmu != event->pmu)
+                       return false;
+
+               if (leader != event && !hns3_pmu_cmp_event(leader, event))
+                       event_group[counters++] = event;
+       }
+
+       for_each_sibling_event(sibling, event->group_leader) {
+               if (is_software_event(sibling))
+                       continue;
+
+               if (sibling->pmu != event->pmu)
+                       return false;
+
+               for (num = 0; num < counters; num++) {
+                       if (hns3_pmu_cmp_event(event_group[num], sibling))
+                               break;
+               }
+
+               if (num == counters)
+                       event_group[counters++] = sibling;
+       }
+
+       return counters <= HNS3_PMU_MAX_HW_EVENTS;
+}
+
+static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u16 intr_id = hns3_pmu_get_intr(event);
+       u8 port_id = hns3_pmu_get_port(event);
+       u16 bdf = hns3_pmu_get_bdf(event);
+       u8 tc_id = hns3_pmu_get_tc(event);
+       u8 filter_mode;
+
+       filter_mode = *(u8 *)hwc->addr_filters;
+       switch (filter_mode) {
+       case HNS3_PMU_HW_FILTER_PORT:
+               return FILTER_CONDITION_PORT(port_id);
+       case HNS3_PMU_HW_FILTER_PORT_TC:
+               return FILTER_CONDITION_PORT_TC(port_id, tc_id);
+       case HNS3_PMU_HW_FILTER_FUNC:
+       case HNS3_PMU_HW_FILTER_FUNC_QUEUE:
+               return GET_PCI_DEVFN(bdf);
+       case HNS3_PMU_HW_FILTER_FUNC_INTR:
+               return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id);
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static void hns3_pmu_config_filter(struct perf_event *event)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       u8 event_type = hns3_pmu_get_event_type(event);
+       u8 subevent_id = hns3_pmu_get_subevent(event);
+       u16 queue_id = hns3_pmu_get_queue(event);
+       struct hw_perf_event *hwc = &event->hw;
+       u8 filter_mode = *(u8 *)hwc->addr_filters;
+       u16 bdf = hns3_pmu_get_bdf(event);
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = event_type;
+       val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S;
+       val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S;
+       val |= HNS3_PMU_EVENT_OVERFLOW_RESTART;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+
+       val = hns3_pmu_get_filter_condition(event);
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val);
+
+       if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE)
+               hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id);
+}
+
+static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu,
+                                   struct hw_perf_event *hwc)
+{
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+       val |= HNS3_PMU_EVENT_EN;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+}
+
+static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu,
+                                    struct hw_perf_event *hwc)
+{
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+       val &= ~HNS3_PMU_EVENT_EN;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+}
+
+static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu,
+                                struct hw_perf_event *hwc)
+{
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
+       val &= ~HNS3_PMU_INTR_MASK_OVERFLOW;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
+}
+
+static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu,
+                                 struct hw_perf_event *hwc)
+{
+       u32 idx = hwc->idx;
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
+       val |= HNS3_PMU_INTR_MASK_OVERFLOW;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
+}
+
+static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx)
+{
+       u32 val;
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+       val |= HNS3_PMU_EVENT_STATUS_RESET;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+
+       val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
+       val &= ~HNS3_PMU_EVENT_STATUS_RESET;
+       hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
+}
+
+static u64 hns3_pmu_read_counter(struct perf_event *event)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+
+       return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx);
+}
+
+static void hns3_pmu_write_counter(struct perf_event *event, u64 value)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       u32 idx = event->hw.idx;
+
+       hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value);
+       hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value);
+}
+
+static void hns3_pmu_init_counter(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       local64_set(&hwc->prev_count, 0);
+       hns3_pmu_write_counter(event, 0);
+}
+
+static int hns3_pmu_event_init(struct perf_event *event)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx;
+       int ret;
+
+       if (event->attr.type != event->pmu->type)
+               return -ENOENT;
+
+       /* Sampling is not supported */
+       if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+               return -EOPNOTSUPP;
+
+       event->cpu = hns3_pmu->on_cpu;
+
+       idx = hns3_pmu_get_event_idx(hns3_pmu);
+       if (idx < 0) {
+               pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n",
+                       HNS3_PMU_MAX_HW_EVENTS);
+               return -EBUSY;
+       }
+
+       hwc->idx = idx;
+
+       ret = hns3_pmu_select_filter_mode(event, hns3_pmu);
+       if (ret) {
+               pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret);
+               return ret;
+       }
+
+       if (!hns3_pmu_validate_event_group(event)) {
+               pci_err(hns3_pmu->pdev, "Invalid event group.\n");
+               return -EINVAL;
+       }
+
+       if (hns3_pmu_get_ext_counter_used(event))
+               hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER;
+       else
+               hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER;
+
+       return 0;
+}
+
+static void hns3_pmu_read(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u64 new_cnt, prev_cnt, delta;
+
+       do {
+               prev_cnt = local64_read(&hwc->prev_count);
+               new_cnt = hns3_pmu_read_counter(event);
+       } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) !=
+                prev_cnt);
+
+       delta = new_cnt - prev_cnt;
+       local64_add(delta, &event->count);
+}
+
+static void hns3_pmu_start(struct perf_event *event, int flags)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+               return;
+
+       WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+       hwc->state = 0;
+
+       hns3_pmu_config_filter(event);
+       hns3_pmu_init_counter(event);
+       hns3_pmu_enable_intr(hns3_pmu, hwc);
+       hns3_pmu_enable_counter(hns3_pmu, hwc);
+
+       perf_event_update_userpage(event);
+}
+
+static void hns3_pmu_stop(struct perf_event *event, int flags)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+
+       hns3_pmu_disable_counter(hns3_pmu, hwc);
+       hns3_pmu_disable_intr(hns3_pmu, hwc);
+
+       WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+       hwc->state |= PERF_HES_STOPPED;
+
+       if (hwc->state & PERF_HES_UPTODATE)
+               return;
+
+       /* Read hardware counter and update the perf counter statistics */
+       hns3_pmu_read(event);
+       hwc->state |= PERF_HES_UPTODATE;
+}
+
+static int hns3_pmu_add(struct perf_event *event, int flags)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx;
+
+       hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+       /* Check all working events to find a related event. */
+       idx = hns3_pmu_find_related_event_idx(hns3_pmu, event);
+       if (idx < 0 && idx != -ENOENT)
+               return idx;
+
+       /* The current event shares an enabled hardware event with a related event */
+       if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) {
+               hwc->idx = idx;
+               goto start_count;
+       }
+
+       idx = hns3_pmu_get_event_idx(hns3_pmu);
+       if (idx < 0)
+               return idx;
+
+       hwc->idx = idx;
+       hns3_pmu->hw_events[idx] = event;
+
+start_count:
+       if (flags & PERF_EF_START)
+               hns3_pmu_start(event, PERF_EF_RELOAD);
+
+       return 0;
+}
+
+static void hns3_pmu_del(struct perf_event *event, int flags)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+
+       hns3_pmu_stop(event, PERF_EF_UPDATE);
+       hns3_pmu->hw_events[hwc->idx] = NULL;
+       perf_event_update_userpage(event);
+}
+
+static void hns3_pmu_enable(struct pmu *pmu)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
+       u32 val;
+
+       val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+       val |= HNS3_PMU_GLOBAL_START;
+       writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+}
+
+static void hns3_pmu_disable(struct pmu *pmu)
+{
+       struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
+       u32 val;
+
+       val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+       val &= ~HNS3_PMU_GLOBAL_START;
+       writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
+}
+
+static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+{
+       u16 device_id;
+       char *name;
+       u32 val;
+
+       hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2];
+       if (!hns3_pmu->base) {
+               pci_err(pdev, "ioremap failed\n");
+               return -ENOMEM;
+       }
+
+       hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ);
+
+       val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF);
+       hns3_pmu->bdf_min = val & 0xffff;
+       hns3_pmu->bdf_max = val >> 16;
+
+       val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID);
+       device_id = val & 0xffff;
+       name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id);
+       if (!name)
+               return -ENOMEM;
+
+       hns3_pmu->pdev = pdev;
+       hns3_pmu->on_cpu = -1;
+       hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION);
+       hns3_pmu->pmu = (struct pmu) {
+               .name           = name,
+               .module         = THIS_MODULE,
+               .event_init     = hns3_pmu_event_init,
+               .pmu_enable     = hns3_pmu_enable,
+               .pmu_disable    = hns3_pmu_disable,
+               .add            = hns3_pmu_add,
+               .del            = hns3_pmu_del,
+               .start          = hns3_pmu_start,
+               .stop           = hns3_pmu_stop,
+               .read           = hns3_pmu_read,
+               .task_ctx_nr    = perf_invalid_context,
+               .attr_groups    = hns3_pmu_attr_groups,
+               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       };
+
+       return 0;
+}
+
+static irqreturn_t hns3_pmu_irq(int irq, void *data)
+{
+       struct hns3_pmu *hns3_pmu = data;
+       u32 intr_status, idx;
+
+       for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
+               intr_status = hns3_pmu_readl(hns3_pmu,
+                                            HNS3_PMU_REG_EVENT_INTR_STATUS,
+                                            idx);
+
+               /*
+                * As each counter restarts from 0 when it overflows, no extra
+                * processing is needed; just clear the interrupt status.
+                */
+               if (intr_status)
+                       hns3_pmu_clear_intr_status(hns3_pmu, idx);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+       struct hns3_pmu *hns3_pmu;
+
+       hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
+       if (!hns3_pmu)
+               return -ENODEV;
+
+       if (hns3_pmu->on_cpu == -1) {
+               hns3_pmu->on_cpu = cpu;
+               irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu));
+       }
+
+       return 0;
+}
+
+static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+       struct hns3_pmu *hns3_pmu;
+       unsigned int target;
+
+       hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
+       if (!hns3_pmu)
+               return -ENODEV;
+
+       /* Nothing to do if this CPU doesn't own the PMU */
+       if (hns3_pmu->on_cpu != cpu)
+               return 0;
+
+       /* Choose a new CPU from all online CPUs */
+       target = cpumask_any_but(cpu_online_mask, cpu);
+       if (target >= nr_cpu_ids)
+               return 0;
+
+       perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target);
+       hns3_pmu->on_cpu = target;
+       irq_set_affinity(hns3_pmu->irq, cpumask_of(target));
+
+       return 0;
+}
+
+static void hns3_pmu_free_irq(void *data)
+{
+       struct pci_dev *pdev = data;
+
+       pci_free_irq_vectors(pdev);
+}
+
+static int hns3_pmu_irq_register(struct pci_dev *pdev,
+                                struct hns3_pmu *hns3_pmu)
+{
+       int irq, ret;
+
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+       if (ret < 0) {
+               pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret);
+               return ret;
+       }
+
+       ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
+       if (ret) {
+               pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
+               return ret;
+       }
+
+       irq = pci_irq_vector(pdev, 0);
+       ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0,
+                              hns3_pmu->pmu.name, hns3_pmu);
+       if (ret) {
+               pci_err(pdev, "failed to register irq, ret = %d.\n", ret);
+               return ret;
+       }
+
+       hns3_pmu->irq = irq;
+
+       return 0;
+}
+
+static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+{
+       int ret;
+
+       ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu);
+       if (ret)
+               return ret;
+
+       ret = hns3_pmu_irq_register(pdev, hns3_pmu);
+       if (ret)
+               return ret;
+
+       ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+                                      &hns3_pmu->node);
+       if (ret) {
+               pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret);
+               return ret;
+       }
+
+       ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
+       if (ret) {
+               pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
+               cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+                                           &hns3_pmu->node);
+       }
+
+       return ret;
+}
+
+static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
+{
+       struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
+
+       perf_pmu_unregister(&hns3_pmu->pmu);
+       cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+                                   &hns3_pmu->node);
+}
+
+static int hns3_pmu_init_dev(struct pci_dev *pdev)
+{
+       int ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret) {
+               pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret);
+               return ret;
+       }
+
+       ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu");
+       if (ret < 0) {
+               pci_err(pdev, "failed to request pci region, ret = %d.\n", ret);
+               return ret;
+       }
+
+       pci_set_master(pdev);
+
+       return 0;
+}
+
+static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct hns3_pmu *hns3_pmu;
+       int ret;
+
+       hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL);
+       if (!hns3_pmu)
+               return -ENOMEM;
+
+       ret = hns3_pmu_init_dev(pdev);
+       if (ret)
+               return ret;
+
+       ret = hns3_pmu_init_pmu(pdev, hns3_pmu);
+       if (ret) {
+               pci_clear_master(pdev);
+               return ret;
+       }
+
+       pci_set_drvdata(pdev, hns3_pmu);
+
+       return ret;
+}
+
+static void hns3_pmu_remove(struct pci_dev *pdev)
+{
+       hns3_pmu_uninit_pmu(pdev);
+       pci_clear_master(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+static const struct pci_device_id hns3_pmu_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, hns3_pmu_ids);
+
+static struct pci_driver hns3_pmu_driver = {
+       .name = "hns3_pmu",
+       .id_table = hns3_pmu_ids,
+       .probe = hns3_pmu_probe,
+       .remove = hns3_pmu_remove,
+};
+
+static int __init hns3_pmu_module_init(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+                                     "AP_PERF_ARM_HNS3_PMU_ONLINE",
+                                     hns3_pmu_online_cpu,
+                                     hns3_pmu_offline_cpu);
+       if (ret) {
+               pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret);
+               return ret;
+       }
+
+       ret = pci_register_driver(&hns3_pmu_driver);
+       if (ret) {
+               pr_err("failed to register pci driver, ret = %d.\n", ret);
+               cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
+       }
+
+       return ret;
+}
+module_init(hns3_pmu_module_init);
+
+static void __exit hns3_pmu_module_exit(void)
+{
+       pci_unregister_driver(&hns3_pmu_driver);
+       cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
+}
+module_exit(hns3_pmu_module_exit);
+
+MODULE_DESCRIPTION("HNS3 PMU driver");
+MODULE_LICENSE("GPL v2");
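
For orientation, here is a minimal userspace sketch of how the format fields exported above (config: subevent bits 0-7, event_type bits 8-15, ext_counter_used bit 16; config1: port/tc/bdf/queue/intr/global) could be packed into a perf_event_attr. This is an illustrative assumption, not part of the patch: the PMU type id and the event/subevent codes below are placeholders, and in practice the type is read from /sys/devices/hns3_pmu_sicl_<sicl_id>/type and the event codes from its "events" directory::

  /* Hypothetical example only: open one HNS3 PMU counter via perf_event_open().
   * attr.type and the event/subevent codes are placeholders, not values
   * defined by this driver.
   */
  #include <linux/perf_event.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
          struct perf_event_attr attr = { 0 };
          uint64_t count;
          int fd;

          attr.size = sizeof(attr);
          attr.type = 42;                 /* assumed: contents of .../hns3_pmu_sicl_0/type */
          /* config: subevent (bits 0-7), event_type (bits 8-15), ext_counter_used (bit 16) */
          attr.config = 0x01 | (0x00 << 8) | (0ULL << 16);
          /* config1: global filter (bit 52); other filter fields left at 0 */
          attr.config1 = 1ULL << 52;

          /* uncore PMU: bind to a CPU, not to a task */
          fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }

          sleep(1);
          if (read(fd, &count, sizeof(count)) == sizeof(count))
                  printf("count: %llu\n", (unsigned long long)count);
          close(fd);
          return 0;
  }

In practice perf(1) builds the same attribute from the sysfs "format" and "events" descriptions, so the raw syscall is shown here only to make the bit layout concrete.
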
index 282d3a0..69c3050 100644 (file)
@@ -2,10 +2,6 @@
 /* Marvell CN10K LLC-TAD perf driver
  *
  * Copyright (C) 2021 Marvell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define pr_fmt(fmt) "tad_pmu: " fmt
@@ -18,9 +14,9 @@
 #include <linux/perf_event.h>
 #include <linux/platform_device.h>
 
-#define TAD_PFC_OFFSET         0x0
+#define TAD_PFC_OFFSET         0x800
 #define TAD_PFC(counter)       (TAD_PFC_OFFSET | (counter << 3))
-#define TAD_PRF_OFFSET         0x100
+#define TAD_PRF_OFFSET         0x900
 #define TAD_PRF(counter)       (TAD_PRF_OFFSET | (counter << 3))
 #define TAD_PRF_CNTSEL_MASK    0xFF
 #define TAD_MAX_COUNTERS       8
@@ -100,9 +96,7 @@ static void tad_pmu_event_counter_start(struct perf_event *event, int flags)
         * which sets TAD()_PRF()[CNTSEL] != 0
         */
        for (i = 0; i < tad_pmu->region_cnt; i++) {
-               reg_val = readq_relaxed(tad_pmu->regions[i].base +
-                                       TAD_PRF(counter_idx));
-               reg_val |= (event_idx & 0xFF);
+               reg_val = event_idx & 0xFF;
                writeq_relaxed(reg_val, tad_pmu->regions[i].base +
                               TAD_PRF(counter_idx));
        }
index b2b8d20..2c96183 100644 (file)
@@ -121,7 +121,7 @@ u64 riscv_pmu_event_update(struct perf_event *event)
        return delta;
 }
 
-static void riscv_pmu_stop(struct perf_event *event, int flags)
+void riscv_pmu_stop(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
@@ -175,7 +175,7 @@ int riscv_pmu_event_set_period(struct perf_event *event)
        return overflow;
 }
 
-static void riscv_pmu_start(struct perf_event *event, int flags)
+void riscv_pmu_start(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
index dca3537..79a3de5 100644 (file)
 #include <linux/irqdomain.h>
 #include <linux/of_irq.h>
 #include <linux/of.h>
+#include <linux/cpu_pm.h>
 
 #include <asm/sbi.h>
 #include <asm/hwcap.h>
 
+PMU_FORMAT_ATTR(event, "config:0-47");
+PMU_FORMAT_ATTR(firmware, "config:63");
+
+static struct attribute *riscv_arch_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_firmware.attr,
+       NULL,
+};
+
+static struct attribute_group riscv_pmu_format_group = {
+       .name = "format",
+       .attrs = riscv_arch_formats_attr,
+};
+
+static const struct attribute_group *riscv_pmu_attr_groups[] = {
+       &riscv_pmu_format_group,
+       NULL,
+};
+
 union sbi_pmu_ctr_info {
        unsigned long value;
        struct {
@@ -666,12 +686,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
                child = of_get_compatible_child(cpu, "riscv,cpu-intc");
                if (!child) {
                        pr_err("Failed to find INTC node\n");
+                       of_node_put(cpu);
                        return -ENODEV;
                }
                domain = irq_find_host(child);
                of_node_put(child);
-               if (domain)
+               if (domain) {
+                       of_node_put(cpu);
                        break;
+               }
        }
        if (!domain) {
                pr_err("Failed to find INTC IRQ root domain\n");
@@ -693,6 +716,73 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
        return 0;
 }
 
+#ifdef CONFIG_CPU_PM
+static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+                               void *v)
+{
+       struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
+       int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
+       struct perf_event *event;
+       int idx;
+
+       if (!enabled)
+               return NOTIFY_OK;
+
+       for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
+               event = cpuc->events[idx];
+               if (!event)
+                       continue;
+
+               switch (cmd) {
+               case CPU_PM_ENTER:
+                       /*
+                        * Stop and update the counter
+                        */
+                       riscv_pmu_stop(event, PERF_EF_UPDATE);
+                       break;
+               case CPU_PM_EXIT:
+               case CPU_PM_ENTER_FAILED:
+                       /*
+                        * Restore and enable the counter.
+                        *
+                        * This relies on RCU read locking being functional,
+                        * so wrap the call in RCU_NONIDLE() to make the RCU
+                        * subsystem aware that this CPU is not idle from an
+                        * RCU perspective for the duration of the
+                        * riscv_pmu_start() call.
+                        */
+                       RCU_NONIDLE(riscv_pmu_start(event, PERF_EF_RELOAD));
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return NOTIFY_OK;
+}
+
+static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
+{
+       pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
+       return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
+}
+
+static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
+{
+       cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
+}
+#else
+static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
+static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
+#endif
+
+static void riscv_pmu_destroy(struct riscv_pmu *pmu)
+{
+       riscv_pm_pmu_unregister(pmu);
+       cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
+}
+
 static int pmu_sbi_device_probe(struct platform_device *pdev)
 {
        struct riscv_pmu *pmu = NULL;
@@ -720,6 +810,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
        }
+       pmu->pmu.attr_groups = riscv_pmu_attr_groups;
        pmu->num_counters = num_counters;
        pmu->ctr_start = pmu_sbi_ctr_start;
        pmu->ctr_stop = pmu_sbi_ctr_stop;
@@ -733,14 +824,19 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       ret = riscv_pm_pmu_register(pmu);
+       if (ret)
+               goto out_unregister;
+
        ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
-       if (ret) {
-               cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
-               return ret;
-       }
+       if (ret)
+               goto out_unregister;
 
        return 0;
 
+out_unregister:
+       riscv_pmu_destroy(pmu);
+
 out_free:
        kfree(pmu);
        return ret;
index 19f0dbf..3e99fb4 100644 (file)
@@ -230,6 +230,7 @@ enum cpuhp_state {
        CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
        CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
+       CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
        CPUHP_AP_PERF_ARM_L2X0_ONLINE,
        CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
        CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
index 46f9b6f..bf66fe0 100644 (file)
@@ -56,9 +56,13 @@ struct riscv_pmu {
 
        struct cpu_hw_events    __percpu *hw_events;
        struct hlist_node       node;
+       struct notifier_block   riscv_pm_nb;
 };
 
 #define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))
+
+void riscv_pmu_start(struct perf_event *event, int flags);
+void riscv_pmu_stop(struct perf_event *event, int flags);
 unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
 int riscv_pmu_event_set_period(struct perf_event *event);
 uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);