diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 609c24a..5ddc0f3 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* SandyBridge-EP/IvyTown uncore support */
 #include "uncore.h"
+#include "uncore_discovery.h"
 
 /* SNB-EP pci bus to socket mapping */
 #define SNBEP_CPUNODEID                        0x40
 #define ICX_NUMBER_IMC_CHN                     2
 #define ICX_IMC_MEM_STRIDE                     0x4
 
+/* SPR */
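+/* The extended event mask covers the extra umask bits in config:32-55 (umask_ext4). */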
+#define SPR_RAW_EVENT_MASK_EXT                 0xffffff
+
+/* SPR CHA */
+#define SPR_CHA_PMON_CTL_TID_EN                        (1 << 16)
+#define SPR_CHA_PMON_EVENT_MASK                        (SNBEP_PMON_RAW_EVENT_MASK | \
+                                                SPR_CHA_PMON_CTL_TID_EN)
+#define SPR_CHA_PMON_BOX_FILTER_TID            0x3ff
+
+#define SPR_C0_MSR_PMON_BOX_FILTER0            0x200e
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
@@ -466,6 +478,7 @@ DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
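+/* SPR CHA encodes tid_en at config:16 instead of config:19. */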
+DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
@@ -3838,26 +3851,32 @@ clear_attr_update:
        return ret;
 }
 
-static int skx_iio_set_mapping(struct intel_uncore_type *type)
-{
-       return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
-}
-
-static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+static void
+pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
 {
-       struct attribute **attr = skx_iio_mapping_group.attrs;
+       struct attribute **attr = ag->attrs;
 
        if (!attr)
                return;
 
        for (; *attr; attr++)
                kfree((*attr)->name);
-       kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
-       kfree(skx_iio_mapping_group.attrs);
-       skx_iio_mapping_group.attrs = NULL;
+       kfree(attr_to_ext_attr(*ag->attrs));
+       kfree(ag->attrs);
+       ag->attrs = NULL;
        kfree(type->topology);
 }
 
+static int skx_iio_set_mapping(struct intel_uncore_type *type)
+{
+       return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
+}
+
+static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+       pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
+}
+
 static struct intel_uncore_type skx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -4501,6 +4520,11 @@ static int snr_iio_set_mapping(struct intel_uncore_type *type)
        return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
 }
 
+static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+       pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
+}
+
 static struct intel_uncore_type snr_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -4517,7 +4541,7 @@ static struct intel_uncore_type snr_uncore_iio = {
        .attr_update            = snr_iio_attr_update,
        .get_topology           = snr_iio_get_topology,
        .set_mapping            = snr_iio_set_mapping,
-       .cleanup_mapping        = skx_iio_cleanup_mapping,
+       .cleanup_mapping        = snr_iio_cleanup_mapping,
 };
 
 static struct intel_uncore_type snr_uncore_irp = {
@@ -4783,13 +4807,15 @@ int snr_uncore_pci_init(void)
        return 0;
 }
 
-static struct pci_dev *snr_uncore_get_mc_dev(int id)
+#define SNR_MC_DEVICE_ID       0x3451
+
+static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
 {
        struct pci_dev *mc_dev = NULL;
        int pkg;
 
        while (1) {
-               mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
+               mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
                if (!mc_dev)
                        break;
                pkg = uncore_pcibus_to_dieid(mc_dev->bus);
@@ -4799,19 +4825,20 @@ static struct pci_dev *snr_uncore_get_mc_dev(int id)
        return mc_dev;
 }
 
-static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
-                                      unsigned int box_ctl, int mem_offset)
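+/*
+ * Look up the memory controller device, read the IMC MMIO base and the
+ * per-box memory offset from its config space, then ioremap the resulting
+ * address.  Returns 0 on success.
+ */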
+static int snr_uncore_mmio_map(struct intel_uncore_box *box,
+                              unsigned int box_ctl, int mem_offset,
+                              unsigned int device)
 {
-       struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
+       struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
        struct intel_uncore_type *type = box->pmu->type;
        resource_size_t addr;
        u32 pci_dword;
 
        if (!pdev)
-               return;
+               return -ENODEV;
 
        pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
-       addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
+       addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
 
        pci_read_config_dword(pdev, mem_offset, &pci_dword);
        addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
@@ -4821,16 +4848,25 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
        box->io_addr = ioremap(addr, type->mmio_map_size);
        if (!box->io_addr) {
                pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
-               return;
+               return -EINVAL;
        }
 
-       writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
+       return 0;
+}
+
+static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
+                                      unsigned int box_ctl, int mem_offset,
+                                      unsigned int device)
+{
+       if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
+               writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
 }
 
 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
 {
        __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
-                                  SNR_IMC_MMIO_MEM0_OFFSET);
+                                  SNR_IMC_MMIO_MEM0_OFFSET,
+                                  SNR_MC_DEVICE_ID);
 }
 
 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
@@ -5092,6 +5128,11 @@ static int icx_iio_set_mapping(struct intel_uncore_type *type)
        return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
 }
 
+static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+       pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
+}
+
 static struct intel_uncore_type icx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -5109,7 +5150,7 @@ static struct intel_uncore_type icx_uncore_iio = {
        .attr_update            = icx_iio_attr_update,
        .get_topology           = icx_iio_get_topology,
        .set_mapping            = icx_iio_set_mapping,
-       .cleanup_mapping        = skx_iio_cleanup_mapping,
+       .cleanup_mapping        = icx_iio_cleanup_mapping,
 };
 
 static struct intel_uncore_type icx_uncore_irp = {
@@ -5405,7 +5446,8 @@ static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
        int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
                         SNR_IMC_MMIO_MEM0_OFFSET;
 
-       __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
+       __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
+                                  SNR_MC_DEVICE_ID);
 }
 
 static struct intel_uncore_ops icx_uncore_mmio_ops = {
@@ -5475,7 +5517,8 @@ static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
        int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
                         SNR_IMC_MMIO_MEM0_OFFSET;
 
-       __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
+       snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
+                           mem_offset, SNR_MC_DEVICE_ID);
 }
 
 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
@@ -5509,3 +5552,497 @@ void icx_uncore_mmio_init(void)
 }
 
 /* end of ICX uncore support */
+
+/* SPR uncore support */
+
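+/*
+ * CHA events may carry a TID filter in the extra register; program the
+ * filter MSR before enabling the event and clear it when disabling.
+ */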
+static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+       if (reg1->idx != EXTRA_REG_NONE)
+               wrmsrl(reg1->reg, reg1->config);
+
+       wrmsrl(hwc->config_base, hwc->config);
+}
+
+static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
+                                        struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+       if (reg1->idx != EXTRA_REG_NONE)
+               wrmsrl(reg1->reg, 0);
+
+       wrmsrl(hwc->config_base, 0);
+}
+
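+/*
+ * When tid_en is set in the event encoding, route attr.config1 (the TID)
+ * into this CHA's FILTER0 MSR; the MSR address is derived from the box id.
+ */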
+static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+       struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+       bool tid_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
+       struct intel_uncore_type *type = box->pmu->type;
+
+       if (tid_en) {
+               reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
+                           HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
+               reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
+               reg1->idx = 0;
+       }
+
+       return 0;
+}
+
+static struct intel_uncore_ops spr_uncore_chabox_ops = {
+       .init_box               = intel_generic_uncore_msr_init_box,
+       .disable_box            = intel_generic_uncore_msr_disable_box,
+       .enable_box             = intel_generic_uncore_msr_enable_box,
+       .disable_event          = spr_uncore_msr_disable_event,
+       .enable_event           = spr_uncore_msr_enable_event,
+       .read_counter           = uncore_msr_read_counter,
+       .hw_config              = spr_cha_hw_config,
+       .get_constraint         = uncore_get_constraint,
+       .put_constraint         = uncore_put_constraint,
+};
+
+static struct attribute *spr_uncore_cha_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask_ext4.attr,
+       &format_attr_tid_en2.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh8.attr,
+       &format_attr_filter_tid5.attr,
+       NULL,
+};
+static const struct attribute_group spr_uncore_chabox_format_group = {
+       .name = "format",
+       .attrs = spr_uncore_cha_formats_attr,
+};
+
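+/* Expose the PMU's alternate name (from uncore_get_alias_name()) as an "alias" sysfs attribute. */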
+static ssize_t alias_show(struct device *dev,
+                         struct device_attribute *attr,
+                         char *buf)
+{
+       struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
+       char pmu_name[UNCORE_PMU_NAME_LEN];
+
+       uncore_get_alias_name(pmu_name, pmu);
+       return sysfs_emit(buf, "%s\n", pmu_name);
+}
+
+static DEVICE_ATTR_RO(alias);
+
+static struct attribute *uncore_alias_attrs[] = {
+       &dev_attr_alias.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(uncore_alias);
+
+static struct intel_uncore_type spr_uncore_chabox = {
+       .name                   = "cha",
+       .event_mask             = SPR_CHA_PMON_EVENT_MASK,
+       .event_mask_ext         = SPR_RAW_EVENT_MASK_EXT,
+       .num_shared_regs        = 1,
+       .ops                    = &spr_uncore_chabox_ops,
+       .format_group           = &spr_uncore_chabox_format_group,
+       .attr_update            = uncore_alias_groups,
+};
+
+static struct intel_uncore_type spr_uncore_iio = {
+       .name                   = "iio",
+       .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
+       .event_mask_ext         = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
+       .format_group           = &snr_uncore_iio_format_group,
+       .attr_update            = uncore_alias_groups,
+};
+
+static struct attribute *spr_uncore_raw_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask_ext4.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_thresh8.attr,
+       NULL,
+};
+
+static const struct attribute_group spr_uncore_raw_format_group = {
+       .name                   = "format",
+       .attrs                  = spr_uncore_raw_formats_attr,
+};
+
+#define SPR_UNCORE_COMMON_FORMAT()                             \
+       .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,    \
+       .event_mask_ext         = SPR_RAW_EVENT_MASK_EXT,       \
+       .format_group           = &spr_uncore_raw_format_group, \
+       .attr_update            = uncore_alias_groups
+
+static struct intel_uncore_type spr_uncore_irp = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "irp",
+};
+
+static struct intel_uncore_type spr_uncore_m2pcie = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "m2pcie",
+};
+
+static struct intel_uncore_type spr_uncore_pcu = {
+       .name                   = "pcu",
+       .attr_update            = uncore_alias_groups,
+};
+
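+/* The fixed IMC counter needs no event selection; just set the enable bit in its control register. */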
+static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
+                                        struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (!box->io_addr)
+               return;
+
+       if (uncore_pmc_fixed(hwc->idx))
+               writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
+       else
+               writel(hwc->config, box->io_addr + hwc->config_base);
+}
+
+static struct intel_uncore_ops spr_uncore_mmio_ops = {
+       .init_box               = intel_generic_uncore_mmio_init_box,
+       .exit_box               = uncore_mmio_exit_box,
+       .disable_box            = intel_generic_uncore_mmio_disable_box,
+       .enable_box             = intel_generic_uncore_mmio_enable_box,
+       .disable_event          = intel_generic_uncore_mmio_disable_event,
+       .enable_event           = spr_uncore_mmio_enable_event,
+       .read_counter           = uncore_mmio_read_counter,
+};
+
+static struct intel_uncore_type spr_uncore_imc = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "imc",
+       .fixed_ctr_bits         = 48,
+       .fixed_ctr              = SNR_IMC_MMIO_PMON_FIXED_CTR,
+       .fixed_ctl              = SNR_IMC_MMIO_PMON_FIXED_CTL,
+       .ops                    = &spr_uncore_mmio_ops,
+};
+
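+/* PCI-based counters take a 64-bit control value, written as two 32-bit config accesses (high dword first). */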
+static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
+                                       struct perf_event *event)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       struct hw_perf_event *hwc = &event->hw;
+
+       pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
+       pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
+}
+
+static struct intel_uncore_ops spr_uncore_pci_ops = {
+       .init_box               = intel_generic_uncore_pci_init_box,
+       .disable_box            = intel_generic_uncore_pci_disable_box,
+       .enable_box             = intel_generic_uncore_pci_enable_box,
+       .disable_event          = intel_generic_uncore_pci_disable_event,
+       .enable_event           = spr_uncore_pci_enable_event,
+       .read_counter           = intel_generic_uncore_pci_read_counter,
+};
+
+#define SPR_UNCORE_PCI_COMMON_FORMAT()                 \
+       SPR_UNCORE_COMMON_FORMAT(),                     \
+       .ops                    = &spr_uncore_pci_ops
+
+static struct intel_uncore_type spr_uncore_m2m = {
+       SPR_UNCORE_PCI_COMMON_FORMAT(),
+       .name                   = "m2m",
+};
+
+static struct intel_uncore_type spr_uncore_upi = {
+       SPR_UNCORE_PCI_COMMON_FORMAT(),
+       .name                   = "upi",
+};
+
+static struct intel_uncore_type spr_uncore_m3upi = {
+       SPR_UNCORE_PCI_COMMON_FORMAT(),
+       .name                   = "m3upi",
+};
+
+static struct intel_uncore_type spr_uncore_mdf = {
+       SPR_UNCORE_COMMON_FORMAT(),
+       .name                   = "mdf",
+};
+
+#define UNCORE_SPR_NUM_UNCORE_TYPES            12
+#define UNCORE_SPR_IIO                         1
+#define UNCORE_SPR_IMC                         6
+
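+/*
+ * Indexed by the type id from the discovery table; a NULL slot means the
+ * generic (discovered) settings are used unmodified for that type.
+ */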
+static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
+       &spr_uncore_chabox,
+       &spr_uncore_iio,
+       &spr_uncore_irp,
+       &spr_uncore_m2pcie,
+       &spr_uncore_pcu,
+       NULL,
+       &spr_uncore_imc,
+       &spr_uncore_m2m,
+       &spr_uncore_upi,
+       &spr_uncore_m3upi,
+       NULL,
+       &spr_uncore_mdf,
+};
+
+enum perf_uncore_spr_iio_freerunning_type_id {
+       SPR_IIO_MSR_IOCLK,
+       SPR_IIO_MSR_BW_IN,
+       SPR_IIO_MSR_BW_OUT,
+
+       SPR_IIO_FREERUNNING_TYPE_MAX,
+};
+
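+/* { counter base, counter offset, box offset, #counters, counter width } */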
+static struct freerunning_counters spr_iio_freerunning[] = {
+       [SPR_IIO_MSR_IOCLK]     = { 0x340e, 0x1, 0x10, 1, 48 },
+       [SPR_IIO_MSR_BW_IN]     = { 0x3800, 0x1, 0x10, 8, 48 },
+       [SPR_IIO_MSR_BW_OUT]    = { 0x3808, 0x1, 0x10, 8, 48 },
+};
+
+static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
+       /* Free-Running IIO CLOCKS Counter */
+       INTEL_UNCORE_EVENT_DESC(ioclk,                  "event=0xff,umask=0x10"),
+       /* Free-Running IIO BANDWIDTH IN Counters */
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0,            "event=0xff,umask=0x20"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1,            "event=0xff,umask=0x21"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2,            "event=0xff,umask=0x22"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3,            "event=0xff,umask=0x23"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port4,            "event=0xff,umask=0x24"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port5,            "event=0xff,umask=0x25"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port6,            "event=0xff,umask=0x26"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,       "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port7,            "event=0xff,umask=0x27"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,      "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,       "MiB"),
+       /* Free-Running IIO BANDWIDTH OUT Counters */
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0,           "event=0xff,umask=0x30"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1,           "event=0xff,umask=0x31"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2,           "event=0xff,umask=0x32"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3,           "event=0xff,umask=0x33"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port4,           "event=0xff,umask=0x34"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port5,           "event=0xff,umask=0x35"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port6,           "event=0xff,umask=0x36"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,      "MiB"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port7,           "event=0xff,umask=0x37"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,     "3.814697266e-6"),
+       INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,      "MiB"),
+       { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type spr_uncore_iio_free_running = {
+       .name                   = "iio_free_running",
+       .num_counters           = 17,
+       .num_freerunning_types  = SPR_IIO_FREERUNNING_TYPE_MAX,
+       .freerunning            = spr_iio_freerunning,
+       .ops                    = &skx_uncore_iio_freerunning_ops,
+       .event_descs            = spr_uncore_iio_freerunning_events,
+       .format_group           = &skx_uncore_iio_freerunning_format_group,
+};
+
+enum perf_uncore_spr_imc_freerunning_type_id {
+       SPR_IMC_DCLK,
+       SPR_IMC_PQ_CYCLES,
+
+       SPR_IMC_FREERUNNING_TYPE_MAX,
+};
+
+static struct freerunning_counters spr_imc_freerunning[] = {
+       [SPR_IMC_DCLK]          = { 0x22b0, 0x0, 0, 1, 48 },
+       [SPR_IMC_PQ_CYCLES]     = { 0x2318, 0x8, 0, 2, 48 },
+};
+
+static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
+       INTEL_UNCORE_EVENT_DESC(dclk,                   "event=0xff,umask=0x10"),
+
+       INTEL_UNCORE_EVENT_DESC(rpq_cycles,             "event=0xff,umask=0x20"),
+       INTEL_UNCORE_EVENT_DESC(wpq_cycles,             "event=0xff,umask=0x21"),
+       { /* end: all zeroes */ },
+};
+
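+/* PCI device id of the SPR memory controller, used to locate the IMC MMIO base registers. */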
+#define SPR_MC_DEVICE_ID       0x3251
+
+static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+{
+       int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;
+
+       snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
+                           mem_offset, SPR_MC_DEVICE_ID);
+}
+
+static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
+       .init_box       = spr_uncore_imc_freerunning_init_box,
+       .exit_box       = uncore_mmio_exit_box,
+       .read_counter   = uncore_mmio_read_counter,
+       .hw_config      = uncore_freerunning_hw_config,
+};
+
+static struct intel_uncore_type spr_uncore_imc_free_running = {
+       .name                   = "imc_free_running",
+       .num_counters           = 3,
+       .mmio_map_size          = SNR_IMC_MMIO_SIZE,
+       .num_freerunning_types  = SPR_IMC_FREERUNNING_TYPE_MAX,
+       .freerunning            = spr_imc_freerunning,
+       .ops                    = &spr_uncore_imc_freerunning_ops,
+       .event_descs            = spr_uncore_imc_freerunning_events,
+       .format_group           = &skx_uncore_iio_freerunning_format_group,
+};
+
+#define UNCORE_SPR_MSR_EXTRA_UNCORES           1
+#define UNCORE_SPR_MMIO_EXTRA_UNCORES          1
+
+static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
+       &spr_uncore_iio_free_running,
+};
+
+static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
+       &spr_uncore_imc_free_running,
+};
+
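+/*
+ * Overlay the SPR-specific settings above onto a type created from the
+ * discovery table; only the fields set in from_type are overridden.
+ */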
+static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
+                                       struct intel_uncore_type *from_type)
+{
+       if (!to_type || !from_type)
+               return;
+
+       if (from_type->name)
+               to_type->name = from_type->name;
+       if (from_type->fixed_ctr_bits)
+               to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
+       if (from_type->event_mask)
+               to_type->event_mask = from_type->event_mask;
+       if (from_type->event_mask_ext)
+               to_type->event_mask_ext = from_type->event_mask_ext;
+       if (from_type->fixed_ctr)
+               to_type->fixed_ctr = from_type->fixed_ctr;
+       if (from_type->fixed_ctl)
+               to_type->fixed_ctl = from_type->fixed_ctl;
+       if (from_type->num_shared_regs)
+               to_type->num_shared_regs = from_type->num_shared_regs;
+       if (from_type->constraints)
+               to_type->constraints = from_type->constraints;
+       if (from_type->ops)
+               to_type->ops = from_type->ops;
+       if (from_type->event_descs)
+               to_type->event_descs = from_type->event_descs;
+       if (from_type->format_group)
+               to_type->format_group = from_type->format_group;
+       if (from_type->attr_update)
+               to_type->attr_update = from_type->attr_update;
+}
+
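+/*
+ * Build the type list from the discovery table, apply the SPR customizations,
+ * then append the extra free-running types that discovery does not enumerate.
+ */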
+static struct intel_uncore_type **
+uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
+                   struct intel_uncore_type **extra)
+{
+       struct intel_uncore_type **types, **start_types;
+       int i;
+
+       start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);
+
+       /* Only copy the customized features */
+       for (; *types; types++) {
+               if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
+                       continue;
+               uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
+       }
+
+       for (i = 0; i < num_extra; i++, types++)
+               *types = extra[i];
+
+       return start_types;
+}
+
+static struct intel_uncore_type *
+uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
+{
+       for (; *types; types++) {
+               if (type_id == (*types)->type_id)
+                       return *types;
+       }
+
+       return NULL;
+}
+
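+/* Return the highest box id in use plus one, used to size num_boxes for the free-running PMUs. */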
+static int uncore_type_max_boxes(struct intel_uncore_type **types,
+                                int type_id)
+{
+       struct intel_uncore_type *type;
+       int i, max = 0;
+
+       type = uncore_find_type_by_id(types, type_id);
+       if (!type)
+               return 0;
+
+       for (i = 0; i < type->num_boxes; i++) {
+               if (type->box_ids[i] > max)
+                       max = type->box_ids[i];
+       }
+
+       return max + 1;
+}
+
+void spr_uncore_cpu_init(void)
+{
+       uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
+                                               UNCORE_SPR_MSR_EXTRA_UNCORES,
+                                               spr_msr_uncores);
+
+       spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
+}
+
+int spr_uncore_pci_init(void)
+{
+       uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
+       return 0;
+}
+
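+/*
+ * The free-running IMC counters need the bus-to-die mapping built by
+ * snbep_pci2phy_map_init(); fall back to the discovered types alone if the
+ * mapping cannot be created.
+ */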
+void spr_uncore_mmio_init(void)
+{
+       int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
+
+       if (ret)
+               uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
+       else {
+               uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
+                                                        UNCORE_SPR_MMIO_EXTRA_UNCORES,
+                                                        spr_mmio_uncores);
+
+               spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
+       }
+}
+
+/* end of SPR uncore support */