perf: RISC-V: Introduce Andes PMU to support perf event sampling
[linux-2.6-microblaze.git] / drivers / perf / riscv_pmu_sbi.c
index 2edbc37..bbd6fe0 100644 (file)
 #include <linux/of.h>
 #include <linux/cpu_pm.h>
 #include <linux/sched/clock.h>
+#include <linux/soc/andes/irq.h>
 
 #include <asm/errata_list.h>
 #include <asm/sbi.h>
 #include <asm/cpufeature.h>
 
+/*
+ * Read this hart's counter-overflow status CSR into __ovl.  The CSR name
+ * is patched at boot via ALTERNATIVE_2:
+ *   - default:        the standard Sscofpmf CSR_SSCOUNTOVF
+ *   - T-Head C9xx:    THEAD_C9XX_CSR_SCOUNTEROF (ERRATA_THEAD_PMU errata)
+ *   - Andes XAndesPMU: ANDES_CSR_SCOUNTEROF (CONFIG_ANDES_CUSTOM_PMU)
+ * The "memory" clobber keeps the read ordered against surrounding code.
+ */
+#define ALT_SBI_PMU_OVERFLOW(__ovl)                                    \
+asm volatile(ALTERNATIVE_2(                                            \
+       "csrr %0, " __stringify(CSR_SSCOUNTOVF),                        \
+       "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF),             \
+               THEAD_VENDOR_ID, ERRATA_THEAD_PMU,                      \
+               CONFIG_ERRATA_THEAD_PMU,                                \
+       "csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF),                  \
+               0, RISCV_ISA_EXT_XANDESPMU,                             \
+               CONFIG_ANDES_CUSTOM_PMU)                                \
+       : "=r" (__ovl) :                                                \
+       : "memory")
+
+/*
+ * Clear the pending PMU-overflow interrupt bit(s) given by __irq_mask.
+ * ALTERNATIVE-patched at boot: writes the standard CSR_IP by default, or
+ * the Andes vendor ANDES_CSR_SLIP when the XAndesPMU extension is present
+ * (CONFIG_ANDES_CUSTOM_PMU).  Callers pass riscv_pmu_irq_mask, i.e.
+ * BIT(riscv_pmu_irq_num % BITS_PER_LONG).
+ */
+#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask)                      \
+asm volatile(ALTERNATIVE(                                              \
+       "csrc " __stringify(CSR_IP) ", %0\n\t",                         \
+       "csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t",                 \
+               0, RISCV_ISA_EXT_XANDESPMU,                             \
+               CONFIG_ANDES_CUSTOM_PMU)                                \
+       : : "r"(__irq_mask)                                             \
+       : "memory")
+
 #define SYSCTL_NO_USER_ACCESS  0
 #define SYSCTL_USER_ACCESS     1
 #define SYSCTL_LEGACY          2
@@ -61,6 +83,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
 static union sbi_pmu_ctr_info *pmu_ctr_list;
 static bool riscv_pmu_use_irq;
 static unsigned int riscv_pmu_irq_num;
+static unsigned int riscv_pmu_irq_mask;
 static unsigned int riscv_pmu_irq;
 
 /* Cache the available counters in a bitmask */
@@ -694,7 +717,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 
        event = cpu_hw_evt->events[fidx];
        if (!event) {
-               csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+               ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
                return IRQ_NONE;
        }
 
@@ -708,7 +731,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
         * Overflow interrupt pending bit should only be cleared after stopping
         * all the counters to avoid any race condition.
         */
-       csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+       ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
 
        /* No overflow bit is set */
        if (!overflow)
@@ -780,7 +803,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
 
        if (riscv_pmu_use_irq) {
                cpu_hw_evt->irq = riscv_pmu_irq;
-               csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
+               ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
                enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
        }
 
@@ -814,8 +837,14 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
                   riscv_cached_mimpid(0) == 0) {
                riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
                riscv_pmu_use_irq = true;
+       } else if (riscv_isa_extension_available(NULL, XANDESPMU) &&
+                  IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
+               riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI;
+               riscv_pmu_use_irq = true;
        }
 
+       riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG);
+
        if (!riscv_pmu_use_irq)
                return -EOPNOTSUPP;