Merge branch 'for-3.9' of git://linux-nfs.org/~bfields/linux
[linux-2.6-microblaze.git] arch/arm/mach-mvebu/irq-armada-370-xp.c
index 5f5f939..274ff58 100644
@@ -24,6 +24,8 @@
 #include <linux/irqdomain.h>
 #include <asm/mach/arch.h>
 #include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/hardware/cache-l2x0.h>
 
 /* Interrupt Controller Registers Map */
 #define ARMADA_370_XP_INT_SET_MASK_OFFS                (0x48)
 #define ARMADA_370_XP_INT_CONTROL              (0x00)
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS      (0x30)
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS    (0x34)
+#define ARMADA_370_XP_INT_SOURCE_CTL(irq)      (0x100 + (irq) * 4)
 
 #define ARMADA_370_XP_CPU_INTACK_OFFS          (0x44)
 
+#define ARMADA_370_XP_SW_TRIG_INT_OFFS           (0x4)
+#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS          (0xc)
+#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS        (0x8)
+
+#define ARMADA_370_XP_MAX_PER_CPU_IRQS         (28)
+
+#define ACTIVE_DOORBELLS                       (8)
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
 static void __iomem *per_cpu_int_base;
 static void __iomem *main_int_base;
 static struct irq_domain *armada_370_xp_mpic_domain;
 
+/*
+ * In SMP mode:
+ * For shared global interrupts, mask/unmask the global enable bit.
+ * For per-CPU interrupts, mask/unmask the calling CPU's bit.
+ */
 static void armada_370_xp_irq_mask(struct irq_data *d)
 {
+#ifdef CONFIG_SMP
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+       if (hwirq >= ARMADA_370_XP_MAX_PER_CPU_IRQS)
+               writel(hwirq, main_int_base +
+                               ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+       else
+               writel(hwirq, per_cpu_int_base +
+                               ARMADA_370_XP_INT_SET_MASK_OFFS);
+#else
        writel(irqd_to_hwirq(d),
               per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+#endif
 }
 
 static void armada_370_xp_irq_unmask(struct irq_data *d)
 {
+#ifdef CONFIG_SMP
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+       if (hwirq >= ARMADA_370_XP_MAX_PER_CPU_IRQS)
+               writel(hwirq, main_int_base +
+                               ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+       else
+               writel(hwirq, per_cpu_int_base +
+                               ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#else
        writel(irqd_to_hwirq(d),
               per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#endif
+}
+
+#ifdef CONFIG_SMP
+static int armada_xp_set_affinity(struct irq_data *d,
+                                 const struct cpumask *mask_val, bool force)
+{
+       unsigned long reg;
+       unsigned long new_mask = 0;
+       unsigned long online_mask = 0;
+       unsigned long count = 0;
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
+       int cpu;
+
+       for_each_cpu(cpu, mask_val) {
+               new_mask |= 1 << cpu_logical_map(cpu);
+               count++;
+       }
+
+       /*
+        * Forbid multi-CPU interrupt affinity.
+        * This is required since the MPIC HW doesn't prevent
+        * several CPUs from acknowledging the same interrupt.
+        */
+       if (count > 1)
+               return -EINVAL;
+
+       for_each_cpu(cpu, cpu_online_mask)
+               online_mask |= 1 << cpu_logical_map(cpu);
+
+       raw_spin_lock(&irq_controller_lock);
+
+       reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+       reg = (reg & (~online_mask)) | new_mask;
+       writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+
+       raw_spin_unlock(&irq_controller_lock);
+
+       return 0;
 }
+#endif
 
 static struct irq_chip armada_370_xp_irq_chip = {
        .name           = "armada_370_xp_irq",
        .irq_mask       = armada_370_xp_irq_mask,
        .irq_mask_ack   = armada_370_xp_irq_mask,
        .irq_unmask     = armada_370_xp_irq_unmask,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = armada_xp_set_affinity,
+#endif
 };
 
 static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
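Because the MPIC hardware cannot stop several CPUs from acknowledging the same
interrupt, armada_xp_set_affinity() rejects any mask with more than one CPU
set. A caller that wants to move an interrupt therefore has to pass a
single-CPU mask; a minimal sketch using the generic kernel API (the helper
name and target CPU are placeholders, not part of this driver):

	/*
	 * Illustrative only: pin an interrupt to CPU 1.  A mask with more
	 * than one CPU set would make armada_xp_set_affinity() return -EINVAL.
	 */
	static void example_pin_irq_to_cpu1(unsigned int virq)
	{
		int err = irq_set_affinity(virq, cpumask_of(1));

		if (err)
			pr_warn("cannot move IRQ %u to CPU 1: %d\n", virq, err);
	}
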
@@ -63,15 +145,57 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 {
        armada_370_xp_irq_mask(irq_get_irq_data(virq));
        writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
-
-       irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
-                                handle_level_irq);
        irq_set_status_flags(virq, IRQ_LEVEL);
+
+       if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) {
+               irq_set_percpu_devid(virq);
+               irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+                                       handle_percpu_devid_irq);
+
+       } else {
+               irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+                                       handle_level_irq);
+       }
        set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
 
        return 0;
 }
 
+#ifdef CONFIG_SMP
+void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
+{
+       int cpu;
+       unsigned long map = 0;
+
+       /* Convert our logical CPU mask into a physical one. */
+       for_each_cpu(cpu, mask)
+               map |= 1 << cpu_logical_map(cpu);
+
+       /*
+        * Ensure that stores to Normal memory are visible to the
+        * other CPUs before issuing the IPI.
+        */
+       dsb();
+
+       /* Trigger the software (doorbell) interrupt on the target CPUs */
+       writel((map << 8) | irq, main_int_base +
+               ARMADA_370_XP_SW_TRIG_INT_OFFS);
+}
+
+void armada_xp_mpic_smp_cpu_init(void)
+{
+       /* Clear pending IPIs */
+       writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+       /* Enable first 8 IPIs */
+       writel((1 << ACTIVE_DOORBELLS) - 1, per_cpu_int_base +
+               ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+       /* Unmask IPI interrupt */
+       writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+#endif /* CONFIG_SMP */
+
 static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
        .map = armada_370_xp_mpic_irq_map,
        .xlate = irq_domain_xlate_onecell,
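The two SMP helpers above are not called from this file; they are meant to be
used by the mach-mvebu platform SMP code. A rough sketch of that wiring,
assuming function names along the lines of the companion platsmp.c (which is
not part of this diff):

	/* Boot CPU: route arch IPIs through the MPIC doorbell registers. */
	void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
	{
		set_smp_cross_call(armada_mpic_send_doorbell);
	}

	/* Each secondary CPU clears and unmasks its own doorbells. */
	static void __cpuinit armada_xp_secondary_init(unsigned int cpu)
	{
		armada_xp_mpic_smp_cpu_init();
	}
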
@@ -91,13 +215,27 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
        control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
 
        armada_370_xp_mpic_domain =
-           irq_domain_add_linear(node, (control >> 2) & 0x3ff,
-                                 &armada_370_xp_mpic_irq_ops, NULL);
+               irq_domain_add_linear(node, (control >> 2) & 0x3ff,
+                               &armada_370_xp_mpic_irq_ops, NULL);
 
        if (!armada_370_xp_mpic_domain)
                panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
 
        irq_set_default_host(armada_370_xp_mpic_domain);
+
+#ifdef CONFIG_SMP
+       armada_xp_mpic_smp_cpu_init();
+
+       /*
+        * Narrow the default affinity from all CPUs down to the boot
+        * CPU only. This is required since the MPIC doesn't prevent
+        * several CPUs from acknowledging the same interrupt.
+        */
+       cpumask_clear(irq_default_affinity);
+       cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+
+#endif
+
        return 0;
 }
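Note that irq_default_affinity only seeds the initial routing: a driver or
userspace (via /proc/irq/<n>/smp_affinity) can still retarget an interrupt
later, and any such change goes through armada_xp_set_affinity(), which keeps
enforcing the single-CPU restriction.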
 
@@ -111,14 +249,36 @@ asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs
                                        ARMADA_370_XP_CPU_INTACK_OFFS);
                irqnr = irqstat & 0x3FF;
 
-               if (irqnr < 1023) {
-                       irqnr =
-                           irq_find_mapping(armada_370_xp_mpic_domain, irqnr);
+               if (irqnr > 1022)
+                       break;
+
+               if (irqnr > 0) {
+                       irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
+                                       irqnr);
                        handle_IRQ(irqnr, regs);
                        continue;
                }
+#ifdef CONFIG_SMP
+               /* IPI Handling */
+               if (irqnr == 0) {
+                       u32 ipimask, ipinr;
+
+                       ipimask = readl_relaxed(per_cpu_int_base +
+                                               ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+                               & 0xFF;
+
+                       writel(0x0, per_cpu_int_base +
+                               ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+                       /* Handle all pending doorbells */
+                       for (ipinr = 0; ipinr < ACTIVE_DOORBELLS; ipinr++) {
+                               if (ipimask & (0x1 << ipinr))
+                                       handle_IPI(ipinr, regs);
+                       }
+                       continue;
+               }
+#endif
 
-               break;
        } while (1);
 }
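For reference, the doorbell encoding works as follows: in the value written to
ARMADA_370_XP_SW_TRIG_INT_OFFS, bits [7:0] select the doorbell (the Linux IPI
number) and the bits above them form the physical-CPU target map. Sending IPI 2
to physical CPUs 1 and 3, for example, writes (0xa << 8) | 2 = 0xa02; each
targeted CPU then finds bit 2 set in its doorbell cause register, and the
handler loop above calls handle_IPI(2, regs).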
 
@@ -130,4 +290,7 @@ static const struct of_device_id mpic_of_match[] __initconst = {
 void __init armada_370_xp_init_irq(void)
 {
        of_irq_init(mpic_of_match);
+#ifdef CONFIG_CACHE_L2X0
+       l2x0_of_init(0, ~0UL);
+#endif
 }
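The l2x0_of_init(0, ~0UL) call probes the L2 cache controller described in the
device tree without overriding anything: an aux_val of 0 sets no extra bits and
an aux_mask of ~0UL preserves the existing auxiliary control settings.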