/*
 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by:  Nicolas Pitre, May 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>
#define RST_HOLD0       0x0
#define RST_HOLD1       0x4
#define SYS_SWRESET     0x8
#define RST_STAT0       0xc
#define RST_STAT1       0x10
#define EAG_CFG_R       0x20
#define EAG_CFG_W       0x24
#define KFC_CFG_R       0x28
#define KFC_CFG_W       0x2c
#define DCS_CFG_R       0x30
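
/*
 * Note: the code below assumes RST_HOLD0/1 carry per-CPU reset controls in
 * bits [3:0] and [7:4] plus a cluster-wide reset in bit 8; see how
 * dcscb_power_up() and dcscb_power_down() manipulate these bits.
 */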

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() while its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void __iomem *dcscb_base;
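
/*
 * Per-CPU usage counts (up to 4 CPUs in each of 2 clusters) and, for each
 * cluster, the mask of all CPUs it contains as derived from DCS_CFG_R.
 */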
static int dcscb_use_count[4][2];
static int dcscb_allcpus_mask[2];

static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
{
        unsigned int rst_hold, cpumask = (1 << cpu);
        unsigned int all_mask = dcscb_allcpus_mask[cluster];

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (cpu >= 4 || cluster >= 2)
                return -EINVAL;

        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
         */
        local_irq_disable();
        arch_spin_lock(&dcscb_lock);

        dcscb_use_count[cpu][cluster]++;
        if (dcscb_use_count[cpu][cluster] == 1) {
                rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
                if (rst_hold & (1 << 8)) {
                        /* remove cluster reset and add individual CPU's reset */
                        rst_hold &= ~(1 << 8);
                        rst_hold |= all_mask;
                }
                rst_hold &= ~(cpumask | (cpumask << 4));
                writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
        } else if (dcscb_use_count[cpu][cluster] != 2) {
                /*
                 * The only possible values are:
                 * 0 = CPU down
                 * 1 = CPU (still) up
                 * 2 = CPU requested to be up before it had a chance
                 *     to actually make itself down.
                 * Any other value is a bug.
                 */
                BUG();
        }

        arch_spin_unlock(&dcscb_lock);
        local_irq_enable();

        return 0;
}

static void dcscb_power_down(void)
{
        unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
        bool last_man = false, skip_wfi = false;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        cpumask = (1 << cpu);
        all_mask = dcscb_allcpus_mask[cluster];

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cpu >= 4 || cluster >= 2);
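
        /* Tell the MCPM state machine this CPU is beginning its power-down. */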
        __mcpm_cpu_going_down(cpu, cluster);

        arch_spin_lock(&dcscb_lock);
        BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
        dcscb_use_count[cpu][cluster]--;
        if (dcscb_use_count[cpu][cluster] == 0) {
                rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
                rst_hold |= cpumask;
                if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
                        /* All CPUs now held in reset: reset the whole cluster. */
                        rst_hold |= (1 << 8);
                        last_man = true;
                }
                writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
        } else if (dcscb_use_count[cpu][cluster] == 1) {
                /*
                 * A power_up request went ahead of us.
                 * Even if we do not want to shut this CPU down,
                 * the caller expects a certain state as if the WFI
                 * was aborted.  So let's continue with cache cleaning.
                 */
                skip_wfi = true;
        } else
                BUG();

        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                arch_spin_unlock(&dcscb_lock);

                /* Flush all cache levels for this cluster. */
                v7_exit_coherency_flush(all);

                /*
                 * A full outer cache flush could be needed at this point
                 * on platforms with such a cache, depending on where the
                 * outer cache sits. In some cases the notion of a "last
                 * cluster standing" would need to be implemented if the
                 * outer cache is shared across clusters. In any case, when
                 * the outer cache needs flushing, there is no concurrent
                 * access to the cache controller to worry about and no
                 * special locking besides what is already provided by the
                 * MCPM state machinery is needed.
                 */

                /*
                 * Disable cluster-level coherency by masking
                 * incoming snoops and DVM messages:
                 */
                cci_disable_port_by_cpu(mpidr);

                __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        } else {
                arch_spin_unlock(&dcscb_lock);

                /* Disable and flush the local CPU cache. */
                v7_exit_coherency_flush(louis);
        }

        __mcpm_cpu_down(cpu, cluster);

        /* Now we are prepared for power-down, do it: */
        dsb();
        if (!skip_wfi)
                wfi();

        /* Not dead at this point?  Let our caller cope. */
}

static const struct mcpm_platform_ops dcscb_power_ops = {
        .power_up       = dcscb_power_up,
        .power_down     = dcscb_power_down,
};
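
/*
 * The CPU running this init code is already up, so its usage count must
 * start at 1; all other CPUs start at 0 (down).
 */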
static void __init dcscb_usage_count_init(void)
{
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cpu >= 4 || cluster >= 2);
        dcscb_use_count[cpu][cluster] = 1;
}

extern void dcscb_power_up_setup(unsigned int affinity_level);

static int __init dcscb_init(void)
{
        struct device_node *node;
        unsigned int cfg;
        int ret;

        if (!cci_probed())
                return -ENODEV;

        node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
        if (!node)
                return -ENODEV;
        dcscb_base = of_iomap(node, 0);
        if (!dcscb_base)
                return -EADDRNOTAVAIL;
        cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
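        /*
         * The four-bit fields at bits [19:16] and [23:20] of DCS_CFG_R give
         * the number of CPUs in cluster 0 and cluster 1 respectively; turn
         * each count into a CPU mask (e.g. a count of 2 yields mask 0x3).
         */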
        dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
        dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
        dcscb_usage_count_init();

        ret = mcpm_platform_register(&dcscb_power_ops);
        if (!ret)
                ret = mcpm_sync_init(dcscb_power_up_setup);
        if (ret) {
                iounmap(dcscb_base);
                return ret;
        }

        pr_info("VExpress DCSCB support installed\n");

        /*
         * Future entries into the kernel can now go
         * through the cluster entry vectors.
         */
        vexpress_flags_set(virt_to_phys(mcpm_entry_point));

        return 0;
}

early_initcall(dcscb_init);