/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}
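/*
 * Worked example (illustrative values, not from the original source):
 * on an SMT system, an MPIDR_EL1 with Aff0/1/2/3 = 1/3/2/0 decodes as
 * thread_id = 1, core_id = 3 and, since each affinity field is 8 bits
 * wide, package_id = 2 | (0 << 8) = 2. The shifts above simply pack
 * the higher affinity levels into a single package identifier.
 */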
#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * if the PPTT doesn't have thread information, assume a homogeneous
	 * machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}
/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id   = topology_id;
		} else {
			cpu_topology[cpu].thread_id  = -1;
			cpu_topology[cpu].core_id    = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#endif
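/*
 * Illustrative (hypothetical) result of the parse above: on a
 * non-threaded, two-package machine, CPU0 would end up with
 * thread_id = -1, core_id and package_id set to the identifiers
 * returned for its PPTT core and package nodes, and llc_id referring
 * to the last (shared) cache level. These IDs are opaque identifiers
 * taken from the PPTT, so they are only comparable with each other,
 * not dense zero-based indices.
 */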
#ifdef CONFIG_ARM64_AMU_EXTN

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;
/* Initialize counter reference per-cpu variables for the current CPU */
void init_cpu_freq_invariance_counters(void)
{
	this_cpu_write(arch_core_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
	this_cpu_write(arch_const_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
}
static int validate_cpu_freq_invariance_counters(int cpu)
{
	u64 max_freq_hz, ratio;

	if (!cpu_has_amu_feat(cpu)) {
		pr_debug("CPU%d: counters are not supported.\n", cpu);
		return -EINVAL;
	}

	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
		     !per_cpu(arch_core_cycles_prev, cpu))) {
		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
		return -EINVAL;
	}

	/* Convert maximum frequency from KHz to Hz and validate */
	max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
	if (unlikely(!max_freq_hz)) {
		pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
		return -EINVAL;
	}

	/*
	 * Pre-compute the fixed ratio between the frequency of the constant
	 * counter and the maximum frequency of the CPU.
	 *
	 *			      const_freq
	 * arch_max_freq_scale =   ---------------- * SCHED_CAPACITY_SCALE²
	 *			   cpuinfo_max_freq
	 *
	 * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
	 * in order to ensure a good resolution for arch_max_freq_scale for
	 * very low arch timer frequencies (down to the KHz range which should
	 * be unlikely).
	 */
	ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
	ratio = div64_u64(ratio, max_freq_hz);
	if (!ratio) {
		WARN_ONCE(1, "System timer frequency too low.\n");
		return -EINVAL;
	}

	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

	return 0;
}
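/*
 * Worked example (illustrative numbers, not from the original source):
 * with a 100 MHz constant (arch timer) counter and a 2 GHz maximum CPU
 * frequency, SCHED_CAPACITY_SHIFT = 10 gives
 *
 *   ratio = (100000000 << 20) / 2000000000 ~= 52428
 *
 * i.e. arch_max_freq_scale ~= 0.05 * SCHED_CAPACITY_SCALE², which
 * topology_scale_freq_tick() later multiplies by the observed
 * core/const cycle ratio to recover a scale value relative to
 * SCHED_CAPACITY_SCALE.
 */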
static inline bool
enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy) {
		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
		return false;
	}

	if (cpumask_subset(policy->related_cpus, valid_cpus))
		cpumask_or(amu_fie_cpus, policy->related_cpus,
			   amu_fie_cpus);

	cpufreq_cpu_put(policy);

	return true;
}
static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
static int __init init_amu_fie(void)
{
	cpumask_var_t valid_cpus;
	bool have_policy = false;
	int ret = 0;
	int cpu;

	if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_valid_mask;
	}

	for_each_present_cpu(cpu) {
		if (validate_cpu_freq_invariance_counters(cpu))
			continue;
		cpumask_set_cpu(cpu, valid_cpus);
		have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
	}

	/*
	 * If we are not restricted by cpufreq policies, we only enable
	 * the use of the AMU feature for FIE if all CPUs support AMU.
	 * Otherwise, enable_policy_freq_counters has already enabled
	 * policy cpus.
	 */
	if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask))
		cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus);

	if (!cpumask_empty(amu_fie_cpus)) {
		pr_info("CPUs[%*pbl]: counters will be used for FIE.",
			cpumask_pr_args(amu_fie_cpus));
		static_branch_enable(&amu_fie_key);
	}

free_valid_mask:
	free_cpumask_var(valid_cpus);

	return ret;
}
late_initcall_sync(init_amu_fie);
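/*
 * Illustrative (hypothetical) scenario for the initcall above: on a
 * four-CPU system where every present CPU passes counter validation but
 * no cpufreq driver has registered a policy, have_policy stays false,
 * valid_cpus equals cpu_present_mask, and all four CPUs land in
 * amu_fie_cpus; the static key is then flipped and the tick path below
 * starts computing freq_scale from the AMU counters.
 */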
bool arch_freq_counters_available(struct cpumask *cpus)
{
	return amu_freq_invariant() &&
	       cpumask_subset(cpus, amu_fie_cpus);
}
void topology_scale_freq_tick(void)
{
	u64 prev_core_cnt, prev_const_cnt;
	u64 core_cnt, const_cnt, scale;
	int cpu = smp_processor_id();

	if (!amu_freq_invariant())
		return;

	if (!cpumask_test_cpu(cpu, amu_fie_cpus))
		return;

	const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
	core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

	if (unlikely(core_cnt <= prev_core_cnt ||
		     const_cnt <= prev_const_cnt))
		goto store_and_exit;

	/*
	 *	    /\core    arch_max_freq_scale
	 * scale =  ------- * --------------------
	 *	    /\const   SCHED_CAPACITY_SCALE
	 *
	 * See validate_cpu_freq_invariance_counters() for details on
	 * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
	 */
	scale = core_cnt - prev_core_cnt;
	scale *= this_cpu_read(arch_max_freq_scale);
	scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
			  const_cnt - prev_const_cnt);

	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
	this_cpu_write(freq_scale, (unsigned long)scale);

store_and_exit:
	this_cpu_write(arch_core_cycles_prev, core_cnt);
	this_cpu_write(arch_const_cycles_prev, const_cnt);
}
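/*
 * Worked tick example (illustrative numbers, continuing the comment
 * after validate_cpu_freq_invariance_counters()): running flat out,
 * the core counter advances 20x faster than the constant counter, so
 * scale ~= (20 * 52428) >> 10 = 1023, effectively SCHED_CAPACITY_SCALE;
 * at half the maximum frequency the ratio is 10 and scale comes out
 * near 512, exactly the frequency-invariance factor the scheduler
 * expects.
 */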
#endif /* CONFIG_ARM64_AMU_EXTN */