/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}
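/*
 * Note: all cacheinfo state is kept per-CPU; the cache_leaves() and
 * per_cpu_cacheinfo() helpers above are simply accessors into the
 * ci_cpu_cacheinfo percpu variable returned here.
 */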
#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}
	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);/* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}
	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
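/*
 * Illustrative DT shape for the walk above (node names hypothetical):
 * a cpu node may carry "next-level-cache = <&l2>;", so level 1 leaves
 * reuse the cpu node itself while of_find_next_cache_node() follows
 * the phandle chain for the higher-level leaves.
 */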
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}
/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};
static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;

	return type;
}
static void cache_size(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *cache_size;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	cache_size = of_get_property(this_leaf->of_node, propname, NULL);
	if (cache_size)
		this_leaf->size = of_read_number(cache_size, 1);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
	const __be32 *line_size;
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		line_size = of_get_property(this_leaf->of_node, propname, NULL);
		if (line_size)
			break;
	}
	if (line_size)
		this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
static void cache_nr_sets(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *nr_sets;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
	if (nr_sets)
		this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
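/*
 * Worked example with illustrative numbers: a 32KB cache with 128 sets
 * and 64-byte lines gives (32768 / 128) / 64 = 4 ways. nr_sets == 1
 * means fully associative, so ways_of_associativity is left at 0.
 */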
static bool cache_node_is_unified(struct cacheinfo *this_leaf)
{
	return of_property_read_bool(this_leaf->of_node, "cache-unified");
}
static void cache_of_override_properties(unsigned int cpu)
{
	int index;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		/*
		 * init_cache_level must setup the cache level correctly
		 * overriding the architecturally specified levels, so
		 * if type is NONE at this stage, it should be unified
		 */
		if (this_leaf->type == CACHE_TYPE_NOCACHE &&
		    cache_node_is_unified(this_leaf))
			this_leaf->type = CACHE_TYPE_UNIFIED;
		cache_size(this_leaf);
		cache_get_line_size(this_leaf);
		cache_nr_sets(this_leaf);
		cache_associativity(this_leaf);
	}
}
#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume unique level 1 caches, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		/* No cache property/hierarchy support yet in ACPI */
		ret = -ENOTSUPP;
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}
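/*
 * Note that the masks are kept symmetric: both this CPU's leaf and the
 * sibling's leaf are updated in the same pass, so each online CPU ends
 * up with a consistent shared_cpu_map regardless of onlining order.
 */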
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}
static void cache_override_properties(unsigned int cpu)
{
	if (of_have_populated_dt())
		return cache_of_override_properties(cpu);
}
static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}
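/*
 * These weak stubs are meant to be overridden by architecture code,
 * e.g. arm64 derives the levels from CLIDR_EL1 and x86 from CPUID;
 * the defaults simply report that no cache information is available.
 */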
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	cache_override_properties(cpu);
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}
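/*
 * detect_cache_attributes() is the single entry point used by the CPU
 * hotplug callback below; on any failure it unwinds through free_ci so
 * no partially initialised cacheinfo is left behind.
 */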
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);
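/*
 * For example, show_one(level, level) expands to a level_show()
 * callback returning this_leaf->level, which DEVICE_ATTR_RO(level)
 * below exposes as the read-only sysfs file "level".
 */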
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}
static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}
static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}
static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}
static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}
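/*
 * Returning 0 from this callback hides the attribute altogether, so a
 * leaf whose size was never discovered simply has no "size" file
 * rather than one reporting zero.
 */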
static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}
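/*
 * Note: the private group slot is filled once and then reused for all
 * leaves, which assumes any arch override of cache_get_priv_group()
 * hands back the same group each time.
 */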
/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}
static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}
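/*
 * The devices registered above surface as
 * /sys/devices/system/cpu/cpuX/cache/indexY, one indexY directory per
 * cache leaf, with the attribute files defined earlier inside each.
 */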
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
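/*
 * Usage sketch (output values are hypothetical):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/type
 *	Data
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index2/size
 *	512K
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_list
 *	0-3
 */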