// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/cacheinfo.h>

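/*
 * Optional hooks installed by a platform cache driver so it can attach
 * extra, driver-specific sysfs attributes to each cache leaf.
 */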
static struct riscv_cacheinfo_ops *rv_cache_ops;

void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
{
        rv_cache_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);

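/*
 * Overrides the weak stub in the generic cacheinfo code: hand back the
 * platform driver's private attribute group for @this_leaf, if one was
 * registered via riscv_set_cacheinfo_ops().
 */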
const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
        if (rv_cache_ops && rv_cache_ops->get_priv_group)
                return rv_cache_ops->get_priv_group(this_leaf);
        return NULL;
}

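/* Look up the cache leaf of the given level and type on the current CPU. */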
static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
{
        /*
         * Using raw_smp_processor_id() elides a preemptibility check, but this
         * is really indicative of a larger problem: the cacheinfo UABI assumes
         * that cores have a homogeneous view of the cache hierarchy.  That
         * happens to be the case for the current set of RISC-V systems, but
         * likely won't be true in general.  Since there's no way to provide
         * correct information for these systems via the current UABI we're
         * just eliding the check for now.
         */
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
        struct cacheinfo *this_leaf;
        int index;

        for (index = 0; index < this_cpu_ci->num_leaves; index++) {
                this_leaf = this_cpu_ci->info_list + index;
                if (this_leaf->level == level && this_leaf->type == type)
                        return this_leaf;
        }

        return NULL;
}

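/* Return the size in bytes of the given cache, or 0 if it is not described. */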
uintptr_t get_cache_size(u32 level, enum cache_type type)
{
        struct cacheinfo *this_leaf = get_cacheinfo(level, type);

        return this_leaf ? this_leaf->size : 0;
}

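/*
 * Pack the geometry as (ways_of_associativity << 16) | coherency_line_size;
 * e.g. an 8-way cache with 64-byte lines yields 0x00080040.  Returns 0 if
 * the requested cache is not described.
 */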
uintptr_t get_cache_geometry(u32 level, enum cache_type type)
{
        struct cacheinfo *this_leaf = get_cacheinfo(level, type);

        return this_leaf ? (this_leaf->ways_of_associativity << 16 |
                            this_leaf->coherency_line_size) :
                           0;
}

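/*
 * Fill in one cacheinfo leaf from raw DT values; associativity is derived
 * as size / (sets * line_size).
 */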
static void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
                         unsigned int level, unsigned int size,
                         unsigned int sets, unsigned int line_size)
{
        this_leaf->level = level;
        this_leaf->type = type;
        this_leaf->size = size;
        this_leaf->number_of_sets = sets;
        this_leaf->coherency_line_size = line_size;

        /*
         * If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (sets == 1)
                return;

        /*
         * For an n-way set-associative cache, derive the number of ways,
         * making sure all of the properties are greater than zero.
         */
        if (sets > 0 && size > 0 && line_size > 0)
                this_leaf->ways_of_associativity = (size / sets) / line_size;
}

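/*
 * Create leaves for whichever caches @node describes (a unified cache, an
 * instruction cache, a data cache, or a split I/D pair), advancing
 * *this_leaf past each one.
 */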
static void fill_cacheinfo(struct cacheinfo **this_leaf,
                           struct device_node *node, unsigned int level)
{
        unsigned int size, sets, line_size;

        if (!of_property_read_u32(node, "cache-size", &size) &&
            !of_property_read_u32(node, "cache-block-size", &line_size) &&
            !of_property_read_u32(node, "cache-sets", &sets)) {
                ci_leaf_init((*this_leaf)++, CACHE_TYPE_UNIFIED, level, size, sets, line_size);
        }

        if (!of_property_read_u32(node, "i-cache-size", &size) &&
            !of_property_read_u32(node, "i-cache-sets", &sets) &&
            !of_property_read_u32(node, "i-cache-block-size", &line_size)) {
                ci_leaf_init((*this_leaf)++, CACHE_TYPE_INST, level, size, sets, line_size);
        }

        if (!of_property_read_u32(node, "d-cache-size", &size) &&
            !of_property_read_u32(node, "d-cache-sets", &sets) &&
            !of_property_read_u32(node, "d-cache-block-size", &line_size)) {
                ci_leaf_init((*this_leaf)++, CACHE_TYPE_DATA, level, size, sets, line_size);
        }
}

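/*
 * Count cache levels and leaves for @cpu by walking the cpu node and the
 * chain of next-level cache nodes, so the generic cacheinfo code can size
 * the per-CPU info_list before populate_cache_leaves() fills it in.
 */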
int init_cache_level(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct device_node *np = of_cpu_device_node_get(cpu);
        struct device_node *prev = NULL;
        int levels = 0, leaves = 0, level;

        if (of_property_read_bool(np, "cache-size"))
                ++leaves;
        if (of_property_read_bool(np, "i-cache-size"))
                ++leaves;
        if (of_property_read_bool(np, "d-cache-size"))
                ++leaves;
        if (leaves > 0)
                levels = 1;

        prev = np;
        while ((np = of_find_next_cache_node(np))) {
                of_node_put(prev);
                prev = np;
                if (!of_device_is_compatible(np, "cache"))
                        break;
                if (of_property_read_u32(np, "cache-level", &level))
                        break;
                if (level <= levels)
                        break;
                if (of_property_read_bool(np, "cache-size"))
                        ++leaves;
                if (of_property_read_bool(np, "i-cache-size"))
                        ++leaves;
                if (of_property_read_bool(np, "d-cache-size"))
                        ++leaves;
                levels = level;
        }

        /* np is NULL when the walk ends; drop the reference on the last node */
        of_node_put(prev);
        this_cpu_ci->num_levels = levels;
        this_cpu_ci->num_leaves = leaves;

        return 0;
}

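/*
 * Fill the per-CPU info_list: level 1 leaves come from the cpu node itself,
 * higher levels from the chain of "cache"-compatible nodes reached via
 * of_find_next_cache_node().
 */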
int populate_cache_leaves(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
        struct device_node *np = of_cpu_device_node_get(cpu);
        struct device_node *prev = NULL;
        int levels = 1, level = 1;

        /* Level 1 caches in cpu node */
        fill_cacheinfo(&this_leaf, np, level);

        /* Next level caches in cache nodes */
        prev = np;
        while ((np = of_find_next_cache_node(np))) {
                of_node_put(prev);
                prev = np;

                if (!of_device_is_compatible(np, "cache"))
                        break;
                if (of_property_read_u32(np, "cache-level", &level))
                        break;
                if (level <= levels)
                        break;

                fill_cacheinfo(&this_leaf, np, level);

                levels = level;
        }
        /* np is NULL when the walk ends; drop the reference on the last node */
        of_node_put(prev);

        return 0;
}