// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 Thomas Gleixner.
 * Copyright (C) 2016-2017 Christoph Hellwig.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				unsigned int cpus_per_vec)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	for ( ; cpus_per_vec > 0; ) {
		cpu = cpumask_first(nmsk);

		/* Should not happen, but I'm too lazy to think about it */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_vec--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_vec > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_vec--;
		}
	}
}
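/*
 * Worked example for irq_spread_init_one() (illustrative, not part of
 * the original file): assume CPUs 0/1 and 2/3 are hyperthread sibling
 * pairs. With nmsk = {0,1,2,3} and cpus_per_vec = 2, the function first
 * grabs cpumask_first(nmsk) = CPU 0, then prefers its sibling CPU 1,
 * producing irqmsk = {0,1} and leaving nmsk = {2,3} for the next
 * vector.
 */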
static cpumask_var_t *alloc_node_to_cpumask(void)
{
	cpumask_var_t *masks;
	int node;

	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (node = 0; node < nr_node_ids; node++) {
		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
			goto out_unwind;
	}

	return masks;

out_unwind:
	while (--node >= 0)
		free_cpumask_var(masks[node]);
	kfree(masks);
	return NULL;
}
static void free_node_to_cpumask(cpumask_var_t *masks)
{
	int node;

	for (node = 0; node < nr_node_ids; node++)
		free_cpumask_var(masks[node]);
	kfree(masks);
}
static void build_node_to_cpumask(cpumask_var_t *masks)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}
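/*
 * Example for build_node_to_cpumask() (illustrative): on a two-node
 * machine with CPUs 0-3 on node 0 and CPUs 4-7 on node 1, the result
 * is masks[0] = {0-3} and masks[1] = {4-7}.
 */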
static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
				const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes = 0;

	/* Calculate the number of nodes in the supplied affinity mask */
	for_each_node(n) {
		if (cpumask_intersects(mask, node_to_cpumask[n])) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}
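/*
 * Example for get_nodes_in_cpumask() (illustrative): if @mask only
 * contains CPUs that live on NUMA nodes 0 and 2, nodes 0 and 2 are set
 * in *nodemsk and the function returns 2.
 */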
static int __irq_build_affinity_masks(unsigned int startvec,
				      unsigned int numvecs,
				      unsigned int firstvec,
				      cpumask_var_t *node_to_cpumask,
				      const struct cpumask *cpu_mask,
				      struct cpumask *nmsk,
				      struct irq_affinity_desc *masks)
{
	unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0;
	unsigned int last_affv = firstvec + numvecs;
	unsigned int curvec = startvec;
	nodemask_t nodemsk = NODE_MASK_NONE;

	if (!cpumask_weight(cpu_mask))
		return 0;

	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);

	/*
	 * If the number of nodes in the mask is greater than or equal to the
	 * number of vectors we just spread the vectors across the nodes.
	 */
	if (numvecs <= nodes) {
		for_each_node_mask(n, nodemsk) {
			cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
				   node_to_cpumask[n]);
			if (++curvec == last_affv)
				curvec = firstvec;
		}
		return numvecs;
	}

	for_each_node_mask(n, nodemsk) {
		unsigned int ncpus, v, vecs_to_assign, vecs_per_node;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
		ncpus = cpumask_weight(nmsk);
		if (!ncpus)
			continue;

		/*
		 * Calculate the number of cpus per vector
		 *
		 * Spread the vectors evenly per node. If the requested
		 * vector number has been reached, simply allocate one
		 * vector for each remaining node so that all nodes can
		 * be covered.
		 */
		if (numvecs > done)
			vecs_per_node = max_t(unsigned,
					(numvecs - done) / nodes, 1);
		else
			vecs_per_node = 1;

		vecs_to_assign = min(vecs_per_node, ncpus);

		/* Account for rounding errors */
		extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);

		for (v = 0; curvec < last_affv && v < vecs_to_assign;
				curvec++, v++) {
			cpus_per_vec = ncpus / vecs_to_assign;

			/* Account for extra vectors to compensate rounding errors */
			if (extra_vecs) {
				cpus_per_vec++;
				--extra_vecs;
			}
			irq_spread_init_one(&masks[curvec].mask, nmsk,
					    cpus_per_vec);
		}

		done += v;
		if (curvec >= last_affv)
			curvec = firstvec;
		--nodes;
	}
	return done < numvecs ? done : numvecs;
}
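/*
 * Worked example for the rounding logic above (illustrative): a node
 * contributing ncpus = 7 CPUs to vecs_to_assign = 3 vectors yields
 * cpus_per_vec = 7 / 3 = 2 and extra_vecs = 7 - 3 * 2 = 1, so the
 * first vector receives 3 CPUs and the remaining two vectors receive
 * 2 CPUs each.
 */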
/*
 * build affinity in two stages:
 *	1) spread present CPUs on these vectors
 *	2) spread other possible CPUs on these vectors
 */
static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
				    unsigned int firstvec,
				    struct irq_affinity_desc *masks)
{
	unsigned int curvec = startvec, nr_present, nr_others;
	cpumask_var_t *node_to_cpumask;
	cpumask_var_t nmsk, npresmsk;
	int ret = -ENOMEM;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return ret;

	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
		goto fail_nmsk;

	node_to_cpumask = alloc_node_to_cpumask();
	if (!node_to_cpumask)
		goto fail_npresmsk;

	ret = 0;
	/* Stabilize the cpumasks */
	get_online_cpus();
	build_node_to_cpumask(node_to_cpumask);

	/* Spread on present CPUs starting from affd->pre_vectors */
	nr_present = __irq_build_affinity_masks(curvec, numvecs,
						firstvec, node_to_cpumask,
						cpu_present_mask, nmsk, masks);

	/*
	 * Spread on non present CPUs starting from the next vector to be
	 * handled. If the spreading of present CPUs already exhausted the
	 * vector space, assign the non present CPUs to the already spread
	 * out vectors.
	 */
	if (nr_present >= numvecs)
		curvec = firstvec;
	else
		curvec = firstvec + nr_present;
	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
	nr_others = __irq_build_affinity_masks(curvec, numvecs,
					       firstvec, node_to_cpumask,
					       npresmsk, nmsk, masks);
	put_online_cpus();

	if (nr_present < numvecs)
		WARN_ON(nr_present + nr_others < numvecs);

	free_node_to_cpumask(node_to_cpumask);

 fail_npresmsk:
	free_cpumask_var(npresmsk);

 fail_nmsk:
	free_cpumask_var(nmsk);
	return ret;
}
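/*
 * Example of the two-stage spread (illustrative): with numvecs = 4,
 * CPUs 0-3 present and CPUs 4-7 possible but not present, the first
 * pass distributes vectors 0-3 over CPUs 0-3. Since that exhausts the
 * vector space, the second pass restarts at firstvec and overlays CPUs
 * 4-7 onto the same vectors, so a CPU that is hotplugged later already
 * has a vector assigned.
 */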
static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
{
	affd->nr_sets = 1;
	affd->set_size[0] = affvecs;
}
/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
 */
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
	unsigned int affvecs, curvec, usedvecs, i;
	struct irq_affinity_desc *masks = NULL;

	/*
	 * Determine the number of vectors which need interrupt affinities
	 * assigned. If the pre/post request exhausts the available vectors
	 * then nothing to do here except for invoking the calc_sets()
	 * callback so the device driver can adjust to the situation.
	 */
	if (nvecs > affd->pre_vectors + affd->post_vectors)
		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
	else
		affvecs = 0;

	/*
	 * Simple invocations do not provide a calc_sets() callback. Install
	 * the generic one.
	 */
	if (!affd->calc_sets)
		affd->calc_sets = default_calc_sets;

	/* Recalculate the sets */
	affd->calc_sets(affd, affvecs);

	if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
		return NULL;

	/* Nothing to assign? */
	if (!affvecs)
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return NULL;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/*
	 * Spread on present CPUs starting from affd->pre_vectors. If we
	 * have multiple sets, build each set's affinity mask separately.
	 */
	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
		unsigned int this_vecs = affd->set_size[i];
		int ret;

		ret = irq_build_affinity_masks(curvec, this_vecs,
					       curvec, masks);
		if (ret) {
			kfree(masks);
			return NULL;
		}
		curvec += this_vecs;
		usedvecs += this_vecs;
	}

	/* Fill out vectors at the end that don't need affinity */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors + affvecs;
	else
		curvec = affd->pre_vectors + usedvecs;
	for (; curvec < nvecs; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/* Mark the managed interrupts */
	for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
		masks[i].is_managed = 1;

	return masks;
}
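/*
 * Usage sketch for irq_create_affinity_masks() (hypothetical caller,
 * not part of this file; nvecs stands for whatever vector count the
 * driver negotiated):
 *
 *	struct irq_affinity affd = { .pre_vectors = 1 };
 *	struct irq_affinity_desc *masks;
 *
 *	masks = irq_create_affinity_masks(nvecs, &affd);
 *
 * On success masks[0] keeps irq_default_affinity while the remaining
 * vectors are spread over the CPUs and marked managed; the caller
 * frees the array with kfree(). In practice drivers usually hand
 * struct irq_affinity to pci_alloc_irq_vectors_affinity() instead of
 * calling this function directly.
 */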
/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @minvec:	The minimum number of vectors available
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 */
unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd)
{
	unsigned int resv = affd->pre_vectors + affd->post_vectors;
	unsigned int set_vecs;

	if (resv > minvec)
		return 0;

	if (affd->calc_sets) {
		set_vecs = maxvec - resv;
	} else {
		get_online_cpus();
		set_vecs = cpumask_weight(cpu_possible_mask);
		put_online_cpus();
	}

	return resv + min(set_vecs, maxvec - resv);
}
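/*
 * Worked example for irq_calc_affinity_vectors() (illustrative): with
 * affd->pre_vectors = 1, affd->post_vectors = 0, no calc_sets()
 * callback and 16 possible CPUs, a call with minvec = 2 and
 * maxvec = 32 gives resv = 1, set_vecs = 16 and returns
 * 1 + min(16, 31) = 17 vectors.
 */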