// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#define MLX5_MAX_IRQ_NAME (32)

struct mlx5_irq {
        struct atomic_notifier_head nh;
        cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
};

struct mlx5_irq_table {
        struct mlx5_irq *irq;
        int nvec;
#ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap *rmap;
#endif
};
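
/* Vector layout: vector 0 ("mlx5_async") carries all asynchronous events,
 * while vectors from MLX5_IRQ_VEC_COMP_BASE upward ("mlx5_comp<N>") each
 * serve one completion EQ. One mlx5_irq wraps one MSI-X vector; consumers
 * subscribe to it through the per-vector atomic notifier chain.
 */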

int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table;

        /* SFs reuse the parent PF's IRQ table (see mlx5_irq_table_get()). */
        if (mlx5_core_is_sf(dev))
                return 0;

        irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
        if (!irq_table)
                return -ENOMEM;

        dev->priv.irq_table = irq_table;
        return 0;
}

void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
        if (mlx5_core_is_sf(dev))
                return;

        kvfree(dev->priv.irq_table);
}

int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
{
        return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
}

static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
{
        struct mlx5_irq_table *irq_table = dev->priv.irq_table;

        return &irq_table->irq[vecidx];
}

/**
 * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
 *                                   to be assigned to each VF.
 * @dev: PF to work on
 * @num_vfs: Number of enabled VFs
 */
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
{
        int num_vf_msix, min_msix, max_msix;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        /* Limit the maximum number of MSI-X vectors so that the default
         * configuration leaves some vectors available in the pool. This lets
         * the user later increase the vector count of one VF without first
         * having to size down other VFs.
         */
        return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix);
}
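
/* Worked example of the formula above, with assumed capability values (not
 * taken from any particular device): a pool of num_vf_msix = 1024 vectors,
 * num_vfs = 16, max_msix = 64 and min_msix = 2 gives
 * max(min(1024 / 16, 64 / 2), 2) = max(min(64, 32), 2) = 32 vectors per VF,
 * consuming 16 * 32 = 512 vectors and leaving 512 in the pool for later
 * per-VF increases.
 */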

/**
 * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF
 * @dev: PF to work on
 * @function_id: Internal PCI VF function ID
 * @msix_vec_count: Number of MSI-X vectors to set
 */
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
                            int msix_vec_count)
{
        int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        int num_vf_msix, min_msix, max_msix;
        void *hca_cap, *cap;
        int ret;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))
                return -EOPNOTSUPP;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        if (msix_vec_count < min_msix)
                return -EINVAL;

        if (msix_vec_count > max_msix)
                return -EOVERFLOW;

        hca_cap = kzalloc(sz, GFP_KERNEL);
        if (!hca_cap)
                return -ENOMEM;

        cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
        MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);

        /* Write the capability on behalf of another function (the VF):
         * other_function plus function_id address the target of SET_HCA_CAP.
         */
        MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
        MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);

        MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
                 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
        ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
        kfree(hca_cap);
        return ret;
}
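
/* Note: this is expected to be reached from the PCI core's
 * "sriov_vf_msix_count" sysfs attribute via the driver's
 * sriov_set_msix_vec_count callback (an assumption about the caller, not
 * visible in this file); the PCI core only permits the change while no
 * driver is bound to the VF.
 */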

int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
                       struct notifier_block *nb)
{
        struct mlx5_irq *irq;

        irq = &irq_table->irq[vecidx];
        return atomic_notifier_chain_register(&irq->nh, nb);
}

int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
                       struct notifier_block *nb)
{
        struct mlx5_irq *irq;

        irq = &irq_table->irq[vecidx];
        return atomic_notifier_chain_unregister(&irq->nh, nb);
}
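
/* Minimal usage sketch for the notifier interface (hypothetical consumer,
 * not part of this file): EQs attach themselves to a vector like this and
 * have their callback invoked from hard-IRQ context.
 *
 *        static int my_eq_int(struct notifier_block *nb, unsigned long type,
 *                             void *data)
 *        {
 *                // Poll the EQ here; runs in hard-IRQ context.
 *                return NOTIFY_OK;
 *        }
 *
 *        static struct notifier_block my_nb = { .notifier_call = my_eq_int };
 *        int err = mlx5_irq_attach_nb(irq_table, vecidx, &my_nb);
 */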

/* Shared hard-IRQ handler for all vectors: fan the interrupt out to every
 * notifier registered on this vector's chain.
 */
static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
{
        atomic_notifier_call_chain(nh, 0, NULL);
        return IRQ_HANDLED;
}

static void irq_set_name(char *name, int vecidx)
{
        if (!vecidx) {
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
                return;
        }

        snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
                 vecidx - MLX5_IRQ_VEC_COMP_BASE);
}

static int request_irqs(struct mlx5_core_dev *dev, int nvec)
{
        char name[MLX5_MAX_IRQ_NAME];
        int err;
        int i;

        for (i = 0; i < nvec; i++) {
                struct mlx5_irq *irq = mlx5_irq_get(dev, i);
                int irqn = pci_irq_vector(dev->pdev, i);

                irq_set_name(name, i);
                ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
                snprintf(irq->name, MLX5_MAX_IRQ_NAME,
                         "%s@pci:%s", name, pci_name(dev->pdev));
                /* The notifier head doubles as the dev_id cookie, so the
                 * shared handler can reach this vector's chain.
                 */
                err = request_irq(irqn, mlx5_irq_int_handler, 0, irq->name,
                                  &irq->nh);
                if (err) {
                        mlx5_core_err(dev, "Failed to request irq\n");
                        goto err_request_irq;
                }
        }
        return 0;

err_request_irq:
        while (i--) {
                struct mlx5_irq *irq = mlx5_irq_get(dev, i);
                int irqn = pci_irq_vector(dev->pdev, i);

                free_irq(irqn, &irq->nh);
        }
        return err;
}
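
/* The resulting names are what shows up in /proc/interrupts, e.g.
 * "mlx5_comp0@pci:0000:08:00.0" (the PCI address here is a made-up example).
 */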

static void irq_clear_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
        struct mlx5_irq_table *irq_table = dev->priv.irq_table;

        free_irq_cpu_rmap(irq_table->rmap);
#endif
}

static int irq_set_rmap(struct mlx5_core_dev *mdev)
{
        int err = 0;
#ifdef CONFIG_RFS_ACCEL
        struct mlx5_irq_table *irq_table = mdev->priv.irq_table;
        int num_affinity_vec;
        int vecidx;

        num_affinity_vec = mlx5_irq_get_num_comp(irq_table);
        irq_table->rmap = alloc_irq_cpu_rmap(num_affinity_vec);
        if (!irq_table->rmap) {
                err = -ENOMEM;
                mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
                goto err_out;
        }

        vecidx = MLX5_IRQ_VEC_COMP_BASE;
        for (; vecidx < irq_table->nvec; vecidx++) {
                err = irq_cpu_rmap_add(irq_table->rmap,
                                       pci_irq_vector(mdev->pdev, vecidx));
                if (err) {
                        mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
                                      err);
                        goto err_irq_cpu_rmap_add;
                }
        }
        return 0;

err_irq_cpu_rmap_add:
        irq_clear_rmap(mdev);
err_out:
#endif
        return err;
}
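
/* The reverse map built here is consumed by accelerated RFS when
 * CONFIG_RFS_ACCEL is enabled: it lets the stack map a completion IRQ back
 * to the CPU currently servicing it, so flows can be steered accordingly.
 */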

/* Completion IRQ vectors */

static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
        int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
        struct mlx5_irq *irq;
        int irqn;

        irq = mlx5_irq_get(mdev, vecidx);
        irqn = pci_irq_vector(mdev->pdev, vecidx);
        if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
                mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
                return -ENOMEM;
        }

        cpumask_set_cpu(cpumask_local_spread(i, mdev->priv.numa_node),
                        irq->mask);
        if (IS_ENABLED(CONFIG_SMP) &&
            irq_set_affinity_hint(irqn, irq->mask))
                mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
                               irqn);

        return 0;
}
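
/* cpumask_local_spread(i, node) returns the i-th CPU, enumerating CPUs on
 * the given NUMA node first, so completion vectors land on device-local
 * CPUs before spilling over to remote ones.
 */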

static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
        int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
        struct mlx5_irq *irq;
        int irqn;

        irq = mlx5_irq_get(mdev, vecidx);
        irqn = pci_irq_vector(mdev->pdev, vecidx);
        irq_set_affinity_hint(irqn, NULL);
        free_cpumask_var(irq->mask);
}

static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
{
        int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
        int err;
        int i;

        for (i = 0; i < nvec; i++) {
                err = set_comp_irq_affinity_hint(mdev, i);
                if (err)
                        goto err_out;
        }

        return 0;

err_out:
        for (i--; i >= 0; i--)
                clear_comp_irq_affinity_hint(mdev, i);

        return err;
}

static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
{
        int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
        int i;

        for (i = 0; i < nvec; i++)
                clear_comp_irq_affinity_hint(mdev, i);
}

struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx)
{
        return irq_table->irq[vecidx].mask;
}

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *irq_table)
{
        return irq_table->rmap;
}
#endif

static void unrequest_irqs(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;
        int i;

        for (i = 0; i < table->nvec; i++)
                free_irq(pci_irq_vector(dev->pdev, i),
                         &mlx5_irq_get(dev, i)->nh);
}

int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_irq_table *table = priv->irq_table;
        int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
                      MLX5_CAP_GEN(dev, max_num_eqs) :
                      1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
        int err;

        if (mlx5_core_is_sf(dev))
                return 0;

        nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
               MLX5_IRQ_VEC_COMP_BASE;
        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
                return -ENOMEM;

        table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
        if (!table->irq)
                return -ENOMEM;

        nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
                                     nvec, PCI_IRQ_MSIX);
        if (nvec < 0) {
                err = nvec;
                goto err_free_irq;
        }

        table->nvec = nvec;

        err = irq_set_rmap(dev);
        if (err)
                goto err_set_rmap;

        err = request_irqs(dev, nvec);
        if (err)
                goto err_request_irqs;

        err = set_comp_irq_affinity_hints(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
                goto err_set_affinity;
        }

        return 0;

err_set_affinity:
        unrequest_irqs(dev);
err_request_irqs:
        irq_clear_rmap(dev);
err_set_rmap:
        pci_free_irq_vectors(dev->pdev);
err_free_irq:
        kfree(table->irq);
        return err;
}
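
/* Sizing example for the request above (assumed values, not from a real
 * system, and taking MLX5_IRQ_VEC_COMP_BASE as 1): with num_ports = 1 and
 * 8 online CPUs, the driver asks for 1 * 8 + 1 = 9 vectors (capped by
 * num_eqs) and accepts anything down to MLX5_IRQ_VEC_COMP_BASE + 1, i.e.
 * at least one completion vector plus the async vector.
 */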

void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;
        int i;

        if (mlx5_core_is_sf(dev))
                return;

        /* free_irq requires that affinity and rmap are cleared before it is
         * called. This is why there is asymmetry with set_rmap, which should
         * be called after alloc_irq but before request_irq.
         */
        irq_clear_rmap(dev);
        clear_comp_irqs_affinity_hints(dev);
        for (i = 0; i < table->nvec; i++)
                free_irq(pci_irq_vector(dev->pdev, i),
                         &mlx5_irq_get(dev, i)->nh);
        pci_free_irq_vectors(dev->pdev);
        kfree(table->irq);
}

struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_MLX5_SF
        /* SFs have no IRQ table of their own; they use the parent PF's. */
        if (mlx5_core_is_sf(dev))
                return dev->priv.parent_mdev->priv.irq_table;
#endif
        return dev->priv.irq_table;
}