// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2021, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"

enum {
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

/* The polling budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee
 * that we update the ci before we have polled all the entries in the EQ.
 * MLX5_NUM_SPARE_EQE is used when setting the EQ size, so the budget is
 * necessarily smaller than the EQ size.
 */
enum {
	MLX5_EQ_POLLING_BUDGET = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
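
/* Illustrative sketch (not part of the driver): the sizing relationship that
 * the static_assert above enforces.  An EQ created with @nent entries is
 * padded with MLX5_NUM_SPARE_EQE before rounding up to a power of two (see
 * create_map_eq() below), so a budget no larger than the spare area can never
 * consume entries past the consumer index that has not yet been published:
 *
 *	log_eq_size = order_base_2(nent + MLX5_NUM_SPARE_EQE);
 *	eq_size     = 1 << log_eq_size;		// >= nent + MLX5_NUM_SPARE_EQE
 *	// per invocation at most MLX5_EQ_POLLING_BUDGET (<= MLX5_NUM_SPARE_EQE)
 *	// entries are handled before the ci is updated.
 */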
struct mlx5_eq_table {
	struct list_head	comp_eqs_list;
	struct mlx5_eq_async	pages_eq;
	struct mlx5_eq_async	cmd_eq;
	struct mlx5_eq_async	async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* Since CQ DB is stored in async_eq */
	struct mlx5_nb		cq_err_nb;

	struct mutex		lock; /* sync async eqs creations */
	int			num_comp_eqs;
	struct mlx5_irq_table	*irq_table;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rmap;
#endif
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST) | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec_in(dev, destroy_eq, in);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	spin_unlock(&table->lock);

	return cq;
}
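
/* Illustrative sketch (not part of the driver): the hold/put contract of
 * mlx5_eq_cq_get().  A hypothetical lookup on a completion path pairs the
 * reference taken above with mlx5_cq_put():
 *
 *	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
 *
 *	if (likely(cq)) {
 *		cq->comp(cq, eqe);	// consume the event
 *		mlx5_cq_put(cq);	// drop the reference taken by _get()
 *	}
 */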
static int mlx5_eq_comp_int(struct notifier_block *nb,
			    __always_unused unsigned long action,
			    __always_unused void *data)
{
	struct mlx5_eq_comp *eq_comp =
		container_of(nb, struct mlx5_eq_comp, irq_nb);
	struct mlx5_eq *eq = &eq_comp->core;
	struct mlx5_eqe *eqe;
	int num_eqes = 0;
	u32 cqn = -1;

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		struct mlx5_core_cq *cq;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq, eqe);
			mlx5_cq_put(cq);
		} else {
			dev_dbg_ratelimited(eq->dev->device,
					    "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq_comp->tasklet_ctx.task);

	return 0;
}

/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}
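
/* Illustrative sketch (not part of the driver): a hypothetical last-resort
 * caller, e.g. a recovery path that suspects a lost completion interrupt,
 * could drain pending EQEs synchronously:
 *
 *	u32 polled = mlx5_eq_poll_irq_disabled(eq);
 *
 *	if (!polled)
 *		mlx5_core_warn(dev, "no EQEs recovered by polling\n");
 *
 * As the comment above warns, this should only be used when there is no
 * other way to regain forward progress.
 */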
static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}

static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}

enum async_eq_nb_action {
	ASYNC_EQ_IRQ_HANDLER = 0,
	ASYNC_EQ_RECOVER = 1,
};

static int mlx5_eq_async_int(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct mlx5_eq_async *eq_async =
		container_of(nb, struct mlx5_eq_async, irq_nb);
	struct mlx5_eq *eq = &eq_async->core;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	unsigned long flags;
	int num_eqes = 0;
	bool recovery;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	recovery = action == ASYNC_EQ_RECOVER;
	mlx5_eq_async_int_lock(eq_async, recovery, &flags);

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
	eq_update_ci(eq, 1);
	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);

	return unlikely(recovery) ? num_eqes : 0;
}

void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
	int eqes;

	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
	if (eqes)
		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}
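
/* Illustrative sketch (not part of the driver): ASYNC_EQ_RECOVER lets the
 * command interface poll the command EQ directly when an interrupt appears
 * to have been lost.  A hypothetical command-timeout handler could do:
 *
 *	if (cmd_timed_out)
 *		mlx5_cmd_eq_recover(dev);	// poll cmd_eq from process context
 *
 * Any EQEs found are delivered through the same notifier chains as the
 * interrupt path (see mlx5_eq_async_int() above).
 */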
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq_get_size(eq); i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
	      struct mlx5_eq_param *param)
{
	u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
	struct mlx5_priv *priv = &dev->priv;
	u16 vecidx = param->irq_index;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;
	int i;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->cons_index = 0;

	err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
				       &eq->frag_buf, dev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
	init_eq_buf(eq);

	eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
	if (IS_ERR(eq->irq)) {
		err = PTR_ERR(eq->irq);
		goto err_buf;
	}

	vecidx = mlx5_irq_get_index(eq->irq);
	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_irq;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_frag_array(&eq->frag_buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
				 param->mask[i]);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_eq;

	kvfree(in);
	return 0;

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);
err_in:
	kvfree(in);
err_irq:
	mlx5_irq_release(eq->irq);
err_buf:
	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq : EQ to enable
 * @nb : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb)
{
	int err;

	err = mlx5_irq_attach_nb(eq->irq, nb);
	if (!err)
		eq_update_ci(eq, 1);

	return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq : EQ to disable
 * @nb : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb)
{
	mlx5_irq_detach_nb(eq->irq, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);
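
/* Illustrative sketch (not part of the driver): the enable/disable pair
 * brackets the lifetime of an EQ consumer's notifier.  A hypothetical
 * generic-EQ user (with its own notifier block, here called my_nb) might do:
 *
 *	eq = mlx5_eq_create_generic(dev, &param);
 *	if (IS_ERR(eq))
 *		return PTR_ERR(eq);
 *	err = mlx5_eq_enable(dev, eq, &my_nb);	// attach IRQ notifier, arm EQ
 *	...
 *	mlx5_eq_disable(dev, eq, &my_nb);	// detach before destroy
 *	mlx5_eq_destroy_generic(dev, eq);
 */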
static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	mlx5_irq_release(eq->irq);

	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	if (!tmp) {
		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			      cq->cqn, eq->eqn);
		return;
	}

	if (tmp != cq)
		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			      cq->cqn, eq->eqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i;

	eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	mlx5_eq_debugfs_init(dev);

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	eq_table->irq_table = mlx5_irq_table_get(dev);
	return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
	kvfree(dev->priv.eq_table);
}

static int create_async_eq(struct mlx5_core_dev *dev,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = create_map_eq(dev, eq, param);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;
	struct mlx5_eq *eq;
	u32 cqn;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq  = &eqt->async_eq.core;
	eqe = data;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return NOTIFY_OK;
	}

	if (cq->event)
		cq->event(cq, type);

	mlx5_cq_put(cq);

	return NOTIFY_OK;
}

static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
	__be64 *user_unaffiliated_events;
	__be64 *user_affiliated_events;
	int i;

	user_affiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
	user_unaffiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

	for (i = 0; i < 4; i++)
		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
				       user_unaffiliated_events[i]);
}

static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_eswitch_is_funcs_handler(dev))
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

	if (MLX5_CAP_GEN_MAX(dev, vhca_state))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

	mask[0] = async_event_mask;

	if (MLX5_CAP_GEN(dev, event_cap))
		gather_user_async_events(dev, mask);
}

static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
	       struct mlx5_eq_param *param, const char *name)
{
	int err;

	eq->irq_nb.notifier_call = mlx5_eq_async_int;
	spin_lock_init(&eq->lock);
	if (!zalloc_cpumask_var(&param->affinity, GFP_KERNEL))
		return -ENOMEM;

	err = create_async_eq(dev, &eq->core, param);
	free_cpumask_var(param->affinity);
	if (err) {
		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
		return err;
	}

	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
		destroy_async_eq(dev, &eq->core);
	}
	return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
			     struct mlx5_eq_async *eq, const char *name)
{
	int err;

	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	err = destroy_async_eq(dev, &eq->core);
	if (err)
		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
			      name, err);
}

static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.nent = MLX5_NUM_CMD_EQE,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
	};
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
	if (err)
		goto err1;

	mlx5_cmd_use_events(dev);
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

	param = (struct mlx5_eq_param) {
		.nent = MLX5_NUM_ASYNC_EQE,
	};

	gather_async_events_mask(dev, param.mask);
	err = setup_async_eq(dev, &table->async_eq, &param, "async");
	if (err)
		goto err2;

	param = (struct mlx5_eq_param) {
		.nent = /* TODO: sriov max_vf + */ 1,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
	};

	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
	if (err)
		goto err3;

	return 0;

err3:
	cleanup_async_eq(dev, &table->async_eq, "async");
err2:
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	cleanup_async_eq(dev, &table->pages_eq, "pages");
	cleanup_async_eq(dev, &table->async_eq, "async");
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed For RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
	int err;

	if (!cpumask_available(param->affinity))
		return ERR_PTR(-EINVAL);

	if (!eq)
		return ERR_PTR(-ENOMEM);

	err = create_async_eq(dev, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	err = destroy_async_eq(dev, eq);
	if (!err)
		kvfree(eq);

	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	u32 nent = eq_get_size(eq);
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
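
/* Illustrative sketch (not part of the driver): a generic-EQ consumer (such
 * as the RDMA ODP EQ mentioned above) typically pairs mlx5_eq_get_eqe() with
 * mlx5_eq_update_ci() in its polling loop.  process_eqe() is a hypothetical
 * handler:
 *
 *	struct mlx5_eqe *eqe;
 *	u32 cc = 0;
 *
 *	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *		process_eqe(eqe);		// consumer-specific handling
 *		cc++;
 *	}
 *	mlx5_eq_update_ci(eq, cc, true);	// publish ci and re-arm the EQ
 */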
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
		if (destroy_unmap_eq(dev, &eq->core))
			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
				       eq->core.eqn);
		tasklet_disable(&eq->tasklet_ctx.task);
		kfree(eq);
	}
}

static int create_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int ncomp_eqs;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_eqs = table->num_comp_eqs;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_eqs; i++) {
		int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
		struct mlx5_eq_param param = {};

		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);

		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
		param = (struct mlx5_eq_param) {
			.irq_index = vecidx,
			.nent = nent,
		};

		if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
			err = -ENOMEM;
			goto clean;
		}
		cpumask_set_cpu(cpumask_local_spread(i, dev->priv.numa_node),
				param.affinity);
		err = create_map_eq(dev, &eq->core, &param);
		free_cpumask_var(param.affinity);
		if (err)
			goto clean;
		err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
		if (err) {
			destroy_unmap_eq(dev, &eq->core);
			goto clean;
		}

		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
		/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
		list_add_tail(&eq->list, &table->comp_eqs_list);
	}

	return 0;

clean:
	destroy_comp_eqs(dev);
	return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;
	int err = -ENOENT;
	int i = 0;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (i++ == vector) {
			*eqn = eq->core.eqn;
			*irqn = eq->core.irqn;
			err = 0;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
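
/* Illustrative sketch (not part of the driver): because the completion EQ
 * list is kept in creation order (see create_comp_eqs()), a consumer can
 * translate a completion vector index into the EQN/IRQ it needs, e.g. when
 * building a CQ context (the MLX5_SET line is a hypothetical CQ setup step):
 *
 *	unsigned int irqn;
 *	int eqn;
 *
 *	err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
 *	if (!err)
 *		MLX5_SET(cqc, cqc, c_eqn, eqn);
 */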
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;
	int i = 0;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (i++ == vector)
			break;
	}

	return mlx5_irq_get_affinity_mask(eq->core.irq);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->rmap;
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->core.eqn == eqn)
			return eq;
	}

	return ERR_PTR(-ENOENT);
}

static void clear_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;

	free_irq_cpu_rmap(eq_table->rmap);
#endif
}

static int set_rmap(struct mlx5_core_dev *mdev)
{
	int err = 0;
#ifdef CONFIG_RFS_ACCEL
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
	int vecidx;

	eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
	if (!eq_table->rmap) {
		err = -ENOMEM;
		mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
		goto err_out;
	}

	vecidx = MLX5_IRQ_VEC_COMP_BASE;
	for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
	     vecidx++) {
		err = irq_cpu_rmap_add(eq_table->rmap,
				       pci_irq_vector(mdev->pdev, vecidx));
		if (err) {
			mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
				      err);
			goto err_irq_cpu_rmap_add;
		}
	}
	return err;

err_irq_cpu_rmap_add:
	clear_rmap(mdev);
err_out:
#endif
	return err;
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
	if (!mlx5_core_is_sf(dev))
		clear_rmap(dev);
	mlx5_irq_table_destroy(dev);
	mutex_unlock(&table->lock);
}
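
/* Illustrative sketch (not part of the driver): as the comment above notes,
 * the IRQs may only be freed once the device can no longer generate events,
 * so a hypothetical teardown sequence would be:
 *
 *	err = mlx5_cmd_force_teardown_hca(dev);	// quiesce the HCA first
 *	if (!err)
 *		mlx5_core_eq_free_irqs(dev);
 */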
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS 4
#else
#define MLX5_MAX_ASYNC_EQS 3
#endif

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int max_eqs_sf;
	int err;

	eq_table->num_comp_eqs =
		min_t(int,
		      mlx5_irq_table_get_num_comp(eq_table->irq_table),
		      num_eqs - MLX5_MAX_ASYNC_EQS);
	if (mlx5_core_is_sf(dev)) {
		max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
				   mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
		eq_table->num_comp_eqs = min_t(int, eq_table->num_comp_eqs,
					       max_eqs_sf);
	}

	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	if (!mlx5_core_is_sf(dev)) {
		/* rmap is a mapping between irq number and queue number.
		 * Each irq can be assigned only to a single rmap.
		 * Since SFs share IRQs, rmap mapping cannot function correctly
		 * for irqs that are shared between different core/netdev RX rings.
		 * Hence we don't allow netdev rmap for SFs.
		 */
		err = set_rmap(dev);
		if (err)
			goto err_rmap;
	}

	err = create_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create completion EQs\n");
		goto err_comp_eqs;
	}

	return 0;

err_comp_eqs:
	if (!mlx5_core_is_sf(dev))
		clear_rmap(dev);
err_rmap:
	destroy_async_eqs(dev);
err_async_eqs:
	return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_sf(dev))
		clear_rmap(dev);
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);