/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
enum {
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED             = 0x9,
        MLX5_EQ_STATE_FIRED             = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED      = 0xb,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};

/* budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we update
 * the ci before we polled all the entries in the EQ. MLX5_NUM_SPARE_EQE is
 * used to set the EQ size, budget must be smaller than the EQ size.
 */
enum {
        MLX5_EQ_POLLING_BUDGET = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
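
/* Worked example of the sizing rule above (a sketch; assumes
 * MLX5_NUM_SPARE_EQE is 0x80 as defined in lib/eq.h): an EQ requested with
 * nent = 256 is allocated as roundup_pow_of_two(256 + 128) = 512 entries in
 * create_map_eq(), so even if a poll consumes the full budget of 128 EQEs
 * before the consumer index is pushed to hardware, the device still has
 * spare entries to write into and the queue cannot overflow.
 */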
struct mlx5_eq_table {
        struct list_head        comp_eqs_list;
        struct mlx5_eq_async    pages_eq;
        struct mlx5_eq_async    cmd_eq;
        struct mlx5_eq_async    async_eq;

        struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

        /* Since CQ DB is stored in async_eq */
        struct mlx5_nb          cq_err_nb;

        struct mutex            lock; /* sync async eqs creations */
        int                     num_comp_eqs;
        struct mlx5_irq_table   *irq_table;
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
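
/* MLX5_ASYNC_EVENT_MASK is the set of event types the async EQ always
 * subscribes to; gather_async_events_mask() below extends it with events
 * that depend on device capabilities (vport changes, FPGA, PPS, etc.).
 */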
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *cq = NULL;

        spin_lock(&table->lock);
        cq = radix_tree_lookup(&table->tree, cqn);
        if (likely(cq))
                mlx5_cq_hold(cq);
        spin_unlock(&table->lock);

        return cq;
}
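
/* Taking the reference under table->lock ensures the CQ cannot be freed
 * between the radix tree lookup and mlx5_cq_hold(); the matching
 * mlx5_cq_put() in the caller is what finally allows destruction.
 */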
static int mlx5_eq_comp_int(struct notifier_block *nb,
                            __always_unused unsigned long action,
                            __always_unused void *data)
{
        struct mlx5_eq_comp *eq_comp =
                container_of(nb, struct mlx5_eq_comp, irq_nb);
        struct mlx5_eq *eq = &eq_comp->core;
        struct mlx5_eqe *eqe;
        int num_eqes = 0;
        u32 cqn = -1;

        eqe = next_eqe_sw(eq);
        if (!eqe)
                goto out;

        do {
                struct mlx5_core_cq *cq;

                /* Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();
                /* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
                cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

                cq = mlx5_eq_cq_get(eq, cqn);
                if (likely(cq)) {
                        ++cq->arm_sn;
                        cq->comp(cq, eqe);
                        mlx5_cq_put(cq);
                } else {
                        dev_dbg_ratelimited(eq->dev->device,
                                            "Completion event for bogus CQ 0x%x\n", cqn);
                }

                ++eq->cons_index;

        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
        eq_update_ci(eq, 1);

        if (cqn != -1)
                tasklet_schedule(&eq_comp->tasklet_ctx.task);

        return 0;
}
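
/* Polling protocol (a summary, not new mechanism): next_eqe_sw() compares the
 * EQE ownership bit against the parity of the consumer index, so an entry is
 * only handed to software once hardware has finished writing it; dma_rmb()
 * then orders the reading of the EQE payload after that ownership check, and
 * eq_update_ci() publishes the new consumer index and re-arms the EQ.
 */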
/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
        u32 count_eqe;

        disable_irq(eq->core.irqn);
        count_eqe = eq->core.cons_index;
        mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
        count_eqe = eq->core.cons_index - count_eqe;
        enable_irq(eq->core.irqn);

        return count_eqe;
}
static int mlx5_eq_async_int(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        struct mlx5_eq_async *eq_async =
                container_of(nb, struct mlx5_eq_async, irq_nb);
        struct mlx5_eq *eq = &eq_async->core;
        struct mlx5_eq_table *eqt;
        struct mlx5_core_dev *dev;
        struct mlx5_eqe *eqe;
        int num_eqes = 0;

        dev = eq->dev;
        eqt = dev->priv.eq_table;

        eqe = next_eqe_sw(eq);
        if (!eqe)
                goto out;

        do {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
                atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

                ++eq->cons_index;

        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
        eq_update_ci(eq, 1);

        return 0;
}
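
/* Each async EQE is dispatched twice: once to the notifier chain keyed by its
 * specific event type, and once to the MLX5_EVENT_TYPE_NOTIFY_ANY chain, which
 * lets a consumer observe every async event without registering per type.
 */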
static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}
static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
              struct mlx5_eq_param *param)
{
        struct mlx5_cq_table *cq_table = &eq->cq_table;
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        u8 vecidx = param->irq_index;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;
        int i;

        /* Init CQ table */
        memset(cq_table, 0, sizeof(*cq_table));
        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

        eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
                MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
                MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

        for (i = 0; i < 4; i++)
                MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
                                 param->mask[i]);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
                 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                goto err_in;

        eq->vecidx = vecidx;
        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = pci_irq_vector(dev->pdev, vecidx);
        eq->dev = dev;
        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_eq;

        kvfree(in);
        return 0;

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to enable
 * @nb  : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                   struct notifier_block *nb)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
        if (!err)
                eq_update_ci(eq, 1);

        return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);
/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to disable
 * @nb  : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                     struct notifier_block *nb)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;

        mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);
static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        mlx5_debug_eq_remove(dev, eq);

        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        synchronize_irq(eq->irqn);

        mlx5_buf_free(dev, &eq->buf);

        return err;
}
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        int err;

        spin_lock(&table->lock);
        err = radix_tree_insert(&table->tree, cq->cqn, cq);
        spin_unlock(&table->lock);

        return err;
}
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *tmp;

        spin_lock(&table->lock);
        tmp = radix_tree_delete(&table->tree, cq->cqn);
        spin_unlock(&table->lock);

        if (!tmp) {
                mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
                              eq->eqn, cq->cqn);
                return;
        }

        if (tmp != cq)
                mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
                              eq->eqn, cq->cqn);
}
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *eq_table;
        int i;

        eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
        if (!eq_table)
                return -ENOMEM;

        dev->priv.eq_table = eq_table;

        mlx5_eq_debugfs_init(dev);

        mutex_init(&eq_table->lock);
        for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
                ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

        eq_table->irq_table = dev->priv.irq_table;
        return 0;
}
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_eq_debugfs_cleanup(dev);
        kvfree(dev->priv.eq_table);
}
static int create_async_eq(struct mlx5_core_dev *dev,
                           struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        mutex_lock(&eq_table->lock);
        /* Async EQs must share irq index 0 */
        if (param->irq_index != 0) {
                err = -EINVAL;
                goto unlock;
        }

        err = create_map_eq(dev, eq, param);
unlock:
        mutex_unlock(&eq_table->lock);
        return err;
}
static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        mutex_lock(&eq_table->lock);
        err = destroy_unmap_eq(dev, eq);
        mutex_unlock(&eq_table->lock);
        return err;
}
static int cq_err_event_notifier(struct notifier_block *nb,
                                 unsigned long type, void *data)
{
        struct mlx5_eq_table *eqt;
        struct mlx5_core_cq *cq;
        struct mlx5_eqe *eqe;
        struct mlx5_eq *eq;
        u32 cqn;

        /* type == MLX5_EVENT_TYPE_CQ_ERROR */

        eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
        eq  = &eqt->async_eq.core;
        eqe = data;

        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
        mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                       cqn, eqe->data.cq_err.syndrome);

        /* mlx5_eq_cq_get() takes a hold of the CQ */
        cq = mlx5_eq_cq_get(eq, cqn);
        if (unlikely(!cq)) {
                mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
                return NOTIFY_OK;
        }

        if (cq->event)
                cq->event(cq, type);

        mlx5_cq_put(cq);

        return NOTIFY_OK;
}
static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
        __be64 *user_unaffiliated_events;
        __be64 *user_affiliated_events;
        int i;

        user_affiliated_events =
                MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
        user_unaffiliated_events =
                MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

        for (i = 0; i < 4; i++)
                mask[i] |= be64_to_cpu(user_affiliated_events[i] |
                                       user_unaffiliated_events[i]);
}
static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

        if (MLX5_VPORT_MANAGER(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, general_notification_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
        else
                mlx5_core_dbg(dev, "port_module_event is not set\n");

        if (MLX5_PPS_CAP(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

        if (MLX5_CAP_GEN(dev, fpga))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
                                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
        if (MLX5_CAP_GEN_MAX(dev, dct))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

        if (MLX5_CAP_GEN(dev, temp_warn_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

        if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

        if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

        if (mlx5_eswitch_is_funcs_handler(dev))
                async_event_mask |=
                        (1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

        mask[0] = async_event_mask;

        if (MLX5_CAP_GEN(dev, event_cap))
                gather_user_async_events(dev, mask);
}
static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
               struct mlx5_eq_param *param, const char *name)
{
        int err;

        eq->irq_nb.notifier_call = mlx5_eq_async_int;

        err = create_async_eq(dev, &eq->core, param);
        if (err) {
                mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
                return err;
        }
        err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
        if (err) {
                mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
                destroy_async_eq(dev, &eq->core);
        }
        return err;
}
static void cleanup_async_eq(struct mlx5_core_dev *dev,
                             struct mlx5_eq_async *eq, const char *name)
{
        int err;

        mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
        err = destroy_async_eq(dev, &eq->core);
        if (err)
                mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
                              name, err);
}
static int create_async_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_param param = {};
        int err;

        MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
        mlx5_eq_notifier_register(dev, &table->cq_err_nb);

        param = (struct mlx5_eq_param) {
                .irq_index = 0,
                .nent = MLX5_NUM_CMD_EQE,
                .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
        };
        mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
        err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
        if (err)
                goto err1;

        mlx5_cmd_use_events(dev);
        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

        param = (struct mlx5_eq_param) {
                .irq_index = 0,
                .nent = MLX5_NUM_ASYNC_EQE,
        };

        gather_async_events_mask(dev, param.mask);
        err = setup_async_eq(dev, &table->async_eq, &param, "async");
        if (err)
                goto err2;

        param = (struct mlx5_eq_param) {
                .irq_index = 0,
                .nent = /* TODO: sriov max_vf + */ 1,
                .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
        };

        err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
        if (err)
                goto err3;

        return 0;

err3:
        cleanup_async_eq(dev, &table->async_eq, "async");
err2:
        mlx5_cmd_use_polling(dev);
        cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
        return err;
}
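
/* Bootstrap ordering note: the cmd EQ is created while only
 * MLX5_CMD_OP_CREATE_EQ is an allowed opcode, and only once it exists does
 * the command interface switch from polling to event-driven completions via
 * mlx5_cmd_use_events(); the teardown paths undo these steps in reverse.
 */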
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;

        cleanup_async_eq(dev, &table->pages_eq, "pages");
        cleanup_async_eq(dev, &table->async_eq, "async");
        mlx5_cmd_use_polling(dev);
        cleanup_async_eq(dev, &table->cmd_eq, "cmd");
        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
        return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
        synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
        synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}
/* Generic EQ API for mlx5_core consumers
 * Needed For RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
                       struct mlx5_eq_param *param)
{
        struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
        int err;

        if (!eq)
                return ERR_PTR(-ENOMEM);

        err = create_async_eq(dev, eq, param);
        if (err) {
                kvfree(eq);
                eq = ERR_PTR(err);
        }

        return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        if (IS_ERR(eq))
                return -EINVAL;

        err = destroy_async_eq(dev, eq);
        if (err)
                goto out;

        kvfree(eq);
out:
        return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
        u32 ci = eq->cons_index + cc;
        struct mlx5_eqe *eqe;

        eqe = get_eqe(eq, ci & (eq->nent - 1));
        eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
        /* Make sure we read EQ entry contents after we've
         * checked the ownership bit.
         */
        if (eqe)
                dma_rmb();

        return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val;

        eq->cons_index += cc;
        val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
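
/* Minimal usage sketch for the generic EQ API above (hypothetical consumer;
 * names other than the exported mlx5_eq_* calls are illustrative only):
 *
 *      struct mlx5_eq_param param = {
 *              .irq_index = 0,
 *              .nent = 64,
 *      };
 *      struct mlx5_eq *eq;
 *
 *      param.mask[0] = 1ull << MY_EVENT_TYPE;    // event(s) of interest
 *      eq = mlx5_eq_create_generic(dev, &param);
 *      if (IS_ERR(eq))
 *              return PTR_ERR(eq);
 *      err = mlx5_eq_enable(dev, eq, &my_nb);    // my_nb invokes the handler
 *
 *      // In the handler: consume EQEs at offsets cc = 0, 1, ... via
 *      // mlx5_eq_get_eqe(eq, cc) until it returns NULL, then publish the
 *      // new consumer index and re-arm with mlx5_eq_update_ci(eq, cc, true).
 *
 *      mlx5_eq_disable(dev, eq, &my_nb);
 *      mlx5_eq_destroy_generic(dev, eq);
 */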
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;

        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                list_del(&eq->list);
                mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
                if (destroy_unmap_eq(dev, &eq->core))
                        mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
                                       eq->core.eqn);
                tasklet_disable(&eq->tasklet_ctx.task);
                kfree(eq);
        }
}
static int create_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq;
        int ncomp_eqs;
        int nent;
        int err;
        int i;

        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_eqs = table->num_comp_eqs;
        nent = MLX5_COMP_EQ_SIZE;
        for (i = 0; i < ncomp_eqs; i++) {
                int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
                struct mlx5_eq_param param = {};

                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
                        err = -ENOMEM;
                        goto clean;
                }

                INIT_LIST_HEAD(&eq->tasklet_ctx.list);
                INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
                spin_lock_init(&eq->tasklet_ctx.lock);
                tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
                             (unsigned long)&eq->tasklet_ctx);

                eq->irq_nb.notifier_call = mlx5_eq_comp_int;
                param = (struct mlx5_eq_param) {
                        .irq_index = vecidx,
                        .nent = nent,
                };
                err = create_map_eq(dev, &eq->core, &param);
                if (err) {
                        kfree(eq);
                        goto clean;
                }
                err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
                if (err) {
                        destroy_unmap_eq(dev, &eq->core);
                        kfree(eq);
                        goto clean;
                }

                mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
                /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
                list_add_tail(&eq->list, &table->comp_eqs_list);
        }

        return 0;

clean:
        destroy_comp_eqs(dev);
        return err;
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;
        int err = -ENOENT;
        int i = 0;

        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (i++ == vector) {
                        *eqn = eq->core.eqn;
                        *irqn = eq->core.irqn;
                        err = 0;
                        break;
                }
        }

        return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
        return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
        int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;

        return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
                                          vecidx);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
        return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
}
#endif
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq;

        list_for_each_entry(eq, &table->comp_eqs_list, list) {
                if (eq->core.eqn == eqn)
                        return eq;
        }

        return ERR_PTR(-ENOENT);
}
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;

        mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
        mlx5_irq_table_destroy(dev);
        mutex_unlock(&table->lock);
}
int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        eq_table->num_comp_eqs =
                mlx5_irq_get_num_comp(eq_table->irq_table);

        err = create_async_eqs(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create async EQs\n");
                goto err_async_eqs;
        }

        err = create_comp_eqs(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create completion EQs\n");
                goto err_comp_eqs;
        }

        return 0;

err_comp_eqs:
        destroy_async_eqs(dev);
err_async_eqs:
        return err;
}
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
        destroy_comp_eqs(dev);
        destroy_async_eqs(dev);
}
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
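
/* Registration sketch (hypothetical consumer; my_nb and my_port_event are
 * illustrative names). This file itself uses the same pattern for cq_err_nb
 * in create_async_eqs().
 *
 *      static int my_port_event(struct notifier_block *nb,
 *                               unsigned long type, void *data)
 *      {
 *              struct mlx5_eqe *eqe = data;
 *              ...
 *              return NOTIFY_OK;
 *      }
 *
 *      MLX5_NB_INIT(&my_nb, my_port_event, PORT_CHANGE);
 *      mlx5_eq_notifier_register(dev, &my_nb);
 *      ...
 *      mlx5_eq_notifier_unregister(dev, &my_nb);
 */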