/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "fpga/core.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
enum {
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED = 0x9,
        MLX5_EQ_STATE_FIRED = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
struct mlx5_irq_info {
        cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
        void *context; /* dev_id provided to request_irq */
};
struct mlx5_eq_table {
        struct list_head comp_eqs_list;
        struct mlx5_eq pages_eq;
        struct mlx5_eq cmd_eq;
        struct mlx5_eq async_eq;

        struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

        /* Since CQ DB is stored in async_eq */
        struct mlx5_nb cq_err_nb;

        struct mutex lock; /* sync async eqs creations */
        int num_comp_vectors;
        struct mlx5_irq_info *irq_info;
#ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap *rmap;
#endif
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST) | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
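
/* MLX5_ASYNC_EVENT_MASK above is the baseline bitmask programmed into the
 * async EQ through the CREATE_EQ event_bitmask field (see create_map_eq() and
 * create_async_eqs() below); capability-dependent bits are added on top of it
 * by gather_async_events_mask().
 */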
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *cq = NULL;

        spin_lock(&table->lock);
        cq = radix_tree_lookup(&table->tree, cqn);
        spin_unlock(&table->lock);
static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
        struct mlx5_eq_comp *eq_comp = eq_ptr;
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_eqe *eqe;

        while ((eqe = next_eqe_sw(eq))) {
                struct mlx5_core_cq *cq;

                /* Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */

                /* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
                cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

                cq = mlx5_eq_cq_get(eq, cqn);
                        mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events. We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least once every MLX5_NUM_SPARE_EQE entries.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
        disable_irq(eq->core.irqn);
        count_eqe = eq->core.cons_index;
        mlx5_eq_comp_int(eq->core.irqn, eq);
        count_eqe = eq->core.cons_index - count_eqe;
        enable_irq(eq->core.irqn);
static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_eq_table *eqt;
        struct mlx5_core_dev *dev;
        struct mlx5_eqe *eqe;

        dev = eq->dev;
        eqt = dev->priv.eq_table;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */

                if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
                        atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
                else
                        mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);

                atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events. We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least once every MLX5_NUM_SPARE_EQE entries.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
static void init_eq_buf(struct mlx5_eq *eq)
        struct mlx5_eqe *eqe;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;

create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
              struct mlx5_eq_param *param)
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        struct mlx5_cq_table *cq_table = &eq->cq_table;
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        u8 vecidx = param->index;

        if (eq_table->irq_info[vecidx].context)

        memset(cq_table, 0, sizeof(*cq_table));
        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

        eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);

        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
                MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

        in = kvzalloc(inlen, GFP_KERNEL);

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
                 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

        snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));
        eq_table->irq_info[vecidx].context = param->context;

        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = pci_irq_vector(dev->pdev, vecidx);
        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(eq->irqn, param->handler, 0,
                          eq_table->irq_info[vecidx].name, param->context);

        err = mlx5_debug_eq_add(dev, eq);

        /* EQs are created in ARMED state */

        free_irq(eq->irqn, eq);

        mlx5_cmd_destroy_eq(dev, eq->eqn);

        mlx5_buf_free(dev, &eq->buf);
static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        struct mlx5_irq_info *irq_info;

        irq_info = &eq_table->irq_info[eq->vecidx];

        mlx5_debug_eq_remove(dev, eq);

        free_irq(eq->irqn, irq_info->context);
        irq_info->context = NULL;

        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
        synchronize_irq(eq->irqn);

        mlx5_buf_free(dev, &eq->buf);

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
        struct mlx5_cq_table *table = &eq->cq_table;

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, cq->cqn, cq);
        spin_unlock_irq(&table->lock);

int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *tmp;

        spin_lock_irq(&table->lock);
        tmp = radix_tree_delete(&table->tree, cq->cqn);
        spin_unlock_irq(&table->lock);
                mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", cq->cqn, eq->eqn);

                mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", cq->cqn, eq->eqn);
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *eq_table;

        eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);

        dev->priv.eq_table = eq_table;

        err = mlx5_eq_debugfs_init(dev);
                goto kvfree_eq_table;

        mutex_init(&eq_table->lock);
        for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
                ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

        dev->priv.eq_table = NULL;

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
        mlx5_eq_debugfs_cleanup(dev);
        kvfree(dev->priv.eq_table);

static int create_async_eq(struct mlx5_core_dev *dev, const char *name,
                           struct mlx5_eq *eq, struct mlx5_eq_param *param)
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;

        mutex_lock(&eq_table->lock);
        if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {

        err = create_map_eq(dev, eq, name, param);
        mutex_unlock(&eq_table->lock);

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;

        mutex_lock(&eq_table->lock);
        err = destroy_unmap_eq(dev, eq);
        mutex_unlock(&eq_table->lock);

static int cq_err_event_notifier(struct notifier_block *nb,
                                 unsigned long type, void *data)
        struct mlx5_eq_table *eqt;
        struct mlx5_core_cq *cq;
        struct mlx5_eqe *eqe;

        /* type == MLX5_EVENT_TYPE_CQ_ERROR */

        eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);

        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
        mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                       cqn, eqe->data.cq_err.syndrome);

        cq = mlx5_eq_cq_get(eq, cqn);
                mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);

static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

        if (MLX5_VPORT_MANAGER(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
            MLX5_CAP_GEN(dev, general_notification_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
        else
                mlx5_core_dbg(dev, "port_module_event is not set\n");
        if (MLX5_PPS_CAP(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

        if (MLX5_CAP_GEN(dev, fpga))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
                                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
        if (MLX5_CAP_GEN_MAX(dev, dct))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

        if (MLX5_CAP_GEN(dev, temp_warn_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

        if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

        if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

        return async_event_mask;
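
/* Illustrative sketch, not part of the driver: any additional async event
 * would follow the same pattern as above -- gate its bit on a device
 * capability so the EQ's event_bitmask only advertises events the HCA can
 * actually generate. "new_feature" and MLX5_EVENT_TYPE_NEW_EVENT are
 * hypothetical placeholders:
 *
 *	if (MLX5_CAP_GEN(dev, new_feature))
 *		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NEW_EVENT);
 */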
static int create_async_eqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_param param = {};

        MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
        mlx5_eq_notifier_register(dev, &table->cq_err_nb);

        param = (struct mlx5_eq_param) {
                .index = MLX5_EQ_CMD_IDX,
                .mask = 1ull << MLX5_EVENT_TYPE_CMD,
                .nent = MLX5_NUM_CMD_EQE,
                .context = &table->cmd_eq,
                .handler = mlx5_eq_async_int,
        err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, &param);
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);

        mlx5_cmd_use_events(dev);

        param = (struct mlx5_eq_param) {
                .index = MLX5_EQ_ASYNC_IDX,
                .mask = gather_async_events_mask(dev),
                .nent = MLX5_NUM_ASYNC_EQE,
                .context = &table->async_eq,
                .handler = mlx5_eq_async_int,
        err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, &param);
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);

        param = (struct mlx5_eq_param) {
                .index = MLX5_EQ_PAGEREQ_IDX,
                .mask = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
                .nent = /* TODO: sriov max_vf + */ 1,
                .context = &table->pages_eq,
                .handler = mlx5_eq_async_int,
        err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, &param);
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);

        destroy_async_eq(dev, &table->async_eq);

        mlx5_cmd_use_polling(dev);
        destroy_async_eq(dev, &table->cmd_eq);

        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);

static void destroy_async_eqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = dev->priv.eq_table;

        err = destroy_async_eq(dev, &table->pages_eq);
                mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",

        err = destroy_async_eq(dev, &table->async_eq);
                mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",

        mlx5_cmd_use_polling(dev);

        err = destroy_async_eq(dev, &table->cmd_eq);
                mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",

        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
        return &dev->priv.eq_table->async_eq;

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
        synchronize_irq(dev->priv.eq_table->async_eq.irqn);

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
        synchronize_irq(dev->priv.eq_table->cmd_eq.irqn);
/* Generic EQ API for mlx5_core consumers
 * Needed for RDMA ODP EQ for now
 */
mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
                       struct mlx5_eq_param *param)
        struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);

                return ERR_PTR(-ENOMEM);

        err = create_async_eq(dev, name, eq, param);

EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
        err = destroy_async_eq(dev, eq);

EXPORT_SYMBOL(mlx5_eq_destroy_generic);

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
        u32 ci = eq->cons_index + cc;
        struct mlx5_eqe *eqe;

        eqe = get_eqe(eq, ci & (eq->nent - 1));
        eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
        /* Make sure we read EQ entry contents after we've
         * checked the ownership bit.
         */

EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);

        eq->cons_index += cc;
        val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */

EXPORT_SYMBOL(mlx5_eq_update_ci);
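
/* Illustrative consumer sketch, not part of this file: the generic EQ API
 * above is intended to be used roughly as below. "MY_EQ_IDX", "my_ctx" and
 * "my_eq_handler" (an irq handler, like mlx5_eq_async_int above) are
 * hypothetical placeholders, the event type is only an example, and error
 * handling is elided:
 *
 *	struct mlx5_eq_param param = {
 *		.index   = MY_EQ_IDX,
 *		.mask    = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
 *		.nent    = 64,
 *		.context = my_ctx,
 *		.handler = my_eq_handler,
 *	};
 *	struct mlx5_eq *eq = mlx5_eq_create_generic(dev, "my_eq", &param);
 *
 *	Then, from the handler or a polling context:
 *
 *	struct mlx5_eqe *eqe;
 *	u32 cc = 0;
 *
 *	while ((eqe = mlx5_eq_get_eqe(eq, cc)))
 *		cc++;				// consume eqe
 *	mlx5_eq_update_ci(eq, cc, true);	// ack EQEs and re-arm the EQ
 *
 *	mlx5_eq_destroy_generic(dev, eq);
 */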
static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
        struct mlx5_priv *priv = &mdev->priv;
        int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
        int irq = pci_irq_vector(mdev->pdev, vecidx);
        struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

        if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
                mlx5_core_warn(mdev, "zalloc_cpumask_var failed");

        cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
                        irq_info->mask);

        if (IS_ENABLED(CONFIG_SMP) &&
            irq_set_affinity_hint(irq, irq_info->mask))
                mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);

static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
        int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
        struct mlx5_priv *priv = &mdev->priv;
        int irq = pci_irq_vector(mdev->pdev, vecidx);
        struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

        irq_set_affinity_hint(irq, NULL);
        free_cpumask_var(irq_info->mask);

static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
        for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) {
                err = set_comp_irq_affinity_hint(mdev, i);

        for (i--; i >= 0; i--)
                clear_comp_irq_affinity_hint(mdev, i);

static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
        for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++)
                clear_comp_irq_affinity_hint(mdev, i);

static void destroy_comp_eqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;

        clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
                free_irq_cpu_rmap(table->rmap);

        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (destroy_unmap_eq(dev, &eq->core))
                        mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
                tasklet_disable(&eq->tasklet_ctx.task);

static int create_comp_eqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = dev->priv.eq_table;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_eq_comp *eq;

        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_vec = table->num_comp_vectors;
        nent = MLX5_COMP_EQ_SIZE;
#ifdef CONFIG_RFS_ACCEL
        table->rmap = alloc_irq_cpu_rmap(ncomp_vec);

        for (i = 0; i < ncomp_vec; i++) {
                int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
                struct mlx5_eq_param param = {};

                eq = kzalloc(sizeof(*eq), GFP_KERNEL);

                INIT_LIST_HEAD(&eq->tasklet_ctx.list);
                INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
                spin_lock_init(&eq->tasklet_ctx.lock);
                tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
                             (unsigned long)&eq->tasklet_ctx);

#ifdef CONFIG_RFS_ACCEL
                irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx));

                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                param = (struct mlx5_eq_param) {
                        .context = &eq->core,
                        .handler = mlx5_eq_comp_int
                err = create_map_eq(dev, &eq->core, name, &param);

                mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
                /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
                list_add_tail(&eq->list, &table->comp_eqs_list);

        err = set_comp_irq_affinity_hints(dev);
                mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");

        destroy_comp_eqs(dev);

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;

        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                        *irqn = eq->core.irqn;

EXPORT_SYMBOL(mlx5_vector2eqn);
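
/* Illustrative sketch, not part of this file: a consumer typically spreads
 * its CQs over the available completion vectors and resolves each vector to
 * an EQ number before creating the CQ. "i" and the "cqc" command buffer are
 * hypothetical:
 *
 *	unsigned int nvec = mlx5_comp_vectors_count(dev);
 *	unsigned int irqn;
 *	int eqn, err;
 *
 *	err = mlx5_vector2eqn(dev, i % nvec, &eqn, &irqn);
 *	if (!err)
 *		MLX5_SET(cqc, cqc, c_eqn, eqn);
 */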
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
        return dev->priv.eq_table->num_comp_vectors;

EXPORT_SYMBOL(mlx5_comp_vectors_count);

mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
        /* TODO: consider irq_get_affinity_mask(irq) */
        return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;

EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
#ifdef CONFIG_RFS_ACCEL
        return dev->priv.eq_table->rmap;

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq;

        list_for_each_entry(eq, &table->comp_eqs_list, list) {
                if (eq->core.eqn == eqn)

        return ERR_PTR(-ENOENT);

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = dev->priv.eq_table;

        clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
                free_irq_cpu_rmap(table->rmap);

        mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
        max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
        for (i = max_eqs - 1; i >= 0; i--) {
                if (!table->irq_info[i].context)
                free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context);
                table->irq_info[i].context = NULL;
        mutex_unlock(&table->lock);
        pci_free_irq_vectors(dev->pdev);

static int alloc_irq_vectors(struct mlx5_core_dev *dev)
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = priv->eq_table;
        int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
                      MLX5_CAP_GEN(dev, max_num_eqs) :
                      1 << MLX5_CAP_GEN(dev, log_max_eq);

        nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
               MLX5_EQ_VEC_COMP_BASE;
        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_EQ_VEC_COMP_BASE)

        table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
        if (!table->irq_info)

        nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
                goto err_free_irq_info;

        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

        kfree(table->irq_info);

static void free_irq_vectors(struct mlx5_core_dev *dev)
        struct mlx5_priv *priv = &dev->priv;

        pci_free_irq_vectors(dev->pdev);
        kfree(priv->eq_table->irq_info);

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
        err = alloc_irq_vectors(dev);
                mlx5_core_err(dev, "alloc irq vectors failed\n");

        err = create_async_eqs(dev);
                mlx5_core_err(dev, "Failed to create async EQs\n");

        err = create_comp_eqs(dev);
                mlx5_core_err(dev, "Failed to create completion EQs\n");

        destroy_async_eqs(dev);

        free_irq_vectors(dev);

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
        destroy_comp_eqs(dev);
        destroy_async_eqs(dev);
        free_irq_vectors(dev);

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        if (nb->event_type >= MLX5_EVENT_TYPE_MAX)

        return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        if (nb->event_type >= MLX5_EVENT_TYPE_MAX)

        return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
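
/* Illustrative sketch, not part of this file: other mlx5_core components
 * subscribe to async events through the notifier chains above in the same way
 * this file registers cq_err_nb for CQ_ERROR. "my_nb" and "my_event_handler"
 * are hypothetical, and PORT_CHANGE stands for any event type carried in the
 * async EQ's event bitmask:
 *
 *	struct mlx5_nb my_nb;
 *
 *	MLX5_NB_INIT(&my_nb, my_event_handler, PORT_CHANGE);
 *	mlx5_eq_notifier_register(dev, &my_nb);
 *	...
 *	mlx5_eq_notifier_unregister(dev, &my_nb);
 */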