/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"

enum {
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED             = 0x9,
        MLX5_EQ_STATE_FIRED             = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED      = 0xb,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};

struct mlx5_irq_info {
        cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
        void *context; /* dev_id provided to request_irq */
};

struct mlx5_eq_table {
        struct list_head        comp_eqs_list;
        struct mlx5_eq          pages_eq;
        struct mlx5_eq          cmd_eq;
        struct mlx5_eq          async_eq;

        struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

        /* Since CQ DB is stored in async_eq */
        struct mlx5_nb          cq_err_nb;

        struct mutex            lock; /* sync async eqs creations */
        int                     num_comp_vectors;
        struct mlx5_irq_info    *irq_info;
#ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap         *rmap;
#endif
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
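
/* Editorial note: the EQ event bitmask is a 64-bit field in which bit N
 * enables delivery of event type N on this EQ. As a small illustrative
 * sketch (mirroring what gather_async_events_mask() does further below),
 * a caller extending the default mask with one optional event type would
 * write:
 *
 *      u64 mask = MLX5_ASYNC_EVENT_MASK;
 *
 *      if (MLX5_CAP_GEN(dev, port_module_event))
 *              mask |= 1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT;
 */
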
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *cq = NULL;

        spin_lock(&table->lock);
        cq = radix_tree_lookup(&table->tree, cqn);
        if (likely(cq))
                mlx5_cq_hold(cq);
        spin_unlock(&table->lock);

        return cq;
}

static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
{
        struct mlx5_eq_comp *eq_comp = eq_ptr;
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_eqe *eqe;
        int set_ci = 0;
        u32 cqn = -1;

        while ((eqe = next_eqe_sw(eq))) {
                struct mlx5_core_cq *cq;

                /* Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();
                /* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
                cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

                cq = mlx5_eq_cq_get(eq, cqn);
                if (likely(cq)) {
                        ++cq->arm_sn;
                        cq->comp(cq);
                        mlx5_cq_put(cq);
                } else {
                        mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
                }

                ++eq->cons_index;
                ++set_ci;

                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events. We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        if (cqn != -1)
                tasklet_schedule(&eq_comp->tasklet_ctx.task);

        return IRQ_HANDLED;
}

/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
        u32 count_eqe;

        disable_irq(eq->core.irqn);
        count_eqe = eq->core.cons_index;
        mlx5_eq_comp_int(eq->core.irqn, eq);
        count_eqe = eq->core.cons_index - count_eqe;
        enable_irq(eq->core.irqn);

        return count_eqe;
}

static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_eq_table *eqt;
        struct mlx5_core_dev *dev;
        struct mlx5_eqe *eqe;
        int set_ci = 0;

        dev = eq->dev;
        eqt = dev->priv.eq_table;

        while ((eqe = next_eqe_sw(eq))) {
                /* Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
                        atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
                else
                        mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);

                atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

                ++eq->cons_index;
                ++set_ci;

                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events. We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        return IRQ_HANDLED;
}

static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}
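
/* Editorial note on the ownership (valid) bit: entries are seeded with
 * MLX5_EQE_OWNER_INIT_VAL (0x1) so that, on the first pass through the ring
 * (consumer-index "parity" 0), an EQE only reads as valid once the HCA has
 * written it and cleared the owner bit. The expected owner value toggles on
 * every wrap of the ring, which is what the
 * (eqe->owner & 1) ^ !!(ci & eq->nent) test in mlx5_eq_get_eqe() below checks.
 */
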
static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
              struct mlx5_eq_param *param)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        struct mlx5_cq_table *cq_table = &eq->cq_table;
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        u8 vecidx = param->index;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;

        if (eq_table->irq_info[vecidx].context)
                return -EEXIST;

        /* Init CQ table */
        memset(cq_table, 0, sizeof(*cq_table));
        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

        eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
                MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
                 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                goto err_in;

        snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));
        eq_table->irq_info[vecidx].context = param->context;

        eq->vecidx = vecidx;
        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = pci_irq_vector(dev->pdev, vecidx);
        eq->dev = dev;
        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(eq->irqn, param->handler, 0,
                          eq_table->irq_info[vecidx].name, param->context);
        if (err)
                goto err_eq;

        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_irq;

        /* EQs are created in ARMED state */
        eq_update_ci(eq, 1);

        kvfree(in);
        return 0;

err_irq:
        /* free with the same dev_id that was passed to request_irq() above */
        free_irq(eq->irqn, param->context);

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        struct mlx5_irq_info *irq_info;
        int err;

        irq_info = &eq_table->irq_info[eq->vecidx];

        mlx5_debug_eq_remove(dev, eq);

        free_irq(eq->irqn, irq_info->context);
        irq_info->context = NULL;

        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        synchronize_irq(eq->irqn);

        mlx5_buf_free(dev, &eq->buf);

        return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        int err;

        spin_lock(&table->lock);
        err = radix_tree_insert(&table->tree, cq->cqn, cq);
        spin_unlock(&table->lock);

        return err;
}

int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *tmp;

        spin_lock(&table->lock);
        tmp = radix_tree_delete(&table->tree, cq->cqn);
        spin_unlock(&table->lock);

        if (!tmp) {
                mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
                               cq->cqn, eq->eqn);
                return -ENOENT;
        }

        if (tmp != cq) {
                mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
                               cq->cqn, eq->eqn);
                return -EINVAL;
        }

        return 0;
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *eq_table;
        int i, err;

        eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
        if (!eq_table)
                return -ENOMEM;

        dev->priv.eq_table = eq_table;

        err = mlx5_eq_debugfs_init(dev);
        if (err)
                goto kvfree_eq_table;

        mutex_init(&eq_table->lock);
        for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
                ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

        return 0;

kvfree_eq_table:
        kvfree(eq_table);
        dev->priv.eq_table = NULL;
        return err;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_eq_debugfs_cleanup(dev);
        kvfree(dev->priv.eq_table);
}

static int create_async_eq(struct mlx5_core_dev *dev, const char *name,
                           struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        mutex_lock(&eq_table->lock);
        if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {
                err = -ENOSPC;
                goto unlock;
        }

        err = create_map_eq(dev, eq, name, param);
unlock:
        mutex_unlock(&eq_table->lock);
        return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
        int err;

        mutex_lock(&eq_table->lock);
        err = destroy_unmap_eq(dev, eq);
        mutex_unlock(&eq_table->lock);
        return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
                                 unsigned long type, void *data)
{
        struct mlx5_eq_table *eqt;
        struct mlx5_core_cq *cq;
        struct mlx5_eqe *eqe;
        struct mlx5_eq *eq;
        u32 cqn;

        /* type == MLX5_EVENT_TYPE_CQ_ERROR */

        eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
        eq  = &eqt->async_eq;
        eqe = data;

        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
        mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                       cqn, eqe->data.cq_err.syndrome);

        cq = mlx5_eq_cq_get(eq, cqn);
        if (unlikely(!cq)) {
                mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
                return NOTIFY_OK;
        }

        cq->event(cq, type);

        mlx5_cq_put(cq);

        return NOTIFY_OK;
}

static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
{
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

        if (MLX5_VPORT_MANAGER(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
            MLX5_CAP_GEN(dev, general_notification_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
        else
                mlx5_core_dbg(dev, "port_module_event is not set\n");

        if (MLX5_PPS_CAP(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

        if (MLX5_CAP_GEN(dev, fpga))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
                                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);

        if (MLX5_CAP_GEN_MAX(dev, dct))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

        if (MLX5_CAP_GEN(dev, temp_warn_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

        if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

        if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

        if (mlx5_core_is_ecpf_esw_manager(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE);

        return async_event_mask;
}

static int create_async_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_param param = {};
        int err;

        MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
        mlx5_eq_notifier_register(dev, &table->cq_err_nb);

        param = (struct mlx5_eq_param) {
                .index = MLX5_EQ_CMD_IDX,
                .mask = 1ull << MLX5_EVENT_TYPE_CMD,
                .nent = MLX5_NUM_CMD_EQE,
                .context = &table->cmd_eq,
                .handler = mlx5_eq_async_int,
        };
        err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, &param);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                goto err0;
        }

        mlx5_cmd_use_events(dev);

        param = (struct mlx5_eq_param) {
                .index = MLX5_EQ_ASYNC_IDX,
                .mask = gather_async_events_mask(dev),
                .nent = MLX5_NUM_ASYNC_EQE,
                .context = &table->async_eq,
                .handler = mlx5_eq_async_int,
        };
        err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, &param);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        param = (struct mlx5_eq_param) {
                .index = MLX5_EQ_PAGEREQ_IDX,
                .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
                .nent = /* TODO: sriov max_vf + */ 1,
                .context = &table->pages_eq,
                .handler = mlx5_eq_async_int,
        };
        err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, &param);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

        return err;

err2:
        destroy_async_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        destroy_async_eq(dev, &table->cmd_eq);
err0:
        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
        return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        int err;

        err = destroy_async_eq(dev, &table->pages_eq);
        if (err)
                mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
                              err);

        err = destroy_async_eq(dev, &table->async_eq);
        if (err)
                mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
                              err);

        mlx5_cmd_use_polling(dev);

        err = destroy_async_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
                              err);

        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
        return &dev->priv.eq_table->async_eq;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
        synchronize_irq(dev->priv.eq_table->async_eq.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
        synchronize_irq(dev->priv.eq_table->cmd_eq.irqn);
}

/* Generic EQ API for mlx5_core consumers.
 * Needed for RDMA ODP EQ for now.
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
                       struct mlx5_eq_param *param)
{
        struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
        int err;

        if (!eq)
                return ERR_PTR(-ENOMEM);

        err = create_async_eq(dev, name, eq, param);
        if (err) {
                kvfree(eq);
                eq = ERR_PTR(err);
        }

        return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);
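
/* Editorial sketch of how a consumer might use the generic EQ API. The slot,
 * event type, depth and callbacks below are hypothetical placeholders, not
 * values taken from this file:
 *
 *      struct mlx5_eq_param param = {
 *              .index   = MY_ASYNC_EQ_IDX,              // hypothetical slot
 *              .mask    = 1ull << MY_EVENT_TYPE,        // hypothetical event
 *              .nent    = MY_EQ_DEPTH,                  // hypothetical depth
 *              .context = my_ctx,
 *              .handler = my_irq_handler,               // irqreturn_t (*)(int, void *)
 *      };
 *      struct mlx5_eq *eq = mlx5_eq_create_generic(dev, "my_eq", &param);
 *
 *      if (IS_ERR(eq))
 *              return PTR_ERR(eq);
 *      ...
 *      mlx5_eq_destroy_generic(dev, eq);
 */
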
int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        if (IS_ERR(eq))
                return -EINVAL;

        err = destroy_async_eq(dev, eq);
        if (err)
                goto out;

        kvfree(eq);
out:
        return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
        u32 ci = eq->cons_index + cc;
        struct mlx5_eqe *eqe;

        eqe = get_eqe(eq, ci & (eq->nent - 1));
        eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
        /* Make sure we read EQ entry contents after we've
         * checked the ownership bit.
         */
        if (eqe)
                dma_rmb();

        return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val;

        eq->cons_index += cc;
        val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
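
/* Editorial sketch of how a generic-EQ consumer is expected to pair
 * mlx5_eq_get_eqe() with mlx5_eq_update_ci(); process() is a hypothetical
 * consumer-specific handler:
 *
 *      struct mlx5_eqe *eqe;
 *      u32 cc = 0;
 *
 *      while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *              process(eqe);                    // consumer-specific handling
 *              cc++;
 *      }
 *      mlx5_eq_update_ci(eq, cc, true);         // publish consumer index, re-arm
 *
 * The doorbell value written above packs the 24-bit consumer index into the
 * low bits and the EQ number into bits 24-31, as mlx5_eq_update_ci() shows.
 */
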
static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
        struct mlx5_priv *priv = &mdev->priv;
        int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
        int irq = pci_irq_vector(mdev->pdev, vecidx);
        struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

        if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
                mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
                return -ENOMEM;
        }

        cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
                        irq_info->mask);

        if (IS_ENABLED(CONFIG_SMP) &&
            irq_set_affinity_hint(irq, irq_info->mask))
                mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);

        return 0;
}

static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
        int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
        struct mlx5_priv *priv = &mdev->priv;
        int irq = pci_irq_vector(mdev->pdev, vecidx);
        struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

        irq_set_affinity_hint(irq, NULL);
        free_cpumask_var(irq_info->mask);
}

static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
{
        int err;
        int i;

        for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) {
                err = set_comp_irq_affinity_hint(mdev, i);
                if (err)
                        goto err_out;
        }

        return 0;

err_out:
        for (i--; i >= 0; i--)
                clear_comp_irq_affinity_hint(mdev, i);

        return err;
}

static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
{
        int i;

        for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++)
                clear_comp_irq_affinity_hint(mdev, i);
}

static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;

        clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
        if (table->rmap) {
                free_irq_cpu_rmap(table->rmap);
                table->rmap = NULL;
        }
#endif
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                list_del(&eq->list);
                if (destroy_unmap_eq(dev, &eq->core))
                        mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
                                       eq->core.eqn);
                tasklet_disable(&eq->tasklet_ctx.task);
                kfree(eq);
        }
}

static int create_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_eq_comp *eq;
        int ncomp_vec;
        int nent;
        int err;
        int i;

        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_vec = table->num_comp_vectors;
        nent = MLX5_COMP_EQ_SIZE;
#ifdef CONFIG_RFS_ACCEL
        table->rmap = alloc_irq_cpu_rmap(ncomp_vec);
        if (!table->rmap)
                return -ENOMEM;
#endif
        for (i = 0; i < ncomp_vec; i++) {
                int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
                struct mlx5_eq_param param = {};

                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
                        err = -ENOMEM;
                        goto clean;
                }

                INIT_LIST_HEAD(&eq->tasklet_ctx.list);
                INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
                spin_lock_init(&eq->tasklet_ctx.lock);
                tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
                             (unsigned long)&eq->tasklet_ctx);

#ifdef CONFIG_RFS_ACCEL
                irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx));
#endif
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                param = (struct mlx5_eq_param) {
                        .index = vecidx,
                        .mask = 0,
                        .nent = nent,
                        .context = &eq->core,
                        .handler = mlx5_eq_comp_int,
                };
                err = create_map_eq(dev, &eq->core, name, &param);
                if (err) {
                        kfree(eq);
                        goto clean;
                }
                mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
                /* add tail, to keep the list ordered, for mlx5_vector2eqn() to work */
                list_add_tail(&eq->list, &table->comp_eqs_list);
        }

        err = set_comp_irq_affinity_hints(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
                goto clean;
        }

        return 0;

clean:
        destroy_comp_eqs(dev);
        return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;
        int err = -ENOENT;
        int i = 0;

        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (i++ == vector) {
                        *eqn = eq->core.eqn;
                        *irqn = eq->core.irqn;
                        err = 0;
                        break;
                }
        }

        return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
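
/* Editorial example (simplified): a consumer creating a CQ on completion
 * vector 0 translates the vector to an EQ number and IRQ first:
 *
 *      int eqn;
 *      unsigned int irqn;
 *      int err = mlx5_vector2eqn(dev, 0, &eqn, &irqn);
 *
 *      if (err)
 *              return err;
 *      // eqn is then written into the CQ context of the CREATE_CQ command
 *      // so completions for that CQ are reported on the chosen comp EQ.
 */
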
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
        return dev->priv.eq_table->num_comp_vectors;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
        /* TODO: consider irq_get_affinity_mask(irq) */
        return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
        return dev->priv.eq_table->rmap;
#else
        return NULL;
#endif
}

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq;

        list_for_each_entry(eq, &table->comp_eqs_list, list) {
                if (eq->core.eqn == eqn)
                        return eq;
        }

        return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = dev->priv.eq_table;
        int i, max_eqs;

        clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
        if (table->rmap) {
                free_irq_cpu_rmap(table->rmap);
                table->rmap = NULL;
        }
#endif

        mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
        max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
        for (i = max_eqs - 1; i >= 0; i--) {
                if (!table->irq_info[i].context)
                        continue;
                free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context);
                table->irq_info[i].context = NULL;
        }
        mutex_unlock(&table->lock);
        pci_free_irq_vectors(dev->pdev);
}

static int alloc_irq_vectors(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = priv->eq_table;
        int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
                      MLX5_CAP_GEN(dev, max_num_eqs) :
                      1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
        int err;

        nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
               MLX5_EQ_VEC_COMP_BASE;
        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_EQ_VEC_COMP_BASE)
                return -ENOMEM;

        table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
        if (!table->irq_info)
                return -ENOMEM;

        nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
                                     nvec, PCI_IRQ_MSIX);
        if (nvec < 0) {
                err = nvec;
                goto err_free_irq_info;
        }

        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

        return 0;

err_free_irq_info:
        kfree(table->irq_info);
        return err;
}
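
/* Editorial worked example of the vector budget above (assumed values): with
 * one port and 8 online CPUs, the driver asks for
 * 1 * 8 + MLX5_EQ_VEC_COMP_BASE MSI-X vectors, where the first
 * MLX5_EQ_VEC_COMP_BASE vectors serve the async/cmd/pages EQs. The request is
 * capped by the device's max_num_eqs / log_max_eq capability, and whatever is
 * granted beyond MLX5_EQ_VEC_COMP_BASE becomes num_comp_vectors.
 */
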
static void free_irq_vectors(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        pci_free_irq_vectors(dev->pdev);
        kfree(priv->eq_table->irq_info);
}

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
        int err;

        err = alloc_irq_vectors(dev);
        if (err) {
                mlx5_core_err(dev, "alloc irq vectors failed\n");
                return err;
        }

        err = create_async_eqs(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create async EQs\n");
                goto err_async_eqs;
        }

        err = create_comp_eqs(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create completion EQs\n");
                goto err_comp_eqs;
        }

        return 0;

err_comp_eqs:
        destroy_async_eqs(dev);
err_async_eqs:
        free_irq_vectors(dev);
        return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
        destroy_comp_eqs(dev);
        destroy_async_eqs(dev);
        free_irq_vectors(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
                return -EINVAL;

        return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
        struct mlx5_eq_table *eqt = dev->priv.eq_table;

        if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
                return -EINVAL;

        return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}