/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x1000,
	MLX5_NUM_CMD_EQE	= 32,
	MLX5_NUM_PF_DRAIN	= 64,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};
struct mlx5_irq_info {
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
	void *context; /* dev_id provided to request_irq */
};

struct mlx5_eq_table {
	struct list_head	comp_eqs_list;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		async_eq;
	struct mlx5_eq		cmd_eq;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct mlx5_eq_pagefault pfault_eq;
#endif
	struct mutex		lock; /* sync async eqs creations */
	int			num_comp_vectors;
	struct mlx5_irq_info	*irq_info;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rmap;
#endif
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
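
/* EQE ownership: the queue size is a power of two, so bit 'nent' of
 * cons_index flips on every full pass over the buffer.  next_eqe_sw() below
 * treats an entry as software-owned only when its owner bit matches that
 * pass parity; otherwise the entry still belongs to hardware and NULL is
 * returned.
 */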
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
		return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_PAGE_FAULT:
		return "MLX5_EVENT_TYPE_PAGE_FAULT";
	case MLX5_EVENT_TYPE_PPS_EVENT:
		return "MLX5_EVENT_TYPE_PPS_EVENT";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_FPGA_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_ERROR";
	case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
	case MLX5_EVENT_TYPE_GENERAL_EVENT:
		return "MLX5_EVENT_TYPE_GENERAL_EVENT";
	case MLX5_EVENT_TYPE_DEVICE_TRACER:
		return "MLX5_EVENT_TYPE_DEVICE_TRACER";
	default:
		return "Unrecognized event";
	}
}
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}
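
/* EQ doorbell write: the low 24 bits carry the consumer index and the top
 * byte the EQ number.  Writing the armed word (offset 0) publishes the new
 * consumer index and asks the HCA to raise another event interrupt when new
 * EQEs arrive; the un-armed word (two dwords further into the doorbell page)
 * only publishes the consumer index.
 */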
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
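
/* On-demand paging: page-fault EQEs are copied into mlx5_pagefault work items
 * taken from a pre-allocated mempool and queued on a dedicated high-priority
 * workqueue, so faults are resolved in process context while the EQ itself is
 * drained from the interrupt handler.  If the mempool runs dry, draining is
 * deferred to eq_pf_action(), which refills the pool first.
 */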
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_eq_pagefault *eq = pfault->eq;

	mlx5_core_page_fault(eq->core->dev, pfault);
	mempool_free(pfault, eq->pool);
}

static void eq_pf_process(struct mlx5_eq_pagefault *eq)
{
	struct mlx5_core_dev *dev = eq->core->dev;
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq->core))) {
		pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
		if (!pfault) {
			schedule_work(&eq->work);
			break;
		}

		dma_rmb();
		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_core_dbg(dev,
			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			      eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
			pfault->type =
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
				0xffffff;
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				      pfault->type, pfault->token,
				      pfault->rdma.r_key);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				      pfault->rdma.rdma_op_len,
				      pfault->rdma.rdma_va);
			break;

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
			pfault->type =
				(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
			pfault->token =
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
				0xffffff;
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				      pfault->type, pfault->token,
				      pfault->wqe.wq_num,
				      pfault->wqe.wqe_index);
			break;

		default:
			mlx5_core_warn(dev,
				       "Unsupported page fault event sub-type: 0x%02hhx\n",
				       eqe->sub_type);
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
		}

		pfault->eq = eq;
		INIT_WORK(&pfault->work, eqe_pf_action);
		queue_work(eq->wq, &pfault->work);

		++eq->core->cons_index;
		++set_ci;

		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq->core, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq->core, 1);
}
static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
{
	struct mlx5_eq_pagefault *eq = eq_ptr;
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->lock, flags)) {
		eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->lock, flags);
	} else {
		schedule_work(&eq->work);
	}

	return IRQ_HANDLED;
}

/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 */
static void mempool_refill(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}

static void eq_pf_action(struct work_struct *work)
{
	struct mlx5_eq_pagefault *eq =
		container_of(work, struct mlx5_eq_pagefault, work);

	mempool_refill(eq->pool);

	spin_lock_irq(&eq->lock);
	eq_pf_process(eq);
	spin_unlock_irq(&eq->lock);
}
static int
create_pf_eq(struct mlx5_core_dev *dev, struct mlx5_eq_pagefault *eq)
{
	struct mlx5_eq_param param = {};
	int err;

	spin_lock_init(&eq->lock);
	INIT_WORK(&eq->work, eq_pf_action);

	eq->pool = mempool_create_kmalloc_pool(MLX5_NUM_PF_DRAIN,
					       sizeof(struct mlx5_pagefault));
	if (!eq->pool)
		return -ENOMEM;

	eq->wq = alloc_workqueue("mlx5_page_fault",
				 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
				 MLX5_NUM_CMD_EQE);
	if (!eq->wq) {
		err = -ENOMEM;
		goto err_mempool;
	}

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_PFAULT_IDX,
		.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
		.nent = MLX5_NUM_ASYNC_EQE,
		.context = eq,
		.handler = mlx5_eq_pf_int
	};

	eq->core = mlx5_eq_create_generic(dev, "mlx5_page_fault_eq", &param);
	if (IS_ERR(eq->core)) {
		err = PTR_ERR(eq->core);
		goto err_wq;
	}

	return 0;
err_wq:
	destroy_workqueue(eq->wq);
err_mempool:
	mempool_destroy(eq->pool);
	return err;
}
static int destroy_pf_eq(struct mlx5_core_dev *dev, struct mlx5_eq_pagefault *eq)
{
	int err;

	err = mlx5_eq_destroy_generic(dev, eq->core);
	cancel_work_sync(&eq->work);
	destroy_workqueue(eq->wq);
	mempool_destroy(eq->pool);

	return err;
}
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, error, !!error);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, token, token);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
static void general_event_handler(struct mlx5_core_dev *dev,
				  struct mlx5_eqe *eqe)
{
	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (dev->event)
			dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
		break;
	default:
		mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
			      eqe->sub_type);
	}
}
static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
				    struct mlx5_eqe *eqe)
{
	u64 value_lsb;
	u64 value_msb;

	value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
	value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);

	mlx5_core_warn(dev,
		       "High temperature on sensors with bit set %llx %llx",
		       value_msb, value_lsb);
}
/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	spin_unlock(&table->lock);

	return cq;
}

static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
{
	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);

	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	mlx5_cq_put(cq);
}
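
/* Completion EQ handler: each EQE carries only a CQN.  The CQ is looked up in
 * this EQ's radix tree (taking a reference via mlx5_eq_cq_get() above), its
 * completion callback is invoked, and once the EQ has been drained the
 * tasklet running mlx5_cq_tasklet_cb is scheduled to process any CQs that
 * deferred their work to it.
 */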
static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
{
	struct mlx5_eq_comp *eq_comp = eq_ptr;
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;

	while ((eqe = next_eqe_sw(eq))) {
		struct mlx5_core_cq *cq;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq);
			mlx5_cq_put(cq);
		} else {
			mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq_comp->tasklet_ctx.task);

	return IRQ_HANDLED;
}
/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them.  It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(eq->core.irqn, eq);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}
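
/* Async EQ handler: the command, async and pages EQs all register this
 * routine, which demultiplexes firmware events by eqe->type and fans them out
 * to the QP/SRQ/CQ resource handlers, the command-interface completion path,
 * the port/module/PPS/FPGA/tracer handlers and the generic dev->event
 * callback.
 */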
static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_DCT_DRAINED:
			rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
			rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;
		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_eq_cq_event(eq, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
			break;

		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_PPS_EVENT:
			mlx5_pps_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_FPGA_ERROR:
		case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
			mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
			break;

		case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
			mlx5_temp_warning_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_GENERAL_EVENT:
			general_event_handler(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_DEVICE_TRACER:
			mlx5_fw_tracer_event(dev, eqe);
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	return IRQ_HANDLED;
}
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}
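
/* EQ creation pads the requested depth with MLX5_NUM_SPARE_EQE entries and
 * rounds it up to a power of two, so the handlers above can always report the
 * consumer index to the HCA before the queue is entirely consumed (see the
 * overflow comments in the interrupt handlers).
 */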
static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
	      struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	u8 vecidx = param->index;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	if (eq_table->irq_info[vecidx].context)
		return -EEXIST;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));
	eq_table->irq_info[vecidx].context = param->context;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(eq->irqn, param->handler, 0,
			  eq_table->irq_info[vecidx].name, param->context);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	/* free_irq() must be passed the same dev_id that request_irq() got */
	free_irq(eq->irqn, param->context);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	struct mlx5_irq_info *irq_info;
	int err;

	irq_info = &eq_table->irq_info[eq->vecidx];

	mlx5_debug_eq_remove(dev, eq);

	free_irq(eq->irqn, irq_info->context);
	irq_info->context = NULL;

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	mlx5_buf_free(dev, &eq->buf);

	return err;
}
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);

	return err;
}
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);

	if (!tmp) {
		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			       cq->cqn, eq->eqn);
		return -ENOENT;
	}

	if (tmp != cq) {
		mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			       cq->cqn, eq->eqn);
		return -EINVAL;
	}

	return 0;
}
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int err;

	eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	err = mlx5_eq_debugfs_init(dev);
	if (err)
		goto kvfree_eq_table;

	mutex_init(&eq_table->lock);

	return 0;

kvfree_eq_table:
	kvfree(eq_table);
	dev->priv.eq_table = NULL;
	return err;
}
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
	kvfree(dev->priv.eq_table);
}
static int create_async_eq(struct mlx5_core_dev *dev, const char *name,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {
		err = -ENOSPC;
		goto unlock;
	}

	err = create_map_eq(dev, eq, name, param);
unlock:
	mutex_unlock(&eq_table->lock);
	return err;
}
static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}
static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	return async_event_mask;
}
static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_CMD_IDX,
		.mask = 1ull << MLX5_EVENT_TYPE_CMD,
		.nent = MLX5_NUM_CMD_EQE,
		.context = &table->cmd_eq,
		.handler = mlx5_eq_async_int,
	};
	err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, &param);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_ASYNC_IDX,
		.mask = gather_async_events_mask(dev),
		.nent = MLX5_NUM_ASYNC_EQE,
		.context = &table->async_eq,
		.handler = mlx5_eq_async_int,
	};
	err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, &param);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err_cmd_eq;
	}

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_PAGEREQ_IDX,
		.mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
		.nent = /* TODO: sriov max_vf + */ 1,
		.context = &table->pages_eq,
		.handler = mlx5_eq_async_int,
	};
	err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, &param);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err_async_eq;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = create_pf_eq(dev, &table->pfault_eq);
		if (err) {
			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
				       err);
			goto err_pages_eq;
		}
	}

	return 0;

err_pages_eq:
	destroy_async_eq(dev, &table->pages_eq);
#else
	return 0;
#endif

err_async_eq:
	destroy_async_eq(dev, &table->async_eq);

err_cmd_eq:
	mlx5_cmd_use_polling(dev);
	destroy_async_eq(dev, &table->cmd_eq);

	return err;
}
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	int err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = destroy_pf_eq(dev, &table->pfault_eq);
		if (err)
			mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
				      err);
	}
#endif

	err = destroy_async_eq(dev, &table->pages_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
			      err);

	err = destroy_async_eq(dev, &table->async_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
			      err);

	mlx5_cmd_use_polling(dev);

	err = destroy_async_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
			      err);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.irqn);
}
/* Generic EQ API for mlx5_core consumers
 * Needed For RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	err = create_async_eq(dev, name, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);
int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);
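
/* The two exported helpers below let a generic-EQ consumer poll the queue
 * itself.  A rough usage sketch (illustrative only; "my_handler" and "my_ctx"
 * are placeholders, and create_pf_eq() above shows the creation side):
 *
 *	struct mlx5_eq_param param = {
 *		.index = MLX5_EQ_PFAULT_IDX,
 *		.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
 *		.nent = MLX5_NUM_ASYNC_EQE,
 *		.context = my_ctx,
 *		.handler = my_handler,
 *	};
 *	struct mlx5_eq *eq = mlx5_eq_create_generic(dev, "my_eq", &param);
 *
 *	// inside my_handler: consume entries, then publish CI and re-arm
 *	u32 cc = 0;
 *	struct mlx5_eqe *eqe;
 *
 *	while ((eqe = mlx5_eq_get_eqe(eq, cc)))
 *		cc++;			// process eqe
 *	mlx5_eq_update_ci(eq, cc, true);
 */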
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (eq->nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
/* Completion EQs */
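
/* Each completion vector gets an affinity hint: vector i is pinned to one
 * NUMA-local CPU chosen with cpumask_local_spread(), and the mask is handed
 * to the IRQ core with irq_set_affinity_hint() so completion interrupts are
 * steered (subject to userspace irqbalance policy) to that CPU.
 */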
static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv = &mdev->priv;
	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
	int irq = pci_irq_vector(mdev->pdev, vecidx);
	struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

	if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
			irq_info->mask);

	if (IS_ENABLED(CONFIG_SMP) &&
	    irq_set_affinity_hint(irq, irq_info->mask))
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);

	return 0;
}
static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
	struct mlx5_priv *priv = &mdev->priv;
	int irq = pci_irq_vector(mdev->pdev, vecidx);
	struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(irq_info->mask);
}
static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
{
	int err;
	int i;

	for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) {
		err = set_comp_irq_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	for (i--; i >= 0; i--)
		clear_comp_irq_affinity_hint(mdev, i);

	return err;
}
static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
{
	int i;

	for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++)
		clear_comp_irq_affinity_hint(mdev, i);
}
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;

	clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
	if (table->rmap) {
		free_irq_cpu_rmap(table->rmap);
		table->rmap = NULL;
	}
#endif
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		if (destroy_unmap_eq(dev, &eq->core))
			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
				       eq->core.eqn);
		tasklet_disable(&eq->tasklet_ctx.task);
		kfree(eq);
	}
}
static int create_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq_comp *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
#ifdef CONFIG_RFS_ACCEL
	table->rmap = alloc_irq_cpu_rmap(ncomp_vec);
	if (!table->rmap)
		return -ENOMEM;
#endif
	for (i = 0; i < ncomp_vec; i++) {
		int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
		struct mlx5_eq_param param = {};

		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);

#ifdef CONFIG_RFS_ACCEL
		irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx));
#endif
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		param = (struct mlx5_eq_param) {
			.index = vecidx,
			.mask = 0,
			.nent = nent,
			.context = &eq->core,
			.handler = mlx5_eq_comp_int
		};
		err = create_map_eq(dev, &eq->core, name, &param);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
		/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
		list_add_tail(&eq->list, &table->comp_eqs_list);
	}

	err = set_comp_irq_affinity_hints(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
		goto clean;
	}

	return 0;

clean:
	destroy_comp_eqs(dev);
	return err;
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;
	int err = -ENOENT;
	int i = 0;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (i++ == vector) {
			*eqn = eq->core.eqn;
			*irqn = eq->core.irqn;
			err = 0;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->num_comp_vectors;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	/* TODO: consider irq_get_affinity_mask(irq) */
	return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
	return dev->priv.eq_table->rmap;
#else
	return NULL;
#endif
}
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->core.eqn == eqn)
			return eq;
	}

	return ERR_PTR(-ENOENT);
}
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	int i, max_eqs;

	clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
	if (table->rmap) {
		free_irq_cpu_rmap(table->rmap);
		table->rmap = NULL;
	}
#endif

	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
	max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
	for (i = max_eqs - 1; i >= 0; i--) {
		if (!table->irq_info[i].context)
			continue;
		free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context);
		table->irq_info[i].context = NULL;
	}
	mutex_unlock(&table->lock);
	pci_free_irq_vectors(dev->pdev);
}
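
/* MSI-X budget: one completion vector per online CPU per port on top of the
 * MLX5_EQ_VEC_COMP_BASE vectors reserved for the control EQs (cmd, async,
 * pages and, with ODP, page-fault), capped by the number of EQs the device
 * exposes.  Whatever the PCI core grants beyond the base is used for
 * completion EQs.
 */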
static int alloc_irq_vectors(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = priv->eq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int err;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
	if (!table->irq_info)
		return -ENOMEM;

	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
				     nvec, PCI_IRQ_MSIX);
	if (nvec < 0) {
		err = nvec;
		goto err_free_irq_info;
	}

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;

err_free_irq_info:
	kfree(table->irq_info);
	return err;
}
static void free_irq_vectors(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_free_irq_vectors(dev->pdev);
	kfree(priv->eq_table->irq_info);
}
int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	int err;

	err = alloc_irq_vectors(dev);
	if (err) {
		mlx5_core_err(dev, "alloc irq vectors failed\n");
		return err;
	}

	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	err = create_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create completion EQs\n");
		goto err_comp_eqs;
	}

	return 0;

err_comp_eqs:
	destroy_async_eqs(dev);
err_async_eqs:
	free_irq_vectors(dev);
	return err;
}
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
	free_irq_vectors(dev);
}