/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x1000,
	MLX5_NUM_CMD_EQE	= 32,
	MLX5_NUM_PF_DRAIN	= 64,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
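
/*
 * MLX5_ASYNC_EVENT_MASK is the baseline 64-bit event bitmap (one bit per
 * EQE type) programmed into the async EQ through the CREATE_EQ command's
 * event_bitmask field.  mlx5_start_eqs() below ORs in additional bits
 * (vport change, PPS, FPGA, DCT, temperature, tracer, ...) only when the
 * corresponding device capability is reported.
 */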
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]  = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
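
/*
 * EQE ownership: eq->nent is a power of two, so bit (cons_index & nent)
 * flips every time the consumer wraps around the queue, and hardware
 * toggles the EQE owner bit on each pass as well.  An entry is therefore
 * hardware-complete only when its owner bit matches the current software
 * phase; otherwise we are looking at a stale entry from the previous lap
 * and must return NULL.
 */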
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
		return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_PAGE_FAULT:
		return "MLX5_EVENT_TYPE_PAGE_FAULT";
	case MLX5_EVENT_TYPE_PPS_EVENT:
		return "MLX5_EVENT_TYPE_PPS_EVENT";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_FPGA_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_ERROR";
	case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
	case MLX5_EVENT_TYPE_GENERAL_EVENT:
		return "MLX5_EVENT_TYPE_GENERAL_EVENT";
	case MLX5_EVENT_TYPE_DEVICE_TRACER:
		return "MLX5_EVENT_TYPE_DEVICE_TRACER";
	default:
		return "Unrecognized event";
	}
}
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
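
/*
 * The EQ doorbell is a pair of 32-bit registers: writing the first dword
 * publishes the consumer index *and* re-arms the EQ (hence the +0 offset
 * when arm is set), while the second dword (+2 in __be32 units) only
 * updates the consumer index.  The written value packs the EQ number into
 * the top byte and the low 24 bits of cons_index below it.
 */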
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_eq *eq = pfault->eq;

	mlx5_core_page_fault(eq->dev, pfault);
	mempool_free(pfault, eq->pf_ctx.pool);
}
static void eq_pf_process(struct mlx5_eq *eq)
{
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
		if (!pfault) {
			schedule_work(&eq->pf_ctx.work);
			break;
		}

		dma_rmb();
		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_core_dbg(dev,
			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			      eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
			pfault->type =
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
				MLX5_24BIT_MASK;
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				      pfault->type, pfault->token,
				      pfault->rdma.r_key);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				      pfault->rdma.rdma_op_len,
				      pfault->rdma.rdma_va);
			break;

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
			pfault->type =
				(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
			pfault->token =
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
				MLX5_24BIT_MASK;
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				      pfault->type, pfault->token,
				      pfault->wqe.wq_num,
				      pfault->wqe.wqe_index);
			break;

		default:
			mlx5_core_warn(dev,
				       "Unsupported page fault event sub-type: 0x%02hhx\n",
				       eqe->sub_type);
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
		}

		pfault->eq = eq;
		INIT_WORK(&pfault->work, eqe_pf_action);
		queue_work(eq->pf_ctx.wq, &pfault->work);

		++eq->cons_index;
		++set_ci;

		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);
}
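
/*
 * Page-fault EQEs are consumed under pf_ctx.lock so that only one context
 * drains the EQ at a time.  The hard-IRQ handler below uses a trylock: if
 * the work item already holds the lock (or the mempool ran dry and the
 * work was scheduled to refill it), processing is deferred to
 * eq_pf_action() rather than spinning in interrupt context.
 */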
static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
		eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
	} else {
		schedule_work(&eq->pf_ctx.work);
	}

	return IRQ_HANDLED;
}
/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 */
static void mempool_refill(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}
static void eq_pf_action(struct work_struct *work)
{
	struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);

	mempool_refill(eq->pf_ctx.pool);

	spin_lock_irq(&eq->pf_ctx.lock);
	eq_pf_process(eq);
	spin_unlock_irq(&eq->pf_ctx.lock);
}
static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
{
	spin_lock_init(&pf_ctx->lock);
	INIT_WORK(&pf_ctx->work, eq_pf_action);

	pf_ctx->wq = alloc_ordered_workqueue(name,
					     WQ_MEM_RECLAIM);
	if (!pf_ctx->wq)
		return -ENOMEM;

	pf_ctx->pool = mempool_create_kmalloc_pool
		(MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
	if (!pf_ctx->pool)
		goto err_wq;

	return 0;
err_wq:
	destroy_workqueue(pf_ctx->wq);
	return -ENOMEM;
}
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]  = {0};

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, error, !!error);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, token, token);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
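
/*
 * Sketch of the expected caller flow (the typical consumer is the mlx5_ib
 * ODP fault handler, reached via mlx5_core_page_fault()): once the missing
 * pages have been made present, the handler acknowledges the fault so the
 * device can replay the stalled request, e.g.
 *
 *	err = mlx5_core_page_fault_resume(dev, pfault->token,
 *					  pfault->wqe.wq_num,
 *					  pfault->type,
 *					  resolved ? 0 : 1);
 *
 * Passing a non-zero 'error' tells the device to complete the faulting
 * request with an error rather than resume it.
 */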
static void general_event_handler(struct mlx5_core_dev *dev,
				  struct mlx5_eqe *eqe)
{
	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (dev->event)
			dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
		break;
	default:
		mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
			      eqe->sub_type);
	}
}
static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
				    struct mlx5_eqe *eqe)
{
	u64 value_lsb;
	u64 value_msb;

	value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
	value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);

	mlx5_core_warn(dev,
		       "High temperature on sensors with bit set %llx %llx\n",
		       value_msb, value_lsb);
}
/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	spin_unlock(&table->lock);

	return cq;
}
static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);

	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	mlx5_cq_put(cq);
}
static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
{
	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);

	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	mlx5_cq_put(cq);
}
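
/*
 * mlx5_eq_int() is the hard-IRQ handler shared by the async and completion
 * EQs.  It drains every software-owned EQE, dispatches by event type
 * (completions to the CQ's comp handler, everything else to the relevant
 * subsystem), bumps the consumer index as it goes, and finally re-arms the
 * EQ.  If at least one completion EQE was seen, the per-EQ tasklet is
 * scheduled so that completion work queued by cq->comp handlers can run
 * outside hard-IRQ context.
 */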
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_eq_cq_completion(eq, cqn);
			break;
		case MLX5_EVENT_TYPE_DCT_DRAINED:
			rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
			rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;
		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_eq_cq_event(eq, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
			break;

		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_PPS_EVENT:
			mlx5_pps_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_FPGA_ERROR:
		case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
			mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
			break;

		case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
			mlx5_temp_warning_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_GENERAL_EVENT:
			general_event_handler(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_DEVICE_TRACER:
			mlx5_fw_tracer_event(dev, eqe);
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq->tasklet_ctx.task);

	return IRQ_HANDLED;
}
/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them.  It is not recommended to use it, unless this is the
 * last resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
{
	u32 count_eqe;

	disable_irq(eq->irqn);
	count_eqe = eq->cons_index;
	mlx5_eq_int(eq->irqn, eq);
	count_eqe = eq->cons_index - count_eqe;
	enable_irq(eq->irqn);

	return count_eqe;
}
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}
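
/*
 * Note that MLX5_EQE_OWNER_INIT_VAL (0x1) deliberately mismatches the
 * initial software phase (cons_index = 0, so the phase bit is 0): per the
 * ownership scheme described at next_eqe_sw(), every freshly initialized
 * entry therefore reads as hardware-owned until the device writes it for
 * the first time.
 */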
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type)
{
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	irq_handler_t handler;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->type = type;
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF)
		handler = mlx5_eq_pf_int;
	else
#endif
		handler = mlx5_eq_int;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	MLX5_SET64(create_eq_in, in, event_bitmask, mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));

	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(eq->irqn, handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF) {
		err = init_pf_ctx(&eq->pf_ctx, name);
		if (err)
			goto err_irq;
	} else
#endif
	{
		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);
	}

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(eq->irqn, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
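
/*
 * Typical usage (sketch): the core setup code creates one completion EQ
 * per channel with a zero event bitmask (completion EQs receive events by
 * CQ-to-EQ mapping, not by event mask), e.g.
 *
 *	err = mlx5_create_map_eq(dev, eq, vecidx, nent, 0,
 *				 name, MLX5_EQ_TYPE_COMP);
 *
 * while mlx5_start_eqs() below creates the cmd/async/pages (and, with ODP,
 * page-fault) EQs with explicit event masks.
 */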
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);
	free_irq(eq->irqn, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	if (eq->type == MLX5_EQ_TYPE_COMP) {
		tasklet_disable(&eq->tasklet_ctx.task);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	} else if (eq->type == MLX5_EQ_TYPE_PF) {
		cancel_work_sync(&eq->pf_ctx.work);
		destroy_workqueue(eq->pf_ctx.wq);
		mempool_destroy(eq->pf_ctx.pool);
#endif
	}
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
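
/*
 * Completion EQs demultiplex CQ events through the per-EQ radix tree: CQ
 * creation registers the CQN with its target EQ via mlx5_eq_add_cq(), and
 * mlx5_eq_cq_get() above resolves incoming EQEs against that tree under
 * table->lock.  Deletion must therefore go through mlx5_eq_del_cq() before
 * the CQ memory is released.
 */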
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);

	return err;
}
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);

	if (!tmp) {
		/* pass the CQN and EQN in the order the format string names them */
		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			       cq->cqn, eq->eqn);
		return -ENOENT;
	}

	if (tmp != cq) {
		mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			       cq->cqn, eq->eqn);
		return -EINVAL;
	}

	return 0;
}
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	int err;

	spin_lock_init(&dev->priv.eq_table.lock);

	err = mlx5_eq_debugfs_init(dev);

	return err;
}
void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_create_map_eq(dev, &table->pfault_eq,
					 MLX5_EQ_VEC_PFAULT,
					 MLX5_NUM_ASYNC_EQE,
					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
					 "mlx5_page_fault_eq",
					 MLX5_EQ_TYPE_PF);
		if (err) {
			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
				       err);
			goto err3;
		}
	}

	return err;
err3:
	mlx5_destroy_unmap_eq(dev, &table->pages_eq);
#else
	return err;
#endif

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}
void mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
		if (err)
			mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
				      err);
	}
#endif

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
			      err);

	err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
			      err);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
			      err);
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
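
/*
 * Sketch of a caller (the EQ debugfs read path uses this helper in the
 * same fashion): the output buffer must hold the full query_eq_out layout,
 * e.g.
 *
 *	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
 *	u32 *out = kzalloc(outlen, GFP_KERNEL);
 *
 *	if (out && !mlx5_core_eq_query(dev, eq, out, outlen))
 *		log_sz = MLX5_GET(query_eq_out, out,
 *				  eq_context_entry.log_eq_size);
 *
 * 'log_sz' here is illustrative; callers pick individual fields out of the
 * returned eq_context_entry.
 */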
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;

#ifdef CONFIG_RFS_ACCEL
	if (dev->rmap) {
		free_irq_cpu_rmap(dev->rmap);
		dev->rmap = NULL;
	}
#endif
	list_for_each_entry(eq, &table->comp_eqs_list, list)
		free_irq(eq->irqn, eq);

	free_irq(table->pages_eq.irqn, &table->pages_eq);
	free_irq(table->async_eq.irqn, &table->async_eq);
	free_irq(table->cmd_eq.irqn, &table->cmd_eq);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg))
		free_irq(table->pfault_eq.irqn, &table->pfault_eq);
#endif
	pci_free_irq_vectors(dev->pdev);
}