/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"

enum {
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

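/* Per-vector IRQ bookkeeping: the name handed to request_irq(), the affinity
 * hint mask used for completion vectors, and the dev_id cookie passed to
 * request_irq() (a non-NULL context marks the vector as in use).
 */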
struct mlx5_irq_info {
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
	void *context; /* dev_id provided to request_irq */
};

struct mlx5_eq_table {
	struct list_head	comp_eqs_list;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		cmd_eq;
	struct mlx5_eq		async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	struct mutex		lock; /* sync async eqs creations */
	int			num_comp_vectors;
	struct mlx5_irq_info	*irq_info;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rmap;
#endif
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

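/* MLX5_ASYNC_EVENT_MASK is the baseline set of events the async EQ always
 * subscribes to; gather_async_events_mask() below extends it with events
 * that depend on device capabilities (vport change, PPS, FPGA, DCT, ...).
 */
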
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
		return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_PAGE_FAULT:
		return "MLX5_EVENT_TYPE_PAGE_FAULT";
	case MLX5_EVENT_TYPE_PPS_EVENT:
		return "MLX5_EVENT_TYPE_PPS_EVENT";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_FPGA_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_ERROR";
	case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
	case MLX5_EVENT_TYPE_GENERAL_EVENT:
		return "MLX5_EVENT_TYPE_GENERAL_EVENT";
	case MLX5_EVENT_TYPE_DEVICE_TRACER:
		return "MLX5_EVENT_TYPE_DEVICE_TRACER";
	default:
		return "Unrecognized event";
	}
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}

static void general_event_handler(struct mlx5_core_dev *dev,
				  struct mlx5_eqe *eqe)
{
	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (dev->event)
			dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
		break;
	default:
		mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
			      eqe->sub_type);
	}
}

static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
				    struct mlx5_eqe *eqe)
{
	u64 value_lsb;
	u64 value_msb;

	value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
	value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);

	mlx5_core_warn(dev,
		       "High temperature on sensors with bit set %llx %llx",
		       value_msb, value_lsb);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	spin_unlock(&table->lock);

	return cq;
}

static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
{
	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);

	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	mlx5_cq_put(cq);
}

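/* Completion EQ interrupt handler: drains new EQEs, dispatches each
 * completion to its CQ (looked up by cqn), batches consumer index updates so
 * the EQ never appears overflowed, then re-arms the EQ and schedules the
 * tasklet that runs deferred CQ completion work.
 */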
static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
{
	struct mlx5_eq_comp *eq_comp = eq_ptr;
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;

	while ((eqe = next_eqe_sw(eq))) {
		struct mlx5_core_cq *cq;
		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq);
			mlx5_cq_put(cq);
		} else {
			mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq_comp->tasklet_ctx.task);

	return IRQ_HANDLED;
}

/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them. It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(eq->core.irqn, eq);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}

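/* Async EQ interrupt handler: every firmware event that is not a completion
 * lands here. Each EQE is dispatched twice: first through the legacy switch
 * below (QP/SRQ/CQ errors, commands, port and page-request events, ...) and
 * then to whoever subscribed to that event type on the atomic notifier
 * chains.
 */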
static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;
	u32 rsn;
	u8 port;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_DCT_DRAINED:
			rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
			rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;
		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_eq_cq_event(eq, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
			break;

		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_PPS_EVENT:
			mlx5_pps_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_FPGA_ERROR:
		case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
			mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
			break;

		case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
			mlx5_temp_warning_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_GENERAL_EVENT:
			general_event_handler(dev, eqe);
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
			atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		else
			mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);

		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	return IRQ_HANDLED;
}

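/* Set the owner bit of every EQE to its initial value so that, until
 * firmware writes an entry, next_eqe_sw()/mlx5_eq_get_eqe() treat it as not
 * yet valid; the expected owner value alternates on every wrap of the queue.
 */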
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

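/* Allocate and initialize an EQ: reserve and initialize the EQE buffer,
 * issue CREATE_EQ to firmware with the requested event bitmask, hook the EQ
 * to its MSI-X vector via request_irq(), and register it in debugfs.
 * param->index selects the interrupt vector and param->context is the dev_id
 * handed to the IRQ handler.
 */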
static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
	      struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	u8 vecidx = param->index;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	if (eq_table->irq_info[vecidx].context)
		return -EEXIST;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);

	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));
	eq_table->irq_info[vecidx].context = param->context;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(eq->irqn, param->handler, 0,
			  eq_table->irq_info[vecidx].name, param->context);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(eq->irqn, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	struct mlx5_irq_info *irq_info;
	int err;

	irq_info = &eq_table->irq_info[eq->vecidx];

	mlx5_debug_eq_remove(dev, eq);

	free_irq(eq->irqn, irq_info->context);
	irq_info->context = NULL;

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	mlx5_buf_free(dev, &eq->buf);

	return err;
}

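/* CQ <-> EQ binding: each EQ keeps a radix tree of the CQs mapped to it,
 * keyed by cqn, so the interrupt handlers above can look up the destination
 * CQ for completions and async CQ error events.
 */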
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);

	return err;
}

int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);

	if (!tmp) {
		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", cq->cqn, eq->eqn);
		return -ENOENT;
	}

	if (tmp != cq) {
		mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", cq->cqn, eq->eqn);
		return -EINVAL;
	}

	return 0;
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i, err;

	eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	err = mlx5_eq_debugfs_init(dev);
	if (err)
		goto kvfree_eq_table;

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	return 0;

kvfree_eq_table:
	kvfree(eq_table);
	dev->priv.eq_table = NULL;
	return err;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
	kvfree(dev->priv.eq_table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev, const char *name,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {
		err = -ENOSPC;
		goto unlock;
	}

	err = create_map_eq(dev, eq, name, param);
unlock:
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	return async_event_mask;
}

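/* Create the control-path EQs in dependency order: the command EQ first
 * (then switch the command interface from polling to events), the async EQ
 * with the capability-derived event mask, and finally the dedicated
 * page-request EQ.
 */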
static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_CMD_IDX,
		.mask = 1ull << MLX5_EVENT_TYPE_CMD,
		.nent = MLX5_NUM_CMD_EQE,
		.context = &table->cmd_eq,
		.handler = mlx5_eq_async_int,
	};
	err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, &param);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_ASYNC_IDX,
		.mask = gather_async_events_mask(dev),
		.nent = MLX5_NUM_ASYNC_EQE,
		.context = &table->async_eq,
		.handler = mlx5_eq_async_int,
	};
	err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, &param);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	param = (struct mlx5_eq_param) {
		.index = MLX5_EQ_PAGEREQ_IDX,
		.mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
		.nent = /* TODO: sriov max_vf + */ 1,
		.context = &table->pages_eq,
		.handler = mlx5_eq_async_int,
	};
	err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, &param);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	destroy_async_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	destroy_async_eq(dev, &table->cmd_eq);
	return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	int err;

	err = destroy_async_eq(dev, &table->pages_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
			      err);

	err = destroy_async_eq(dev, &table->async_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
			      err);
	mlx5_cmd_use_polling(dev);

	err = destroy_async_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
			      err);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed For RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	err = create_async_eq(dev, name, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (eq->nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);

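/*
 * Usage sketch (not part of this file): a consumer that created its own EQ
 * with mlx5_eq_create_generic() would typically drain it from its interrupt
 * handler roughly like this, where "my_eq" and "handle_eqe" stand for the
 * consumer's own EQ pointer and per-entry handler:
 *
 *	struct mlx5_eqe *eqe;
 *	u32 cc = 0;
 *
 *	while ((eqe = mlx5_eq_get_eqe(my_eq, cc))) {
 *		handle_eqe(my_eq, eqe);
 *		cc++;
 *	}
 *	mlx5_eq_update_ci(my_eq, cc, true);
 */
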
/* Completion EQs */

static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv = &mdev->priv;
	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
	int irq = pci_irq_vector(mdev->pdev, vecidx);
	struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

	if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
			irq_info->mask);

	if (IS_ENABLED(CONFIG_SMP) &&
	    irq_set_affinity_hint(irq, irq_info->mask))
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);

	return 0;
}

static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
	struct mlx5_priv *priv = &mdev->priv;
	int irq = pci_irq_vector(mdev->pdev, vecidx);
	struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];

	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(irq_info->mask);
}

static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
{
	int err;
	int i;

	for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) {
		err = set_comp_irq_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	for (i--; i >= 0; i--)
		clear_comp_irq_affinity_hint(mdev, i);

	return err;
}

static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
{
	int i;

	for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++)
		clear_comp_irq_affinity_hint(mdev, i);
}

static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;

	clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
	if (table->rmap) {
		free_irq_cpu_rmap(table->rmap);
		table->rmap = NULL;
	}
#endif
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		if (destroy_unmap_eq(dev, &eq->core))
			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
				       eq->core.eqn);
		tasklet_disable(&eq->tasklet_ctx.task);
		kfree(eq);
	}
}

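/* Create one completion EQ per completion vector: each gets its own tasklet
 * context for deferred CQ processing, an entry in the aRFS cpu_rmap (when
 * CONFIG_RFS_ACCEL is enabled), and an IRQ affinity hint spreading vectors
 * across the CPUs of the device's NUMA node.
 */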
static int create_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq_comp *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
#ifdef CONFIG_RFS_ACCEL
	table->rmap = alloc_irq_cpu_rmap(ncomp_vec);
	if (!table->rmap)
		return -ENOMEM;
#endif
	for (i = 0; i < ncomp_vec; i++) {
		int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
		struct mlx5_eq_param param = {};

		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);

#ifdef CONFIG_RFS_ACCEL
		irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx));
#endif
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		param = (struct mlx5_eq_param) {
			.index = vecidx,
			.mask = 0,
			.nent = nent,
			.context = &eq->core,
			.handler = mlx5_eq_comp_int
		};
		err = create_map_eq(dev, &eq->core, name, &param);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
		/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
		list_add_tail(&eq->list, &table->comp_eqs_list);
	}

	err = set_comp_irq_affinity_hints(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
		goto clean;
	}

	return 0;

clean:
	destroy_comp_eqs(dev);
	return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;
	int err = -ENOENT;
	int i = 0;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (i++ == vector) {
			*eqn = eq->core.eqn;
			*irqn = eq->core.irqn;
			err = 0;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->num_comp_vectors;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	/* TODO: consider irq_get_affinity_mask(irq) */
	return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
	return dev->priv.eq_table->rmap;
#else
	return NULL;
#endif
}

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->core.eqn == eqn)
			return eq;
	}

	return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	int i, max_eqs;

	clear_comp_irqs_affinity_hints(dev);

#ifdef CONFIG_RFS_ACCEL
	if (table->rmap) {
		free_irq_cpu_rmap(table->rmap);
		table->rmap = NULL;
	}
#endif

	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
	max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
	for (i = max_eqs - 1; i >= 0; i--) {
		if (!table->irq_info[i].context)
			continue;
		free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context);
		table->irq_info[i].context = NULL;
	}
	mutex_unlock(&table->lock);
	pci_free_irq_vectors(dev->pdev);
}

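/* Size and allocate the MSI-X vector table: one completion vector per online
 * CPU per port on top of the MLX5_EQ_VEC_COMP_BASE control vectors, capped
 * by the number of EQs the device supports.
 */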
static int alloc_irq_vectors(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = priv->eq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int err;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
	if (!table->irq_info)
		return -ENOMEM;

	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
				     nvec, PCI_IRQ_MSIX);
	if (nvec < 0) {
		err = nvec;
		goto err_free_irq_info;
	}

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;

err_free_irq_info:
	kfree(table->irq_info);
	return err;
}

static void free_irq_vectors(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_free_irq_vectors(dev->pdev);
	kfree(priv->eq_table->irq_info);
}

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	int err;

	err = alloc_irq_vectors(dev);
	if (err) {
		mlx5_core_err(dev, "alloc irq vectors failed\n");
		return err;
	}

	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	err = create_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create completion EQs\n");
		goto err_comp_eqs;
	}

	return 0;

err_comp_eqs:
	destroy_async_eqs(dev);
err_async_eqs:
	free_irq_vectors(dev);
	return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
	free_irq_vectors(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
		return -EINVAL;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
		return -EINVAL;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
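
/*
 * Usage sketch (not part of this file): an mlx5_core consumer embeds a
 * struct mlx5_nb in its own state, fills in nb.notifier_call and event_type
 * (the MLX5_NB_INIT() helper in the mlx5 eq headers is assumed here), then
 * subscribes and later unsubscribes:
 *
 *	MLX5_NB_INIT(&ctx->temp_nb, my_temp_handler, TEMP_WARN_EVENT);
 *	mlx5_eq_notifier_register(dev, &ctx->temp_nb);
 *	...
 *	mlx5_eq_notifier_unregister(dev, &ctx->temp_nb);
 *
 * The handler is invoked from mlx5_eq_async_int() via
 * atomic_notifier_call_chain(), i.e. in interrupt context, so it must not
 * sleep.
 */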