/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device     = &dev->ib_dev;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}
static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}
static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}
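/*
 * The CQ is a power-of-two ring of nent entries, so bit log2(nent) of a
 * linear index flips on every wrap-around.  Comparing that bit against the
 * owner bit stamped in each CQE tells software whether the entry was written
 * on the current pass around the ring or is stale from the previous pass.
 * For example, with nent == 256, indices 0..255 expect owner == 0, indices
 * 256..511 expect owner == 1, and so on.
 */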
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}
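/*
 * Note the "cqe + 64" above: when the CQ is configured for 128-byte entries,
 * the 64-byte completion format described by struct mlx5_cqe64 sits in the
 * second half of each entry, so every consumer in this file applies the same
 * offset before decoding.
 */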
static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode    = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode    = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode    = IB_WC_RDMA_READ;
		wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode    = IB_WC_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode    = IB_WC_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode    = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode    = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}
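/*
 * The requester-side opcode is carried in the most significant byte of the
 * CQE's sop_drop_qpn field (hence the ">> 24" above); the low 24 bits hold
 * the QP number and are decoded separately in mlx5_poll_one().
 */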
enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE    = 2,
};
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8  roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq)
				mlx5_core_res_put(&msrq->common);
		}
	} else {
		wq	  = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (get_cqe_opcode(cqe)) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode   = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
		break;
	}
	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->slid = be16_to_cpu(cqe->slid);
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	wc->slid = 0;
	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}
static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	mlx5_ib_warn(dev, "dump error cqe\n");
	mlx5_dump_err_cqe(dev->mdev, cqe);
}
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}
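/*
 * The walk above follows the sq.w_list next-chain from the previous
 * last_poll entry up to the WQE index reported by this CQE (head), then
 * records that entry's successor as the new sq.last_poll, keeping the
 * send-queue poll cursor consistent.
 */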
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else
	if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else
	if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}
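/*
 * The syndrome decoded above follows T10-DIF semantics: guard (CRC) errors
 * report the 16-bit guard tags in the high half of the transfer signature,
 * reference-tag errors report the full 32-bit reftags, and application-tag
 * errors report the 16-bit apptags in the low half.
 */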
static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
		    int *npolled, bool is_send)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = (is_send) ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		unsigned int idx;

		idx = (is_send) ? wq->last_poll : wq->tail;
		idx &= (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		if (is_send)
			wq->last_poll = wq->w_list[idx].next;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}
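/*
 * sw_comp() generates the flushed-with-error completions that hardware can
 * no longer produce once the device is in internal error: every outstanding
 * WQE between tail and head is reported as IB_WC_WR_FLUSH_ERR so consumers
 * can reclaim their resources.
 */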
static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return mimicked
	 * flush-error completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
		if (*npolled >= num_entries)
			return;
	}
}
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = get_cqe_opcode(cqe64);
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp  = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR: {
		struct mlx5_sig_err_cqe *sig_err_cqe =
			(struct mlx5_sig_err_cqe *)cqe64;
		struct mlx5_core_sig_ctx *sig;

		xa_lock(&dev->sig_mrs);
		sig = xa_load(&dev->sig_mrs,
			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		get_sig_err_item(sig_err_cqe, &sig->err_item);
		sig->sig_err_exists = true;
		sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, sig->err_item.key,
			     sig->err_item.err_type,
			     sig->err_item.sig_err_offset,
			     sig->err_item.expected,
			     sig->err_item.actual);

		xa_unlock(&dev->sig_mrs);
		break;
	}
	}

	return 0;
}
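/*
 * mlx5_poll_one() returns -EAGAIN when no CQE is in software ownership,
 * which terminates the poll loop in mlx5_ib_poll_cq().  The consumer index
 * advanced here only becomes visible to hardware when the caller rings the
 * doorbell record via mlx5_cq_set_ci().
 */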
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc, bool is_fatal_err)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		if (unlikely(is_fatal_err)) {
			soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
			soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		}
		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* make sure no soft wqe's are waiting */
		if (unlikely(!list_empty(&cq->wc_list)))
			soft_polled = poll_soft_wc(cq, num_entries, wc, true);

		mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
				     wc + soft_polled, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc, false);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}
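/*
 * Note that the doorbell record is updated once per poll batch (and only if
 * something was actually polled), not per CQE, which keeps the fast path to
 * a single write of the new consumer index.
 */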
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}
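/*
 * Arming writes the consumer index together with a solicited/unsolicited
 * request command to the UAR doorbell page, so the HCA raises the next
 * completion event only for CQEs past cons_index.  Returning 1 (when
 * IB_CQ_REPORT_MISSED_EVENTS is set and soft completions are pending) tells
 * the ULP to poll again instead of sleeping.
 */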
static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_cq_buf *buf,
			     int nent,
			     int cqe_size)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
	u8 log_wq_sz     = ilog2(cqe_size);
	int err;

	err = mlx5_frag_buf_alloc_node(dev->mdev,
				       nent * cqe_size,
				       frag_buf,
				       dev->mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}
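/*
 * The fragment-buffer control (fbc) is keyed by the CQE stride:
 * log_wq_stride is 6 (2^6 == 64 bytes) for 64B CQEs and 7 for 128B CQEs, so
 * get_cqe() can translate a CQE index straight to its fragment offset.
 */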
enum {
	MLX5_CQE_RES_FORMAT_HASH = 0,
	MLX5_CQE_RES_FORMAT_CSUM = 1,
	MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
};
static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
{
	switch (format) {
	case MLX5_IB_CQE_RES_FORMAT_HASH:
		return MLX5_CQE_RES_FORMAT_HASH;
	case MLX5_IB_CQE_RES_FORMAT_CSUM:
		return MLX5_CQE_RES_FORMAT_CSUM;
	case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
			return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}
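/*
 * The mini-CQE format chosen here controls which fields survive CQE
 * compression: a hash result, a checksum, or a checksum plus stride index.
 * The stride-index variant additionally depends on the
 * mini_cqe_resp_stride_index device capability checked above.
 */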
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd = {};
	size_t ucmdlen;
	int page_shift;
	__be64 *pas;
	int npages;
	int ncont;
	void *cqc;
	int err;
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	ucmdlen = min(udata->inlen, sizeof(ucmd));
	if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
		return -EINVAL;

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)))
		return -EINVAL;

	if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
	    ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem =
		ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			    entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
		*index = ucmd.uar_page_index;
	} else if (context->bfregi.lib_uar_dyn) {
		err = -EINVAL;
		goto err_cqb;
	} else {
		*index = context->bfregi.sys_pages[0];
	}

	if (ucmd.cqe_comp_en == 1) {
		int mini_cqe_format;

		if (!((*cqe_size == 128 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
		      (*cqe_size == 64 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
				     *cqe_size);
			goto err_cqb;
		}

		mini_cqe_format =
			mini_cqe_res_format_to_hw(dev,
						  ucmd.cqe_comp_res_format);
		if (mini_cqe_format < 0) {
			err = mini_cqe_format;
			mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
				    ucmd.cqe_comp_res_format, err);
			goto err_cqb;
		}

		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
		if (*cqe_size != 128 ||
		    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev,
				     "CQE padding is not supported for CQE size of %dB!\n",
				     *cqe_size);
			goto err_cqb;
		}

		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
	}

	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
	return 0;

err_cqb:
	kvfree(*cqb);

err_db:
	mlx5_ib_db_unmap_user(context, &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}
static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	mlx5_ib_db_unmap_user(context, &cq->db);
	ib_umem_release(cq->buf.umem);
}
static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
			     struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe(cq, i);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}
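/*
 * Stamping MLX5_CQE_INVALID in the opcode nibble (the high four bits of
 * op_own) marks every entry of a fresh buffer as empty, so get_sw_cqe()
 * will not mistake uninitialized memory for a valid completion before
 * hardware writes its first pass.
 */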
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db  = cq->db.db;
	cq->mcq.arm_db     = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_frag_buf(cq, &cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
		 cq->buf.frag_buf.npages;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.frag_buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}
static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}
static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	int index;
	int inlen;
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return -EINVAL;

	if (check_cq_create_flags(attr->flags))
		return -EOPNOTSUPP;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return -EINVAL;

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (udata) {
		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
				     &index, &inlen);
		if (err)
			return err;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			return err;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp  = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return 0;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (udata)
		destroy_cq_user(cq, udata);
	else
		destroy_cq_kernel(dev, cq);
	return err;
}
void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (udata)
		destroy_cq_user(mcq, udata);
	else
		destroy_cq_kernel(dev, mcq);
}
static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}
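/*
 * The sweep above compacts the ring in place: CQEs belonging to the QP being
 * cleaned are overwritten by shifting the surviving older entries up by
 * nfreed slots, while each destination's owner bit is preserved so the
 * ownership pattern of the ring stays intact.
 */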
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -EOPNOTSUPP;

	if (cq_period > MLX5_MAX_CQ_PERIOD)
		return -EINVAL;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}
static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* check multiplication overflow */
	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
		return -EINVAL;

	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			   (size_t)ucmd.cqe_size * entries,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}
static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_frag_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
					     (i + 1) & cq->resize_buf->nent);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}
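/*
 * copy_resize_cqes() drains the old buffer into the resize buffer until it
 * hits the special MLX5_CQE_RESIZE_CQ entry that hardware writes once it
 * has switched to the new buffer; polling then resumes seamlessly from the
 * copied entries.
 */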
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	int page_shift;
	int inlen;
	int cqe_size;
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf;

			npas = frag_buf->npages;
			page_shift = frag_buf->page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     pas, 0);
	else
		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	ib_umem_release(cq->resize_umem);
	if (!udata) {
		free_cq_buf(dev, cq->resize_buf);
		cq->resize_buf = NULL;
	}
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}
/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}
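/*
 * Software-generated completions queued here are drained by poll_soft_wc()
 * ahead of hardware CQEs; notify_soft_wc_handler() then fires the completion
 * handler from workqueue context when the CQ was armed, mimicking a hardware
 * completion event.
 */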