/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->rq.flushed) {
		dev_dbg(&rcq->hwq.pdev->dev,
			"FP: Adding to RQ Flush list = %p\n", qp);
		list_add_tail(&qp->rq_flush, &rcq->rqf_head);
		qp->rq.flushed = true;
	}
}

static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (qp->rq.flushed) {
		qp->rq.flushed = false;
		list_del(&qp->rq_flush);
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}

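/*
 * Illustrative note (editorial, not driver code): a CQ notification NQE
 * carries the 64-bit cq_handle split across two little-endian 32-bit
 * words, reassembled as
 *
 *	q_handle  = le32_to_cpu(nqcne->cq_handle_low);
 *	q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32;
 *
 * clean_nq() compares that against the CQ being torn down and zeroes the
 * handle in place, so a stale NQE cannot later be dispatched for a CQ
 * whose memory has already been freed.
 */
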
/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

static void bnxt_qplib_service_nq(unsigned long data)
{
	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	int num_srqne_processed = 0;
	int num_cqne_processed = 0;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, cq))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (!nq->srqn_handler(nq,
					      (struct bnxt_qplib_srq *)q_handle,
					      nqsrqe->event))
				num_srqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}

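/*
 * Illustrative note (editorial, not driver code): HWQ_CMP() masks a
 * monotonically increasing index onto the power-of-two ring, e.g. with
 * max_elements = 1024, HWQ_CMP(1030, hwq) == 6.  The unmasked raw_cons
 * feeds NQE_CMP_VALID(): the valid bit written by hardware flips meaning
 * on every wrap of the ring, so an entry from a previous lap is never
 * mistaken for a fresh one.  The budget bound keeps a single tasklet run
 * from monopolizing the CPU; the consumer doorbell is written once, after
 * the loop, rather than per entry.
 */
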
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	tasklet_disable(&nq->nq_tasklet);
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	if (nq->requested) {
		irq_set_affinity_hint(nq->msix_vec, NULL);
		free_irq(nq->msix_vec, nq);
		nq->requested = false;
	}
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_init(&nq->nq_tasklet, bnxt_qplib_service_nq,
			     (unsigned long)nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc)
		return rc;

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;
fail:
	return rc;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc = -1;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_srq req;
	struct creq_destroy_srq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					  (struct creq_base *)&resp, NULL, 0);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp;
	struct cmdq_create_srq req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

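/*
 * Worked example (editorial, illustrative only): the count expression above
 * is standard ring-occupancy arithmetic on the masked indices.  With
 * max_elements = 8, prod = 6 and cons = 2 gives count = 6 - 2 = 4; with the
 * producer wrapped, prod = 2 and cons = 6 gives count = 8 - 6 + 2 = 4 as
 * well.  If occupancy is already above the SRQ limit, the arm doorbell is
 * written immediately; otherwise arming is deferred until a later
 * bnxt_qplib_post_srq_recv() refills the queue past the threshold.
 */
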
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_srq req;
	struct creq_query_srq_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
	req.srq_cid = cpu_to_le32(srq->id);

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, rc = 0, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		rc = -EINVAL;
		spin_unlock(&srq_hwq->lock);
		goto done;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* retaining srq_hwq->cons for this logic
	 * actually the lock is only required to
	 * read srq_hwq->cons.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req == true && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}
done:
	return rc;
}

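/*
 * Worked example (editorial, illustrative only; assumes the usual 16-byte
 * rq_wqe header): SRQ WQE sizes are expressed in 16-byte units, so
 * (offsetof(struct rq_wqe, data) + 15) >> 4 == (16 + 15) >> 4 == 1 header
 * unit, and a posted receive with num_sge = 2 yields wqe_size = 2 + 1 = 3
 * units, i.e. 48 bytes on the hardware ring.
 */
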
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int rc = 0;
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq) {
		rc = -ENOMEM;
		goto out;
	}

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;
out:
	return rc;
}

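/*
 * Illustrative sketch (editorial): for max_wqe = 4 the software queue is
 * initialized as next_idx = {1, 2, 3, 0}, a circular singly linked list.
 * Posting hands out entries starting at swq_start while completion
 * processing advances swq_last, so producer and consumer chase each other
 * around the ring by link traversal, with no modulo arithmetic in the
 * fast path.
 */
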
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp1_resp resp;
	struct cmdq_create_qp1 req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	int rc;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne = 0;
	u16 indx_pad = 0;
	u64 psn_pg;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

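/*
 * Illustrative note (editorial): the PSN search entries live in the aux
 * area that bnxt_qplib_alloc_init_hwq() lays out after the WQE slots,
 * which is why the lookup above indexes at hwq->depth.  When that area
 * does not start page aligned, pad_pgofft records how many size-byte
 * entries into the first pad page the real entries begin, so the
 * per-WQE lookup in bnxt_qplib_pull_psn_buff() can index uniformly.
 */
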
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp_resp resp;
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_qp req;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u16 nsge;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW require the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification.
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0, pkey;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
				 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
					 qp->pkey_index, &pkey))
			req.pkey = cpu_to_le16(pkey);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	u16 cmd_flags = 0;
	int rc;

	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[qp->id].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		rcfw->qp_tbl[qp->id].qp_id = qp->id;
		rcfw->qp_tbl[qp->id].qp_handle = qp;

		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

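/*
 * Worked example (editorial, illustrative only): for an RC SEND spanning
 * three MTU-sized packets posted when sq->psn == 10, the entry records
 * start_psn = 10 and next_psn = 13 (the first PSN of the following WQE),
 * plus the opcode and, on P5 parts, the WQE's starting slot.  These
 * entries let the hardware map an acknowledged or retransmitted PSN back
 * to the owning WQE.
 */
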
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			goto bad;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
				      (sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
bad:
	return -ENOMEM;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

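/*
 * Worked example (editorial, illustrative only; assumes the 32-byte send
 * header): slots are 16-byte units, so a send with two SGEs needs
 * 32 + 2 * 16 = 64 bytes, i.e. wqe_sz = 4 slots.  In the static (fixed
 * size WQE) mode every WQE still occupies 8 slots (128 bytes) on the
 * ring, which is why 8 is returned there regardless of wqe_sz.
 */
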
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

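/*
 * Worked example (editorial, illustrative only): with pad_stride = 8
 * (the legacy sq_psn_search entry) a 4K page holds 512 entries.  For a
 * WQE index (slot_idx / max_slot) of 600 and pad_pgofft = 0, pg_num =
 * 600 / 512 = 1 and pg_indx = 88, so the entry sits 88 * 8 bytes into
 * the second pad page.
 */
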
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(sq, swq);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
			       SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
			       SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
			  SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
			 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
			 SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);

		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	bnxt_qplib_fill_psn_search(qp, wqe, swq);
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}

int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_cq req;
	u16 cmd_flags = 0;
	u32 pg_sz_lvl;
	int rc;

	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}
	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
exit:
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;
	u16 total_cnq_events;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

	req.cq_cid = cpu_to_le32(cq->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}

static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 *       CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
 */
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
			 & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				     CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = 0;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}
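
/*
 * Return-value legend for do_wa9060() (a restatement of the logic above,
 * for the caller in bnxt_qplib_cq_process_req() below; no new behavior):
 *
 *	0	complete the SWQE normally (or the phantom CQE already came
 *		back and single-completion mode was entered)
 *	-EAGAIN	a marked fence SWQE armed phantom processing; the caller
 *		must stop completing and wait for the re-armed CQ
 */
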
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* We need to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status. For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}
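
/*
 * Worked example for the aggregation walk above (illustrative values):
 * with sq->swq_last == 2 and a REQ hwcqe mapping to cqe_sq_cons == 5,
 * the loop fabricates completions for SWQEs 2, 3 and 4 (signaled ones
 * only, fence WRIDs skipped), leaving sq->swq_last == 5, budget
 * permitting.
 */
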
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	srq->hwq.cons++; /* Support for SRQE counter */
	spin_unlock(&srq->hwq.lock);
}
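
/*
 * The SRQ software queue is recycled as a linked free list threaded
 * through next_idx: a released tag is appended after last_idx and the
 * list is re-terminated with -1, so the SRQ post path can hand out
 * entries in release order. hwq.cons is bumped only to feed the SRQE
 * counter, not to index hardware elements.
 */
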
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
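
/*
 * Note on the two branches above: CQ_RES_RC_FLAGS_SRQ_SRQ selects where
 * the wr_id comes from (a restatement, not new behavior):
 *
 *	SRQ: wr_id = srq->swq[wr_id_idx].wr_id, tag recycled via
 *	     bnxt_qplib_release_srqe()
 *	RQ:  wr_id = rq->swq[rq->swq_last].wr_id, strictly in order,
 *	     hence the wr_id_idx != rq->swq_last sanity check
 */
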
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(
		      hwcqe->src_qp_high_srq_or_rq_wr_id) &
		      CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	bool rc = true;

	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);

	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
	return rc;
}
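
/*
 * Usage sketch (illustrative, assuming a verbs-layer caller): the check
 * is advisory only, since a CQE may land right after it returns true, so
 * callers still rely on the ARM/notify path for correctness:
 *
 *	if (bnxt_qplib_is_cq_empty(cq))
 *		bnxt_qplib_req_notify_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
 */
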
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process terminal qp is NULL\n");
		return -EINVAL;
	}

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless of what
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}
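
/*
 * Summary of the terminal CQE handling above (restated, no new logic):
 * an sq_cons_idx of 0xFFFF means there are no SQEs to complete; otherwise
 * all signaled SQEs up to that index complete with status OK. The RQ is
 * not walked here at all: the QP is queued on the flush list and any
 * posted RQEs complete later with FLUSHED_ERR via __flush_rq().
 */
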
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}
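
/*
 * Caller sketch (illustrative, assuming a verbs-layer poll wrapper):
 * when a CQ has QPs on its flush lists, poll falls back to fabricated
 * completions:
 *
 *	struct bnxt_qplib_cqe cqebuf[16];
 *	int ncqe = bnxt_qplib_process_flush_list(cq, cqebuf, 16);
 *
 * On return, the first ncqe entries of cqebuf hold FLUSHED_ERR CQEs.
 */
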
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;

	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		/* From the device's respective CQE format to qplib_wc */
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			dev_err(&cq->hwq.pdev->dev,
				"process_cqe error rc = 0x%x\n", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
	}
exit:
	return num_cqes - budget;
}
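
/*
 * Usage sketch (illustrative, mirroring how a verbs-layer poll routine
 * might drive this; the buffer and count are caller-defined):
 *
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int ncqe = bnxt_qplib_poll_cq(cq, cqebuf, num_entries, &lib_qp);
 *
 *	if (lib_qp)
 *		;	// WA 9060 armed phantom processing on this QP;
 *			// the caller handles its sq->send_phantom state
 */
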
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state to track whether to issue the cq handler */
	atomic_set(&cq->arm_state, 1);
}
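
/*
 * arm_type is a DBC_DBC_TYPE_CQ_ARM* doorbell type (e.g.
 * DBC_DBC_TYPE_CQ_ARMALL, as used in do_wa9060() above); a zero arm_type
 * skips the doorbell but still records the armed state for the NQ
 * handler.
 */
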
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}