2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: Fast Path Operators
39 #define dev_fmt(fmt) "QPLIB: " fmt
41 #include <linux/interrupt.h>
42 #include <linux/spinlock.h>
43 #include <linux/sched.h>
44 #include <linux/slab.h>
45 #include <linux/pci.h>
46 #include <linux/prefetch.h>
47 #include <linux/if_ether.h>
51 #include "qplib_res.h"
52 #include "qplib_rcfw.h"
56 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
57 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
58 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
60 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
62 qp->sq.condition = false;
63 qp->sq.send_phantom = false;
64 qp->sq.single = false;
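/* Add the QP to the SQ and RQ flush lists of its send and receive CQs
 * so that outstanding WQEs can later be completed with a FLUSHED_ERR
 * status. Callers must hold the CQ flush locks; see
 * bnxt_qplib_add_flush_qp() below for the locked wrapper.
 */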
68 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
70 struct bnxt_qplib_cq *scq, *rcq;
75 if (!qp->sq.flushed) {
76 dev_dbg(&scq->hwq.pdev->dev,
77 "FP: Adding to SQ Flush list = %p\n", qp);
78 bnxt_qplib_cancel_phantom_processing(qp);
79 list_add_tail(&qp->sq_flush, &scq->sqf_head);
80 qp->sq.flushed = true;
83 if (!qp->rq.flushed) {
84 dev_dbg(&rcq->hwq.pdev->dev,
85 "FP: Adding to RQ Flush list = %p\n", qp);
86 list_add_tail(&qp->rq_flush, &rcq->rqf_head);
87 qp->rq.flushed = true;
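/* The flush-lock helpers always take the SCQ lock first and the RCQ
 * lock second; when the QP uses the same CQ for send and receive only
 * the single lock is taken (the sparse __acquire()/__release()
 * annotations keep the lock balance consistent for that case).
 */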
92 static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
94 __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
96 spin_lock_irqsave(&qp->scq->flush_lock, *flags);
97 if (qp->scq == qp->rcq)
98 __acquire(&qp->rcq->flush_lock);
100 spin_lock(&qp->rcq->flush_lock);
103 static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
104 unsigned long *flags)
105 __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
107 if (qp->scq == qp->rcq)
108 __release(&qp->rcq->flush_lock);
110 spin_unlock(&qp->rcq->flush_lock);
111 spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
114 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
118 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
119 __bnxt_qplib_add_flush_qp(qp);
120 bnxt_qplib_release_cq_flush_locks(qp, &flags);
123 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
125 if (qp->sq.flushed) {
126 qp->sq.flushed = false;
127 list_del(&qp->sq_flush);
130 if (qp->rq.flushed) {
131 qp->rq.flushed = false;
132 list_del(&qp->rq_flush);
137 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
141 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
142 __clean_cq(qp->scq, (u64)(unsigned long)qp);
145 __clean_cq(qp->rcq, (u64)(unsigned long)qp);
149 __bnxt_qplib_del_flush_qp(qp);
150 bnxt_qplib_release_cq_flush_locks(qp, &flags);
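/* Deferred CQ notification: invoked from the NQ workqueue to call the
 * consumer's cqn_handler for a CQ that is still armed.
 */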
153 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
155 struct bnxt_qplib_nq_work *nq_work =
156 container_of(work, struct bnxt_qplib_nq_work, work);
158 struct bnxt_qplib_cq *cq = nq_work->cq;
159 struct bnxt_qplib_nq *nq = nq_work->nq;
162 spin_lock_bh(&cq->compl_lock);
163 if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
164 dev_dbg(&nq->pdev->dev,
165 "%s:Trigger cq = %p event nq = %p\n",
167 nq->cqn_handler(nq, cq);
169 spin_unlock_bh(&cq->compl_lock);
174 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
175 struct bnxt_qplib_qp *qp)
177 struct bnxt_qplib_q *rq = &qp->rq;
178 struct bnxt_qplib_q *sq = &qp->sq;
181 dma_free_coherent(&res->pdev->dev,
182 rq->hwq.max_elements * qp->rq_hdr_buf_size,
183 qp->rq_hdr_buf, qp->rq_hdr_buf_map);
185 dma_free_coherent(&res->pdev->dev,
186 sq->hwq.max_elements * qp->sq_hdr_buf_size,
187 qp->sq_hdr_buf, qp->sq_hdr_buf_map);
188 qp->rq_hdr_buf = NULL;
189 qp->sq_hdr_buf = NULL;
190 qp->rq_hdr_buf_map = 0;
191 qp->sq_hdr_buf_map = 0;
192 qp->sq_hdr_buf_size = 0;
193 qp->rq_hdr_buf_size = 0;
196 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
197 struct bnxt_qplib_qp *qp)
199 struct bnxt_qplib_q *rq = &qp->rq;
200 struct bnxt_qplib_q *sq = &qp->sq;
203 if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
204 qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
205 sq->hwq.max_elements *
207 &qp->sq_hdr_buf_map, GFP_KERNEL);
208 if (!qp->sq_hdr_buf) {
210 dev_err(&res->pdev->dev,
211 "Failed to create sq_hdr_buf\n");
216 if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
217 qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
218 rq->hwq.max_elements *
222 if (!qp->rq_hdr_buf) {
224 dev_err(&res->pdev->dev,
225 "Failed to create rq_hdr_buf\n");
232 bnxt_qplib_free_qp_hdr_buf(res, qp);
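/* NQ tasklet handler: drains the notification queue, dispatching CQ
 * notifications to cqn_handler and SRQ events to srqn_handler, then
 * re-arms the NQ doorbell if any entries were consumed.
 */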
236 static void bnxt_qplib_service_nq(unsigned long data)
238 struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
239 struct bnxt_qplib_hwq *hwq = &nq->hwq;
240 struct nq_base *nqe, **nq_ptr;
241 struct bnxt_qplib_cq *cq;
242 int num_cqne_processed = 0;
243 int num_srqne_processed = 0;
244 u32 sw_cons, raw_cons;
246 int budget = nq->budget;
248 bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
250 /* Service the NQ until empty */
251 raw_cons = hwq->cons;
253 sw_cons = HWQ_CMP(raw_cons, hwq);
254 nq_ptr = (struct nq_base **)hwq->pbl_ptr;
255 nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
256 if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
260 * The validity test of the entry must be done before
261 * reading any further.
265 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
267 case NQ_BASE_TYPE_CQ_NOTIFICATION:
269 struct nq_cn *nqcne = (struct nq_cn *)nqe;
271 q_handle = le32_to_cpu(nqcne->cq_handle_low);
272 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
274 cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
275 bnxt_qplib_arm_cq_enable(cq);
276 spin_lock_bh(&cq->compl_lock);
277 atomic_set(&cq->arm_state, 0);
278 if (!nq->cqn_handler(nq, (cq)))
279 num_cqne_processed++;
281 dev_warn(&nq->pdev->dev,
282 "cqn - type 0x%x not handled\n", type);
283 spin_unlock_bh(&cq->compl_lock);
286 case NQ_BASE_TYPE_SRQ_EVENT:
288 struct nq_srq_event *nqsrqe =
289 (struct nq_srq_event *)nqe;
291 q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
292 q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
294 bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
295 DBC_DBC_TYPE_SRQ_ARMENA);
296 if (!nq->srqn_handler(nq,
297 (struct bnxt_qplib_srq *)q_handle,
299 num_srqne_processed++;
301 dev_warn(&nq->pdev->dev,
302 "SRQ event 0x%x not handled\n",
306 case NQ_BASE_TYPE_DBQ_EVENT:
309 dev_warn(&nq->pdev->dev,
310 "nqe with type = 0x%x not handled\n", type);
315 if (hwq->cons != raw_cons) {
316 hwq->cons = raw_cons;
317 bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons,
318 hwq->max_elements, nq->ring_id,
323 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
325 struct bnxt_qplib_nq *nq = dev_instance;
326 struct bnxt_qplib_hwq *hwq = &nq->hwq;
327 struct nq_base **nq_ptr;
330 /* Prefetch the NQ element */
331 sw_cons = HWQ_CMP(hwq->cons, hwq);
332 nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
333 prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
335 /* Fan out to CPU affinitized kthreads? */
336 tasklet_schedule(&nq->worker);
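/* Quiesce the NQ: mask the hardware interrupt, synchronize with any
 * running handler, optionally kill the tasklet, and release the IRQ
 * and its affinity hint if one was requested.
 */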
341 void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
343 bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
344 tasklet_disable(&nq->worker);
345 /* Mask h/w interrupt */
346 bnxt_qplib_ring_nq_db(nq->bar_reg_iomem, nq->hwq.cons,
347 nq->hwq.max_elements, nq->ring_id, gen_p5);
348 /* Sync with last running IRQ handler */
349 synchronize_irq(nq->vector);
351 tasklet_kill(&nq->worker);
353 irq_set_affinity_hint(nq->vector, NULL);
354 free_irq(nq->vector, nq);
355 nq->requested = false;
359 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
362 destroy_workqueue(nq->cqn_wq);
366 /* Make sure the HW is stopped! */
368 bnxt_qplib_nq_stop_irq(nq, true);
370 if (nq->bar_reg_iomem)
371 iounmap(nq->bar_reg_iomem);
372 nq->bar_reg_iomem = NULL;
374 nq->cqn_handler = NULL;
375 nq->srqn_handler = NULL;
379 int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
380 int msix_vector, bool need_init)
382 bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
388 nq->vector = msix_vector;
390 tasklet_init(&nq->worker, bnxt_qplib_service_nq,
393 tasklet_enable(&nq->worker);
395 snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
396 rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
400 cpumask_clear(&nq->mask);
401 cpumask_set_cpu(nq_indx, &nq->mask);
402 rc = irq_set_affinity_hint(nq->vector, &nq->mask);
404 dev_warn(&nq->pdev->dev,
405 "set affinity failed; vector: %d nq_idx: %d\n",
406 nq->vector, nq_indx);
408 nq->requested = true;
409 bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, nq->hwq.cons,
410 nq->hwq.max_elements, nq->ring_id, gen_p5);
415 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
416 int nq_idx, int msix_vector, int bar_reg_offset,
417 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
418 struct bnxt_qplib_cq *),
419 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
420 struct bnxt_qplib_srq *,
423 resource_size_t nq_base;
427 nq->cqn_handler = cqn_handler;
430 nq->srqn_handler = srqn_handler;
432 /* Have a task to schedule CQ notifiers in the post-send case */
433 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
437 nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
438 nq->bar_reg_off = bar_reg_offset;
439 nq_base = pci_resource_start(pdev, nq->bar_reg);
444 /* Unconditionally map 8 bytes to support 57500 series */
445 nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8);
446 if (!nq->bar_reg_iomem) {
451 rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
453 dev_err(&nq->pdev->dev,
454 "Failed to request irq for nq-idx %d\n", nq_idx);
460 bnxt_qplib_disable_nq(nq);
464 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
466 if (nq->hwq.max_elements) {
467 bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
468 nq->hwq.max_elements = 0;
472 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
477 if (!nq->hwq.max_elements ||
478 nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
479 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
480 hwq_type = bnxt_qplib_get_hwq_type(nq->res);
481 if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
482 &nq->hwq.max_elements,
483 BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
484 PAGE_SIZE, hwq_type))
492 static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
494 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
500 sw_prod = (arm_type == DBC_DBC_TYPE_SRQ_ARM) ?
501 srq->threshold : HWQ_CMP(srq_hwq->prod, srq_hwq);
502 db = (arm_type == DBC_DBC_TYPE_SRQ_ARMENA) ? srq->dbr_base :
504 val = ((srq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
506 val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
510 int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
511 struct bnxt_qplib_srq *srq)
513 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
514 struct cmdq_destroy_srq req;
515 struct creq_destroy_srq_resp resp;
519 RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
521 /* Configure the request */
522 req.srq_cid = cpu_to_le32(srq->id);
524 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
525 (void *)&resp, NULL, 0);
529 bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
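/* Create an SRQ: allocate the hardware queue and the software shadow
 * queue (kept as a free list via next_idx), issue the CREATE_SRQ
 * firmware command and arm-enable the SRQ doorbell.
 */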
534 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
535 struct bnxt_qplib_srq *srq)
537 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
538 struct cmdq_create_srq req;
539 struct creq_create_srq_resp resp;
540 struct bnxt_qplib_pbl *pbl;
544 srq->hwq.max_elements = srq->max_wqe;
545 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
546 srq->nmap, &srq->hwq.max_elements,
547 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
548 PAGE_SIZE, HWQ_TYPE_QUEUE);
552 srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
559 RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
561 /* Configure the request */
562 req.dpi = cpu_to_le32(srq->dpi->dpi);
563 req.srq_handle = cpu_to_le64((uintptr_t)srq);
565 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
566 pbl = &srq->hwq.pbl[PBL_LVL_0];
567 req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
568 CMDQ_CREATE_SRQ_LVL_MASK) <<
569 CMDQ_CREATE_SRQ_LVL_SFT) |
570 (pbl->pg_size == ROCE_PG_SIZE_4K ?
571 CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
572 pbl->pg_size == ROCE_PG_SIZE_8K ?
573 CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
574 pbl->pg_size == ROCE_PG_SIZE_64K ?
575 CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
576 pbl->pg_size == ROCE_PG_SIZE_2M ?
577 CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
578 pbl->pg_size == ROCE_PG_SIZE_8M ?
579 CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
580 pbl->pg_size == ROCE_PG_SIZE_1G ?
581 CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
582 CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
583 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
584 req.pd_id = cpu_to_le32(srq->pd->id);
585 req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
587 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
588 (void *)&resp, NULL, 0);
592 spin_lock_init(&srq->lock);
594 srq->last_idx = srq->hwq.max_elements - 1;
595 for (idx = 0; idx < srq->hwq.max_elements; idx++)
596 srq->swq[idx].next_idx = idx + 1;
597 srq->swq[srq->last_idx].next_idx = -1;
599 srq->id = le32_to_cpu(resp.xid);
600 srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
602 bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARMENA);
603 srq->arm_req = false;
607 bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
613 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
614 struct bnxt_qplib_srq *srq)
616 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
617 u32 sw_prod, sw_cons, count = 0;
619 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
620 sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
622 count = sw_prod > sw_cons ? sw_prod - sw_cons :
623 srq_hwq->max_elements - sw_cons + sw_prod;
624 if (count > srq->threshold) {
625 srq->arm_req = false;
626 bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
628 /* Deferred arming */
635 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
636 struct bnxt_qplib_srq *srq)
638 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
639 struct cmdq_query_srq req;
640 struct creq_query_srq_resp resp;
641 struct bnxt_qplib_rcfw_sbuf *sbuf;
642 struct creq_query_srq_resp_sb *sb;
646 RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
647 req.srq_cid = cpu_to_le32(srq->id);
649 /* Configure the request */
650 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
654 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
656 srq->threshold = le16_to_cpu(sb->srq_limit);
657 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
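/* Post a receive WQE to the SRQ: pull a free slot from the software
 * free list, build the RQE from the supplied SGEs, record the wr_id
 * and ring the SRQ doorbell; re-arm the SRQ if a deferred arm request
 * is pending and the fill level now exceeds the threshold.
 */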
662 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
663 struct bnxt_qplib_swqe *wqe)
665 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
666 struct rq_wqe *srqe, **srqe_ptr;
667 struct sq_sge *hw_sge;
668 u32 sw_prod, sw_cons, count = 0;
671 spin_lock(&srq_hwq->lock);
672 if (srq->start_idx == srq->last_idx) {
673 dev_err(&srq_hwq->pdev->dev,
674 "FP: SRQ (0x%x) is full!\n", srq->id);
676 spin_unlock(&srq_hwq->lock);
679 next = srq->start_idx;
680 srq->start_idx = srq->swq[next].next_idx;
681 spin_unlock(&srq_hwq->lock);
683 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
684 srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
685 srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
686 memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
687 /* Calculate wqe_size16 and data_len */
688 for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
689 i < wqe->num_sge; i++, hw_sge++) {
690 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
691 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
692 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
694 srqe->wqe_type = wqe->type;
695 srqe->flags = wqe->flags;
696 srqe->wqe_size = wqe->num_sge +
697 ((offsetof(typeof(*srqe), data) + 15) >> 4);
698 srqe->wr_id[0] = cpu_to_le32((u32)next);
699 srq->swq[next].wr_id = wqe->wr_id;
703 spin_lock(&srq_hwq->lock);
704 sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
705 /* Retain srq_hwq->cons for this logic;
706 * the lock is actually only required
707 * to read srq_hwq->cons.
709 sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
710 count = sw_prod > sw_cons ? sw_prod - sw_cons :
711 srq_hwq->max_elements - sw_cons + sw_prod;
712 spin_unlock(&srq_hwq->lock);
714 bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ);
715 if (srq->arm_req == true && count > srq->threshold) {
716 srq->arm_req = false;
717 bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
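/* Create the special QP1 (GSI) QP: allocate SQ/RQ hardware queues and
 * software shadow queues, allocate the header buffers used for QP1
 * traffic and issue the CREATE_QP1 firmware command over the RCFW
 * channel.
 */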
724 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
726 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
727 struct cmdq_create_qp1 req;
728 struct creq_create_qp1_resp resp;
729 struct bnxt_qplib_pbl *pbl;
730 struct bnxt_qplib_q *sq = &qp->sq;
731 struct bnxt_qplib_q *rq = &qp->rq;
736 RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
740 req.dpi = cpu_to_le32(qp->dpi->dpi);
741 req.qp_handle = cpu_to_le64(qp->qp_handle);
744 sq->hwq.max_elements = sq->max_wqe;
745 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
746 &sq->hwq.max_elements,
747 BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
748 PAGE_SIZE, HWQ_TYPE_QUEUE);
752 sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
757 pbl = &sq->hwq.pbl[PBL_LVL_0];
758 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
759 req.sq_pg_size_sq_lvl =
760 ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
761 << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
762 (pbl->pg_size == ROCE_PG_SIZE_4K ?
763 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
764 pbl->pg_size == ROCE_PG_SIZE_8K ?
765 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
766 pbl->pg_size == ROCE_PG_SIZE_64K ?
767 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
768 pbl->pg_size == ROCE_PG_SIZE_2M ?
769 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
770 pbl->pg_size == ROCE_PG_SIZE_8M ?
771 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
772 pbl->pg_size == ROCE_PG_SIZE_1G ?
773 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
774 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
777 req.scq_cid = cpu_to_le32(qp->scq->id);
779 qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
783 rq->hwq.max_elements = qp->rq.max_wqe;
784 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
785 &rq->hwq.max_elements,
786 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
787 PAGE_SIZE, HWQ_TYPE_QUEUE);
791 rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
797 pbl = &rq->hwq.pbl[PBL_LVL_0];
798 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
799 req.rq_pg_size_rq_lvl =
800 ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
801 CMDQ_CREATE_QP1_RQ_LVL_SFT) |
802 (pbl->pg_size == ROCE_PG_SIZE_4K ?
803 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
804 pbl->pg_size == ROCE_PG_SIZE_8K ?
805 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
806 pbl->pg_size == ROCE_PG_SIZE_64K ?
807 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
808 pbl->pg_size == ROCE_PG_SIZE_2M ?
809 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
810 pbl->pg_size == ROCE_PG_SIZE_8M ?
811 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
812 pbl->pg_size == ROCE_PG_SIZE_1G ?
813 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
814 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
816 req.rcq_cid = cpu_to_le32(qp->rcq->id);
819 /* Header buffer - allow hdr_buf to be passed in */
820 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
825 req.qp_flags = cpu_to_le32(qp_flags);
826 req.sq_size = cpu_to_le32(sq->hwq.max_elements);
827 req.rq_size = cpu_to_le32(rq->hwq.max_elements);
830 cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
831 CMDQ_CREATE_QP1_SQ_SGE_SFT);
833 cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
834 CMDQ_CREATE_QP1_RQ_SGE_SFT);
836 req.pd_id = cpu_to_le32(qp->pd->id);
838 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
839 (void *)&resp, NULL, 0);
843 qp->id = le32_to_cpu(resp.xid);
844 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
845 rcfw->qp_tbl[qp->id].qp_id = qp->id;
846 rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
851 bnxt_qplib_free_qp_hdr_buf(res, qp);
853 bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
856 bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
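/* Create a regular QP. For RC QPs a PSN search area is carved out of
 * the pages that follow the SQ WQEs so that each SWQE can record its
 * opcode and start/next PSN; the ORRQ and IRRQ context queues are
 * sized from max_rd_atomic and max_dest_rd_atomic.
 */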
862 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
864 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
865 unsigned long int psn_search, poff = 0;
866 struct sq_psn_search **psn_search_ptr;
867 struct bnxt_qplib_q *sq = &qp->sq;
868 struct bnxt_qplib_q *rq = &qp->rq;
869 int i, rc, req_size, psn_sz = 0;
870 struct sq_send **hw_sq_send_ptr;
871 struct creq_create_qp_resp resp;
872 struct bnxt_qplib_hwq *xrrq;
873 u16 cmd_flags = 0, max_ssge;
874 struct cmdq_create_qp req;
875 struct bnxt_qplib_pbl *pbl;
879 RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
883 req.dpi = cpu_to_le32(qp->dpi->dpi);
884 req.qp_handle = cpu_to_le64(qp->qp_handle);
887 if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
888 psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
889 sizeof(struct sq_psn_search_ext) :
890 sizeof(struct sq_psn_search);
892 sq->hwq.max_elements = sq->max_wqe;
893 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
894 sq->nmap, &sq->hwq.max_elements,
895 BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
897 PAGE_SIZE, HWQ_TYPE_QUEUE);
901 sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
906 hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
908 psn_search_ptr = (struct sq_psn_search **)
909 &hw_sq_send_ptr[get_sqe_pg
910 (sq->hwq.max_elements)];
911 psn_search = (unsigned long int)
912 &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
913 [get_sqe_idx(sq->hwq.max_elements)];
914 if (psn_search & ~PAGE_MASK) {
915 /* If the psn_search does not start on a page boundary,
916 * then calculate the offset
918 poff = (psn_search & ~PAGE_MASK) /
919 BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
921 for (i = 0; i < sq->hwq.max_elements; i++) {
922 sq->swq[i].psn_search =
923 &psn_search_ptr[get_psne_pg(i + poff)]
924 [get_psne_idx(i + poff)];
925 /* psn_ext will be used only for P5 chips. */
927 (struct sq_psn_search_ext *)
928 &psn_search_ptr[get_psne_pg(i + poff)]
929 [get_psne_idx(i + poff)];
932 pbl = &sq->hwq.pbl[PBL_LVL_0];
933 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
934 req.sq_pg_size_sq_lvl =
935 ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
936 << CMDQ_CREATE_QP_SQ_LVL_SFT) |
937 (pbl->pg_size == ROCE_PG_SIZE_4K ?
938 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
939 pbl->pg_size == ROCE_PG_SIZE_8K ?
940 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
941 pbl->pg_size == ROCE_PG_SIZE_64K ?
942 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
943 pbl->pg_size == ROCE_PG_SIZE_2M ?
944 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
945 pbl->pg_size == ROCE_PG_SIZE_8M ?
946 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
947 pbl->pg_size == ROCE_PG_SIZE_1G ?
948 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
949 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
952 req.scq_cid = cpu_to_le32(qp->scq->id);
954 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
955 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
957 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
961 rq->hwq.max_elements = rq->max_wqe;
962 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
963 rq->nmap, &rq->hwq.max_elements,
964 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
965 PAGE_SIZE, HWQ_TYPE_QUEUE);
969 rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
975 pbl = &rq->hwq.pbl[PBL_LVL_0];
976 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
977 req.rq_pg_size_rq_lvl =
978 ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
979 CMDQ_CREATE_QP_RQ_LVL_SFT) |
980 (pbl->pg_size == ROCE_PG_SIZE_4K ?
981 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
982 pbl->pg_size == ROCE_PG_SIZE_8K ?
983 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
984 pbl->pg_size == ROCE_PG_SIZE_64K ?
985 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
986 pbl->pg_size == ROCE_PG_SIZE_2M ?
987 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
988 pbl->pg_size == ROCE_PG_SIZE_8M ?
989 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
990 pbl->pg_size == ROCE_PG_SIZE_1G ?
991 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
992 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
996 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
997 req.srq_cid = cpu_to_le32(qp->srq->id);
1002 req.rcq_cid = cpu_to_le32(qp->rcq->id);
1003 req.qp_flags = cpu_to_le32(qp_flags);
1004 req.sq_size = cpu_to_le32(sq->hwq.max_elements);
1005 req.rq_size = cpu_to_le32(rq->hwq.max_elements);
1006 qp->sq_hdr_buf = NULL;
1007 qp->rq_hdr_buf = NULL;
1009 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
1013 /* CTRL-22434: Irrespective of the requested SGE count on the SQ,
1014 * always create the QP with the max send SGEs possible if the
1015 * requested inline size is greater than 0.
1017 max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
1018 req.sq_fwo_sq_sge = cpu_to_le16(
1019 ((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
1020 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1021 max_rsge = bnxt_qplib_is_chip_gen_p5(res->cctx) ? 6 : rq->max_sge;
1022 req.rq_fwo_rq_sge = cpu_to_le16(
1023 ((max_rsge & CMDQ_CREATE_QP_RQ_SGE_MASK)
1024 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1028 xrrq->max_elements =
1029 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1030 req_size = xrrq->max_elements *
1031 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1032 req_size &= ~(PAGE_SIZE - 1);
1033 rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
1034 &xrrq->max_elements,
1035 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
1036 0, req_size, HWQ_TYPE_CTX);
1039 pbl = &xrrq->pbl[PBL_LVL_0];
1040 req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1043 xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1044 qp->max_dest_rd_atomic);
1045 req_size = xrrq->max_elements *
1046 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1047 req_size &= ~(PAGE_SIZE - 1);
1049 rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
1050 &xrrq->max_elements,
1051 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
1052 0, req_size, HWQ_TYPE_CTX);
1056 pbl = &xrrq->pbl[PBL_LVL_0];
1057 req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1059 req.pd_id = cpu_to_le32(qp->pd->id);
1061 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1062 (void *)&resp, NULL, 0);
1066 qp->id = le32_to_cpu(resp.xid);
1067 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1068 qp->cctx = res->cctx;
1069 INIT_LIST_HEAD(&qp->sq_flush);
1070 INIT_LIST_HEAD(&qp->rq_flush);
1071 rcfw->qp_tbl[qp->id].qp_id = qp->id;
1072 rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
1077 if (qp->irrq.max_elements)
1078 bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1080 if (qp->orrq.max_elements)
1081 bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1083 bnxt_qplib_free_qp_hdr_buf(res, qp);
1085 bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
1088 bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
1094 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1096 switch (qp->state) {
1097 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1098 /* INIT->RTR, configure the path_mtu to the default
1099 * 2048 if not being requested
1101 if (!(qp->modify_flags &
1102 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1104 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1106 CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1109 ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1110 /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1111 if (qp->max_dest_rd_atomic < 1)
1112 qp->max_dest_rd_atomic = 1;
1113 qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1114 /* Bono FW 20.6.5 requires SGID_INDEX configuration */
1115 if (!(qp->modify_flags &
1116 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1118 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1119 qp->ah.sgid_index = 0;
1127 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1129 switch (qp->state) {
1130 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1131 /* Bono FW requires the max_rd_atomic to be >= 1 */
1132 if (qp->max_rd_atomic < 1)
1133 qp->max_rd_atomic = 1;
1134 /* Bono FW does not allow PKEY_INDEX,
1135 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1136 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1137 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1141 ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1142 CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1143 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1144 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1145 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1146 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1147 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1148 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1149 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1150 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1151 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1152 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
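/* Trim the modify_flags mask according to the current QP state so that
 * only attributes legal for this state transition are sent to the
 * firmware (see the INIT->RTR and RTR->RTS helpers above).
 */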
1159 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1161 switch (qp->cur_qp_state) {
1162 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1164 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1165 __modify_flags_from_init_state(qp);
1167 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1168 __modify_flags_from_rtr_state(qp);
1170 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1172 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1174 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1176 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1183 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1185 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1186 struct cmdq_modify_qp req;
1187 struct creq_modify_qp_resp resp;
1188 u16 cmd_flags = 0, pkey;
1193 RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
1195 /* Filter out the qp_attr_mask based on the state->new transition */
1196 __filter_modify_flags(qp);
1197 bmask = qp->modify_flags;
1198 req.modify_mask = cpu_to_le32(qp->modify_flags);
1199 req.qp_cid = cpu_to_le32(qp->id);
1200 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1201 req.network_type_en_sqd_async_notify_new_state =
1202 (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1203 (qp->en_sqd_async_notify ?
1204 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1206 req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1208 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1209 req.access = qp->access;
1211 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
1212 if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
1213 qp->pkey_index, &pkey))
1214 req.pkey = cpu_to_le16(pkey);
1216 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1217 req.qkey = cpu_to_le32(qp->qkey);
1219 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1220 memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1221 req.dgid[0] = cpu_to_le32(temp32[0]);
1222 req.dgid[1] = cpu_to_le32(temp32[1]);
1223 req.dgid[2] = cpu_to_le32(temp32[2]);
1224 req.dgid[3] = cpu_to_le32(temp32[3]);
1226 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1227 req.flow_label = cpu_to_le32(qp->ah.flow_label);
1229 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1230 req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1231 [qp->ah.sgid_index]);
1233 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1234 req.hop_limit = qp->ah.hop_limit;
1236 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1237 req.traffic_class = qp->ah.traffic_class;
1239 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1240 memcpy(req.dest_mac, qp->ah.dmac, 6);
1242 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1243 req.path_mtu = qp->path_mtu;
1245 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1246 req.timeout = qp->timeout;
1248 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1249 req.retry_cnt = qp->retry_cnt;
1251 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1252 req.rnr_retry = qp->rnr_retry;
1254 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1255 req.min_rnr_timer = qp->min_rnr_timer;
1257 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1258 req.rq_psn = cpu_to_le32(qp->rq.psn);
1260 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1261 req.sq_psn = cpu_to_le32(qp->sq.psn);
1263 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1265 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1267 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1268 req.max_dest_rd_atomic =
1269 IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1271 req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1272 req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1273 req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1274 req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1275 req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1276 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1277 req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1279 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1281 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1282 (void *)&resp, NULL, 0);
1285 qp->cur_qp_state = qp->state;
1289 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1291 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1292 struct cmdq_query_qp req;
1293 struct creq_query_qp_resp resp;
1294 struct bnxt_qplib_rcfw_sbuf *sbuf;
1295 struct creq_query_qp_resp_sb *sb;
1300 RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
1302 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1307 req.qp_cid = cpu_to_le32(qp->id);
1308 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1309 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
1313 /* Extract the context from the side buffer */
1314 qp->state = sb->en_sqd_async_notify_state &
1315 CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1316 qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1317 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1319 qp->access = sb->access;
1320 qp->pkey_index = le16_to_cpu(sb->pkey);
1321 qp->qkey = le32_to_cpu(sb->qkey);
1323 temp32[0] = le32_to_cpu(sb->dgid[0]);
1324 temp32[1] = le32_to_cpu(sb->dgid[1]);
1325 temp32[2] = le32_to_cpu(sb->dgid[2]);
1326 temp32[3] = le32_to_cpu(sb->dgid[3]);
1327 memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1329 qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1331 qp->ah.sgid_index = 0;
1332 for (i = 0; i < res->sgid_tbl.max; i++) {
1333 if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1334 qp->ah.sgid_index = i;
1338 if (i == res->sgid_tbl.max)
1339 dev_warn(&res->pdev->dev, "SGID not found??\n");
1341 qp->ah.hop_limit = sb->hop_limit;
1342 qp->ah.traffic_class = sb->traffic_class;
1343 memcpy(qp->ah.dmac, sb->dest_mac, 6);
1344 qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1345 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1346 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1347 qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1348 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1349 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1350 qp->timeout = sb->timeout;
1351 qp->retry_cnt = sb->retry_cnt;
1352 qp->rnr_retry = sb->rnr_retry;
1353 qp->min_rnr_timer = sb->min_rnr_timer;
1354 qp->rq.psn = le32_to_cpu(sb->rq_psn);
1355 qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1356 qp->sq.psn = le32_to_cpu(sb->sq_psn);
1357 qp->max_dest_rd_atomic =
1358 IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1359 qp->sq.max_wqe = qp->sq.hwq.max_elements;
1360 qp->rq.max_wqe = qp->rq.hwq.max_elements;
1361 qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1362 qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1363 qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1364 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1365 memcpy(qp->smac, sb->src_mac, 6);
1366 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1368 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1372 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1374 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1375 struct cq_base *hw_cqe, **hw_cqe_ptr;
1378 for (i = 0; i < cq_hwq->max_elements; i++) {
1379 hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
1380 hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
1381 if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1384 * The validity test of the entry must be done before
1385 * reading any further.
1388 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1389 case CQ_BASE_CQE_TYPE_REQ:
1390 case CQ_BASE_CQE_TYPE_TERMINAL:
1392 struct cq_req *cqe = (struct cq_req *)hw_cqe;
1394 if (qp == le64_to_cpu(cqe->qp_handle))
1398 case CQ_BASE_CQE_TYPE_RES_RC:
1399 case CQ_BASE_CQE_TYPE_RES_UD:
1400 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1402 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1404 if (qp == le64_to_cpu(cqe->qp_handle))
1414 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1415 struct bnxt_qplib_qp *qp)
1417 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1418 struct cmdq_destroy_qp req;
1419 struct creq_destroy_qp_resp resp;
1423 rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1424 rcfw->qp_tbl[qp->id].qp_handle = NULL;
1426 RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1428 req.qp_cid = cpu_to_le32(qp->id);
1429 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1430 (void *)&resp, NULL, 0);
1432 rcfw->qp_tbl[qp->id].qp_id = qp->id;
1433 rcfw->qp_tbl[qp->id].qp_handle = qp;
1440 void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1441 struct bnxt_qplib_qp *qp)
1443 bnxt_qplib_free_qp_hdr_buf(res, qp);
1444 bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1447 bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
1450 if (qp->irrq.max_elements)
1451 bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1452 if (qp->orrq.max_elements)
1453 bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1457 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1458 struct bnxt_qplib_sge *sge)
1460 struct bnxt_qplib_q *sq = &qp->sq;
1463 memset(sge, 0, sizeof(*sge));
1465 if (qp->sq_hdr_buf) {
1466 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1467 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1468 sw_prod * qp->sq_hdr_buf_size);
1469 sge->lkey = 0xFFFFFFFF;
1470 sge->size = qp->sq_hdr_buf_size;
1471 return qp->sq_hdr_buf + sw_prod * sge->size;
1476 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1478 struct bnxt_qplib_q *rq = &qp->rq;
1480 return HWQ_CMP(rq->hwq.prod, &rq->hwq);
1483 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1485 return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1488 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1489 struct bnxt_qplib_sge *sge)
1491 struct bnxt_qplib_q *rq = &qp->rq;
1494 memset(sge, 0, sizeof(*sge));
1496 if (qp->rq_hdr_buf) {
1497 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1498 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1499 sw_prod * qp->rq_hdr_buf_size);
1500 sge->lkey = 0xFFFFFFFF;
1501 sge->size = qp->rq_hdr_buf_size;
1502 return qp->rq_hdr_buf + sw_prod * sge->size;
1507 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1509 struct bnxt_qplib_q *sq = &qp->sq;
1513 val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
1516 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1517 val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
1518 /* Flush all the WQE writes to HW */
1519 writeq(val, qp->dpi->dbr);
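/* Post a send WQE: copy inline data or SGEs into the hardware SQE,
 * fill the opcode-specific fields, advance the SQ PSN by the number of
 * MTU-sized packets and update the PSN search entry for the SWQE.
 * For a QP in the error state the WQE is only recorded in the software
 * queue and a work item is queued so the completion is generated via
 * the poll/flush path.
 */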
1522 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1523 struct bnxt_qplib_swqe *wqe)
1525 struct bnxt_qplib_q *sq = &qp->sq;
1526 struct bnxt_qplib_swq *swq;
1527 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
1528 struct sq_sge *hw_sge;
1529 struct bnxt_qplib_nq_work *nq_work = NULL;
1530 bool sch_handler = false;
1533 int i, rc = 0, data_len = 0, pkt_num = 0;
1536 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
1537 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1539 dev_dbg(&sq->hwq.pdev->dev,
1540 "%s Error QP. Scheduling for poll_cq\n",
1546 if (bnxt_qplib_queue_full(sq)) {
1547 dev_err(&sq->hwq.pdev->dev,
1548 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1549 sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1554 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1555 swq = &sq->swq[sw_prod];
1556 swq->wr_id = wqe->wr_id;
1557 swq->type = wqe->type;
1558 swq->flags = wqe->flags;
1560 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1561 swq->start_psn = sq->psn & BTH_PSN_MASK;
1563 hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
1564 hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
1565 [get_sqe_idx(sw_prod)];
1567 memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
1569 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1570 /* Copy the inline data */
1571 if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1572 dev_warn(&sq->hwq.pdev->dev,
1573 "Inline data length > 96 detected\n");
1574 data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
1576 data_len = wqe->inline_len;
1578 memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
1579 wqe_size16 = (data_len + 15) >> 4;
1581 for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
1582 i < wqe->num_sge; i++, hw_sge++) {
1583 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1584 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1585 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1586 data_len += wqe->sg_list[i].size;
1588 /* Each SGE entry = 1 WQE size16 */
1589 wqe_size16 = wqe->num_sge;
1590 /* HW requires the wqe size to have room for at least one SGE,
1591 * even if none was supplied by the ULP
1598 switch (wqe->type) {
1599 case BNXT_QPLIB_SWQE_TYPE_SEND:
1600 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1601 /* Assemble info for Raw Ethertype QPs */
1602 struct sq_send_raweth_qp1 *sqe =
1603 (struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1605 sqe->wqe_type = wqe->type;
1606 sqe->flags = wqe->flags;
1607 sqe->wqe_size = wqe_size16 +
1608 ((offsetof(typeof(*sqe), data) + 15) >> 4);
1609 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1610 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1611 sqe->length = cpu_to_le32(data_len);
1612 sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1613 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1614 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1619 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1620 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1622 struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1624 sqe->wqe_type = wqe->type;
1625 sqe->flags = wqe->flags;
1626 sqe->wqe_size = wqe_size16 +
1627 ((offsetof(typeof(*sqe), data) + 15) >> 4);
1628 sqe->inv_key_or_imm_data = cpu_to_le32(
1630 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1631 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1632 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1633 sqe->dst_qp = cpu_to_le32(
1634 wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
1635 sqe->length = cpu_to_le32(data_len);
1636 sqe->avid = cpu_to_le32(wqe->send.avid &
1638 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1640 sqe->length = cpu_to_le32(data_len);
1644 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1647 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1651 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1652 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1653 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1655 struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1657 sqe->wqe_type = wqe->type;
1658 sqe->flags = wqe->flags;
1659 sqe->wqe_size = wqe_size16 +
1660 ((offsetof(typeof(*sqe), data) + 15) >> 4);
1661 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1662 sqe->length = cpu_to_le32((u32)data_len);
1663 sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1664 sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1666 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1669 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1672 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1673 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1675 struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1677 sqe->wqe_type = wqe->type;
1678 sqe->flags = wqe->flags;
1679 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1680 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1681 sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1682 sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1684 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1687 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1690 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1692 struct sq_localinvalidate *sqe =
1693 (struct sq_localinvalidate *)hw_sq_send_hdr;
1695 sqe->wqe_type = wqe->type;
1696 sqe->flags = wqe->flags;
1697 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1701 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1703 struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1705 sqe->wqe_type = wqe->type;
1706 sqe->flags = wqe->flags;
1707 sqe->access_cntl = wqe->frmr.access_cntl |
1708 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1709 sqe->zero_based_page_size_log =
1710 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1711 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1712 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1713 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1714 temp32 = cpu_to_le32(wqe->frmr.length);
1715 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1716 sqe->numlevels_pbl_page_size_log =
1717 ((wqe->frmr.pbl_pg_sz_log <<
1718 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1719 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1720 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1721 SQ_FR_PMR_NUMLEVELS_MASK);
1723 for (i = 0; i < wqe->frmr.page_list_len; i++)
1724 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1725 wqe->frmr.page_list[i] |
1727 sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1728 sqe->va = cpu_to_le64(wqe->frmr.va);
1732 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1734 struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1736 sqe->wqe_type = wqe->type;
1737 sqe->flags = wqe->flags;
1738 sqe->access_cntl = wqe->bind.access_cntl;
1739 sqe->mw_type_zero_based = wqe->bind.mw_type |
1740 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1741 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1742 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1743 sqe->va = cpu_to_le64(wqe->bind.va);
1744 temp32 = cpu_to_le32(wqe->bind.length);
1745 memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1749 /* Bad wqe, return error */
1753 swq->next_psn = sq->psn & BTH_PSN_MASK;
1754 if (swq->psn_search) {
1758 opcd_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1759 SQ_PSN_SEARCH_START_PSN_MASK);
1760 opcd_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1761 SQ_PSN_SEARCH_OPCODE_MASK);
1762 flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1763 SQ_PSN_SEARCH_NEXT_PSN_MASK);
1764 if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
1765 swq->psn_ext->opcode_start_psn =
1766 cpu_to_le32(opcd_spsn);
1767 swq->psn_ext->flags_next_psn =
1768 cpu_to_le32(flg_npsn);
1770 swq->psn_search->opcode_start_psn =
1771 cpu_to_le32(opcd_spsn);
1772 swq->psn_search->flags_next_psn =
1773 cpu_to_le32(flg_npsn);
1778 /* Store the ULP info in the software structures */
1779 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1780 swq = &sq->swq[sw_prod];
1781 swq->wr_id = wqe->wr_id;
1782 swq->type = wqe->type;
1783 swq->flags = wqe->flags;
1785 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1786 swq->start_psn = sq->psn & BTH_PSN_MASK;
1793 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1795 nq_work->cq = qp->scq;
1796 nq_work->nq = qp->scq->nq;
1797 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1798 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1800 dev_err(&sq->hwq.pdev->dev,
1801 "FP: Failed to allocate SQ nq_work!\n");
1808 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1810 struct bnxt_qplib_q *rq = &qp->rq;
1814 val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
1817 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1818 val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
1819 /* Flush the writes to the HW Rx WQE before ringing the Rx DB */
1820 writeq(val, qp->dpi->dbr);
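/* Post a receive WQE: build the hardware RQE from the supplied SGEs
 * and stash the wr_id in the software queue. As with post_send, an
 * error-state QP gets its completion scheduled through the flush path
 * instead of being handed to the hardware.
 */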
1823 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1824 struct bnxt_qplib_swqe *wqe)
1826 struct bnxt_qplib_q *rq = &qp->rq;
1827 struct rq_wqe *rqe, **rqe_ptr;
1828 struct sq_sge *hw_sge;
1829 struct bnxt_qplib_nq_work *nq_work = NULL;
1830 bool sch_handler = false;
1834 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1836 dev_dbg(&rq->hwq.pdev->dev,
1837 "%s: Error QP. Scheduling for poll_cq\n", __func__);
1840 if (bnxt_qplib_queue_full(rq)) {
1841 dev_err(&rq->hwq.pdev->dev,
1842 "FP: QP (0x%x) RQ is full!\n", qp->id);
1846 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1847 rq->swq[sw_prod].wr_id = wqe->wr_id;
1849 rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1850 rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1852 memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1854 /* Calculate wqe_size16 and data_len */
1855 for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1856 i < wqe->num_sge; i++, hw_sge++) {
1857 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1858 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1859 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1861 rqe->wqe_type = wqe->type;
1862 rqe->flags = wqe->flags;
1863 rqe->wqe_size = wqe->num_sge +
1864 ((offsetof(typeof(*rqe), data) + 15) >> 4);
1865 /* HW requires the wqe size to have room for at least one SGE,
1866 * even if none was supplied by the ULP
1871 /* Supply the rqe->wr_id index to the wr_id_tbl for now */
1872 rqe->wr_id[0] = cpu_to_le32(sw_prod);
1876 /* Store the ULP info in the software structures */
1877 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1878 rq->swq[sw_prod].wr_id = wqe->wr_id;
1883 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1885 nq_work->cq = qp->rcq;
1886 nq_work->nq = qp->rcq->nq;
1887 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1888 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
1890 dev_err(&rq->hwq.pdev->dev,
1891 "FP: Failed to allocate RQ nq_work!\n");
1901 /* Spinlock must be held */
1902 static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1906 val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
1907 DBC_DBC_TYPE_CQ_ARMENA;
1909 /* Flush memory writes before enabling the CQ */
1910 writeq(val, cq->dbr_base);
1913 static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1915 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1920 val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
1922 sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1923 val |= (sw_cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
1924 /* flush memory writes before arming the CQ */
1925 writeq(val, cq->dpi->dbr);
1928 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1930 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1931 struct cmdq_create_cq req;
1932 struct creq_create_cq_resp resp;
1933 struct bnxt_qplib_pbl *pbl;
1937 cq->hwq.max_elements = cq->max_wqe;
1938 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
1939 cq->nmap, &cq->hwq.max_elements,
1940 BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1941 PAGE_SIZE, HWQ_TYPE_QUEUE);
1945 RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1948 dev_err(&rcfw->pdev->dev,
1949 "FP: CREATE_CQ failed due to NULL DPI\n");
1952 req.dpi = cpu_to_le32(cq->dpi->dpi);
1953 req.cq_handle = cpu_to_le64(cq->cq_handle);
1955 req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1956 pbl = &cq->hwq.pbl[PBL_LVL_0];
1957 req.pg_size_lvl = cpu_to_le32(
1958 ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1959 CMDQ_CREATE_CQ_LVL_SFT) |
1960 (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1961 pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1962 pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1963 pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1964 pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1965 pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1966 CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1968 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1970 req.cq_fco_cnq_id = cpu_to_le32(
1971 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1972 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1974 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1975 (void *)&resp, NULL, 0);
1979 cq->id = le32_to_cpu(resp.xid);
1980 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1981 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1982 init_waitqueue_head(&cq->waitq);
1983 INIT_LIST_HEAD(&cq->sqf_head);
1984 INIT_LIST_HEAD(&cq->rqf_head);
1985 spin_lock_init(&cq->compl_lock);
1986 spin_lock_init(&cq->flush_lock);
1988 bnxt_qplib_arm_cq_enable(cq);
1992 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1997 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1999 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2000 struct cmdq_destroy_cq req;
2001 struct creq_destroy_cq_resp resp;
2005 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
2007 req.cq_cid = cpu_to_le32(cq->id);
2008 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2009 (void *)&resp, NULL, 0);
2012 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
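/* __flush_sq()/__flush_rq() fabricate FLUSHED_ERR completions for all
 * WQEs still outstanding on a queue that has been added to the CQ
 * flush lists, consuming at most *budget entries per call.
 */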
2016 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2017 struct bnxt_qplib_cqe **pcqe, int *budget)
2019 u32 sw_prod, sw_cons;
2020 struct bnxt_qplib_cqe *cqe;
2023 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2024 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
2027 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2028 if (sw_cons == sw_prod) {
2031 /* Skip the FENCE WQE completions */
2032 if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
2033 bnxt_qplib_cancel_phantom_processing(qp);
2036 memset(cqe, 0, sizeof(*cqe));
2037 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2038 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2039 cqe->qp_handle = (u64)(unsigned long)qp;
2040 cqe->wr_id = sq->swq[sw_cons].wr_id;
2041 cqe->src_qp = qp->id;
2042 cqe->type = sq->swq[sw_cons].type;
2049 if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
2056 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2057 struct bnxt_qplib_cqe **pcqe, int *budget)
2059 struct bnxt_qplib_cqe *cqe;
2060 u32 sw_prod, sw_cons;
2065 case CMDQ_CREATE_QP1_TYPE_GSI:
2066 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2068 case CMDQ_CREATE_QP_TYPE_RC:
2069 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2071 case CMDQ_CREATE_QP_TYPE_UD:
2072 case CMDQ_CREATE_QP_TYPE_GSI:
2073 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2077 /* Flush the rest of the RQ */
2078 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
2081 sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
2082 if (sw_cons == sw_prod)
2084 memset(cqe, 0, sizeof(*cqe));
2086 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2087 cqe->opcode = opcode;
2088 cqe->qp_handle = (unsigned long)qp;
2089 cqe->wr_id = rq->swq[sw_cons].wr_id;
2095 if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
2102 void bnxt_qplib_mark_qp_error(void *qp_handle)
2104 struct bnxt_qplib_qp *qp = qp_handle;
2109 /* Must block new posting of SQ and RQ */
2110 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2111 bnxt_qplib_cancel_phantom_processing(qp);
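/* do_wa9060(): workaround for a hardware condition in which the CQE
 * for a fence WQE (wr_id == BNXT_QPLIB_FENCE_WRID) can arrive as a
 * separate "phantom" completion. When the PSN-search entry for the
 * current SWQE is marked, completion processing is paused
 * (sq->condition) and the CQ is peeked until that phantom CQE shows up.
 */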
2114 /* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive);
2115 * CQEs are tracked from sw_cq_cons to max_element but are valid only if VALID=1
2117 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2118 u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
2120 struct bnxt_qplib_q *sq = &qp->sq;
2121 struct bnxt_qplib_swq *swq;
2122 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2123 struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
2124 struct cq_req *peek_req_hwcqe;
2125 struct bnxt_qplib_qp *peek_qp;
2126 struct bnxt_qplib_q *peek_sq;
2130 /* Check for the psn_search marking before completing */
2131 swq = &sq->swq[sw_sq_cons];
2132 if (swq->psn_search &&
2133 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2135 swq->psn_search->flags_next_psn = cpu_to_le32
2136 (le32_to_cpu(swq->psn_search->flags_next_psn)
2138 dev_dbg(&cq->hwq.pdev->dev,
2139 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2140 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2141 sq->condition = true;
2142 sq->send_phantom = true;
2144 /* TODO: Only ARM if the previous SQE is ARMALL */
2145 bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
2150 if (sq->condition) {
2151 /* Peek at the completions */
2152 peek_raw_cq_cons = cq->hwq.cons;
2153 peek_sw_cq_cons = cq_cons;
2154 i = cq->hwq.max_elements;
2156 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2157 peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2158 peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
2159 [CQE_IDX(peek_sw_cq_cons)];
2160 /* If the next hwcqe is VALID */
2161 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2162 cq->hwq.max_elements)) {
2164 * The validity test of the entry must be done before
2165 * reading any further.
2168 /* If the next hwcqe is a REQ */
2169 if ((peek_hwcqe->cqe_type_toggle &
2170 CQ_BASE_CQE_TYPE_MASK) ==
2171 CQ_BASE_CQE_TYPE_REQ) {
2172 peek_req_hwcqe = (struct cq_req *)
2174 peek_qp = (struct bnxt_qplib_qp *)
2177 (peek_req_hwcqe->qp_handle));
2178 peek_sq = &peek_qp->sq;
2179 peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
2180 peek_req_hwcqe->sq_cons_idx) - 1
2182 /* If the hwcqe's sq's wr_id matches */
2183 if (peek_sq == sq &&
2184 sq->swq[peek_sq_cons_idx].wr_id ==
2185 BNXT_QPLIB_FENCE_WRID) {
2187 * Unbreak only if the phantom
2190 dev_dbg(&cq->hwq.pdev->dev,
2191 "FP: Got Phantom CQE\n");
2192 sq->condition = false;
2198 /* Valid but not the phantom, so keep looping */
2200 /* Not valid yet, just exit and wait */
2207 dev_err(&cq->hwq.pdev->dev,
2208 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2209 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
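/*
 * Summary of the WA 9060 (phantom CQE) handling above, for readability: once
 * a marked psn_search entry is seen, sq->condition and sq->send_phantom are
 * set and the CQ is re-armed with ARMALL.  Subsequent invocations peek ahead
 * in the CQ ring (without consuming) until a valid REQ CQE is found whose
 * consumer index lands on the BNXT_QPLIB_FENCE_WRID entry of this SQ; only
 * then is sq->condition cleared and normal completion processing resumed.
 */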
2216 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2217 struct cq_req *hwcqe,
2218 struct bnxt_qplib_cqe **pcqe, int *budget,
2219 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2221 struct bnxt_qplib_qp *qp;
2222 struct bnxt_qplib_q *sq;
2223 struct bnxt_qplib_cqe *cqe;
2224 u32 sw_sq_cons, cqe_sq_cons;
2225 struct bnxt_qplib_swq *swq;
2228 qp = (struct bnxt_qplib_qp *)((unsigned long)
2229 le64_to_cpu(hwcqe->qp_handle));
2231 dev_err(&cq->hwq.pdev->dev,
2232 "FP: Process Req qp is NULL\n");
2237 cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
2238 if (cqe_sq_cons > sq->hwq.max_elements) {
2239 dev_err(&cq->hwq.pdev->dev,
2240 "FP: CQ Process req reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2241 cqe_sq_cons, sq->hwq.max_elements);
2245 if (qp->sq.flushed) {
2246 dev_dbg(&cq->hwq.pdev->dev,
2247 "%s: QP in Flush QP = %p\n", __func__, qp);
2250 /* We must walk the sq's swq to fabricate CQEs for all previously
2251 * signaled SWQEs, since CQE aggregation covers everything from the
2252 * current sq cons up to the cqe_sq_cons
2256 sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2257 if (sw_sq_cons == cqe_sq_cons)
2261 swq = &sq->swq[sw_sq_cons];
2262 memset(cqe, 0, sizeof(*cqe));
2263 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2264 cqe->qp_handle = (u64)(unsigned long)qp;
2265 cqe->src_qp = qp->id;
2266 cqe->wr_id = swq->wr_id;
2267 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2269 cqe->type = swq->type;
2271 /* For the last CQE, check the status. On error, regardless of
2272 * whether the request was signaled or not, it must complete with
2273 * the hwcqe error status
2275 if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
2276 hwcqe->status != CQ_REQ_STATUS_OK) {
2277 cqe->status = hwcqe->status;
2278 dev_err(&cq->hwq.pdev->dev,
2279 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2280 sw_sq_cons, cqe->wr_id, cqe->status);
2283 bnxt_qplib_mark_qp_error(qp);
2284 /* Add qp to flush list of the CQ */
2285 bnxt_qplib_add_flush_qp(qp);
2287 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2288 /* Before we complete, do WA 9060 */
2289 if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2294 cqe->status = CQ_REQ_STATUS_OK;
2306 if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
2312 * Back to normal completion mode only after it has completed all of
2313 * the WC for this CQE
2320 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2322 spin_lock(&srq->hwq.lock);
2323 srq->swq[srq->last_idx].next_idx = (int)tag;
2324 srq->last_idx = (int)tag;
2325 srq->swq[srq->last_idx].next_idx = -1;
2326 srq->hwq.cons++; /* Support for SRQE counter */
2327 spin_unlock(&srq->hwq.lock);
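/*
 * Note (illustrative): bnxt_qplib_release_srqe() returns a consumed SRQE tag
 * to a singly linked free list threaded through srq->swq[] via next_idx, with
 * srq->last_idx pointing at the tail and -1 terminating the list.  The post
 * path presumably pops free tags from the head of this list when building new
 * SRQEs; the field tracking the head is not shown in this file.
 */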
2330 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2331 struct cq_res_rc *hwcqe,
2332 struct bnxt_qplib_cqe **pcqe,
2335 struct bnxt_qplib_qp *qp;
2336 struct bnxt_qplib_q *rq;
2337 struct bnxt_qplib_srq *srq;
2338 struct bnxt_qplib_cqe *cqe;
2342 qp = (struct bnxt_qplib_qp *)((unsigned long)
2343 le64_to_cpu(hwcqe->qp_handle));
2345 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2348 if (qp->rq.flushed) {
2349 dev_dbg(&cq->hwq.pdev->dev,
2350 "%s: QP in Flush QP = %p\n", __func__, qp);
2355 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2356 cqe->length = le32_to_cpu(hwcqe->length);
2357 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2358 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2359 cqe->flags = le16_to_cpu(hwcqe->flags);
2360 cqe->status = hwcqe->status;
2361 cqe->qp_handle = (u64)(unsigned long)qp;
2363 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2364 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2365 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2369 if (wr_id_idx >= srq->hwq.max_elements) {
2370 dev_err(&cq->hwq.pdev->dev,
2371 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2372 wr_id_idx, srq->hwq.max_elements);
2375 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2376 bnxt_qplib_release_srqe(srq, wr_id_idx);
2382 if (wr_id_idx >= rq->hwq.max_elements) {
2383 dev_err(&cq->hwq.pdev->dev,
2384 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2385 wr_id_idx, rq->hwq.max_elements);
2388 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2394 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2395 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2396 /* Add qp to flush list of the CQ */
2397 bnxt_qplib_add_flush_qp(qp);
2405 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2406 struct cq_res_ud *hwcqe,
2407 struct bnxt_qplib_cqe **pcqe,
2410 struct bnxt_qplib_qp *qp;
2411 struct bnxt_qplib_q *rq;
2412 struct bnxt_qplib_srq *srq;
2413 struct bnxt_qplib_cqe *cqe;
2417 qp = (struct bnxt_qplib_qp *)((unsigned long)
2418 le64_to_cpu(hwcqe->qp_handle));
2420 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2423 if (qp->rq.flushed) {
2424 dev_dbg(&cq->hwq.pdev->dev,
2425 "%s: QP in Flush QP = %p\n", __func__, qp);
2429 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2430 cqe->length = (u32)le16_to_cpu(hwcqe->length);
2431 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2432 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2433 cqe->flags = le16_to_cpu(hwcqe->flags);
2434 cqe->status = hwcqe->status;
2435 cqe->qp_handle = (u64)(unsigned long)qp;
2436 /* FIXME: Endianness fix needed for smac */
2437 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2438 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2439 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2440 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2442 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2443 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2445 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2450 if (wr_id_idx >= srq->hwq.max_elements) {
2451 dev_err(&cq->hwq.pdev->dev,
2452 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2453 wr_id_idx, srq->hwq.max_elements);
2456 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2457 bnxt_qplib_release_srqe(srq, wr_id_idx);
2463 if (wr_id_idx >= rq->hwq.max_elements) {
2464 dev_err(&cq->hwq.pdev->dev,
2465 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2466 wr_id_idx, rq->hwq.max_elements);
2470 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2476 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2477 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2478 /* Add qp to flush list of the CQ */
2479 bnxt_qplib_add_flush_qp(qp);
2486 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2488 struct cq_base *hw_cqe, **hw_cqe_ptr;
2489 u32 sw_cons, raw_cons;
2492 raw_cons = cq->hwq.cons;
2493 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2494 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2495 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2497 /* Check for Valid bit. If the CQE is valid, return false */
2498 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
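/*
 * Illustrative only (not part of this file): the same valid-bit test drives
 * both bnxt_qplib_is_cq_empty() above and the main poll loop below.  A CQE
 * slot is "new" when its toggle bit agrees with the current ring phase, which
 * flips each time raw_cons wraps the power-of-two ring; CQE_CMP_VALID()
 * encodes the exact polarity.  A self-contained restatement of the lookup,
 * with an assumed helper name:
 */
#if 0
static bool example_cq_has_new_cqe(struct bnxt_qplib_cq *cq, u32 raw_cons)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	u32 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);

	/* Resolve the software index to a page/offset within the CQE ring */
	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

	/* True when the slot's toggle matches the phase implied by raw_cons */
	return CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
}
#endif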
2502 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2503 struct cq_res_raweth_qp1 *hwcqe,
2504 struct bnxt_qplib_cqe **pcqe,
2507 struct bnxt_qplib_qp *qp;
2508 struct bnxt_qplib_q *rq;
2509 struct bnxt_qplib_srq *srq;
2510 struct bnxt_qplib_cqe *cqe;
2514 qp = (struct bnxt_qplib_qp *)((unsigned long)
2515 le64_to_cpu(hwcqe->qp_handle));
2517 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2520 if (qp->rq.flushed) {
2521 dev_dbg(&cq->hwq.pdev->dev,
2522 "%s: QP in Flush QP = %p\n", __func__, qp);
2526 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2527 cqe->flags = le16_to_cpu(hwcqe->flags);
2528 cqe->qp_handle = (u64)(unsigned long)qp;
2531 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2532 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2533 cqe->src_qp = qp->id;
2534 if (qp->id == 1 && !cqe->length) {
2535 /* Add workaround for the length misdetection */
2538 cqe->length = le16_to_cpu(hwcqe->length);
2540 cqe->pkey_index = qp->pkey_index;
2541 memcpy(cqe->smac, qp->smac, ETH_ALEN);
2543 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2544 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2545 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2547 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2550 dev_err(&cq->hwq.pdev->dev,
2551 "FP: SRQ used but not defined??\n");
2554 if (wr_id_idx >= srq->hwq.max_elements) {
2555 dev_err(&cq->hwq.pdev->dev,
2556 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2557 wr_id_idx, srq->hwq.max_elements);
2560 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2561 bnxt_qplib_release_srqe(srq, wr_id_idx);
2567 if (wr_id_idx >= rq->hwq.max_elements) {
2568 dev_err(&cq->hwq.pdev->dev,
2569 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2570 wr_id_idx, rq->hwq.max_elements);
2573 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2579 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2580 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2581 /* Add qp to flush list of the CQ */
2582 bnxt_qplib_add_flush_qp(qp);
2590 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2591 struct cq_terminal *hwcqe,
2592 struct bnxt_qplib_cqe **pcqe,
2595 struct bnxt_qplib_qp *qp;
2596 struct bnxt_qplib_q *sq, *rq;
2597 struct bnxt_qplib_cqe *cqe;
2598 u32 sw_cons = 0, cqe_cons;
2601 /* Check the Status */
2602 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2603 dev_warn(&cq->hwq.pdev->dev,
2604 "FP: CQ Process Terminal Error status = 0x%x\n",
2607 qp = (struct bnxt_qplib_qp *)((unsigned long)
2608 le64_to_cpu(hwcqe->qp_handle));
2610 dev_err(&cq->hwq.pdev->dev,
2611 "FP: CQ Process terminal qp is NULL\n");
2615 /* Must block new posting of SQ and RQ */
2616 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2621 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2622 if (cqe_cons == 0xFFFF)
2625 if (cqe_cons > sq->hwq.max_elements) {
2626 dev_err(&cq->hwq.pdev->dev,
2627 "FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2628 cqe_cons, sq->hwq.max_elements);
2632 if (qp->sq.flushed) {
2633 dev_dbg(&cq->hwq.pdev->dev,
2634 "%s: QP in Flush QP = %p\n", __func__, qp);
2638 /* A Terminal CQE can also aggregate successful CQEs that preceded it.
2639 * So we must complete all CQEs from the current sq's cons up to the
2640 * cqe_cons with status OK
2644 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2645 if (sw_cons == cqe_cons)
2647 if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2648 memset(cqe, 0, sizeof(*cqe));
2649 cqe->status = CQ_REQ_STATUS_OK;
2650 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2651 cqe->qp_handle = (u64)(unsigned long)qp;
2652 cqe->src_qp = qp->id;
2653 cqe->wr_id = sq->swq[sw_cons].wr_id;
2654 cqe->type = sq->swq[sw_cons].type;
2661 if (!(*budget) && sw_cons != cqe_cons) {
2670 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2671 if (cqe_cons == 0xFFFF) {
2673 } else if (cqe_cons > rq->hwq.max_elements) {
2674 dev_err(&cq->hwq.pdev->dev,
2675 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2676 cqe_cons, rq->hwq.max_elements);
2680 if (qp->rq.flushed) {
2681 dev_dbg(&cq->hwq.pdev->dev,
2682 "%s: QP in Flush QP = %p\n", __func__, qp);
2687 /* A Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2688 * from the current rq->cons up to rq->prod, regardless of what rq->cons
2689 * the terminal CQE indicates
2692 /* Add qp to flush list of the CQ */
2693 bnxt_qplib_add_flush_qp(qp);
2698 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2699 struct cq_cutoff *hwcqe)
2701 /* Check the Status */
2702 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2703 dev_err(&cq->hwq.pdev->dev,
2704 "FP: CQ Process Cutoff Error status = 0x%x\n",
2708 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2709 wake_up_interruptible(&cq->waitq);
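/*
 * Note (illustrative): the cutoff CQE is the firmware's signal that a CQ
 * resize has switched over to the new ring; clearing CQ_FLAGS_RESIZE_IN_PROG
 * and waking cq->waitq presumably releases whichever context initiated the
 * resize and is waiting for the switch-over to complete.
 */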
2714 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2715 struct bnxt_qplib_cqe *cqe,
2718 struct bnxt_qplib_qp *qp = NULL;
2719 u32 budget = num_cqes;
2720 unsigned long flags;
2722 spin_lock_irqsave(&cq->flush_lock, flags);
2723 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2724 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2725 __flush_sq(&qp->sq, qp, &cqe, &budget);
2728 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2729 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2730 __flush_rq(&qp->rq, qp, &cqe, &budget);
2732 spin_unlock_irqrestore(&cq->flush_lock, flags);
2734 return num_cqes - budget;
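/*
 * Illustrative only (not part of this file): a minimal sketch of how a
 * consumer could drain the software-generated flush CQEs for a CQ whose QPs
 * have been moved to the error state.  The helper name, batch size, and the
 * way the caller decides to poll the flush lists are assumptions; the verbs
 * layer has its own plumbing for this.
 */
#if 0
static int example_poll_flush_cqes(struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_cqe cqes[16];
	int nr;

	/* Fabricates FLUSHED_ERR completions from the per-CQ flush lists */
	nr = bnxt_qplib_process_flush_list(cq, cqes, ARRAY_SIZE(cqes));
	/* ... convert the nr fabricated CQEs to ib_wc entries here ... */
	return nr;
}
#endif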
2737 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2738 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2740 struct cq_base *hw_cqe, **hw_cqe_ptr;
2741 u32 sw_cons, raw_cons;
2744 raw_cons = cq->hwq.cons;
2748 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2749 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2750 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2752 /* Check for Valid bit */
2753 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2757 * The valid test of the entry must be done first before
2758 * reading any further.
2761 /* Translate the device's CQE format to a qplib_wc */
2762 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2763 case CQ_BASE_CQE_TYPE_REQ:
2764 rc = bnxt_qplib_cq_process_req(cq,
2765 (struct cq_req *)hw_cqe,
2769 case CQ_BASE_CQE_TYPE_RES_RC:
2770 rc = bnxt_qplib_cq_process_res_rc(cq,
2771 (struct cq_res_rc *)
2775 case CQ_BASE_CQE_TYPE_RES_UD:
2776 rc = bnxt_qplib_cq_process_res_ud
2777 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
2780 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2781 rc = bnxt_qplib_cq_process_res_raweth_qp1
2782 (cq, (struct cq_res_raweth_qp1 *)
2783 hw_cqe, &cqe, &budget);
2785 case CQ_BASE_CQE_TYPE_TERMINAL:
2786 rc = bnxt_qplib_cq_process_terminal
2787 (cq, (struct cq_terminal *)hw_cqe,
2790 case CQ_BASE_CQE_TYPE_CUT_OFF:
2791 bnxt_qplib_cq_process_cutoff
2792 (cq, (struct cq_cutoff *)hw_cqe);
2793 /* Done processing this CQ */
2796 dev_err(&cq->hwq.pdev->dev,
2797 "process_cq unknown type 0x%lx\n",
2798 hw_cqe->cqe_type_toggle &
2799 CQ_BASE_CQE_TYPE_MASK);
2806 /* Error while processing the CQE, just skip to the
2809 dev_err(&cq->hwq.pdev->dev,
2810 "process_cqe error rc = 0x%x\n", rc);
2814 if (cq->hwq.cons != raw_cons) {
2815 cq->hwq.cons = raw_cons;
2816 bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ);
2819 return num_cqes - budget;
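/*
 * Illustrative only (not part of this file): a minimal sketch of a
 * budget-driven poll loop over bnxt_qplib_poll_cq() followed by a re-arm via
 * bnxt_qplib_req_notify_cq().  The helper name and fixed batch size are
 * assumptions; the arm type mirrors DBC_DBC_TYPE_CQ_ARMALL used elsewhere in
 * this file.
 */
#if 0
static void example_drain_and_rearm(struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_cqe cqes[16];
	struct bnxt_qplib_qp *lib_qp = NULL;
	int polled;

	do {
		/* Returns how many CQEs were actually consumed this pass */
		polled = bnxt_qplib_poll_cq(cq, cqes, ARRAY_SIZE(cqes), &lib_qp);
		/* ... hand each consumed CQE to the caller here ... */
	} while (polled == ARRAY_SIZE(cqes));

	/* Ask for notification on the next completion once the ring is empty */
	bnxt_qplib_req_notify_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
}
#endif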
2822 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2825 bnxt_qplib_arm_cq(cq, arm_type);
2826 /* Use cq->arm_state to track whether the cq handler needs to be invoked */
2827 atomic_set(&cq->arm_state, 1);
2830 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2832 flush_workqueue(qp->scq->nq->cqn_wq);
2833 if (qp->scq != qp->rcq)
2834 flush_workqueue(qp->rcq->nq->cqn_wq);