/* Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
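
/* PBL: chains of DMA-coherent pages that the HW walks to locate queue
 * memory. For user (umem) backed PBLs only the lookup arrays are owned
 * here; the pages themselves belong to the ib_umem and must not be freed.
 */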
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	vfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}
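
/* Record the DMA address of each block of a user-memory region in the PBL;
 * pg_arr stays NULL because no kernel mapping of the user pages is kept.
 */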
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
					   struct bnxt_qplib_sg_info *sginfo)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
		pbl->pg_arr[i] = NULL;
		pbl->pg_count++;
		i++;
	}
}
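
/* Allocate the page-pointer and DMA-address arrays for one PBL level, then
 * either DMA-allocate kernel pages or record the caller's umem blocks.
 */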
static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	bool is_umem = false;
	u32 pages;
	int i;

	if (sginfo->nopte)
		return 0;
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;
	/* page ptr arrays */
	pbl->pg_arr = vmalloc(pages * sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sginfo->umem) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}
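
/* HWQ: a hardware queue described by up to three PBL indirection levels.
 * Only the top level may reference user memory; lower levels are always
 * kernel-owned, hence the is_user distinction when freeing.
 */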
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(res, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */
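
/* Allocate a HWQ and link its PBL levels: level 0 alone when everything
 * fits in one page, a PBL of PTEs for up to MAX_PBL_LVL_1_PGS pages, and
 * a PDE page plus PBLs plus PTEs beyond that.
 */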
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr)
{
	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	u32 depth, stride, npbl, npde;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct bnxt_qplib_res *res;
	struct pci_dev *pdev;
	int i, rc, lvl;

	res = hwq_attr->res;
	pdev = res->pdev;
	pg_size = hwq_attr->sginfo->pgsize;
	hwq->level = PBL_LVL_MAX;

	depth = roundup_pow_of_two(hwq_attr->depth);
	stride = roundup_pow_of_two(hwq_attr->stride);
	if (hwq_attr->aux_depth) {
		aux_slots = hwq_attr->aux_depth;
		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
		aux_pages = (aux_slots * aux_size) / pg_size;
		if ((aux_slots * aux_size) % pg_size)
			aux_pages++;
	}

	if (!hwq_attr->sginfo->umem) {
		hwq->is_user = false;
		npages = (depth * stride) / pg_size + aux_pages;
		if ((depth * stride) % pg_size)
			npages++;
		if (!npages)
			return -EINVAL;
		hwq_attr->sginfo->npages = npages;
	} else {
		unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
			hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);

		hwq->is_user = true;
		npages = sginfo_num_pages;
		npages = (npages * PAGE_SIZE) /
			  BIT_ULL(hwq_attr->sginfo->pgshft);
		if ((sginfo_num_pages * PAGE_SIZE) %
		     BIT_ULL(hwq_attr->sginfo->pgshft))
			if (!npages)
				npages++;
	}

	if (npages == MAX_PBL_LVL_0_PGS) {
		/* This request is Level 0, map PTE */
		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
		if (rc)
			goto fail;
		hwq->level = PBL_LVL_0;
	}

	if (npages > MAX_PBL_LVL_0_PGS) {
		if (npages > MAX_PBL_LVL_1_PGS) {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 2 levels of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			npde = npbl >> MAX_PDL_LVL_SHIFT;
			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
				npde++;
			/* Alloc PDE pages */
			sginfo.pgsize = npde * pg_size;
			sginfo.npages = 1;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;

			/* Alloc PBL pages */
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
			if (rc)
				goto fail;
			/* Fill PDL with PBL page pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			if (hwq_attr->type == HWQ_TYPE_MR) {
			/* For MR it is expected that we supply only 1
			 * contiguous page i.e only 1 entry in the PDL that
			 * will contain all the PBLs for the user supplied
			 * memory region
			 */
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[0][i] = src_phys_ptr[i] |
						flag;
			} else {
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
						src_phys_ptr[i] |
						PTU_PDE_VALID;
			}
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_2;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBLs with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		} else { /* pages < 512 npbl = 1, npde = 0 */
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 1 level of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			/* Alloc PBL page */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_1;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBL with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		}
	}
done:
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->pdev = pdev;
	hwq->depth = hwq_attr->depth;
	hwq->max_elements = depth;
	hwq->element_size = stride;
	hwq->qe_ppg = pg_size / stride;
	/* For direct access to the elements */
	lvl = hwq->level;
	if (hwq_attr->sginfo->nopte && hwq->level)
		lvl = hwq->level - 1;
	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
	spin_lock_init(&hwq->lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, hwq);
	return -ENOMEM;
}

/* Context Tables */
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
	/* restore original pde level before destroy */
	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}
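
/* Allocate the TQM PDE ring plus one context ring per non-zero TQM
 * allocation request; each request ring holds qpc_count * qcount[i] slots.
 */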
static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_tqm_ctx *tqmctx;
	int rc = 0;
	int i;

	tqmctx = &ctx->tqm_ctx;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;
	hwq_attr.depth = 512;
	hwq_attr.stride = sizeof(u64);
	/* Alloc pdl buffer */
	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
	if (rc)
		goto out;
	/* Save original pdl level */
	tqmctx->pde_level = tqmctx->pde.level;

	hwq_attr.stride = 1;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!tqmctx->qcount[i])
			continue;
		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
		if (rc)
			goto out;
	}
out:
	return rc;
}
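
/* Wire each allocated TQM ring into the PDE page table, then record the
 * resulting indirection depth in pde.level for the firmware.
 */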
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}
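
/* Allocate the TQM rings, then program their page tables. */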
static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	int rc = 0;

	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
	return rc;
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories which are used by the chip fw.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the tbl requested, either a 1 Page Buffer List
 *     or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
 *     instead.
 *     Table might be employed as follows:
 *             For 0      < ctx size <= 1 PAGE, 0 levels of indirection are used
 *             For 1 PAGE < ctx size <= 512 entries size, 1 level of indirection is used
 *             For 512    < ctx size <= MAX, 2 levels of indirection are used
 * Returns:
 *     0 if success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}
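
/* Derive the port GUID from the MAC address: insert 0xFFFE in the middle
 * and flip the universal/local bit, per the standard MAC-48 to EUI-64 rule.
 */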
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
	u8 mac[ETH_ALEN];

	/* MAC-48 to EUI-64 mapping */
	memcpy(mac, dev_addr, ETH_ALEN);
	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

/* SGIDs */
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	if (!pkey_tbl->tbl)
		dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
	else
		kfree(pkey_tbl->tbl);

	pkey_tbl->tbl = NULL;
	pkey_tbl->max = 0;
	pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
				     u16 max)
{
	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!pkey_tbl->tbl)
		return -ENOMEM;

	pkey_tbl->max = max;
	return 0;
}
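
/* PDs: free IDs are tracked as set bits in a bitmap, so allocating is a
 * find_first_bit() followed by clear_bit(), and freeing sets the bit back.
 */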
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	/* All bits set: every PD starts out free */
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}
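
/* DPIs: each doorbell page index maps one page of the doorbell BAR; the
 * free-ID bitmap works the same way as the PD table above.
 */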
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
					  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	dpit->dbr_bar_reg_iomem = NULL;
	return -ENOMEM;
}

/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
	pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	u16 pkey = 0xFFFF;

	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

	/* pkey default = 0xFFFF */
	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	/* 128 byte aligned context memory is required only for 57500.
	 * However, making this unconditional does not harm previous
	 * versions.
	 */
	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}
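
/* Enable PCIe 32-bit and 64-bit AtomicOp completion toward the root port
 * and report whether the requester-enable bit could be set in DEVCTL2.
 */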
int bnxt_qplib_determine_atomics(struct pci_dev *dev)
{
	int comp;
	u16 ctl2;

	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP32);
	if (comp)
		return -EOPNOTSUPP;
	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (comp)
		return -EOPNOTSUPP;
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
	return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}