/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);

/* PBL */
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
                       bool is_umem)
{
        int i;

        if (!is_umem) {
                for (i = 0; i < pbl->pg_count; i++) {
                        if (pbl->pg_arr[i])
                                dma_free_coherent(&pdev->dev, pbl->pg_size,
                                                  (void *)((unsigned long)
                                                   pbl->pg_arr[i] &
                                                  PAGE_MASK),
                                                  pbl->pg_map_arr[i]);
                        else
                                dev_warn(&pdev->dev,
                                         "PBL free pg_arr[%d] empty?!\n", i);
                        pbl->pg_arr[i] = NULL;
                }
        }
        kfree(pbl->pg_arr);
        pbl->pg_arr = NULL;
        kfree(pbl->pg_map_arr);
        pbl->pg_map_arr = NULL;
        pbl->pg_count = 0;
        pbl->pg_size = 0;
}

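/*
 * Build a page buffer list (PBL).  When no scatterlist is given, the
 * backing pages are allocated here with dma_alloc_coherent() and are
 * owned by the kernel.  When a scatterlist is given (user memory pinned
 * via ib_umem), only the DMA addresses are recorded and pg_arr[] stays
 * NULL, which is what __free_pbl() keys off via its is_umem argument.
 */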
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
                       struct scatterlist *sghead, u32 pages, u32 pg_size)
{
        struct sg_dma_page_iter sg_iter;
        bool is_umem = false;
        int i;

        /* page ptr arrays */
        pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
        if (!pbl->pg_arr)
                return -ENOMEM;

        pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
        if (!pbl->pg_map_arr) {
                kfree(pbl->pg_arr);
                pbl->pg_arr = NULL;
                return -ENOMEM;
        }
        pbl->pg_count = 0;
        pbl->pg_size = pg_size;

        if (!sghead) {
                for (i = 0; i < pages; i++) {
                        pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
                                                            pbl->pg_size,
                                                            &pbl->pg_map_arr[i],
                                                            GFP_KERNEL);
                        if (!pbl->pg_arr[i])
                                goto fail;
                        pbl->pg_count++;
                }
        } else {
                i = 0;
                is_umem = true;
                for_each_sg_dma_page(sghead, &sg_iter, pages, 0) {
                        pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
                        pbl->pg_arr[i] = NULL;
                        pbl->pg_count++;
                        i++;
                }
        }

        return 0;

fail:
        __free_pbl(pdev, pbl, is_umem);
        return -ENOMEM;
}

/* HWQ */
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
        int i;

        if (!hwq->max_elements)
                return;
        if (hwq->level >= PBL_LVL_MAX)
                return;

        for (i = 0; i < hwq->level + 1; i++) {
                if (i == hwq->level)
                        __free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
                else
                        __free_pbl(pdev, &hwq->pbl[i], false);
        }

        hwq->level = PBL_LVL_MAX;
        hwq->max_elements = 0;
        hwq->element_size = 0;
        hwq->prod = 0;
        hwq->cons = 0;
        hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */
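/*
 * Allocate and initialize a hardware queue.  The queue is described to
 * the chip through up to three PBL levels: PBL_LVL_0 is a single page
 * that either holds the queue itself or points at PBL_LVL_1 pages,
 * which in turn hold the queue pages or, for the largest queues, point
 * at PBL_LVL_2 pages that do.  For HWQ_TYPE_QUEUE, the last and
 * next-to-last entries are additionally flagged (PTU_PTE_LAST /
 * PTU_PTE_NEXT_TO_LAST) to mark the end of the ring.
 */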
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
                              struct scatterlist *sghead, int nmap,
                              u32 *elements, u32 element_size, u32 aux,
                              u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
        u32 pages, slots, size, aux_pages = 0, aux_size = 0;
        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
        int i, rc;

        hwq->level = PBL_LVL_MAX;

        slots = roundup_pow_of_two(*elements);
        if (aux) {
                aux_size = roundup_pow_of_two(aux);
                aux_pages = (slots * aux_size) / pg_size;
                if ((slots * aux_size) % pg_size)
                        aux_pages++;
        }
        size = roundup_pow_of_two(element_size);

        if (!sghead) {
                hwq->is_user = false;
                pages = (slots * size) / pg_size + aux_pages;
                if ((slots * size) % pg_size)
                        pages++;
                if (!pages)
                        return -EINVAL;
        } else {
                hwq->is_user = true;
                pages = nmap;
        }
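
        /*
         * Level selection: up to MAX_PBL_LVL_0_PGS pages are referenced
         * directly from the level-0 PBL; beyond that, one level of
         * indirection is built (the level-0 page points at level-1
         * pages), and beyond MAX_PBL_LVL_1_PGS a second level is added.
         */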

        /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
        if (sghead && (pages == MAX_PBL_LVL_0_PGS))
                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
                                 pages, pg_size);
        else
                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
        if (rc)
                goto fail;

        hwq->level = PBL_LVL_0;

        if (pages > MAX_PBL_LVL_0_PGS) {
                if (pages > MAX_PBL_LVL_1_PGS) {
                        /* 2 levels of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
                                         MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PDE_VALID;
                        hwq->level = PBL_LVL_1;

                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
                                         pages, pg_size);
                        if (rc)
                                goto fail;

                        /* Fill in lvl1 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PTE_VALID;
                        }
                        if (hwq_type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_2].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                                                  PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                        hwq->level = PBL_LVL_2;
                } else {
                        u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
                                                PTU_PTE_VALID;

                        /* 1 level of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
                                         pages, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | flag;
                        }
                        if (hwq_type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_1].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                                                  PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                        hwq->level = PBL_LVL_1;
                }
        }
        hwq->pdev = pdev;
        spin_lock_init(&hwq->lock);
        hwq->prod = 0;
        hwq->cons = 0;
        *elements = hwq->max_elements = slots;
        hwq->element_size = size;

        /* For direct access to the elements */
        hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
        hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;

        return 0;

fail:
        bnxt_qplib_free_hwq(pdev, hwq);
        return -ENOMEM;
}

/* Context Tables */
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
                         struct bnxt_qplib_ctx *ctx)
{
        int i;

        bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
                bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
        bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
        bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memory regions used by the chip firmware.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a single-page
 *     Page Buffer List or a 1-to-2-stage indirection Page Directory List
 *     plus one PBL is used.
 *     The tables are laid out as follows:
 *             For 0      < ctx size <= 1 page,    0 levels of indirection
 *             For 1 page < ctx size <= 512 pages, 1 level of indirection
 *             For 512    < ctx size <= MAX,       2 levels of indirection
 * Returns:
 *     0 if success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
                         struct bnxt_qplib_ctx *ctx,
                         bool virt_fn, bool is_p5)
{
        int i, j, k, rc = 0;
        int fnz_idx = -1;
        __le64 **pbl_ptr;

        if (virt_fn || is_p5)
                goto stats_alloc;

        /* QPC Tables */
        ctx->qpc_tbl.max_elements = ctx->qpc_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
                                       &ctx->qpc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* MRW Tables */
        ctx->mrw_tbl.max_elements = ctx->mrw_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
                                       &ctx->mrw_tbl.max_elements,
                                       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* SRQ Tables */
        ctx->srqc_tbl.max_elements = ctx->srqc_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
                                       &ctx->srqc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* CQ Tables */
        ctx->cq_tbl.max_elements = ctx->cq_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
                                       &ctx->cq_tbl.max_elements,
                                       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* TQM Buffer */
        ctx->tqm_pde.max_elements = 512;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
                                       &ctx->tqm_pde.max_elements, sizeof(u64),
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
                if (!ctx->tqm_count[i])
                        continue;
                ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
                                               ctx->tqm_count[i];
                rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
                                               &ctx->tqm_tbl[i].max_elements, 1,
                                               0, PAGE_SIZE, HWQ_TYPE_CTX);
                if (rc)
                        goto fail;
        }
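
        /*
         * Stitch the per-request TQM tables into the TQM PDE page: each
         * table gets a MAX_TQM_ALLOC_BLK_SIZE-entry window in the PDE,
         * filled with the DMA addresses of the table's level-1 PBL
         * pages (or its single level-0 page for smaller tables).
         */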
        pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
        for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
             i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
                if (!ctx->tqm_tbl[i].max_elements)
                        continue;
                if (fnz_idx == -1)
                        fnz_idx = i;
                switch (ctx->tqm_tbl[i].level) {
                case PBL_LVL_2:
                        for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
                             k++)
                                pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
                                  cpu_to_le64(
                                    ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
                                    | PTU_PTE_VALID);
                        break;
                case PBL_LVL_1:
                case PBL_LVL_0:
                default:
                        pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
                                ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
                                PTU_PTE_VALID);
                        break;
                }
        }
        if (fnz_idx == -1)
                fnz_idx = 0;
        ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
                             PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;

        /* TIM Buffer */
        ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
                                       &ctx->tim_tbl.max_elements, 1,
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

stats_alloc:
        /* Stats */
        rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
        if (rc)
                goto fail;

        return 0;

fail:
        bnxt_qplib_free_ctx(pdev, ctx);
        return rc;
}

/* GUID */
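/*
 * Derive the port GUID from the MAC address using the standard MAC-48
 * to EUI-64 expansion: flip the universally/locally administered bit of
 * the first octet and splice 0xFF, 0xFE between the OUI and the
 * NIC-specific octets.  For example, the (hypothetical) MAC
 * 00:11:22:33:44:55 yields the GUID 02:11:22:ff:fe:33:44:55.
 */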
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
        u8 mac[ETH_ALEN];

        /* MAC-48 to EUI-64 mapping */
        memcpy(mac, dev_addr, ETH_ALEN);
        guid[0] = mac[0] ^ 2;
        guid[1] = mac[1];
        guid[2] = mac[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = mac[3];
        guid[6] = mac[4];
        guid[7] = mac[5];
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        kfree(sgid_tbl->tbl);
        kfree(sgid_tbl->hw_id);
        kfree(sgid_tbl->ctx);
        kfree(sgid_tbl->vlan);
        sgid_tbl->tbl = NULL;
        sgid_tbl->hw_id = NULL;
        sgid_tbl->ctx = NULL;
        sgid_tbl->vlan = NULL;
        sgid_tbl->max = 0;
        sgid_tbl->active = 0;
}

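/*
 * The SGID table is kept as four parallel arrays indexed by GID slot:
 * the GID value itself, the HW id the firmware returns for the entry,
 * an opaque per-entry context pointer, and a flag noting whether the
 * entry is VLAN tagged.
 */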
static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     u16 max)
{
        sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
        if (!sgid_tbl->tbl)
                return -ENOMEM;

        sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!sgid_tbl->hw_id)
                goto out_free1;

        sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
        if (!sgid_tbl->ctx)
                goto out_free2;

        sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
        if (!sgid_tbl->vlan)
                goto out_free3;

        sgid_tbl->max = max;
        return 0;
out_free3:
        kfree(sgid_tbl->ctx);
        sgid_tbl->ctx = NULL;
out_free2:
        kfree(sgid_tbl->hw_id);
        sgid_tbl->hw_id = NULL;
out_free1:
        kfree(sgid_tbl->tbl);
        sgid_tbl->tbl = NULL;
        return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
                                        struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        int i;

        for (i = 0; i < sgid_tbl->max; i++) {
                if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                           sizeof(bnxt_qplib_gid_zero)))
                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
        }
        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
        memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
        sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     struct net_device *netdev)
{
        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        if (!pkey_tbl->tbl)
                dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
        else
                kfree(pkey_tbl->tbl);

        pkey_tbl->tbl = NULL;
        pkey_tbl->max = 0;
        pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl,
                                     u16 max)
{
        pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!pkey_tbl->tbl)
                return -ENOMEM;

        pkey_tbl->max = max;
        return 0;
}

/* PDs */
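/*
 * PD ids are handed out from a bitmap in which a set bit marks a free
 * id: allocation clears the first set bit, deallocation sets it back.
 * Note that the table sizing below assumes max is a multiple of 8,
 * which it typically is since the device limits are powers of two.
 */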
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
        u32 bit_num;

        bit_num = find_first_bit(pdt->tbl, pdt->max);
        if (bit_num == pdt->max)
                return -ENOMEM;

        /* Found unused PD */
        clear_bit(bit_num, pdt->tbl);
        pd->id = bit_num;
        return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_pd_tbl *pdt,
                          struct bnxt_qplib_pd *pd)
{
        if (test_and_set_bit(pd->id, pdt->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
                         pd->id);
                return -EINVAL;
        }
        pd->id = 0;
        return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
        kfree(pdt->tbl);
        pdt->tbl = NULL;
        pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
                                   struct bnxt_qplib_pd_tbl *pdt,
                                   u32 max)
{
        u32 bytes;

        bytes = max >> 3;
        if (!bytes)
                bytes = 1;
        pdt->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!pdt->tbl)
                return -ENOMEM;

        pdt->max = max;
        memset((u8 *)pdt->tbl, 0xFF, bytes);

        return 0;
}

/* DPIs */
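/*
 * Each DPI (doorbell page index) selects one page-sized window in the
 * doorbell BAR.  The free-id bitmap works like the PD table above; on
 * allocation the caller gets both the kernel mapping (dbr) and the bus
 * address (umdbr) of the page, the latter presumably for mapping the
 * doorbell into user space.
 */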
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
                         struct bnxt_qplib_dpi     *dpi,
                         void                      *app)
{
        u32 bit_num;

        bit_num = find_first_bit(dpit->tbl, dpit->max);
        if (bit_num == dpit->max)
                return -ENOMEM;

        /* Found unused DPI */
        clear_bit(bit_num, dpit->tbl);
        dpit->app_tbl[bit_num] = app;

        dpi->dpi = bit_num;
        dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
        dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

        return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
                           struct bnxt_qplib_dpi_tbl *dpit,
                           struct bnxt_qplib_dpi     *dpi)
{
        if (dpi->dpi >= dpit->max) {
                dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
                return -EINVAL;
        }
        if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
                         dpi->dpi);
                return -EINVAL;
        }
        if (dpit->app_tbl)
                dpit->app_tbl[dpi->dpi] = NULL;
        memset(dpi, 0, sizeof(*dpi));

        return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res     *res,
                                    struct bnxt_qplib_dpi_tbl *dpit)
{
        kfree(dpit->tbl);
        kfree(dpit->app_tbl);
        if (dpit->dbr_bar_reg_iomem)
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
                                    struct bnxt_qplib_dpi_tbl *dpit,
                                    u32                       dbr_offset)
{
        u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
        resource_size_t bar_reg_base;
        u32 dbr_len, bytes;

        if (dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
                        dbr_bar_reg);
                return -EALREADY;
        }

        bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
        if (!bar_reg_base) {
                dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
                        dbr_bar_reg);
                return -ENOMEM;
        }

        dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
        if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
                dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
                return -ENOMEM;
        }

        dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
                                                  dbr_len);
        if (!dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev,
                        "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
                return -ENOMEM;
        }

        dpit->unmapped_dbr = bar_reg_base + dbr_offset;
        dpit->max = dbr_len / PAGE_SIZE;

        dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
        if (!dpit->app_tbl)
                goto unmap_io;

        bytes = dpit->max >> 3;
        if (!bytes)
                bytes = 1;

        dpit->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!dpit->tbl) {
                kfree(dpit->app_tbl);
                dpit->app_tbl = NULL;
                goto unmap_io;
        }

        memset((u8 *)dpit->tbl, 0xFF, bytes);

        return 0;

unmap_io:
        pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        return -ENOMEM;
}

/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
        pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        u16 pkey = 0xFFFF;

        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

        /* pkey default = 0xFFFF */
        bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        if (stats->dma) {
                dma_free_coherent(&pdev->dev, stats->size,
                                  stats->dma, stats->dma_map);
        }
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
        /* 128-byte aligned context memory is required only for the 57500
         * series.  Making this unconditional does no harm on earlier
         * generations.
         */
        stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
        stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
                                        &stats->dma_map, GFP_KERNEL);
        if (!stats->dma) {
                dev_err(&pdev->dev, "Stats DMA allocation failed\n");
                return -ENOMEM;
        }
        return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
        bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
        bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

        return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
        bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
        bnxt_qplib_free_pd_tbl(&res->pd_tbl);
        bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);

        res->netdev = NULL;
        res->pdev = NULL;
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
                         struct net_device *netdev,
                         struct bnxt_qplib_dev_attr *dev_attr)
{
        int rc = 0;

        res->pdev = pdev;
        res->netdev = netdev;

        rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
        if (rc)
                goto fail;

        return 0;
fail:
        bnxt_qplib_free_res(res);
        return rc;
}