/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

/* QM constants */
#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT		4
#define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS 256

/* Timers constants */
#define TM_SHIFT	7
#define TM_ALIGN	BIT(TM_SHIFT)
#define TM_ELEM_SIZE	4

#define ILT_DEFAULT_HW_P_SIZE	4

#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		(~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4

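/* Illustrative sketch (hypothetical helper, not part of the driver flow):
 * how a 64-bit ILT entry would be composed from a DMA address using the
 * masks above. It assumes the SET_FIELD() accessor from the qed HSI
 * headers; hardware consumes the result as ILT_ENTRY_IN_REGS 32-bit
 * registers of ILT_REG_SIZE_IN_BYTES each.
 */
static inline u64 __maybe_unused qed_ilt_entry_example(dma_addr_t phys)
{
	u64 ilt_entry = 0;

	/* The physical address is stored at 4K granularity */
	SET_FIELD(ilt_entry, ILT_ENTRY_PHY_ADDR, (u64)phys >> 12);
	SET_FIELD(ilt_entry, ILT_ENTRY_VALID, 1ULL);

	return ilt_entry;
}
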
/* connection context union */
union conn_context {
	struct e4_core_conn_context core_ctx;
	struct e4_eth_conn_context eth_ctx;
	struct e4_iscsi_conn_context iscsi_ctx;
	struct e4_fcoe_conn_context fcoe_ctx;
	struct e4_roce_conn_context roce_ctx;
};

/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
	struct e4_iscsi_task_context iscsi_ctx;
	struct e4_fcoe_task_context fcoe_ctx;
};

/* TYPE-1 task context - ROCE */
union type1_task_context {
	struct e4_rdma_task_context roce_ctx;
};

struct src_ent {
	u8 opaque[56];
	u64 next;
};

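/* A compile-time note (added for illustration): each searcher entry is 64B
 * (56B opaque plus an 8B 'next' pointer), so a power-of-two ILT page always
 * holds a whole number of entries; the T2 free-list chaining below relies
 * on this.
 */
static_assert(sizeof(struct src_ent) == 64);
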
#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES	BIT(CDUT_SEG_ALIGNMET + 12)

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)

/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)

static bool src_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_cid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_ROCE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_tid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_FCOE;
}

/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_cdu_iids *iids)
{
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
	}
}

/* counts the iids for the Searcher block configuration */
struct qed_src_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_src_iids *iids)
{
	u32 i;

	for (i = 0; i < MAX_CONN_TYPES; i++) {
		if (!src_proto(i))
			continue;

		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
	}

	/* Also account for the L2 (ARFS) filter entries */
	iids->pf_cids += p_mngr->arfs_count;
}

/* counts the iids for the Timers block configuration */
struct qed_tm_iids {
	u32 pf_cids;
	u32 pf_tids[NUM_TASK_PF_SEGMENTS];	/* per segment */
	u32 pf_tids_total;
	u32 per_vf_cids;
	u32 per_vf_tids;
};

static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_cxt_mngr *p_mngr,
			    struct qed_tm_iids *iids)
{
	bool tm_vf_required = false;
	bool tm_required = false;
	int i, j;
	/* Timers is a special case: we don't count how many cids require
	 * timers, but rather the maximum cid that will be used by the
	 * timer block. Therefore we traverse in reverse order, and once
	 * we hit a protocol that requires the timers memory, we sum all
	 * the protocols up to that one.
	 */
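	/* Worked example (hypothetical counts): if ROCE is the
	 * highest-indexed protocol with a non-zero cid_count, every
	 * protocol below it is summed as well, even ones that need no
	 * timers, since the block is addressed by absolute cid.
	 */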
	for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];

		if (tm_cid_proto(i) || tm_required) {
			if (p_cfg->cid_count)
				tm_required = true;

			iids->pf_cids += p_cfg->cid_count;
		}

		if (tm_cid_proto(i) || tm_vf_required) {
			if (p_cfg->cids_per_vf)
				tm_vf_required = true;

			iids->per_vf_cids += p_cfg->cids_per_vf;
		}

		if (tm_tid_proto(i)) {
			struct qed_tid_seg *segs = p_cfg->tid_seg;

			/* for each segment there is at most one
			 * protocol for which count is not 0.
			 */
			for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
				iids->pf_tids[j] += segs[j].count;

			/* The last array element is for the VFs. As with the
			 * PF segments, there can be only one protocol for
			 * which this value is not 0.
			 */
			iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
		}
	}

	iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
	iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
	iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);

	for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
		iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
		iids->pf_tids_total += iids->pf_tids[j];
	}
}

static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_qm_iids *iids)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *segs;
	u32 vf_cids = 0, type, j;
	u32 vf_tids = 0;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->cids += p_mngr->conn_cfg[type].cid_count;
		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

		segs = p_mngr->conn_cfg[type].tid_seg;
		/* for each segment there is at most one
		 * protocol for which count is not 0.
		 */
		for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
			iids->tids += segs[j].count;

		/* The last array element is for the VFs. As with the
		 * PF segments, there can be only one protocol for
		 * which this value is not 0.
		 */
		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
	}

	iids->vf_cids = vf_cids;
	iids->tids += vf_tids * p_mngr->vf_count;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}

static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
						u32 seg)
{
	struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
	u32 i;

	/* Find the protocol with tid count > 0 for this segment.
	 * Note: there can only be one and this is already validated.
	 */
	for (i = 0; i < MAX_CONN_TYPES; i++)
		if (p_cfg->conn_cfg[i].tid_seg[seg].count)
			return &p_cfg->conn_cfg[i].tid_seg[seg];
	return NULL;
}

static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
				  u32 num_srqs, u32 num_xrc_srqs)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	p_mgr->srq_count = num_srqs;
	p_mgr->xrc_srq_count = num_xrc_srqs;
}

u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
			      enum ilt_clients ilt_client)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];

	return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
}

static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
{
	u32 page_size;

	page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
	return page_size / XRC_SRQ_CXT_SIZE;
}

u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	u32 total_srqs;

	total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;

	return total_srqs;
}

/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type type,
					u32 cid_count, u32 vf_cid_cnt)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);

	if (type == PROTOCOLID_ROCE) {
		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
		u32 align = elems_per_page * DQ_RANGE_ALIGN;

		p_conn->cid_count = roundup(p_conn->cid_count, align);
	}
}

u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type, u32 *vf_cid)
{
	if (vf_cid)
		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	u32 cnt = 0;
	int i;

	for (i = 0; i < TASK_SEGMENTS; i++)
		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

	return cnt;
}

static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type proto,
					u8 seg,
					u8 seg_type, u32 count, bool has_fl)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

	p_seg->count = count;
	p_seg->has_fl_mem = has_fl;
	p_seg->type = seg_type;
}

static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 start_line, u32 total_size, u32 elem_size)
{
	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	/* verify that it's called only once for each block */
	if (p_blk->total_size)
		return;

	p_blk->total_size = total_size;
	p_blk->real_size_in_page = 0;
	if (elem_size)
		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
	p_blk->start_line = start_line;
}

static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
				 struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 *p_line, enum ilt_clients client_id)
{
	if (!p_blk->total_size)
		return;

	if (!p_cli->active)
		p_cli->first.val = *p_line;

	p_cli->active = true;
	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
	p_cli->last.val = *p_line - 1;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
		   client_id, p_cli->first.val,
		   p_cli->last.val, p_blk->total_size,
		   p_blk->real_size_in_page, p_blk->start_line);
}

static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
					enum ilt_clients ilt_client)
{
	u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
	struct qed_ilt_client_cfg *p_cli;
	u32 lines_to_skip = 0;
	u32 cxts_per_p;

	if (ilt_client == ILT_CLI_CDUC) {
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
		    (u32) CONN_CXT_SIZE(p_hwfn);

		lines_to_skip = cid_count / cxts_per_p;
	}

	return lines_to_skip;
}

static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
						  *p_cli)
{
	p_cli->active = false;
	p_cli->first.val = 0;
	p_cli->last.val = 0;
	return p_cli;
}

static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
{
	p_blk->total_size = 0;
	return p_blk;
}

static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 cli_idx, blk_idx;

	for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
		for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
			clients[cli_idx].pf_blks[blk_idx].total_size = 0;

		for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
			clients[cli_idx].vf_blks[blk_idx].total_size = 0;
	}
}

int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 curr_line, total, i, task_size, line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_cdu_iids cdu_iids;
	struct qed_src_iids src_iids;
	struct qed_qm_iids qm_iids;
	struct qed_tm_iids tm_iids;
	struct qed_tid_seg *p_seg;

	memset(&qm_iids, 0, sizeof(qm_iids));
	memset(&cdu_iids, 0, sizeof(cdu_iids));
	memset(&src_iids, 0, sizeof(src_iids));
	memset(&tm_iids, 0, sizeof(tm_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

	/* Reset all ILT blocks at the beginning of ILT computing in order
	 * to prevent memory allocation for irrelevant blocks afterwards.
	 */
	qed_cxt_ilt_blk_reset(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	/* CDUC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);

	curr_line = p_mngr->pf_start_line;

	/* CDUC PF */
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC and QM clients */
	qed_cxt_cdu_iids(p_mngr, &cdu_iids);

	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);

	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
							       ILT_CLI_CDUC);

	/* CDUC VF */
	p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->vf_total_lines = curr_line - p_blk->start_line;

	for (i = 1; i < p_mngr->vf_count; i++)
		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUC);

	/* CDUT PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
	p_cli->first.val = curr_line;

	/* first the 'working' task memory */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}

	/* next the 'init' task memory (forced load memory) */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk =
		    qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);

		if (!p_seg->has_fl_mem) {
			/* The segment is active (total size of 'working'
			 * memory is > 0) but has no FL (forced-load, Init)
			 * memory. Thus:
			 *
			 * 1.	The total-size in the corresponding FL block
			 *	of the ILT client is set to 0 - no ILT lines
			 *	are provisioned and no ILT memory allocated.
			 *
			 * 2.	The start-line of said block is set to the
			 *	start line of the matching working memory
			 *	block in the ILT client. This is later used to
			 *	configure the CDU segment offset registers and
			 *	results in FL commands for TIDs of this
			 *	segment behaving as regular load commands
			 *	(loading TIDs from the working memory).
			 */
			line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
			continue;
		}
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}
	p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

	/* CDUT VF */
	p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
	if (p_seg && p_seg->count) {
		/* Strictly speaking we need to iterate over all VF
		 * task segment types, but a VF has only 1 segment
		 */

		/* 'working' memory */
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);

		/* 'init' memory */
		p_blk =
		    qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
		if (!p_seg->has_fl_mem) {
			/* see comment above */
			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
		} else {
			task_size = p_mngr->task_type_size[p_seg->type];
			qed_ilt_cli_blk_fill(p_cli, p_blk,
					     curr_line, total, task_size);
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
		p_cli->vf_total_lines = curr_line -
		    p_cli->vf_blks[0].start_line;

		/* Now for the rest of the VFs */
		for (i = 1; i < p_mngr->vf_count; i++) {
			p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);

			p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
	}

	/* QM */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	total = qed_qm_pf_mem_size(qm_iids.cids,
				   qm_iids.vf_cids, qm_iids.tids,
				   p_hwfn->qm_info.num_pqs,
				   p_hwfn->qm_info.num_vf_pqs);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_ILT,
		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids,
		   qm_iids.vf_cids,
		   qm_iids.tids,
		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

	qed_ilt_cli_blk_fill(p_cli, p_blk,
			     curr_line, total * 0x1000,
			     QM_PQ_ELEMENT_SIZE);

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* SRC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
	qed_cxt_src_iids(p_mngr, &src_iids);

	/* Both the PF and VFs searcher connections are stored in the per PF
	 * database. Thus sum the PF searcher cids and all the VFs searcher
	 * cids.
	 */
	total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (total) {
		u32 local_max = max_t(u32, total,
				      SRC_MIN_NUM_ELEMS);

		total = roundup_pow_of_two(local_max);

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * sizeof(struct src_ent),
				     sizeof(struct src_ent));

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_SRC);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM VF */
	total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);

		p_cli->vf_total_lines = curr_line - p_blk->start_line;
		for (i = 1; i < p_mngr->vf_count; i++)
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_TM);
	}

	/* TSDM (SRQ CONTEXT) */
	total = qed_cxt_get_total_srq_count(p_hwfn);

	if (total) {
		p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TSDM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	*line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;

	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, QED_ILT))
		return -EINVAL;

	return 0;
}

u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
{
	struct qed_ilt_client_cfg *p_cli;
	u32 excess_lines, available_lines;
	struct qed_cxt_mngr *p_mngr;
	u32 ilt_page_size, elem_size;
	struct qed_tid_seg *p_seg;
	int i;

	available_lines = RESC_NUM(p_hwfn, QED_ILT);
	excess_lines = used_lines - available_lines;

	if (!excess_lines)
		return 0;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		return 0;

	p_mngr = p_hwfn->p_cxt_mngr;
	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		elem_size = p_mngr->task_type_size[p_seg->type];
		if (!elem_size)
			continue;

		return (ilt_page_size / elem_size) * excess_lines;
	}

	DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
	return 0;
}

static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
	struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
	u32 i;

	if (!p_t2 || !p_t2->dma_mem)
		return;

	for (i = 0; i < p_t2->num_pages; i++)
		if (p_t2->dma_mem[i].virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_t2->dma_mem[i].size,
					  p_t2->dma_mem[i].virt_addr,
					  p_t2->dma_mem[i].phys_addr);

	kfree(p_t2->dma_mem);
	p_t2->dma_mem = NULL;
}

static int
qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
		       struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
{
	void **p_virt;
	u32 size, i;

	if (!p_t2 || !p_t2->dma_mem)
		return -EINVAL;

	for (i = 0; i < p_t2->num_pages; i++) {
		size = min_t(u32, total_size, page_size);
		p_virt = &p_t2->dma_mem[i].virt_addr;

		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					     size,
					     &p_t2->dma_mem[i].phys_addr,
					     GFP_KERNEL);
		if (!p_t2->dma_mem[i].virt_addr)
			return -ENOMEM;

		memset(*p_virt, 0, size);
		p_t2->dma_mem[i].size = size;
		total_size -= size;
	}

	return 0;
}

static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_num, total_size, ent_per_page, psz, i;
	struct phys_mem_desc *p_t2_last_page;
	struct qed_ilt_client_cfg *p_src;
	struct qed_src_iids src_iids;
	struct qed_src_t2 *p_t2;
	int rc;

	memset(&src_iids, 0, sizeof(src_iids));

	/* if the SRC ILT client is inactive - there are no connections
	 * requiring the searcher; leave.
	 */
	p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
	if (!p_src->active)
		return 0;

	qed_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	total_size = conn_num * sizeof(struct src_ent);

	/* use the same page size as the SRC ILT client */
	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
	p_t2 = &p_mngr->src_t2;
	p_t2->num_pages = DIV_ROUND_UP(total_size, psz);

	/* allocate t2 */
	p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
				GFP_KERNEL);
	if (!p_t2->dma_mem) {
		DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
		rc = -ENOMEM;
		goto t2_fail;
	}

	rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
	if (rc)
		goto t2_fail;

	/* Set the t2 pointers */

	/* entries per page - must be a power of two */
	ent_per_page = psz / sizeof(struct src_ent);

	p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;

	p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
	p_t2->last_free = (u64)p_t2_last_page->phys_addr +
	    ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);

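	/* Chain the free list: each entry points at the next entry in its
	 * page, the last entry of a page points at the first entry of the
	 * following page, and the very last entry is terminated with 0.
	 */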
	for (i = 0; i < p_t2->num_pages; i++) {
		u32 ent_num = min_t(u32,
				    ent_per_page,
				    conn_num);
		struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
		u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
		u32 j;

		for (j = 0; j < ent_num - 1; j++) {
			val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
			entries[j].next = cpu_to_be64(val);
		}

		if (i < p_t2->num_pages - 1)
			val = (u64)p_t2->dma_mem[i + 1].phys_addr;
		else
			val = 0;
		entries[j].next = cpu_to_be64(val);

		conn_num -= ent_num;
	}

	return 0;

t2_fail:
	qed_cxt_src_t2_free(p_hwfn);
	return rc;
}

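/* Iterate only over active ILT clients; the dangling 'else' keeps the
 * construct usable as a plain single-statement loop header at call sites.
 */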
#define for_each_ilt_valid_client(pos, clients)	\
	for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)	\
		if (!clients[pos].active) {	\
			continue;		\
		} else				\

/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
	u32 size = 0;
	u32 i;

	for_each_ilt_valid_client(i, ilt_clients)
	    size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);

	return size;
}

static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 ilt_size, i;

	ilt_size = qed_cxt_ilt_shadow_size(p_cli);

	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
		struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];

		if (p_dma->virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_dma->size, p_dma->virt_addr,
					  p_dma->phys_addr);
		p_dma->virt_addr = NULL;
	}
	kfree(p_mngr->ilt_shadow);
}

static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
			     struct qed_ilt_cli_blk *p_blk,
			     enum ilt_clients ilt_client,
			     u32 start_line_offset)
{
	struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
	u32 lines, line, sz_left, lines_to_skip = 0;

	/* Special handling for RoCE that supports dynamic allocation */
	if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
		return 0;

	lines_to_skip = p_blk->dynamic_line_cnt;

	if (!p_blk->total_size)
		return 0;

	sz_left = p_blk->total_size;
	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
	line = p_blk->start_line + start_line_offset -
	    p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

	for (; lines; lines--) {
		dma_addr_t p_phys;
		void *p_virt;
		u32 size;

		size = min_t(u32, sz_left, p_blk->real_size_in_page);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		ilt_shadow[line].phys_addr = p_phys;
		ilt_shadow[line].virt_addr = p_virt;
		ilt_shadow[line].size = size;

		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
			    line, (u64)p_phys, p_virt, size);

		sz_left -= size;
		line++;
	}

	return 0;
}

static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;
	u32 size, i, j, k;
	int rc;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct phys_mem_desc)));

	for_each_ilt_valid_client(i, clients) {
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
				if (rc)
					goto ilt_shadow_fail;
			}
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
	return rc;
}

static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		kfree(p_mngr->acquired[type].cid_map);
		p_mngr->acquired[type].max_count = 0;
		p_mngr->acquired[type].start_cid = 0;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			kfree(p_mngr->acquired_vf[type][vf].cid_map);
			p_mngr->acquired_vf[type][vf].max_count = 0;
			p_mngr->acquired_vf[type][vf].start_cid = 0;
		}
	}
}

static int
qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
			 u32 type,
			 u32 cid_start,
			 u32 cid_count, struct qed_cid_acquired_map *p_map)
{
	u32 size;

	if (!cid_count)
		return 0;

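	/* Size the bitmap to hold one bit per cid, rounded up to whole
	 * unsigned longs (equivalent to BITS_TO_LONGS(cid_count) *
	 * sizeof(unsigned long)).
	 */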
	size = DIV_ROUND_UP(cid_count,
			    sizeof(unsigned long) * BITS_PER_BYTE) *
	       sizeof(unsigned long);
	p_map->cid_map = kzalloc(size, GFP_KERNEL);
	if (!p_map->cid_map)
		return -ENOMEM;

	p_map->max_count = cid_count;
	p_map->start_cid = cid_start;

	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
		   "Type %08x start: %08x count %08x\n",
		   type, p_map->start_cid, p_map->max_count);

	return 0;
}

static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 start_cid = 0, vf_start_cid = 0;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
		struct qed_cid_acquired_map *p_map;

		/* Handle PF maps */
		p_map = &p_mngr->acquired[type];
		if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
					     p_cfg->cid_count, p_map))
			goto cid_map_fail;

		/* Handle VF maps */
		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			if (qed_cid_map_alloc_single(p_hwfn, type,
						     vf_start_cid,
						     p_cfg->cids_per_vf, p_map))
				goto cid_map_fail;
		}

		start_cid += p_cfg->cid_count;
		vf_start_cid += p_cfg->cids_per_vf;
	}

	return 0;

cid_map_fail:
	qed_cid_map_free(p_hwfn);
	return -ENOMEM;
}

int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	u32 i;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
	if (!p_mngr)
		return -ENOMEM;

	/* Initialize ILT client registers */
	clients = p_mngr->clients;
	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
	clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
	clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

	clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
	clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
	clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
	/* default ILT page size for all clients is 64K */
	for (i = 0; i < MAX_ILT_CLIENTS; i++)
		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

	p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);

	/* Initialize task sizes */
	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);

	if (p_hwfn->cdev->p_iov_info) {
		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
		p_mngr->first_vf_in_pf =
			p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	}
	/* Initialize the dynamic ILT allocation mutex */
	mutex_init(&p_mngr->mutex);

	/* Set the cxt manager pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;

	return 0;
}

int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate the T2 table */
	rc = qed_cxt_src_t2_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
}

void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_cxt_mngr)
		return;

	qed_cid_map_free(p_hwfn);
	qed_cxt_src_t2_free(p_hwfn);
	qed_ilt_shadow_free(p_hwfn);
	kfree(p_hwfn->p_cxt_mngr);

	p_hwfn->p_cxt_mngr = NULL;
}

void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map;
	struct qed_conn_type_cfg *p_cfg;
	int type;
	u32 len;

	/* Reset acquired cids */
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 vf;

		p_cfg = &p_mngr->conn_cfg[type];
		if (p_cfg->cid_count) {
			p_map = &p_mngr->acquired[type];
			len = DIV_ROUND_UP(p_map->max_count,
					   sizeof(unsigned long) *
					   BITS_PER_BYTE) *
			      sizeof(unsigned long);
			memset(p_map->cid_map, 0, len);
		}

		if (!p_cfg->cids_per_vf)
			continue;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			len = DIV_ROUND_UP(p_map->max_count,
					   sizeof(unsigned long) *
					   BITS_PER_BYTE) *
			      sizeof(unsigned long);
			memset(p_map->cid_map, 0, len);
		}
	}
}

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK		\
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
	 CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK		       \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
	 CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK				 \
	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK		\
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
	 CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK		       \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
	 CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK				 \
	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE1_NCIB_SHIFT)

static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

	/* CDUC - connection configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	cxt_size = CONN_CXT_SIZE(p_hwfn);
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-0 tasks configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-1 tasks configuration */
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
}

/* CDU PF */
#define CDU_SEG_REG_TYPE_SHIFT		CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
#define CDU_SEG_REG_TYPE_MASK		0x1
#define CDU_SEG_REG_OFFSET_SHIFT	0
#define CDU_SEG_REG_OFFSET_MASK		CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK

static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_tid_seg *p_seg;
	u32 cdu_seg_params, offset;
	int i;

	static const u32 rt_type_offset_arr[] = {
		CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	static const u32 rt_type_offset_fl_arr[] = {
		CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];

	/* There are initializations only for CDUT during the PF phase */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg)
			continue;

		/* Note: start_line is already adjusted for the CDU
		 * segment register granularity, so we just need to
		 * divide. Adjustment is implicit as we assume ILT
		 * Page size is larger than 32K!
		 */
1418                 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1419                           (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1420                            p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1421
1422                 cdu_seg_params = 0;
1423                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1424                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1425                 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1426
1427                 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1428                           (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1429                            p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1430
1431                 cdu_seg_params = 0;
1432                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1433                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1434                 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1435         }
1436 }
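
/* Editor's sketch: the segment offset programmed above is the segment's
 * byte distance from the client's first ILT line, expressed in
 * CDUT_SEG_ALIGNMET_IN_BYTES (32KB) units. A hypothetical standalone
 * form of the same computation:
 */
static u32 example_cdut_seg_offset(u32 page_bytes, u32 seg_start_line,
				   u32 cli_first_line)
{
	/* e.g. 64KB pages, segment 3 lines in: (65536 * 3) / 32768 = 6 */
	return (page_bytes * (seg_start_line - cli_first_line)) /
	       CDUT_SEG_ALIGNMET_IN_BYTES;
}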
1437
1438 void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
1439                     struct qed_ptt *p_ptt, bool is_pf_loading)
1440 {
1441         struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1442         struct qed_qm_pf_rt_init_params params;
1443         struct qed_qm_iids iids;
1444
1445         memset(&iids, 0, sizeof(iids));
1446         qed_cxt_qm_iids(p_hwfn, &iids);
1447
1448         memset(&params, 0, sizeof(params));
1449         params.port_id = p_hwfn->port_id;
1450         params.pf_id = p_hwfn->rel_pf_id;
1451         params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
1452         params.is_pf_loading = is_pf_loading;
1453         params.num_pf_cids = iids.cids;
1454         params.num_vf_cids = iids.vf_cids;
1455         params.num_tids = iids.tids;
1456         params.start_pq = qm_info->start_pq;
1457         params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
1458         params.num_vf_pqs = qm_info->num_vf_pqs;
1459         params.start_vport = qm_info->start_vport;
1460         params.num_vports = qm_info->num_vports;
1461         params.pf_wfq = qm_info->pf_wfq;
1462         params.pf_rl = qm_info->pf_rl;
1463         params.pq_params = qm_info->qm_pq_params;
1464         params.vport_params = qm_info->qm_vport_params;
1465
1466         qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
1467 }
1468
1469 /* CM PF */
1470 static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
1471 {
1472         /* XCM pure-LB queue */
1473         STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1474                      qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
1475 }
1476
1477 /* DQ PF */
1478 static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
1479 {
1480         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1481         u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
1482
1483         dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1484         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1485
1486         dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1487         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1488
1489         dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1490         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1491
1492         dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1493         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1494
1495         dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1496         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1497
1498         dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1499         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1500
1501         dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1502         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1503
1504         dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1505         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1506
1507         dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1508         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1509
1510         dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1511         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1512
1513         dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1514         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
1515
1516         dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1517         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1518
1519         /* Connection types 6 & 7 are not in use, yet they must be configured
1520          * with the highest possible connection count. Leaving them unconfigured
1521          * means the defaults are used, and with a large number of cids a bug
1522          * may occur if the defaults are smaller than dq_pf_max_cid /
1523          * dq_vf_max_cid.
1524          */
1525         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1526         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1527
1528         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1529         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
1530 }
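
/* Editor's sketch: the unrolled stores above could equally use offset
 * tables, mirroring the rt_type_offset_arr[] idiom in qed_cdu_init_pf().
 * A hypothetical alternative, not the driver's code:
 */
static void example_dq_init_pf(struct qed_hwfn *p_hwfn)
{
	static const u32 pf_offs[] = {
		DORQ_REG_PF_MAX_ICID_0_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_1_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_2_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_3_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_4_RT_OFFSET,
		DORQ_REG_PF_MAX_ICID_5_RT_OFFSET,
	};
	static const u32 vf_offs[] = {
		DORQ_REG_VF_MAX_ICID_0_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_1_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_2_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_3_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_4_RT_OFFSET,
		DORQ_REG_VF_MAX_ICID_5_RT_OFFSET,
	};
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 i, pf_max = 0, vf_max = 0;

	for (i = 0; i < ARRAY_SIZE(pf_offs); i++) {
		pf_max += p_mngr->conn_cfg[i].cid_count >> DQ_RANGE_SHIFT;
		vf_max += p_mngr->conn_cfg[i].cids_per_vf >> DQ_RANGE_SHIFT;
		STORE_RT_REG(p_hwfn, pf_offs[i], pf_max);
		STORE_RT_REG(p_hwfn, vf_offs[i], vf_max);
	}
}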
1531
1532 static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
1533 {
1534         struct qed_ilt_client_cfg *ilt_clients;
1535         int i;
1536
1537         ilt_clients = p_hwfn->p_cxt_mngr->clients;
1538         for_each_ilt_valid_client(i, ilt_clients) {
1539                 STORE_RT_REG(p_hwfn,
1540                              ilt_clients[i].first.reg,
1541                              ilt_clients[i].first.val);
1542                 STORE_RT_REG(p_hwfn,
1543                              ilt_clients[i].last.reg, ilt_clients[i].last.val);
1544                 STORE_RT_REG(p_hwfn,
1545                              ilt_clients[i].p_size.reg,
1546                              ilt_clients[i].p_size.val);
1547         }
1548 }
1549
1550 static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
1551 {
1552         struct qed_ilt_client_cfg *p_cli;
1553         u32 blk_factor;
1554
1555         /* For simplicity we set the 'block' to be an ILT page */
1556         if (p_hwfn->cdev->p_iov_info) {
1557                 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1558
1559                 STORE_RT_REG(p_hwfn,
1560                              PSWRQ2_REG_VF_BASE_RT_OFFSET,
1561                              p_iov->first_vf_in_pf);
1562                 STORE_RT_REG(p_hwfn,
1563                              PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1564                              p_iov->first_vf_in_pf + p_iov->total_vfs);
1565         }
1566
1567         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1568         blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1569         if (p_cli->active) {
1570                 STORE_RT_REG(p_hwfn,
1571                              PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1572                              blk_factor);
1573                 STORE_RT_REG(p_hwfn,
1574                              PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1575                              p_cli->pf_total_lines);
1576                 STORE_RT_REG(p_hwfn,
1577                              PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1578                              p_cli->vf_total_lines);
1579         }
1580
1581         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1582         blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1583         if (p_cli->active) {
1584                 STORE_RT_REG(p_hwfn,
1585                              PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1586                              blk_factor);
1587                 STORE_RT_REG(p_hwfn,
1588                              PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1589                              p_cli->pf_total_lines);
1590                 STORE_RT_REG(p_hwfn,
1591                              PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1592                              p_cli->vf_total_lines);
1593         }
1594
1595         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1596         blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1597         if (p_cli->active) {
1598                 STORE_RT_REG(p_hwfn,
1599                              PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1600                 STORE_RT_REG(p_hwfn,
1601                              PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1602                              p_cli->pf_total_lines);
1603                 STORE_RT_REG(p_hwfn,
1604                              PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1605                              p_cli->vf_total_lines);
1606         }
1607 }
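
/* Editor's note: blk_factor above is log2 of the ILT page size in KB
 * units; e.g. a 64KB page yields ilog2(65536 >> 10) = ilog2(64) = 6.
 */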
1608
1609 /* ILT (PSWRQ2) PF */
1610 static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
1611 {
1612         struct qed_ilt_client_cfg *clients;
1613         struct qed_cxt_mngr *p_mngr;
1614         struct phys_mem_desc *p_shdw;
1615         u32 line, rt_offst, i;
1616
1617         qed_ilt_bounds_init(p_hwfn);
1618         qed_ilt_vf_bounds_init(p_hwfn);
1619
1620         p_mngr = p_hwfn->p_cxt_mngr;
1621         p_shdw = p_mngr->ilt_shadow;
1622         clients = p_hwfn->p_cxt_mngr->clients;
1623
1624         for_each_ilt_valid_client(i, clients) {
1625                 /* The client's first val and the RT array are absolute;
1626                  * ILT shadow lines are relative.
1627                  */
1628                 line = clients[i].first.val - p_mngr->pf_start_line;
1629                 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1630                            clients[i].first.val * ILT_ENTRY_IN_REGS;
1631
1632                 for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1633                      line++, rt_offst += ILT_ENTRY_IN_REGS) {
1634                         u64 ilt_hw_entry = 0;
1635
1636                         /* virt_addr may be NULL in case of dynamic
1637                          * allocation
1638                          */
1639                         if (p_shdw[line].virt_addr) {
1640                                 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1641                                 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1642                                           (p_shdw[line].phys_addr >> 12));
1643
1644                                 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1645                                            "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
1646                                            rt_offst, line, i,
1647                                            (u64)(p_shdw[line].phys_addr >> 12));
1648                         }
1649
1650                         STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1651                 }
1652         }
1653 }
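
/* Editor's sketch: building a single ILT entry as done in the loop
 * above. The physical address is stored in 4KB units (hence the >> 12)
 * and the valid bit sits at bit 52, per the ILT_ENTRY_* definitions at
 * the top of this file. Hypothetical helper, not part of the driver:
 */
static u64 example_ilt_entry(dma_addr_t phys)
{
	u64 entry = 0;

	SET_FIELD(entry, ILT_ENTRY_VALID, 1ULL);
	SET_FIELD(entry, ILT_ENTRY_PHY_ADDR, (u64)phys >> 12);

	return entry;
}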
1654
1655 /* SRC (Searcher) PF */
1656 static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1657 {
1658         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1659         u32 rounded_conn_num, conn_num, conn_max;
1660         struct qed_src_iids src_iids;
1661
1662         memset(&src_iids, 0, sizeof(src_iids));
1663         qed_cxt_src_iids(p_mngr, &src_iids);
1664         conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1665         if (!conn_num)
1666                 return;
1667
1668         conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1669         rounded_conn_num = roundup_pow_of_two(conn_max);
1670
1671         STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1672         STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1673                      ilog2(rounded_conn_num));
1674
1675         STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1676                          p_hwfn->p_cxt_mngr->first_free);
1677         STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1678                          p_hwfn->p_cxt_mngr->last_free);
1679 }
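
/* Editor's note: e.g. conn_num = 1000 gives rounded_conn_num =
 * roundup_pow_of_two(1000) = 1024, so the searcher is programmed with
 * ilog2(1024) = 10 hash bits; SRC_MIN_NUM_ELEMS (256) keeps small
 * configurations at a floor of 8 hash bits.
 */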
1680
1681 /* Timers PF */
1682 #define TM_CFG_NUM_IDS_SHIFT            0
1683 #define TM_CFG_NUM_IDS_MASK             0xFFFFULL
1684 #define TM_CFG_PRE_SCAN_OFFSET_SHIFT    16
1685 #define TM_CFG_PRE_SCAN_OFFSET_MASK     0x1FFULL
1686 #define TM_CFG_PARENT_PF_SHIFT          25
1687 #define TM_CFG_PARENT_PF_MASK           0x7ULL
1688
1689 #define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT  30
1690 #define TM_CFG_CID_PRE_SCAN_ROWS_MASK   0x1FFULL
1691
1692 #define TM_CFG_TID_OFFSET_SHIFT         30
1693 #define TM_CFG_TID_OFFSET_MASK          0x7FFFFULL
1694 #define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT  49
1695 #define TM_CFG_TID_PRE_SCAN_ROWS_MASK   0x1FFULL
1696
1697 static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1698 {
1699         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1700         u32 active_seg_mask = 0, tm_offset, rt_reg;
1701         struct qed_tm_iids tm_iids;
1702         u64 cfg_word;
1703         u8 i;
1704
1705         memset(&tm_iids, 0, sizeof(tm_iids));
1706         qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
1707
1708         /* @@@TBD No pre-scan for now */
1709
1710         /* Note: We assume consecutive VFs for a PF */
1711         for (i = 0; i < p_mngr->vf_count; i++) {
1712                 cfg_word = 0;
1713                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1714                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1715                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1716                 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1717                 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1718                     (sizeof(cfg_word) / sizeof(u32)) *
1719                     (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1720                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1721         }
1722
1723         cfg_word = 0;
1724         SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1725         SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1726         SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);       /* n/a for PF */
1727         SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);       /* scan all   */
1728
1729         rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1730             (sizeof(cfg_word) / sizeof(u32)) *
1731             (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1732         STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1733
1734         /* enable scan */
1735         STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1736                      tm_iids.pf_cids ? 0x1 : 0x0);
1737
1738         /* @@@TBD how to enable the scan for the VFs */
1739
1740         tm_offset = tm_iids.per_vf_cids;
1741
1742         /* Note: We assume consecutive VFs for a PF */
1743         for (i = 0; i < p_mngr->vf_count; i++) {
1744                 cfg_word = 0;
1745                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1746                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1747                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1748                 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1749                 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1750
1751                 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1752                     (sizeof(cfg_word) / sizeof(u32)) *
1753                     (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1754
1755                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1756         }
1757
1758         tm_offset = tm_iids.pf_cids;
1759         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1760                 cfg_word = 0;
1761                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1762                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1763                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1764                 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1765                 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1766
1767                 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1768                     (sizeof(cfg_word) / sizeof(u32)) *
1769                     (NUM_OF_VFS(p_hwfn->cdev) +
1770                      p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1771
1772                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1773                 active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
1774
1775                 tm_offset += tm_iids.pf_tids[i];
1776         }
1777
1778         if (QED_IS_RDMA_PERSONALITY(p_hwfn))
1779                 active_seg_mask = 0;
1780
1781         STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1782
1783         /* @@@TBD how to enable the scan for the VFs */
1784 }
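
/* Editor's sketch: each TM config entry is one 64-bit word stored as
 * two u32 RT registers, hence the sizeof(cfg_word) / sizeof(u32) stride
 * above. A hypothetical standalone builder for a VF connection entry:
 */
static u64 example_tm_vf_conn_cfg(u16 num_ids, u8 parent_pf)
{
	u64 cfg_word = 0;

	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, num_ids);
	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, parent_pf);
	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);

	return cfg_word;
}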
1785
1786 static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
1787 {
1788         if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
1789             p_hwfn->pf_params.fcoe_pf_params.is_target)
1790                 STORE_RT_REG(p_hwfn,
1791                              PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
1792 }
1793
1794 static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
1795 {
1796         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1797         struct qed_conn_type_cfg *p_fcoe;
1798         struct qed_tid_seg *p_tid;
1799
1800         p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1801
1802         /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
1803         if (!p_fcoe->cid_count)
1804                 return;
1805
1806         p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
1807         if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
1808                 STORE_RT_REG_AGG(p_hwfn,
1809                                  PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
1810                                  p_tid->count);
1811         } else {
1812                 STORE_RT_REG_AGG(p_hwfn,
1813                                  PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1814                                  p_tid->count);
1815         }
1816 }
1817
1818 void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
1819 {
1820         qed_cdu_init_common(p_hwfn);
1821         qed_prs_init_common(p_hwfn);
1822 }
1823
1824 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1825 {
1826         qed_qm_init_pf(p_hwfn, p_ptt, true);
1827         qed_cm_init_pf(p_hwfn);
1828         qed_dq_init_pf(p_hwfn);
1829         qed_cdu_init_pf(p_hwfn);
1830         qed_ilt_init_pf(p_hwfn);
1831         qed_src_init_pf(p_hwfn);
1832         qed_tm_init_pf(p_hwfn);
1833         qed_prs_init_pf(p_hwfn);
1834 }
1835
1836 int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1837                          enum protocol_type type, u32 *p_cid, u8 vfid)
1838 {
1839         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1840         struct qed_cid_acquired_map *p_map;
1841         u32 rel_cid;
1842
1843         if (type >= MAX_CONN_TYPES) {
1844                 DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
1845                 return -EINVAL;
1846         }
1847
1848         if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
1849                 DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
1850                 return -EINVAL;
1851         }
1852
1853         /* Determine the right map to take this CID from */
1854         if (vfid == QED_CXT_PF_CID)
1855                 p_map = &p_mngr->acquired[type];
1856         else
1857                 p_map = &p_mngr->acquired_vf[type][vfid];
1858
1859         if (!p_map->cid_map) {
1860                 DP_NOTICE(p_hwfn, "CID map for protocol %d not allocated", type);
1861                 return -EINVAL;
1862         }
1863
1864         rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
1865
1866         if (rel_cid >= p_map->max_count) {
1867                 DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
1868                 return -EINVAL;
1869         }
1870
1871         __set_bit(rel_cid, p_map->cid_map);
1872
1873         *p_cid = rel_cid + p_map->start_cid;
1874
1875         DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1876                    "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
1877                    *p_cid, rel_cid, vfid, type);
1878
1879         return 0;
1880 }
1881
1882 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1883                         enum protocol_type type, u32 *p_cid)
1884 {
1885         return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
1886 }
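
/* Editor's sketch: a typical (hypothetical) caller flow for the CID
 * bitmap allocator above - acquire a PF-owned ETH CID, program its
 * context, then return it through qed_cxt_release_cid().
 */
static int example_use_eth_cid(struct qed_hwfn *p_hwfn)
{
	u32 cid;
	int rc;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid);
	if (rc)
		return rc;

	/* ... configure the connection context for 'cid' ... */

	qed_cxt_release_cid(p_hwfn, cid);
	return 0;
}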
1887
1888 static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
1889                                       u32 cid,
1890                                       u8 vfid,
1891                                       enum protocol_type *p_type,
1892                                       struct qed_cid_acquired_map **pp_map)
1893 {
1894         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1895         u32 rel_cid;
1896
1897         /* Iterate over protocols and find matching cid range */
1898         for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
1899                 if (vfid == QED_CXT_PF_CID)
1900                         *pp_map = &p_mngr->acquired[*p_type];
1901                 else
1902                         *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
1903
1904                 if (!((*pp_map)->cid_map))
1905                         continue;
1906                 if (cid >= (*pp_map)->start_cid &&
1907                     cid < (*pp_map)->start_cid + (*pp_map)->max_count)
1908                         break;
1909         }
1910
1911         if (*p_type == MAX_CONN_TYPES) {
1912                 DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
1913                 goto fail;
1914         }
1915
1916         rel_cid = cid - (*pp_map)->start_cid;
1917         if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
1918                 DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired",
1919                           cid, vfid);
1920                 goto fail;
1921         }
1922
1923         return true;
1924 fail:
1925         *p_type = MAX_CONN_TYPES;
1926         *pp_map = NULL;
1927         return false;
1928 }
1929
1930 void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
1931 {
1932         struct qed_cid_acquired_map *p_map = NULL;
1933         enum protocol_type type;
1934         bool b_acquired;
1935         u32 rel_cid;
1936
1937         if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
1938                 DP_NOTICE(p_hwfn,
1939                           "Trying to return incorrect CID belonging to VF %02x\n",
1940                           vfid);
1941                 return;
1942         }
1943
1944         /* Test acquired and find matching per-protocol map */
1945         b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
1946                                                &type, &p_map);
1947
1948         if (!b_acquired)
1949                 return;
1950
1951         rel_cid = cid - p_map->start_cid;
1952         clear_bit(rel_cid, p_map->cid_map);
1953
1954         DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1955                    "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
1956                    cid, rel_cid, vfid, type);
1957 }
1958
1959 void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
1960 {
1961         _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
1962 }
1963
1964 int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
1965 {
1966         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1967         struct qed_cid_acquired_map *p_map = NULL;
1968         u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
1969         enum protocol_type type;
1970         bool b_acquired;
1971
1972         /* Test acquired and find matching per-protocol map */
1973         b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
1974                                                QED_CXT_PF_CID, &type, &p_map);
1975
1976         if (!b_acquired)
1977                 return -EINVAL;
1978
1979         /* set the protocol type */
1980         p_info->type = type;
1981
1982         /* compute context virtual pointer */
1983         hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1984
1985         conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
1986         cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
1987         line = p_info->iid / cxts_per_p;
1988
1989         /* Make sure context is allocated (dynamic allocation) */
1990         if (!p_mngr->ilt_shadow[line].virt_addr)
1991                 return -EINVAL;
1992
1993         p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
1994                         p_info->iid % cxts_per_p * conn_cxt_size;
1995
1996         DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
1997                    "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
1998                    p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
1999
2000         return 0;
2001 }
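
/* Editor's note: e.g. with 64KB ILT pages and a hypothetical 320-byte
 * connection context, cxts_per_p = 65536 / 320 = 204; iid 500 then
 * lives on shadow line 500 / 204 = 2, at byte offset
 * (500 % 204) * 320 = 92 * 320 = 29440 within that page.
 */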
2002
2003 static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
2004                                    struct qed_rdma_pf_params *p_params,
2005                                    u32 num_tasks)
2006 {
2007         u32 num_cons, num_qps;
2008         enum protocol_type proto;
2009
2010         if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
2011                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
2012                            "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
2013                 p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
2014         }
2015
2016         switch (p_hwfn->hw_info.personality) {
2017         case QED_PCI_ETH_IWARP:
2018                 /* Each QP requires one connection */
2019                 num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
2020                 proto = PROTOCOLID_IWARP;
2021                 break;
2022         case QED_PCI_ETH_ROCE:
2023                 num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
2024                 num_cons = num_qps * 2; /* each QP requires two connections */
2025                 proto = PROTOCOLID_ROCE;
2026                 break;
2027         default:
2028                 return;
2029         }
2030
2031         if (num_cons && num_tasks) {
2032                 u32 num_srqs, num_xrc_srqs;
2033
2034                 qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
2035
2036                 /* Deliberately passing ROCE for the task id. This is because
2037                  * iWARP and RoCE share the task id range.
2038                  */
2039                 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
2040                                             QED_CXT_ROCE_TID_SEG, 1,
2041                                             num_tasks, false);
2042
2043                 num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
2044
2045                 /* XRC SRQs populate a single ILT page */
2046                 num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);
2047
2048                 qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
2049         } else {
2050                 DP_INFO(p_hwfn->cdev,
2051                         "RDMA personality used without setting params!\n");
2052         }
2053 }
2054
2055 int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
2056 {
2057         /* Set the number of required CORE connections */
2058         u32 core_cids = 1; /* SPQ */
2059
2060         if (p_hwfn->using_ll2)
2061                 core_cids += 4;
2062         qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
2063
2064         switch (p_hwfn->hw_info.personality) {
2065         case QED_PCI_ETH_RDMA:
2066         case QED_PCI_ETH_IWARP:
2067         case QED_PCI_ETH_ROCE:
2068         {
2069                 qed_rdma_set_pf_params(p_hwfn,
2070                                        &p_hwfn->pf_params.rdma_pf_params,
2071                                        rdma_tasks);
2072
2073                 /* no need for break since RoCE coexists with Ethernet */
2074         }
2075         /* fall through */
2076         case QED_PCI_ETH:
2077         {
2078                 struct qed_eth_pf_params *p_params =
2079                     &p_hwfn->pf_params.eth_pf_params;
2080
2081                 if (!p_params->num_vf_cons)
2082                         p_params->num_vf_cons =
2083                             ETH_PF_PARAMS_VF_CONS_DEFAULT;
2084                 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
2085                                             p_params->num_cons,
2086                                             p_params->num_vf_cons);
2087                 p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
2088                 break;
2089         }
2090         case QED_PCI_FCOE:
2091         {
2092                 struct qed_fcoe_pf_params *p_params;
2093
2094                 p_params = &p_hwfn->pf_params.fcoe_pf_params;
2095
2096                 if (p_params->num_cons && p_params->num_tasks) {
2097                         qed_cxt_set_proto_cid_count(p_hwfn,
2098                                                     PROTOCOLID_FCOE,
2099                                                     p_params->num_cons,
2100                                                     0);
2101
2102                         qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
2103                                                     QED_CXT_FCOE_TID_SEG, 0,
2104                                                     p_params->num_tasks, true);
2105                 } else {
2106                         DP_INFO(p_hwfn->cdev,
2107                                 "FCoE personality used without setting params!\n");
2108                 }
2109                 break;
2110         }
2111         case QED_PCI_ISCSI:
2112         {
2113                 struct qed_iscsi_pf_params *p_params;
2114
2115                 p_params = &p_hwfn->pf_params.iscsi_pf_params;
2116
2117                 if (p_params->num_cons && p_params->num_tasks) {
2118                         qed_cxt_set_proto_cid_count(p_hwfn,
2119                                                     PROTOCOLID_ISCSI,
2120                                                     p_params->num_cons,
2121                                                     0);
2122
2123                         qed_cxt_set_proto_tid_count(p_hwfn,
2124                                                     PROTOCOLID_ISCSI,
2125                                                     QED_CXT_ISCSI_TID_SEG,
2126                                                     0,
2127                                                     p_params->num_tasks,
2128                                                     true);
2129                 } else {
2130                         DP_INFO(p_hwfn->cdev,
2131                                 "iSCSI personality used without setting params!\n");
2132                 }
2133                 break;
2134         }
2135         default:
2136                 return -EINVAL;
2137         }
2138
2139         return 0;
2140 }
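
/* Editor's sketch: a hypothetical caller configuring iSCSI PF params
 * before invoking qed_cxt_set_pf_params(); the counts are illustrative,
 * not recommended values.
 */
static int example_set_iscsi_pf_params(struct qed_hwfn *p_hwfn)
{
	struct qed_iscsi_pf_params *p_params =
	    &p_hwfn->pf_params.iscsi_pf_params;

	p_params->num_cons = 4096;	/* assumed */
	p_params->num_tasks = 2048;	/* assumed */

	return qed_cxt_set_pf_params(p_hwfn, 0);
}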
2141
2142 int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
2143                              struct qed_tid_mem *p_info)
2144 {
2145         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2146         u32 proto, seg, total_lines, i, shadow_line;
2147         struct qed_ilt_client_cfg *p_cli;
2148         struct qed_ilt_cli_blk *p_fl_seg;
2149         struct qed_tid_seg *p_seg_info;
2150
2151         /* Verify the personality */
2152         switch (p_hwfn->hw_info.personality) {
2153         case QED_PCI_FCOE:
2154                 proto = PROTOCOLID_FCOE;
2155                 seg = QED_CXT_FCOE_TID_SEG;
2156                 break;
2157         case QED_PCI_ISCSI:
2158                 proto = PROTOCOLID_ISCSI;
2159                 seg = QED_CXT_ISCSI_TID_SEG;
2160                 break;
2161         default:
2162                 return -EINVAL;
2163         }
2164
2165         p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2166         if (!p_cli->active)
2167                 return -EINVAL;
2168
2169         p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2170         if (!p_seg_info->has_fl_mem)
2171                 return -EINVAL;
2172
2173         p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2174         total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
2175                                    p_fl_seg->real_size_in_page);
2176
2177         for (i = 0; i < total_lines; i++) {
2178                 shadow_line = i + p_fl_seg->start_line -
2179                     p_hwfn->p_cxt_mngr->pf_start_line;
2180                 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
2181         }
2182         p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
2183             p_fl_seg->real_size_in_page;
2184         p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
2185         p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
2186             p_info->tid_size;
2187
2188         return 0;
2189 }
2190
2191 /* This function is very RoCE oriented; if another protocol wants this
2192  * feature in the future, the function will need to be made more generic.
2193  */
2194 int
2195 qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2196                           enum qed_cxt_elem_type elem_type, u32 iid)
2197 {
2198         u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
2199         struct qed_ilt_client_cfg *p_cli;
2200         struct qed_ilt_cli_blk *p_blk;
2201         struct qed_ptt *p_ptt;
2202         dma_addr_t p_phys;
2203         u64 ilt_hw_entry;
2204         void *p_virt;
2205         int rc = 0;
2206
2207         switch (elem_type) {
2208         case QED_ELEM_CXT:
2209                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2210                 elem_size = CONN_CXT_SIZE(p_hwfn);
2211                 p_blk = &p_cli->pf_blks[CDUC_BLK];
2212                 break;
2213         case QED_ELEM_SRQ:
2214                 /* The first ILT page is not used for regular SRQs. Skip it. */
2215                 iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
2216                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2217                 elem_size = SRQ_CXT_SIZE;
2218                 p_blk = &p_cli->pf_blks[SRQ_BLK];
2219                 break;
2220         case QED_ELEM_XRC_SRQ:
2221                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2222                 elem_size = XRC_SRQ_CXT_SIZE;
2223                 p_blk = &p_cli->pf_blks[SRQ_BLK];
2224                 break;
2225         case QED_ELEM_TASK:
2226                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2227                 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2228                 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2229                 break;
2230         default:
2231                 DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
2232                 return -EINVAL;
2233         }
2234
2235         /* Calculate line in ilt */
2236         hw_p_size = p_cli->p_size.val;
2237         elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2238         line = p_blk->start_line + (iid / elems_per_p);
2239         shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2240
2241         /* If line is already allocated, do nothing, otherwise allocate it and
2242          * write it to the PSWRQ2 registers.
2243          * This section can be run in parallel from different contexts and thus
2244          * a mutex protection is needed.
2245          */
2246
2247         mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
2248
2249         if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
2250                 goto out0;
2251
2252         p_ptt = qed_ptt_acquire(p_hwfn);
2253         if (!p_ptt) {
2254                 DP_NOTICE(p_hwfn,
2255                           "QED_TIME_OUT on ptt acquire - dynamic allocation");
2256                 rc = -EBUSY;
2257                 goto out0;
2258         }
2259
2260         p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2261                                     p_blk->real_size_in_page, &p_phys,
2262                                     GFP_KERNEL);
2263         if (!p_virt) {
2264                 rc = -ENOMEM;
2265                 goto out1;
2266         }
2267
2268         /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
2269          * to compensate for a HW bug, but it is configured even if DIF is not
2270          * enabled. This is harmless and allows us to avoid a dedicated API. We
2271          * configure the field for all of the contexts on the newly allocated
2272          * page.
2273          */
2274         if (elem_type == QED_ELEM_TASK) {
2275                 u32 elem_i;
2276                 u8 *elem_start = (u8 *)p_virt;
2277                 union type1_task_context *elem;
2278
2279                 for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2280                         elem = (union type1_task_context *)elem_start;
2281                         SET_FIELD(elem->roce_ctx.tdif_context.flags1,
2282                                   TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
2283                         elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2284                 }
2285         }
2286
2287         p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
2288         p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
2289         p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2290             p_blk->real_size_in_page;
2291
2292         /* compute absolute offset */
2293         reg_offset = PSWRQ2_REG_ILT_MEMORY +
2294             (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2295
2296         ilt_hw_entry = 0;
2297         SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2298         SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
2299                   (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
2300                    >> 12));
2301
2302         /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2303         qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2304                           reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
2305                           NULL);
2306
2307         if (elem_type == QED_ELEM_CXT) {
2308                 u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2309                     elems_per_p;
2310
2311                 /* Update the relevant register in the parser */
2312                 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2313                        last_cid_allocated - 1);
2314
2315                 if (!p_hwfn->b_rdma_enabled_in_prs) {
2316                         /* Enable RDMA search */
2317                         qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2318                         p_hwfn->b_rdma_enabled_in_prs = true;
2319                 }
2320         }
2321
2322 out1:
2323         qed_ptt_release(p_hwfn, p_ptt);
2324 out0:
2325         mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2326
2327         return rc;
2328 }
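
/* Editor's sketch: a hypothetical caller ensuring the ILT line backing
 * SRQ 'srq_id' is allocated before the SRQ is handed to hardware.
 */
static int example_ensure_srq_ilt(struct qed_hwfn *p_hwfn, u32 srq_id)
{
	return qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_SRQ, srq_id);
}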
2329
2330 /* This function is very RoCE oriented; if another protocol wants this
2331  * feature in the future, the function will need to be made more generic.
2332  */
2333 static int
2334 qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2335                        enum qed_cxt_elem_type elem_type,
2336                        u32 start_iid, u32 count)
2337 {
2338         u32 start_line, end_line, shadow_start_line, shadow_end_line;
2339         u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2340         struct qed_ilt_client_cfg *p_cli;
2341         struct qed_ilt_cli_blk *p_blk;
2342         u32 end_iid = start_iid + count;
2343         struct qed_ptt *p_ptt;
2344         u64 ilt_hw_entry = 0;
2345         u32 i;
2346
2347         switch (elem_type) {
2348         case QED_ELEM_CXT:
2349                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2350                 elem_size = CONN_CXT_SIZE(p_hwfn);
2351                 p_blk = &p_cli->pf_blks[CDUC_BLK];
2352                 break;
2353         case QED_ELEM_SRQ:
2354                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2355                 elem_size = SRQ_CXT_SIZE;
2356                 p_blk = &p_cli->pf_blks[SRQ_BLK];
2357                 break;
2358         case QED_ELEM_TASK:
2359                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2360                 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2361                 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2362                 break;
2363         default:
2364                 DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
2365                 return -EINVAL;
2366         }
2367
2368         /* Calculate line in ilt */
2369         hw_p_size = p_cli->p_size.val;
2370         elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2371         start_line = p_blk->start_line + (start_iid / elems_per_p);
2372         end_line = p_blk->start_line + (end_iid / elems_per_p);
2373         if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2374                 end_line--;
2375
2376         shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2377         shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2378
2379         p_ptt = qed_ptt_acquire(p_hwfn);
2380         if (!p_ptt) {
2381                 DP_NOTICE(p_hwfn,
2382                           "QED_TIME_OUT on ptt acquire - dynamic allocation");
2383                 return -EBUSY;
2384         }
2385
2386         for (i = shadow_start_line; i < shadow_end_line; i++) {
2387                 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
2388                         continue;
2389
2390                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2391                                   p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2392                                   p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
2393                                   p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
2394
2395                 p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
2396                 p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
2397                 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2398
2399                 /* compute absolute offset */
2400                 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2401                     ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2402                      ILT_ENTRY_IN_REGS);
2403
2404                 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2405                  * wide-bus.
2406                  */
2407                 qed_dmae_host2grc(p_hwfn, p_ptt,
2408                                   (u64) (uintptr_t) &ilt_hw_entry,
2409                                   reg_offset,
2410                                   sizeof(ilt_hw_entry) / sizeof(u32),
2411                                   NULL);
2412         }
2413
2414         qed_ptt_release(p_hwfn, p_ptt);
2415
2416         return 0;
2417 }
2418
2419 int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2420 {
2421         int rc;
2422         u32 cid;
2423
2424         /* Free Connection CXT */
2425         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2426                                     qed_cxt_get_proto_cid_start(p_hwfn,
2427                                                                 proto),
2428                                     qed_cxt_get_proto_cid_count(p_hwfn,
2429                                                                 proto, &cid));
2430
2431         if (rc)
2432                 return rc;
2433
2434         /* Free Task CXT ( Intentionally RoCE as task-id is shared between
2435          * RoCE and iWARP )
2436          */
2437         proto = PROTOCOLID_ROCE;
2438         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2439                                     qed_cxt_get_proto_tid_count(p_hwfn, proto));
2440         if (rc)
2441                 return rc;
2442
2443         /* Free TSDM CXT */
2444         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
2445                                     p_hwfn->p_cxt_mngr->xrc_srq_count);
             if (rc)
                     return rc;
2446
2447         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
2448                                     p_hwfn->p_cxt_mngr->xrc_srq_count,
2449                                     p_hwfn->p_cxt_mngr->srq_count);
2450
2451         return rc;
2452 }
2453
2454 int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2455                          u32 tid, u8 ctx_type, void **pp_task_ctx)
2456 {
2457         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2458         struct qed_ilt_client_cfg *p_cli;
2459         struct qed_tid_seg *p_seg_info;
2460         struct qed_ilt_cli_blk *p_seg;
2461         u32 num_tids_per_block;
2462         u32 tid_size, ilt_idx;
2463         u32 total_lines;
2464         u32 proto, seg;
2465
2466         /* Verify the personality */
2467         switch (p_hwfn->hw_info.personality) {
2468         case QED_PCI_FCOE:
2469                 proto = PROTOCOLID_FCOE;
2470                 seg = QED_CXT_FCOE_TID_SEG;
2471                 break;
2472         case QED_PCI_ISCSI:
2473                 proto = PROTOCOLID_ISCSI;
2474                 seg = QED_CXT_ISCSI_TID_SEG;
2475                 break;
2476         default:
2477                 return -EINVAL;
2478         }
2479
2480         p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2481         if (!p_cli->active)
2482                 return -EINVAL;
2483
2484         p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2485
2486         if (ctx_type == QED_CTX_WORKING_MEM) {
2487                 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2488         } else if (ctx_type == QED_CTX_FL_MEM) {
2489                 if (!p_seg_info->has_fl_mem)
2490                         return -EINVAL;
2491                 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2492         } else {
2493                 return -EINVAL;
2494         }
2495         total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2496         tid_size = p_mngr->task_type_size[p_seg_info->type];
2497         num_tids_per_block = p_seg->real_size_in_page / tid_size;
2498
2499         if (total_lines <= tid / num_tids_per_block)
2500                 return -EINVAL;
2501
2502         ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2503                   p_mngr->pf_start_line;
2504         *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
2505                        (tid % num_tids_per_block) * tid_size;
2506
2507         return 0;
2508 }
2509
2510 static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
2511 {
2512         if (p_blk->real_size_in_page == 0)
2513                 return 0;
2514
2515         return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
2516 }
2517
2518 u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
2519 {
2520         struct qed_ilt_client_cfg *p_cli;
2521         struct qed_ilt_cli_blk *p_blk;
2522         u16 i, pages = 0;
2523
2524         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2525         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
2526                 p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
2527                 pages += qed_blk_calculate_pages(p_blk);
2528         }
2529
2530         return pages;
2531 }
2532
2533 u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
2534 {
2535         struct qed_ilt_client_cfg *p_cli;
2536         struct qed_ilt_cli_blk *p_blk;
2537         u16 i, pages = 0;
2538
2539         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2540         for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
2541                 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
2542                 pages += qed_blk_calculate_pages(p_blk);
2543         }
2544
2545         return pages;
2546 }
2547
2548 u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
2549 {
2550         struct qed_ilt_client_cfg *p_cli;
2551         struct qed_ilt_cli_blk *p_blk;
2552         u16 i, pages = 0;
2553
2554         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2555         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
2556                 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
2557                 pages += qed_blk_calculate_pages(p_blk);
2558         }
2559
2560         return pages;
2561 }
2562
2563 u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
2564 {
2565         struct qed_ilt_client_cfg *p_cli;
2566         struct qed_ilt_cli_blk *p_blk;
2567         u16 pages = 0, i;
2568
2569         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2570         for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
2571                 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
2572                 pages += qed_blk_calculate_pages(p_blk);
2573         }
2574
2575         return pages;
2576 }