/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
/* QM constants */
#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT	4
#define DQ_RANGE_ALIGN	BIT(DQ_RANGE_SHIFT)

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS 256

/* Timers constants */
#define TM_SHIFT	7
#define TM_ALIGN	BIT(TM_SHIFT)
#define TM_ELEM_SIZE	4

#define ILT_DEFAULT_HW_P_SIZE	4

#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		(~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4
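/* Layout sketch derived from the masks above (not taken from a HW manual):
 * an ILT entry is one 64-bit word spread over two 4-byte runtime registers.
 * Bits [51:0] hold the physical address right-shifted by 12 (i.e. 4K
 * aligned) and bit 52 is the valid bit. With the default hw_p_size of 4,
 * each ILT page spans ILT_PAGE_IN_BYTES(4) = 1 << 16 = 64K.
 */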
/* connection context union */
union conn_context {
	struct e4_core_conn_context core_ctx;
	struct e4_eth_conn_context eth_ctx;
	struct e4_iscsi_conn_context iscsi_ctx;
	struct e4_fcoe_conn_context fcoe_ctx;
	struct e4_roce_conn_context roce_ctx;
};

/* TYPE-0 task context - iSCSI, FCoE */
union type0_task_context {
	struct e4_iscsi_task_context iscsi_ctx;
	struct e4_fcoe_task_context fcoe_ctx;
};

/* TYPE-1 task context - RoCE */
union type1_task_context {
	struct e4_rdma_task_context roce_ctx;
};

struct src_ent {
	__u8 opaque[56];
	__be64 next;
};

#define CDUT_SEG_ALIGNMET 3	/* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)

/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
static bool src_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_cid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_ROCE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_tid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_FCOE;
}
/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_cdu_iids *iids)
{
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
	}
}
/* counts the iids for the Searcher block configuration */
struct qed_src_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_src_iids *iids)
{
	u32 i;

	for (i = 0; i < MAX_CONN_TYPES; i++) {
		if (!src_proto(i))
			continue;

		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
	}

	/* Add L2 filtering filters in addition */
	iids->pf_cids += p_mngr->arfs_count;
}
181 /* counts the iids for the Timers block configuration */
184 u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
190 static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
191 struct qed_cxt_mngr *p_mngr,
192 struct qed_tm_iids *iids)
194 bool tm_vf_required = false;
195 bool tm_required = false;
198 /* Timers is a special case -> we don't count how many cids require
199 * timers but what's the max cid that will be used by the timer block.
200 * therefore we traverse in reverse order, and once we hit a protocol
201 * that requires the timers memory, we'll sum all the protocols up
204 for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
205 struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
207 if (tm_cid_proto(i) || tm_required) {
208 if (p_cfg->cid_count)
211 iids->pf_cids += p_cfg->cid_count;
214 if (tm_cid_proto(i) || tm_vf_required) {
215 if (p_cfg->cids_per_vf)
216 tm_vf_required = true;
218 iids->per_vf_cids += p_cfg->cids_per_vf;
221 if (tm_tid_proto(i)) {
222 struct qed_tid_seg *segs = p_cfg->tid_seg;
224 /* for each segment there is at most one
225 * protocol for which count is not 0.
227 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
228 iids->pf_tids[j] += segs[j].count;
230 /* The last array elelment is for the VFs. As for PF
231 * segments there can be only one protocol for
232 * which this value is not 0.
234 iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
238 iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
239 iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
240 iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
242 for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
243 iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
244 iids->pf_tids_total += iids->pf_tids[j];
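/* Worked example with illustrative numbers only: cids are allocated
 * consecutively per protocol, and the timer block is indexed by absolute
 * cid. So if the highest-numbered protocol that needs timers has a nonzero
 * cid_count, the cid counts of every lower-numbered protocol are summed
 * into pf_cids as well (then rounded up to TM_ALIGN), even though those
 * lower cids may never arm a timer - the block just needs the range to
 * reach the highest timer-capable cid.
 */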
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_qm_iids *iids)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *segs;
	u32 vf_cids = 0, type, j;
	u32 vf_tids = 0;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->cids += p_mngr->conn_cfg[type].cid_count;
		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

		segs = p_mngr->conn_cfg[type].tid_seg;
		/* for each segment there is at most one
		 * protocol for which count is not 0.
		 */
		for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
			iids->tids += segs[j].count;

		/* The last array element is for the VFs. As for PF
		 * segments there can be only one protocol for
		 * which this value is not 0.
		 */
		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
	}

	iids->vf_cids = vf_cids;
	iids->tids += vf_tids * p_mngr->vf_count;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}
static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
						u32 seg)
{
	struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
	u32 i;

	/* Find the protocol with tid count > 0 for this segment.
	 * Note: there can only be one and this is already validated.
	 */
	for (i = 0; i < MAX_CONN_TYPES; i++)
		if (p_cfg->conn_cfg[i].tid_seg[seg].count)
			return &p_cfg->conn_cfg[i].tid_seg[seg];

	return NULL;
}
static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
				  u32 num_srqs, u32 num_xrc_srqs)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	p_mgr->srq_count = num_srqs;
	p_mgr->xrc_srq_count = num_xrc_srqs;
}
u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
			      enum ilt_clients ilt_client)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];

	return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
}
static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
{
	u32 page_size;

	page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
	return page_size / XRC_SRQ_CXT_SIZE;
}
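/* Example under assumed sizes: with the default 64K TSDM ILT page and,
 * say, a 64-byte rdma_xrc_srq_context, this yields 65536 / 64 = 1024 XRC
 * SRQs per page. The real context size comes from the HSI headers.
 */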
u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	u32 total_srqs;

	total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;

	return total_srqs;
}
/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type type,
					u32 cid_count, u32 vf_cid_cnt)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);

	if (type == PROTOCOLID_ROCE) {
		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
		u32 align = elems_per_page * DQ_RANGE_ALIGN;

		p_conn->cid_count = roundup(p_conn->cid_count, align);
	}
}
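/* Alignment sketch for the RoCE case above, with illustrative sizes: a 64K
 * CDUC page and e.g. a 512-byte connection context give elems_per_page =
 * 128, so align = 128 * DQ_RANGE_ALIGN (16) = 2048 and the RoCE cid range
 * ends exactly on an ILT page boundary (2048 / 128 = 16 whole pages).
 */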
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type, u32 *vf_cid)
{
	if (vf_cid)
		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	u32 cnt = 0;
	int i;

	for (i = 0; i < TASK_SEGMENTS; i++)
		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

	return cnt;
}
static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type proto,
					u8 seg,
					u8 seg_type, u32 count, bool has_fl)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

	p_seg->count = count;
	p_seg->has_fl_mem = has_fl;
	p_seg->type = seg_type;
}
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 start_line, u32 total_size, u32 elem_size)
{
	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	/* verify that it's called only once for each block */
	if (p_blk->total_size)
		return;

	p_blk->total_size = total_size;
	p_blk->real_size_in_page = 0;
	if (elem_size)
		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
	p_blk->start_line = start_line;
}
static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
				 struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 *p_line, enum ilt_clients client_id)
{
	if (!p_blk->total_size)
		return;

	if (!p_cli->active)
		p_cli->first.val = *p_line;

	p_cli->active = true;
	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
	p_cli->last.val = *p_line - 1;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
		   client_id, p_cli->first.val,
		   p_cli->last.val, p_blk->total_size,
		   p_blk->real_size_in_page, p_blk->start_line);
}
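/* Example: a block with total_size = 1M and real_size_in_page = 64K
 * advances *p_line by DIV_ROUND_UP(0x100000, 0x10000) = 16 lines; first/last
 * then track the client's overall [first, last] line window across blocks.
 */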
static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
					enum ilt_clients ilt_client)
{
	u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
	struct qed_ilt_client_cfg *p_cli;
	u32 lines_to_skip = 0;
	u32 cxts_per_p;

	if (ilt_client == ILT_CLI_CDUC) {
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
		    (u32)CONN_CXT_SIZE(p_hwfn);

		lines_to_skip = cid_count / cxts_per_p;
	}

	return lines_to_skip;
}
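/* Example with assumed sizes: 64K CDUC pages holding e.g. 128 contexts
 * each and a RoCE cid_count of 1000 give 1000 / 128 = 7 fully-dynamic
 * lines; the partially used eighth page is still allocated statically.
 */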
static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
						  *p_cli)
{
	p_cli->active = false;
	p_cli->first.val = 0;
	p_cli->last.val = 0;
	return p_cli;
}

static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
{
	p_blk->total_size = 0;
	return p_blk;
}

static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 cli_idx, blk_idx;

	for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
		for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
			clients[cli_idx].pf_blks[blk_idx].total_size = 0;

		for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
			clients[cli_idx].vf_blks[blk_idx].total_size = 0;
	}
}
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 curr_line, total, i, task_size, line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_cdu_iids cdu_iids;
	struct qed_src_iids src_iids;
	struct qed_qm_iids qm_iids;
	struct qed_tm_iids tm_iids;
	struct qed_tid_seg *p_seg;

	memset(&qm_iids, 0, sizeof(qm_iids));
	memset(&cdu_iids, 0, sizeof(cdu_iids));
	memset(&src_iids, 0, sizeof(src_iids));
	memset(&tm_iids, 0, sizeof(tm_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

	/* Reset all ILT blocks at the beginning of ILT computing in order
	 * to prevent memory allocation for irrelevant blocks afterwards.
	 */
	qed_cxt_ilt_blk_reset(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	/* CDUC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);

	curr_line = p_mngr->pf_start_line;

	/* CDUC PF */
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC and QM clients */
	qed_cxt_cdu_iids(p_mngr, &cdu_iids);

	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);

	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
							       ILT_CLI_CDUC);

	/* CDUC VF */
	p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->vf_total_lines = curr_line - p_blk->start_line;

	for (i = 1; i < p_mngr->vf_count; i++)
		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUC);

	/* CDUT PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
	p_cli->first.val = curr_line;

	/* first the 'working' task memory */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}

	/* next the 'init' task memory (forced load memory) */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk =
		    qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);

		if (!p_seg->has_fl_mem) {
			/* The segment is active (total size of 'working'
			 * memory is > 0) but has no FL (forced-load, Init)
			 * memory. Thus:
			 *
			 * 1.   The total-size in the corresponding FL block of
			 *      the ILT client is set to 0 - no ILT lines are
			 *      provisioned and no ILT memory allocated.
			 *
			 * 2.   The start-line of said block is set to the
			 *      start line of the matching working memory
			 *      block in the ILT client. This is later used to
			 *      configure the CDU segment offset registers and
			 *      results in FL commands for TIDs of this
			 *      segment behaving as regular load commands
			 *      (loading TIDs from the working memory).
			 */
			line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
			continue;
		}
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}
	p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

	/* CDUT VF */
	p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
	if (p_seg && p_seg->count) {
		/* Strictly speaking we need to iterate over all VF
		 * task segment types, but a VF has only 1 segment
		 */

		/* 'working' memory */
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);

		/* 'init' memory */
		p_blk =
		    qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
		if (!p_seg->has_fl_mem) {
			/* see comment above */
			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
		} else {
			task_size = p_mngr->task_type_size[p_seg->type];
			qed_ilt_cli_blk_fill(p_cli, p_blk,
					     curr_line, total, task_size);
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
		p_cli->vf_total_lines = curr_line -
		    p_cli->vf_blks[0].start_line;

		/* Now for the rest of the VFs */
		for (i = 1; i < p_mngr->vf_count; i++) {
			p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);

			p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
	}

	/* QM */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	total = qed_qm_pf_mem_size(qm_iids.cids,
				   qm_iids.vf_cids, qm_iids.tids,
				   p_hwfn->qm_info.num_pqs,
				   p_hwfn->qm_info.num_vf_pqs);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

	qed_ilt_cli_blk_fill(p_cli, p_blk,
			     curr_line, total * 0x1000,
			     QM_PQ_ELEMENT_SIZE);

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* SRC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
	qed_cxt_src_iids(p_mngr, &src_iids);

	/* Both the PF and VFs searcher connections are stored in the per PF
	 * database. Thus sum the PF searcher cids and all the VFs searcher
	 * cids.
	 */
	total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (total) {
		u32 local_max = max_t(u32, total,
				      SRC_MIN_NUM_ELEMS);

		total = roundup_pow_of_two(local_max);

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * sizeof(struct src_ent),
				     sizeof(struct src_ent));

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_SRC);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM VF */
	total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);

		p_cli->vf_total_lines = curr_line - p_blk->start_line;
		for (i = 1; i < p_mngr->vf_count; i++)
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_TM);
	}

	/* TSDM (SRQ CONTEXT) */
	total = qed_cxt_get_total_srq_count(p_hwfn);

	if (total) {
		p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TSDM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	*line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;

	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, QED_ILT))
		return -EINVAL;

	return 0;
}
u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
{
	struct qed_ilt_client_cfg *p_cli;
	u32 excess_lines, available_lines;
	struct qed_cxt_mngr *p_mngr;
	u32 ilt_page_size, elem_size;
	struct qed_tid_seg *p_seg;
	int i;

	available_lines = RESC_NUM(p_hwfn, QED_ILT);
	excess_lines = used_lines - available_lines;

	if (!excess_lines)
		return 0;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		return 0;

	p_mngr = p_hwfn->p_cxt_mngr;
	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		elem_size = p_mngr->task_type_size[p_seg->type];
		if (!elem_size)
			continue;

		return (ilt_page_size / elem_size) * excess_lines;
	}

	DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
	return 0;
}
static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
	struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
	u32 i;

	if (!p_t2 || !p_t2->dma_mem)
		return;

	for (i = 0; i < p_t2->num_pages; i++)
		if (p_t2->dma_mem[i].virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_t2->dma_mem[i].size,
					  p_t2->dma_mem[i].virt_addr,
					  p_t2->dma_mem[i].phys_addr);

	kfree(p_t2->dma_mem);
	p_t2->dma_mem = NULL;
}
static int
qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
		       struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
{
	void **p_virt;
	u32 size, i;

	if (!p_t2 || !p_t2->dma_mem)
		return -EINVAL;

	for (i = 0; i < p_t2->num_pages; i++) {
		size = min_t(u32, total_size, page_size);
		p_virt = &p_t2->dma_mem[i].virt_addr;

		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					     size,
					     &p_t2->dma_mem[i].phys_addr,
					     GFP_KERNEL);
		if (!p_t2->dma_mem[i].virt_addr)
			return -ENOMEM;

		memset(*p_virt, 0, size);
		p_t2->dma_mem[i].size = size;
		total_size -= size;
	}

	return 0;
}
static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_num, total_size, ent_per_page, psz, i;
	struct phys_mem_desc *p_t2_last_page;
	struct qed_ilt_client_cfg *p_src;
	struct qed_src_iids src_iids;
	struct qed_src_t2 *p_t2;
	int rc;

	memset(&src_iids, 0, sizeof(src_iids));

	/* if the SRC ILT client is inactive - there are no connections
	 * requiring the searcher, leave.
	 */
	p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
	if (!p_src->active)
		return 0;

	qed_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	total_size = conn_num * sizeof(struct src_ent);

	/* use the same page size as the SRC ILT client */
	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
	p_t2 = &p_mngr->src_t2;
	p_t2->num_pages = DIV_ROUND_UP(total_size, psz);

	/* allocate t2 */
	p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
				GFP_KERNEL);
	if (!p_t2->dma_mem) {
		DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
		rc = -ENOMEM;
		goto t2_fail;
	}

	rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
	if (rc)
		goto t2_fail;

	/* Set the t2 pointers */

	/* entries per page - must be a power of two */
	ent_per_page = psz / sizeof(struct src_ent);

	p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;

	p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
	p_t2->last_free = (u64)p_t2_last_page->phys_addr +
	    ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
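	/* Chaining sketch: each src_ent is 64 bytes (56 opaque bytes plus
	 * the be64 'next' pointer), so with the default 64K page a page
	 * holds 65536 / 64 = 1024 entries - a power of two, as required
	 * above. first_free points at entry 0 of page 0 and last_free at
	 * entry (conn_num - 1) % ent_per_page of the last page; the loop
	 * below links every entry to its successor, crossing page
	 * boundaries via the next page's base address.
	 */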
	for (i = 0; i < p_t2->num_pages; i++) {
		u32 ent_num = min_t(u32, ent_per_page, conn_num);
		struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
		u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
		u32 j;

		for (j = 0; j < ent_num - 1; j++) {
			val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
			entries[j].next = cpu_to_be64(val);
		}

		if (i < p_t2->num_pages - 1)
			val = (u64)p_t2->dma_mem[i + 1].phys_addr;
		else
			val = 0;
		entries[j].next = cpu_to_be64(val);

		conn_num -= ent_num;
	}

	return 0;

t2_fail:
	qed_cxt_src_t2_free(p_hwfn);
	return rc;
}
#define for_each_ilt_valid_client(pos, clients)	\
	for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)	\
		if (!clients[pos].active) {		\
			continue;			\
		} else					\
/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
	u32 size = 0;
	u32 i;

	for_each_ilt_valid_client(i, ilt_clients)
	    size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);

	return size;
}
static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 ilt_size, i;

	ilt_size = qed_cxt_ilt_shadow_size(p_cli);

	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
		struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];

		if (p_dma->virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_dma->size, p_dma->virt_addr,
					  p_dma->phys_addr);
		p_dma->virt_addr = NULL;
	}
	kfree(p_mngr->ilt_shadow);
}
static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
			     struct qed_ilt_cli_blk *p_blk,
			     enum ilt_clients ilt_client,
			     u32 start_line_offset)
{
	struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
	u32 lines, line, sz_left, lines_to_skip = 0;

	/* Special handling for RoCE that supports dynamic allocation */
	if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
		return 0;

	lines_to_skip = p_blk->dynamic_line_cnt;

	if (!p_blk->total_size)
		return 0;

	sz_left = p_blk->total_size;
	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
	line = p_blk->start_line + start_line_offset -
	    p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

	for (; lines; lines--) {
		dma_addr_t p_phys;
		void *p_virt;
		u32 size;

		size = min_t(u32, sz_left, p_blk->real_size_in_page);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		ilt_shadow[line].phys_addr = p_phys;
		ilt_shadow[line].virt_addr = p_virt;
		ilt_shadow[line].size = size;

		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
			   line, (u64)p_phys, p_virt, size);

		sz_left -= size;
		line++;
	}

	return 0;
}
static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;
	u32 size, i, j, k;
	int rc;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct phys_mem_desc)));

	for_each_ilt_valid_client(i, clients) {
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
				if (rc)
					goto ilt_shadow_fail;
			}
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
	return rc;
}
static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		kfree(p_mngr->acquired[type].cid_map);
		p_mngr->acquired[type].max_count = 0;
		p_mngr->acquired[type].start_cid = 0;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			kfree(p_mngr->acquired_vf[type][vf].cid_map);
			p_mngr->acquired_vf[type][vf].max_count = 0;
			p_mngr->acquired_vf[type][vf].start_cid = 0;
		}
	}
}
static int
qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
			 u32 type,
			 u32 cid_start,
			 u32 cid_count, struct qed_cid_acquired_map *p_map)
{
	u32 size;

	if (!cid_count)
		return 0;

	size = DIV_ROUND_UP(cid_count,
			    sizeof(unsigned long) * BITS_PER_BYTE) *
	       sizeof(unsigned long);
	p_map->cid_map = kzalloc(size, GFP_KERNEL);
	if (!p_map->cid_map)
		return -ENOMEM;

	p_map->max_count = cid_count;
	p_map->start_cid = cid_start;

	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
		   "Type %08x start: %08x count %08x\n",
		   type, p_map->start_cid, p_map->max_count);

	return 0;
}
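/* Sizing example: for cid_count = 1000 on a 64-bit kernel the bitmap is
 * DIV_ROUND_UP(1000, 64) = 16 unsigned longs, i.e. 128 bytes, tracking
 * relative cids 0..999 on top of cid_start.
 */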
static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 start_cid = 0, vf_start_cid = 0;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
		struct qed_cid_acquired_map *p_map;

		/* Handle PF maps */
		p_map = &p_mngr->acquired[type];
		if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
					     p_cfg->cid_count, p_map))
			goto cid_map_fail;

		/* Handle VF maps */
		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			if (qed_cid_map_alloc_single(p_hwfn, type,
						     vf_start_cid,
						     p_cfg->cids_per_vf, p_map))
				goto cid_map_fail;
		}

		start_cid += p_cfg->cid_count;
		vf_start_cid += p_cfg->cids_per_vf;
	}

	return 0;

cid_map_fail:
	qed_cid_map_free(p_hwfn);
	return -ENOMEM;
}
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	u32 i;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
	if (!p_mngr)
		return -ENOMEM;

	/* Initialize ILT client registers */
	clients = p_mngr->clients;
	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
	clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
	clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

	clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
	clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
	clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);

	/* default ILT page size for all clients is 64K */
	for (i = 0; i < MAX_ILT_CLIENTS; i++)
		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

	p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);

	/* Initialize task sizes */
	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);

	if (p_hwfn->cdev->p_iov_info) {
		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
		p_mngr->first_vf_in_pf =
			p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	}

	/* Initialize the dynamic ILT allocation mutex */
	mutex_init(&p_mngr->mutex);

	/* Set the cxt mngr pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;

	return 0;
}
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate the T2 table */
	rc = qed_cxt_src_t2_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
}
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_cxt_mngr)
		return;

	qed_cid_map_free(p_hwfn);
	qed_cxt_src_t2_free(p_hwfn);
	qed_ilt_shadow_free(p_hwfn);
	kfree(p_hwfn->p_cxt_mngr);

	p_hwfn->p_cxt_mngr = NULL;
}
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map;
	struct qed_conn_type_cfg *p_cfg;
	int type;
	u32 len;

	/* Reset acquired cids */
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 vf;

		p_cfg = &p_mngr->conn_cfg[type];
		if (p_cfg->cid_count) {
			p_map = &p_mngr->acquired[type];
			len = DIV_ROUND_UP(p_map->max_count,
					   sizeof(unsigned long) *
					   BITS_PER_BYTE) *
			      sizeof(unsigned long);
			memset(p_map->cid_map, 0, len);
		}

		if (!p_cfg->cids_per_vf)
			continue;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			len = DIV_ROUND_UP(p_map->max_count,
					   sizeof(unsigned long) *
					   BITS_PER_BYTE) *
			      sizeof(unsigned long);
			memset(p_map->cid_map, 0, len);
		}
	}
}
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
	 CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
	 CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
	 CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
	 CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE1_NCIB_SHIFT)
static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

	/* CDUC - connection configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	cxt_size = CONN_CXT_SIZE(p_hwfn);
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-0 tasks configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-1 tasks configuration */
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
}
#define CDU_SEG_REG_TYPE_SHIFT		CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
#define CDU_SEG_REG_TYPE_MASK		0x1
#define CDU_SEG_REG_OFFSET_SHIFT	0
#define CDU_SEG_REG_OFFSET_MASK		CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_tid_seg *p_seg;
	u32 cdu_seg_params, offset;
	int i;

	static const u32 rt_type_offset_arr[] = {
		CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	static const u32 rt_type_offset_fl_arr[] = {
		CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];

	/* There are initializations only for CDUT during pf Phase */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg)
			continue;

		/* Note: start_line is already adjusted for the CDU
		 * segment register granularity, so we just need to
		 * divide. Adjustment is implicit as we assume ILT
		 * Page size is larger than 32K!
		 */
		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
			  (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;

		cdu_seg_params = 0;
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
		STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);

		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
			  (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;

		cdu_seg_params = 0;
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
		STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
	}
}
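/* Offset example: with 64K CDUT pages, a segment starting 3 lines past the
 * client's first line sits at byte offset 3 * 64K = 192K, which is
 * 192K / CDUT_SEG_ALIGNMET_IN_BYTES (32K) = 6 in register units.
 */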
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, bool is_pf_loading)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_pf_rt_init_params params;
	struct qed_qm_iids iids;

	memset(&iids, 0, sizeof(iids));
	qed_cxt_qm_iids(p_hwfn, &iids);

	memset(&params, 0, sizeof(params));
	params.port_id = p_hwfn->port_id;
	params.pf_id = p_hwfn->rel_pf_id;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.is_pf_loading = is_pf_loading;
	params.num_pf_cids = iids.cids;
	params.num_vf_cids = iids.vf_cids;
	params.num_tids = iids.tids;
	params.start_pq = qm_info->start_pq;
	params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
	params.num_vf_pqs = qm_info->num_vf_pqs;
	params.start_vport = qm_info->start_vport;
	params.num_vports = qm_info->num_vports;
	params.pf_wfq = qm_info->pf_wfq;
	params.pf_rl = qm_info->pf_rl;
	params.pq_params = qm_info->qm_pq_params;
	params.vport_params = qm_info->qm_vport_params;

	qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
}
static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
	/* XCM pure-LB queue */
	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
		     qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;

	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);

	/* Connection types 6 & 7 are not in use, yet they must be configured
	 * as the highest possible connection. Not configuring them means the
	 * defaults will be used, and with a large number of cids a bug may
	 * occur, if the defaults will be smaller than dq_pf_max_cid /
	 * dq_vf_max_cid.
	 */
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);

	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}
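/* The ranges above are cumulative: e.g. if type 0 has 4096 cids and type 1
 * has 1024, the type-0 register holds 4096 >> 4 = 256 and the type-1
 * register holds (4096 + 1024) >> 4 = 320 (DQ_RANGE_SHIFT units of 16 cids).
 */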
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *ilt_clients;
	int i;

	ilt_clients = p_hwfn->p_cxt_mngr->clients;
	for_each_ilt_valid_client(i, ilt_clients) {
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].first.reg,
			     ilt_clients[i].first.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].last.reg, ilt_clients[i].last.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].p_size.reg,
			     ilt_clients[i].p_size.val);
	}
}
static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	u32 blk_factor;

	/* For simplicity we set the 'block' to be an ILT page */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_BASE_RT_OFFSET,
			     p_iov->first_vf_in_pf);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
			     p_iov->first_vf_in_pf + p_iov->total_vfs);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
			     blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
			     blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}
}
/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	struct phys_mem_desc *p_shdw;
	u32 line, rt_offst, i;

	qed_ilt_bounds_init(p_hwfn);
	qed_ilt_vf_bounds_init(p_hwfn);

	p_mngr = p_hwfn->p_cxt_mngr;
	p_shdw = p_mngr->ilt_shadow;
	clients = p_hwfn->p_cxt_mngr->clients;

	for_each_ilt_valid_client(i, clients) {
		/* Client's 1st val and RT array are absolute, ILT shadows'
		 * lines are relative.
		 */
		line = clients[i].first.val - p_mngr->pf_start_line;
		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
			   clients[i].first.val * ILT_ENTRY_IN_REGS;

		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
			u64 ilt_hw_entry = 0;

			/* p_virt could be NULL in case of dynamic
			 * allocation
			 */
			if (p_shdw[line].virt_addr) {
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
					  (p_shdw[line].phys_addr >> 12));

				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
					   rt_offst, line, i,
					   (u64)(p_shdw[line].phys_addr >> 12));
			}

			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
		}
	}
}
/* SRC (Searcher) PF */
static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 rounded_conn_num, conn_num, conn_max;
	struct qed_src_iids src_iids;

	memset(&src_iids, 0, sizeof(src_iids));
	qed_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (!conn_num)
		return;

	conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
	rounded_conn_num = roundup_pow_of_two(conn_max);

	STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
	STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
		     ilog2(rounded_conn_num));

	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
			 p_hwfn->p_cxt_mngr->src_t2.first_free);
	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
			 p_hwfn->p_cxt_mngr->src_t2.last_free);
}
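/* Example: conn_num = 7000 is first clamped up to at least
 * SRC_MIN_NUM_ELEMS (256), then rounded to the next power of two (8192),
 * so the searcher hash is configured with ilog2(8192) = 13 bits.
 */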
/* Timers PF */
#define TM_CFG_NUM_IDS_SHIFT		0
#define TM_CFG_NUM_IDS_MASK		0xFFFFULL
#define TM_CFG_PRE_SCAN_OFFSET_SHIFT	16
#define TM_CFG_PRE_SCAN_OFFSET_MASK	0x1FFULL
#define TM_CFG_PARENT_PF_SHIFT		25
#define TM_CFG_PARENT_PF_MASK		0x7ULL

#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT	30
#define TM_CFG_CID_PRE_SCAN_ROWS_MASK	0x1FFULL

#define TM_CFG_TID_OFFSET_SHIFT		30
#define TM_CFG_TID_OFFSET_MASK		0x7FFFFULL
#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT	49
#define TM_CFG_TID_PRE_SCAN_ROWS_MASK	0x1FFULL
static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 active_seg_mask = 0, tm_offset, rt_reg;
	struct qed_tm_iids tm_iids;
	u64 cfg_word;
	u8 i;

	memset(&tm_iids, 0, sizeof(tm_iids));
	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);

	/* @@@TBD No pre-scan for now */

	/* Note: We assume consecutive VFs for a PF */
	for (i = 0; i < p_mngr->vf_count; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
		SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
		rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
		    (sizeof(cfg_word) / sizeof(u32)) *
		    (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
	}

	cfg_word = 0;
	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);	/* n/a for PF */
	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);	/* scan all */

	rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
	    (sizeof(cfg_word) / sizeof(u32)) *
	    (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
	STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);

	/* enable scan */
	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
		     tm_iids.pf_cids ? 0x1 : 0x0);

	/* @@@TBD how to enable the scan for the VFs */

	tm_offset = tm_iids.per_vf_cids;

	/* Note: We assume consecutive VFs for a PF */
	for (i = 0; i < p_mngr->vf_count; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);

		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
		    (sizeof(cfg_word) / sizeof(u32)) *
		    (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
	}

	tm_offset = tm_iids.pf_cids;
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);

		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
		    (sizeof(cfg_word) / sizeof(u32)) *
		    (NUM_OF_VFS(p_hwfn->cdev) +
		     p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);

		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
		active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);

		tm_offset += tm_iids.pf_tids[i];
	}

	if (QED_IS_RDMA_PERSONALITY(p_hwfn))
		active_seg_mask = 0;

	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);

	/* @@@TBD how to enable the scan for the VFs */
}
static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
{
	if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
	    p_hwfn->pf_params.fcoe_pf_params.is_target)
		STORE_RT_REG(p_hwfn,
			     PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
}
static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_fcoe;
	struct qed_tid_seg *p_tid;

	p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];

	/* If FCoE is active set the MAX OX_ID (tid) in the Parser */
	if (!p_fcoe->cid_count)
		return;

	p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
	if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
		STORE_RT_REG_AGG(p_hwfn,
				 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
				 p_tid->count);
	} else {
		STORE_RT_REG_AGG(p_hwfn,
				 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
				 p_tid->count);
	}
}
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
	qed_cdu_init_common(p_hwfn);
	qed_prs_init_common(p_hwfn);
}

void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_qm_init_pf(p_hwfn, p_ptt, true);
	qed_cm_init_pf(p_hwfn);
	qed_dq_init_pf(p_hwfn);
	qed_cdu_init_pf(p_hwfn);
	qed_ilt_init_pf(p_hwfn);
	qed_src_init_pf(p_hwfn);
	qed_tm_init_pf(p_hwfn);
	qed_prs_init_pf(p_hwfn);
}
int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			 enum protocol_type type, u32 *p_cid, u8 vfid)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map;
	u32 rel_cid;

	if (type >= MAX_CONN_TYPES) {
		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
		return -EINVAL;
	}

	if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
		DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
		return -EINVAL;
	}

	/* Determine the right map to take this CID from */
	if (vfid == QED_CXT_PF_CID)
		p_map = &p_mngr->acquired[type];
	else
		p_map = &p_mngr->acquired_vf[type][vfid];

	if (!p_map->cid_map) {
		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
		return -EINVAL;
	}

	rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);

	if (rel_cid >= p_map->max_count) {
		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
		return -EINVAL;
	}

	__set_bit(rel_cid, p_map->cid_map);

	*p_cid = rel_cid + p_map->start_cid;

	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
		   "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
		   *p_cid, rel_cid, vfid, type);

	return 0;
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			enum protocol_type type, u32 *p_cid)
{
	return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
}
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
				      u32 cid,
				      u8 vfid,
				      enum protocol_type *p_type,
				      struct qed_cid_acquired_map **pp_map)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 rel_cid;

	/* Iterate over protocols and find matching cid range */
	for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
		if (vfid == QED_CXT_PF_CID)
			*pp_map = &p_mngr->acquired[*p_type];
		else
			*pp_map = &p_mngr->acquired_vf[*p_type][vfid];

		if (!((*pp_map)->cid_map))
			continue;
		if (cid >= (*pp_map)->start_cid &&
		    cid < (*pp_map)->start_cid + (*pp_map)->max_count)
			break;
	}

	if (*p_type == MAX_CONN_TYPES) {
		DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
		goto fail;
	}

	rel_cid = cid - (*pp_map)->start_cid;
	if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
		DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired",
			  cid, vfid);
		goto fail;
	}

	return true;
fail:
	*p_type = MAX_CONN_TYPES;
	*pp_map = NULL;
	return false;
}
void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
{
	struct qed_cid_acquired_map *p_map = NULL;
	enum protocol_type type;
	bool b_acquired;
	u32 rel_cid;

	if (vfid != QED_CXT_PF_CID && vfid > MAX_NUM_VFS) {
		DP_NOTICE(p_hwfn,
			  "Trying to return incorrect CID belonging to VF %02x\n",
			  vfid);
		return;
	}

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
					       &type, &p_map);

	if (!b_acquired)
		return;

	rel_cid = cid - p_map->start_cid;
	clear_bit(rel_cid, p_map->cid_map);

	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
		   "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
		   cid, rel_cid, vfid, type);
}
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	_qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
}
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map = NULL;
	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
	enum protocol_type type;
	bool b_acquired;

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
					       QED_CXT_PF_CID, &type, &p_map);

	if (!b_acquired)
		return -EINVAL;

	/* set the protocol type */
	p_info->type = type;

	/* compute context virtual pointer */
	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;

	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
	line = p_info->iid / cxts_per_p;

	/* Make sure context is allocated (dynamic allocation) */
	if (!p_mngr->ilt_shadow[line].virt_addr)
		return -EINVAL;

	p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
			p_info->iid % cxts_per_p * conn_cxt_size;

	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
		   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);

	return 0;
}
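/* Lookup example under assumed sizes: with 64K CDUC pages and e.g. a
 * 512-byte connection context, cxts_per_p = 128; iid 300 lands on shadow
 * line 300 / 128 = 2 at byte offset (300 % 128) * 512 = 22528 in the page.
 */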
static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
				   struct qed_rdma_pf_params *p_params,
				   u32 num_tasks)
{
	u32 num_cons, num_qps;
	enum protocol_type proto;

	if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
		p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
	}

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH_IWARP:
		/* Each QP requires one connection */
		num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
		proto = PROTOCOLID_IWARP;
		break;
	case QED_PCI_ETH_ROCE:
		num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
		num_cons = num_qps * 2;	/* each QP requires two connections */
		proto = PROTOCOLID_ROCE;
		break;
	default:
		return;
	}

	if (num_cons && num_tasks) {
		u32 num_srqs, num_xrc_srqs;

		qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);

		/* Deliberately passing ROCE for tasks id. This is because
		 * iWARP / RoCE share the task id.
		 */
		qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
					    QED_CXT_ROCE_TID_SEG, 1,
					    num_tasks, false);

		num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);

		/* XRC SRQs populate a single ILT page */
		num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);

		qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
	} else {
		DP_INFO(p_hwfn->cdev,
			"RDMA personality used without setting params!\n");
	}
}
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
{
	/* Set the number of required CORE connections */
	u32 core_cids = 1; /* SPQ */

	if (p_hwfn->using_ll2)
		core_cids += 4;
	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH_RDMA:
	case QED_PCI_ETH_IWARP:
	case QED_PCI_ETH_ROCE:
	{
		qed_rdma_set_pf_params(p_hwfn,
				       &p_hwfn->pf_params.rdma_pf_params,
				       rdma_tasks);
	}
		/* No need for break since RoCE coexists with Ethernet */
		fallthrough;
	case QED_PCI_ETH:
	{
		struct qed_eth_pf_params *p_params =
		    &p_hwfn->pf_params.eth_pf_params;

		if (!p_params->num_vf_cons)
			p_params->num_vf_cons =
			    ETH_PF_PARAMS_VF_CONS_DEFAULT;
		qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
					    p_params->num_cons,
					    p_params->num_vf_cons);
		p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
		break;
	}
	case QED_PCI_FCOE:
	{
		struct qed_fcoe_pf_params *p_params;

		p_params = &p_hwfn->pf_params.fcoe_pf_params;

		if (p_params->num_cons && p_params->num_tasks) {
			qed_cxt_set_proto_cid_count(p_hwfn,
						    PROTOCOLID_FCOE,
						    p_params->num_cons,
						    0);
			qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
						    QED_CXT_FCOE_TID_SEG, 0,
						    p_params->num_tasks, true);
		} else {
			DP_INFO(p_hwfn->cdev,
				"FCoE personality used without setting params!\n");
		}
		break;
	}
	case QED_PCI_ISCSI:
	{
		struct qed_iscsi_pf_params *p_params;

		p_params = &p_hwfn->pf_params.iscsi_pf_params;

		if (p_params->num_cons && p_params->num_tasks) {
			qed_cxt_set_proto_cid_count(p_hwfn,
						    PROTOCOLID_ISCSI,
						    p_params->num_cons,
						    0);
			qed_cxt_set_proto_tid_count(p_hwfn,
						    PROTOCOLID_ISCSI,
						    QED_CXT_ISCSI_TID_SEG,
						    0,
						    p_params->num_tasks,
						    true);
		} else {
			DP_INFO(p_hwfn->cdev,
				"iSCSI personality used without setting params!\n");
		}
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
			     struct qed_tid_mem *p_info)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 proto, seg, total_lines, i, shadow_line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_fl_seg;
	struct qed_tid_seg *p_seg_info;

	/* Verify the personality */
	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		proto = PROTOCOLID_FCOE;
		seg = QED_CXT_FCOE_TID_SEG;
		break;
	case QED_PCI_ISCSI:
		proto = PROTOCOLID_ISCSI;
		seg = QED_CXT_ISCSI_TID_SEG;
		break;
	default:
		return -EINVAL;
	}

	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	if (!p_cli->active)
		return -EINVAL;

	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
	if (!p_seg_info->has_fl_mem)
		return -EINVAL;

	p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
	total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
				   p_fl_seg->real_size_in_page);

	for (i = 0; i < total_lines; i++) {
		shadow_line = i + p_fl_seg->start_line -
		    p_hwfn->p_cxt_mngr->pf_start_line;
		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
	}
	p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
	    p_fl_seg->real_size_in_page;
	p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
	p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
	    p_info->tid_size;

	return 0;
}
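/* Illustration with assumed sizes: for a 64 KiB CDUT page and a 96 B task
 * context, real_size_in_page = (65536 / 96) * 96 = 65472, giving
 * num_tids_per_block = 682 and waste = 65536 - 65472 = 64 bytes per page.
 * The real figures come from the ILT client config and task_type_size[].
 */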
/* This function is very RoCE-oriented; if another protocol needs this feature
 * in the future, the function will have to be made more generic.
 */
int
qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
			  enum qed_cxt_elem_type elem_type, u32 iid)
{
	u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_ptt *p_ptt;
	dma_addr_t p_phys;
	u64 ilt_hw_entry;
	void *p_virt;
	int rc = 0;

	switch (elem_type) {
	case QED_ELEM_CXT:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
		elem_size = CONN_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUC_BLK];
		break;
	case QED_ELEM_SRQ:
		/* The first ILT page is not used for regular SRQs. Skip it. */
		iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_XRC_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = XRC_SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_TASK:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
		return -EINVAL;
	}

	/* Calculate the line in the ILT */
	hw_p_size = p_cli->p_size.val;
	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
	line = p_blk->start_line + (iid / elems_per_p);
	shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;

	/* If the line is already allocated, do nothing; otherwise allocate it
	 * and write it to the PSWRQ2 registers.
	 * This section can be run in parallel from different contexts and thus
	 * a mutex protection is needed.
	 */
	mutex_lock(&p_hwfn->p_cxt_mngr->mutex);

	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
		goto out0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn,
			  "QED_TIME_OUT on ptt acquire - dynamic allocation");
		rc = -EBUSY;
		goto out0;
	}

	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    p_blk->real_size_in_page, &p_phys,
				    GFP_KERNEL);
	if (!p_virt) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Configuring refTagMask to 0xF is required for RoCE DIF MRs only,
	 * to compensate for a HW bug, but it is configured even if DIF is not
	 * enabled. This is harmless and allows us to avoid a dedicated API. We
	 * configure the field for all of the contexts on the newly allocated
	 * page.
	 */
	if (elem_type == QED_ELEM_TASK) {
		u32 elem_i;
		u8 *elem_start = (u8 *)p_virt;
		union type1_task_context *elem;

		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
			elem = (union type1_task_context *)elem_start;
			SET_FIELD(elem->roce_ctx.tdif_context.flags1,
				  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
		}
	}

	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
	    p_blk->real_size_in_page;

	/* Compute the absolute offset */
	reg_offset = PSWRQ2_REG_ILT_MEMORY +
	    (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);

	ilt_hw_entry = 0;
	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
		   >> 12));

	/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
	qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
			  reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
			  NULL);

	if (elem_type == QED_ELEM_CXT) {
		u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
		    elems_per_p;

		/* Update the relevant register in the parser */
		qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
		       last_cid_allocated - 1);

		if (!p_hwfn->b_rdma_enabled_in_prs) {
			/* Enable RDMA search */
			qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
			p_hwfn->b_rdma_enabled_in_prs = true;
		}
	}

out1:
	qed_ptt_release(p_hwfn, p_ptt);
out0:
	mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);

	return rc;
}
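/* Worked example of the entry encoding above (illustrative address): for a
 * shadow page at physical address 0x12345f000, the physical-address field
 * receives (0x12345f000 >> 12) = 0x12345f and the valid bit (bit 52) is
 * set, so the 64-bit value DMAE-written to PSWRQ2 is 0x001000000012345f.
 */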
/* This function is very RoCE-oriented; if another protocol needs this feature
 * in the future, the function will have to be made more generic.
 */
static int
qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
		       enum qed_cxt_elem_type elem_type,
		       u32 start_iid, u32 count)
{
	u32 start_line, end_line, shadow_start_line, shadow_end_line;
	u32 reg_offset, elem_size, hw_p_size, elems_per_p;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u32 end_iid = start_iid + count;
	struct qed_ptt *p_ptt;
	u64 ilt_hw_entry = 0;
	u32 i;

	switch (elem_type) {
	case QED_ELEM_CXT:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
		elem_size = CONN_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUC_BLK];
		break;
	case QED_ELEM_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_XRC_SRQ:
		/* Mirrors the allocator above; without this case, freeing
		 * XRC SRQ ranges would fail with -EINVAL.
		 */
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = XRC_SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_TASK:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
		return -EINVAL;
	}

	/* Calculate the line range in the ILT */
	hw_p_size = p_cli->p_size.val;
	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
	start_line = p_blk->start_line + (start_iid / elems_per_p);
	end_line = p_blk->start_line + (end_iid / elems_per_p);
	if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
		end_line--;

	shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
	shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn,
			  "QED_TIME_OUT on ptt acquire - dynamic allocation");
		return -EBUSY;
	}

	for (i = shadow_start_line; i < shadow_end_line; i++) {
		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
			continue;

		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
				  p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
				  p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);

		p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
		p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
		p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;

		/* Compute the absolute offset */
		reg_offset = PSWRQ2_REG_ILT_MEMORY +
		    ((start_line++) * ILT_REG_SIZE_IN_BYTES *
		     ILT_ENTRY_IN_REGS);

		/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
		 * wide-bus.
		 */
		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64) (uintptr_t) &ilt_hw_entry,
				  reg_offset,
				  sizeof(ilt_hw_entry) / sizeof(u32),
				  NULL);
	}

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}
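/* Note: ilt_hw_entry stays zero throughout the loop above, so each DMAE
 * write invalidates the freed line in HW (physical address cleared, valid
 * bit dropped) once its backing host page has been returned.
 */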
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
{
	int rc;
	u32 cid;

	/* Free Connection CXT */
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
				    qed_cxt_get_proto_cid_start(p_hwfn,
								proto),
				    qed_cxt_get_proto_cid_count(p_hwfn,
								proto, &cid));
	if (rc)
		return rc;

	/* Free Task CXT (intentionally RoCE, as the task-id is shared between
	 * RoCE and iWARP)
	 */
	proto = PROTOCOLID_ROCE;
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
				    qed_cxt_get_proto_tid_count(p_hwfn, proto));
	if (rc)
		return rc;

	/* Free TSDM CXT */
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
				    p_hwfn->p_cxt_mngr->xrc_srq_count);

	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
				    p_hwfn->p_cxt_mngr->xrc_srq_count,
				    p_hwfn->p_cxt_mngr->srq_count);

	return rc;
}
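/* Layout reminder for the two TSDM frees above: XRC SRQ contexts occupy the
 * first ILT page of the SRQ block (the allocator skips it for regular SRQs),
 * so regular SRQ contexts are freed starting at an offset of xrc_srq_count
 * elements.
 */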
int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
			 u32 tid, u8 ctx_type, void **pp_task_ctx)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_tid_seg *p_seg_info;
	struct qed_ilt_cli_blk *p_seg;
	u32 num_tids_per_block;
	u32 tid_size, ilt_idx;
	u32 total_lines;
	u32 proto, seg;

	/* Verify the personality */
	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		proto = PROTOCOLID_FCOE;
		seg = QED_CXT_FCOE_TID_SEG;
		break;
	case QED_PCI_ISCSI:
		proto = PROTOCOLID_ISCSI;
		seg = QED_CXT_ISCSI_TID_SEG;
		break;
	default:
		return -EINVAL;
	}

	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	if (!p_cli->active)
		return -EINVAL;

	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];

	if (ctx_type == QED_CTX_WORKING_MEM) {
		p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
	} else if (ctx_type == QED_CTX_FL_MEM) {
		if (!p_seg_info->has_fl_mem)
			return -EINVAL;
		p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
	} else {
		return -EINVAL;
	}
	total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
	tid_size = p_mngr->task_type_size[p_seg_info->type];
	num_tids_per_block = p_seg->real_size_in_page / tid_size;

	if (total_lines < tid / num_tids_per_block)
		return -EINVAL;

	ilt_idx = tid / num_tids_per_block + p_seg->start_line -
		  p_mngr->pf_start_line;
	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
		       (tid % num_tids_per_block) * tid_size;

	return 0;
}
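/* Minimal usage sketch (hypothetical caller): resolving the working-memory
 * context of task 17 on a storage personality:
 *
 *	void *p_ctx;
 *	int rc = qed_cxt_get_task_ctx(p_hwfn, 17, QED_CTX_WORKING_MEM,
 *				      &p_ctx);
 *
 * On success, p_ctx points into the ILT shadow page backing task 17.
 */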
static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
{
	if (p_blk->real_size_in_page == 0)
		return 0;

	return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
}
u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u16 i, pages = 0;

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
		pages += qed_blk_calculate_pages(p_blk);
	}

	return pages;
}
u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u16 i, pages = 0;

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
		p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
		pages += qed_blk_calculate_pages(p_blk);
	}

	return pages;
}
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u16 i, pages = 0;

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
		pages += qed_blk_calculate_pages(p_blk);
	}

	return pages;
}
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u16 i, pages = 0;

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
		p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
		pages += qed_blk_calculate_pages(p_blk);
	}

	return pages;
}
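/* These four helpers report how many ILT pages back the CDUT init (FL) and
 * working segments for the PF and its VFs. Judging by their naming they are
 * meant for consumers such as the debug/ILT-dump code that need to size
 * capture buffers; that use is an assumption, not something this file
 * enforces.
 */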