/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#define CDU_VALIDATION_DEFAULT_CFG	61

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
	{400, 336, 352, 368, 304, 384, 416, 352},	/* region 3 offsets */
	{528, 496, 416, 512, 448, 512, 544, 480},	/* region 4 offsets */
	{608, 544, 496, 576, 576, 592, 624, 560}	/* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
	{240, 240, 112, 0, 0, 0, 0, 96}	/* region 1 offsets */
};
/* General constants */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) * \
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
#define QM_INVALID_PQ_ID	0xffff
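/* Illustrative sizing (editor's example, not from the original source):
 * assuming QM_PQ_ELEMENT_SIZE is 4 bytes, a PQ with 1024 elements needs
 * QM_PQ_MEM_4KB(1024) = DIV_ROUND_UP((1024 + 1) * 4, 0x1000) = 2 pages
 * of 4KB, while QM_PQ_SIZE_256B(1024) = DIV_ROUND_UP(1024, 0x100) - 1 = 3.
 */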
/* Max link speed (in Mbps) */
#define QM_MAX_LINK_SPEED	100000

/* Feature enable */
#define QM_BYPASS_EN	1
#define QM_BYTE_CRD_EN	1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF	4

/* WFQ constants */

/* Upper bound in bytes: 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND	62500000
/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT	0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT	5

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight)	((weight) * 0x9000)

/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL	((QM_WFQ_UPPER_BOUND * 7) / 10)
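/* Illustrative example (editor's note, not from the original source):
 * a WFQ weight of 1 programs an increment of 0x9000 = 36864. Since
 * QM_WFQ_MAX_INC_VAL is 62500000 * 7 / 10 = 43750000, weights above
 * 43750000 / 36864 ~= 1186 fail the validity checks further below.
 */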
/* RL constants */

/* Period in us */
#define QM_RL_PERIOD	5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps */
#define QM_RL_INC_VAL(rate) ({ \
	typeof(rate) __rate = (rate); \
	max_t(u32, \
	      (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
		    (8 * 100)), 1); })
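/* Illustrative example (editor's note, not from the original source):
 * at 10000 Mbps the QM moves 10000 * 5 / 8 = 6250 bytes per 5us period;
 * with the 101/100 margin factor, QM_RL_INC_VAL(10000) =
 * (10000 * 5 * 101) / 800 = 6312. A rate of 0 falls back to the
 * 1000000 Mbps default.
 */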
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND	62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL	((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed)	((u32)max_t(u32, \
						    QM_RL_INC_VAL(speed), \
						    9700 + 1000))

/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed)	QM_VP_RL_UPPER_BOUND(speed)

/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED	(QM_VP_RL_UPPER_BOUND(10000) - 1)

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF	1
#define QM_OPPOR_FW_STOP_DEF	0
#define QM_OPPOR_PQ_EMPTY_DEF	1
/* Command Queue constants */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES	150

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		      PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
/* Returns the VOQ line credit for the specified number of PBF command lines.
 * PBF lines are specified in 256b units.
 */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
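/* Illustrative example (editor's note, not from the original source):
 * for the 150 pure LB command lines defined above, QM_VOQ_LINE_CRD(150) =
 * (150 - 4) * 2 = 292 line credits, OR'd with QM_LINE_CRD_REG_SIGN_BIT.
 */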
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS	38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR	10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO	7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH	32
#define QM_STOP_CMD_ADDR	2
#define QM_STOP_CMD_STRUCT_SIZE	2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, value)
#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \
			  ext_voq, wrr) \
	do { \
		typeof(map) __map; \
		memset(&__map, 0, sizeof(__map)); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
			  rl_valid ? 1 : 0); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
			  vp_pq_id); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
		SET_FIELD(__map.reg, \
			  QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
			     *((u32 *)&__map)); \
		(map) = __map; \
	} while (0)
#define WRITE_PQ_INFO_TO_RAM	1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	 ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
	 (((rl) >> 8) << 9))
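/* Illustrative packing (editor's example, not from the original source):
 * vp = 5, pf = 2, tc = 1, port = 0, rl_valid = 0, rl = 0 packs to
 * 0x00012005; the low 8 bits of the RL ID land in [31:24] and any
 * higher RL ID bits start at bit 9.
 */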
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
	 XSTORM_PQ_INFO_OFFSET(pq_id))
/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
			  u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
	else
		return port_id * max_phys_tcs_per_port + tc;
}
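/* Illustrative mapping (editor's example, not from the original source):
 * with max_phys_tcs_per_port = 4, port 1 / TC 2 maps to VOQ
 * 1 * 4 + 2 = 6, while the pure LB TC of port 1 maps to VOQ
 * NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1.
 */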
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS_E4;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);

		/* Write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare global RL enable/disable runtime init values */
static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     global_rl_en ? 1 : 0);
	if (global_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_VP_RL_BYPASS_THRESH_SPEED);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 ext_voq, u16 cmdq_lines)
{
	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS_E4;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = qed_get_ext_voq(p_hwfn,
						  port_id,
						  tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				qed_cmdq_lines_voq_rt_init(p_hwfn,
							   ext_voq,
							   phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					   PBF_CMDQ_PURE_LB_LINES);
	}
}
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
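/* Worked example of the factored integer math below (editor's example,
 * not from the original source): a port with 1000 BTB blocks and 4
 * active TCs gives usable_blocks = 1000 - 38 = 962, pure_lb_blocks =
 * max(38, ((962 * 10) / (4 * 10 + 7)) / 10) = max(38, 20) = 38, and
 * phys_blocks = (962 - 38) / 4 = 231 guaranteed blocks per physical TC.
 */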
static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use factor to avoid floating
		 * arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1) {
				ext_voq =
				    qed_get_ext_voq(p_hwfn,
						    port_id,
						    tc,
						    max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET
					     (ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}
/* Prepare runtime init values for the specified RL.
 * Set max link speed (100Gbps) per rate limiter.
 * Return -1 on error.
 */
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
	u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
			  (u32)QM_RL_CRD_REG_SIGN_BIT;
	u32 inc_val;
	u16 rl_id;

	/* Go over all global RLs */
	for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
		inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
	}

	return 0;
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_qm_pf_rt_init_params *p_params,
				  u32 base_mem_addr_4kb)
{
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;

	first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (p_params->start_pq + num_pqs - 1) /
			QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u16 *p_first_tx_pq_id, vport_id_in_pf;
		struct qm_rf_pq_map_e4 tx_pq_map;
		u8 tc_id = pq_params[i].tc_id;
		bool is_vf_pq;
		u8 ext_voq;

		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  tc_id,
					  p_params->max_phys_tcs_per_port);
		is_vf_pq = (i >= p_params->num_pf_pqs);

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
		p_first_tx_pq_id =
		    &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
		if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val =
				(ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				(p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);

			/* Create new VP PQ */
			*p_first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     *p_first_tx_pq_id,
				     map_val);
		}

		/* Prepare PQ map entry */
		QM_INIT_TX_PQ_MAP(p_hwfn,
				  tx_pq_map,
				  E4,
				  pq_id,
				  *p_first_tx_pq_id,
				  pq_params[i].rl_valid,
				  pq_params[i].rl_id,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (p_params->is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
						  p_params->pf_id,
						  tc_id,
						  pq_params[i].port_id,
						  pq_params[i].rl_valid,
						  pq_params[i].rl_id);
			qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
			       pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id /
				      QM_PF_QUEUE_GROUP_SIZE] |=
				BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn,
				     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
				     tx_pq_vf_mask[i]);
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 pf_id,
				     bool is_pf_loading,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  pq_params[i].tc_id,
					  p_params->max_phys_tcs_per_port);
		crd_reg_offset =
			(p_params->pf_id < MAX_NUM_PFS_BB ?
			 QM_REG_WFQPFCRD_RT_OFFSET :
			 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			ext_voq * MAX_NUM_PFS_BB +
			(p_params->pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);

	return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u16 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id, i;
	u32 inc_val;
	u8 tc;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/

u32 qed_qm_pf_mem_size(u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
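/* Illustrative example (editor's note, not from the original source):
 * assuming QM_PQ_ELEMENT_SIZE is 4 bytes, num_pf_cids = 4096,
 * num_vf_cids = 0, num_tids = 0, num_pf_pqs = 16 and num_vf_pqs = 0
 * give QM_PQ_MEM_4KB(4096) = 5, for a total of 5 * 16 + 5 * 4 = 100
 * pages of 4KB.
 */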
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
			  struct qed_qm_common_rt_init_params *p_params)
{
	u32 mask = 0;

	/* Init AFullOprtnstcCrdMask */
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
		  QM_OPPOR_LINE_VOQ_DEF);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
		  p_params->global_rl_en);
	SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
	SET_FIELD(mask,
		  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);

	/* Enable/disable PF WFQ */
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);

	/* Enable/disable global RL */
	qed_enable_global_rl(p_hwfn, p_params->global_rl_en);

	/* Enable/disable VPORT WFQ */
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);

	/* Init PBF CMDQ line credit */
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	/* Init BTB blocks in PBF */
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);

	qed_global_rl_rt_init(p_hwfn);

	return 0;
}
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u16 i;
	u8 tc;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn,
				 p_params->pf_id,
				 p_params->is_pf_loading, p_params->num_pf_cids,
				 p_params->num_tids, 0);

	/* Map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

	/* Init PF WFQ */
	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	/* Init PF RL */
	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	/* Init VPORT WFQ */
	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	return 0;
}
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn,
	       p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
		return -1;
	}

	/* A VPORT can have several VPORT PQ IDs for various TCs */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID)
			qed_wr(p_hwfn,
			       p_ptt,
			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
	}

	return 0;
}
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(rate_limit);
	if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
		DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);

	return 0;
}
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD, PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr,
					 QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
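/* Typical usage (editor's sketch, not from the original source): pause
 * the PQs before draining them, then release the same range:
 *
 *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, pq, cnt))
 *		return -EBUSY;
 *	// ... drain the queues ...
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, pq, cnt);
 */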
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	do { \
		typeof(var) *__p_var = &(var); \
		typeof(offset) __offset = (offset); \
		*__p_var = (*__p_var & ~BIT(__offset)) | \
			   ((enable) ? BIT(__offset) : 0); \
	} while (0)

#define PRS_ETH_TUNN_OUTPUT_FORMAT	0xF4DAB910
#define PRS_ETH_OUTPUT_FORMAT		0xFFFF4910
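/* Illustrative example (editor's note, not from the original source):
 * SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, 5, true) performs a one-bit
 * read-modify-write, leaving reg_val with bit 5 set and all other bits
 * unchanged; passing enable = false clears the bit instead.
 */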
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	/* Update PRS register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	u32 reg_val;
	u8 shift;

	/* Update PRS register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val) {
		reg_val =
		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	/* Update DORQ registers */
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn,
	       p_ptt,
	       DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
	       ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET	3
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT	-925189872

void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt, bool enable)
{
	u32 reg_val, cfg_mask;

	/* read PRS config register */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* update PRS FIC register */
		qed_wr(p_hwfn,
		       p_ptt,
		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
		       (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* write PRS config register */
	qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}
#define T_ETH_PACKET_ACTION_GFT_EVENTID	23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR	272
#define T_ETH_PACKET_MATCH_RFS_EVENTID	25
#define PARSER_ETH_CONN_CM_HDR	0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
	/* Disable gft search for PF */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean ram & cam for next gft session */

	/* Zero camline */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero ramline */
	qed_wr(p_hwfn,
	       p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
	qed_wr(p_hwfn,
	       p_ptt,
	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
	       0);
}
void qed_gft_config(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    u16 pf_id,
		    bool tcp,
		    bool udp,
		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
	u32 reg_val, cam_line, search_non_ip_as_gft;
	struct regpair ram_line = { };

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn,
			  "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context only cid in PRS on match. */
	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set Cam */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
				  GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line,
				  GFT_CAM_LINE_MAPPED_IP_VERSION,
				  GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
	       cam_line);
	cam_line =
	    qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */

	/* Search no IP as GFT */
	search_non_ip_as_gft = 0;

	/* Tunnel type */
	SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

		/* Allow tunneled traffic without inner IP */
		search_non_ip_as_gft = 1;
	}

	qed_wr(p_hwfn,
	       p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
	qed_wr(p_hwfn,
	       p_ptt,
	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
	       ram_line.lo);
	qed_wr(p_hwfn,
	       p_ptt,
	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
	       ram_line.hi);

	/* Set default profile so that no filter match will happen */
	qed_wr(p_hwfn,
	       p_ptt,
	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
	       PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
	qed_wr(p_hwfn,
	       p_ptt,
	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
	       PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);

	/* Enable gft search */
	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
DECLARE_CRC8_TABLE(cdu_crc8_table);

/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
	u8 crc, validation_byte = 0;
	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u32 validation_string = 0;
	__be32 data_to_crc;

	if (!crc8_table_valid) {
		crc8_populate_msb(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8] = {CID[31:20], CID[11:0]}
	 * [7:4]  = Region
	 * [3:0]  = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = cpu_to_be32(validation_string);
	crc = crc8(cdu_crc8_table,
		   (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * for type B validation
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |=
	    ((validation_cfg >>
	      CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >>
	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}
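/* Illustrative composition (editor's example, not from the original
 * source): with the active bit set, type B validation of conn_type = 2
 * yields validation_byte = BIT(7) | (2 << 3) | (crc & 0x7), while type A
 * yields BIT(7) | (crc & 0x7F).
 */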
/* Calculate and set validation bytes for session context */
void qed_calc_session_ctx_validation(void *p_ctx_mem,
				     u16 ctx_size, u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
void qed_calc_task_ctx_validation(void *p_ctx_mem,
				  u16 ctx_size, u8 ctx_type, u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 * const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}
/* Memset task context to 0 while preserving validation bytes */
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 * const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	memset(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}
/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	switch (storm_id) {
	case 0:
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 1:
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 2:
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 3:
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 4:
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
	case 5:
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);

	default:
		return 0;
	}
}
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u8 assert_level[NUM_STORMS])
{
	u8 storm_id;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);

		qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
	}
}