1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 #include <linux/netlink.h>
/* NOTE(review): this extract embeds original-file line numbers at the start
 * of each line and is missing interior lines, so the definitions below are
 * incomplete fragments — restore from the upstream file before building.
 */
/* Shared-buffer pool registry ("PR") cache: at least the configured mode. */
16 struct mlxsw_sp_sb_pr {
17 enum mlxsw_reg_sbpr_mode mode;
/* Occupancy sample read back from the device (fields elided here). */
23 struct mlxsw_cp_sb_occ {
/* Per-{port, PG/TC} quota ("CM") cache, including last occupancy. */
28 struct mlxsw_sp_sb_cm {
32 struct mlxsw_cp_sb_occ occ;
/* Magic pool sizes: "infinite" quota and "all remaining space". */
37 #define MLXSW_SP_SB_INFI -1U
38 #define MLXSW_SP_SB_REST -2U
/* Per-{port, pool} quota ("PM") cache. */
40 struct mlxsw_sp_sb_pm {
43 struct mlxsw_cp_sb_occ occ;
46 struct mlxsw_sp_sb_mm {
/* Pool descriptor: traffic direction plus firmware pool number. */
52 struct mlxsw_sp_sb_pool_des {
53 enum mlxsw_reg_sbxx_dir dir;
/* Well-known indices into the pool_dess arrays below. */
57 #define MLXSW_SP_SB_POOL_ING 0
58 #define MLXSW_SP_SB_POOL_EGR 4
59 #define MLXSW_SP_SB_POOL_EGR_MC 8
60 #define MLXSW_SP_SB_POOL_ING_CPU 9
61 #define MLXSW_SP_SB_POOL_EGR_CPU 10
/* Spectrum-1 pool descriptors. Index in this array is the driver-side
 * pool_index; entry 8 (egress pool 15) is the multicast pool, entries 9/10
 * are the CPU-port pools. Closing brace lost in extraction.
 */
63 static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
64 {MLXSW_REG_SBXX_DIR_INGRESS, 0},
65 {MLXSW_REG_SBXX_DIR_INGRESS, 1},
66 {MLXSW_REG_SBXX_DIR_INGRESS, 2},
67 {MLXSW_REG_SBXX_DIR_INGRESS, 3},
68 {MLXSW_REG_SBXX_DIR_EGRESS, 0},
69 {MLXSW_REG_SBXX_DIR_EGRESS, 1},
70 {MLXSW_REG_SBXX_DIR_EGRESS, 2},
71 {MLXSW_REG_SBXX_DIR_EGRESS, 3},
72 {MLXSW_REG_SBXX_DIR_EGRESS, 15},
73 {MLXSW_REG_SBXX_DIR_INGRESS, 4},
74 {MLXSW_REG_SBXX_DIR_EGRESS, 4},
/* Spectrum-2 pool descriptors — identical layout to Spectrum-1. */
77 static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
78 {MLXSW_REG_SBXX_DIR_INGRESS, 0},
79 {MLXSW_REG_SBXX_DIR_INGRESS, 1},
80 {MLXSW_REG_SBXX_DIR_INGRESS, 2},
81 {MLXSW_REG_SBXX_DIR_INGRESS, 3},
82 {MLXSW_REG_SBXX_DIR_EGRESS, 0},
83 {MLXSW_REG_SBXX_DIR_EGRESS, 1},
84 {MLXSW_REG_SBXX_DIR_EGRESS, 2},
85 {MLXSW_REG_SBXX_DIR_EGRESS, 3},
86 {MLXSW_REG_SBXX_DIR_EGRESS, 15},
87 {MLXSW_REG_SBXX_DIR_INGRESS, 4},
88 {MLXSW_REG_SBXX_DIR_EGRESS, 4},
/* Number of ingress PGs (8) and egress TCs (16) tracked per port. */
91 #define MLXSW_SP_SB_ING_TC_COUNT 8
92 #define MLXSW_SP_SB_EG_TC_COUNT 16
/* Per-port shared-buffer state: per-PG/TC quota caches and pool quotas. */
94 struct mlxsw_sp_sb_port {
95 struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
96 struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
97 struct mlxsw_sp_sb_pm *pms;
/* Fragment of struct mlxsw_sp_sb (header lost in extraction): global
 * pool registry, per-port array, and the headroom limit in cells.
 */
101 struct mlxsw_sp_sb_pr *prs;
102 struct mlxsw_sp_sb_port *ports;
104 u32 max_headroom_cells;
/* ASIC-generation-specific shared-buffer defaults, selected at probe. */
108 struct mlxsw_sp_sb_vals {
109 unsigned int pool_count;
110 const struct mlxsw_sp_sb_pool_des *pool_dess;
111 const struct mlxsw_sp_sb_pm *pms;
112 const struct mlxsw_sp_sb_pm *pms_cpu;
113 const struct mlxsw_sp_sb_pr *prs;
114 const struct mlxsw_sp_sb_mm *mms;
115 const struct mlxsw_sp_sb_cm *cms_ingress;
116 const struct mlxsw_sp_sb_cm *cms_egress;
117 const struct mlxsw_sp_sb_cm *cms_cpu;
118 unsigned int mms_count;
119 unsigned int cms_ingress_count;
120 unsigned int cms_egress_count;
121 unsigned int cms_cpu_count;
124 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
126 return mlxsw_sp->sb->cell_size * cells;
129 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
131 return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
134 u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
136 return mlxsw_sp->sb->max_headroom_cells;
139 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
142 return &mlxsw_sp->sb->prs[pool_index];
145 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
147 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
148 return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
150 return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
153 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
154 u8 local_port, u8 pg_buff,
155 enum mlxsw_reg_sbxx_dir dir)
157 struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
159 WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
160 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
161 return &sb_port->ing_cms[pg_buff];
163 return &sb_port->eg_cms[pg_buff];
166 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
167 u8 local_port, u16 pool_index)
169 return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
172 static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
173 enum mlxsw_reg_sbpr_mode mode,
174 u32 size, bool infi_size)
176 const struct mlxsw_sp_sb_pool_des *des =
177 &mlxsw_sp->sb_vals->pool_dess[pool_index];
178 char sbpr_pl[MLXSW_REG_SBPR_LEN];
179 struct mlxsw_sp_sb_pr *pr;
182 mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
184 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
189 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
190 pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
/* Configure a per-{port, PG/TC} quota via the SBCM register and mirror it
 * into the CM cache (only for PG/TCs that exist for the direction).
 * NOTE(review): extraction dropped several lines here (error check after
 * the register write, the infi_max handling branch, the trailing return) —
 * restore from upstream before building.
 */
196 static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
197 u8 pg_buff, u32 min_buff, u32 max_buff,
198 bool infi_max, u16 pool_index)
200 const struct mlxsw_sp_sb_pool_des *des =
201 &mlxsw_sp->sb_vals->pool_dess[pool_index];
202 char sbcm_pl[MLXSW_REG_SBCM_LEN];
203 struct mlxsw_sp_sb_cm *cm;
206 mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
207 min_buff, max_buff, infi_max, des->pool);
208 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
/* Only cache the quota when this PG/TC actually exists. */
212 if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
/* When infi_max was requested, cache the full buffer size in cells. */
214 max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
215 mlxsw_sp->sb->sb_size);
217 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
219 cm->min_buff = min_buff;
220 cm->max_buff = max_buff;
221 cm->pool_index = pool_index;
226 static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
227 u16 pool_index, u32 min_buff, u32 max_buff)
229 const struct mlxsw_sp_sb_pool_des *des =
230 &mlxsw_sp->sb_vals->pool_dess[pool_index];
231 char sbpm_pl[MLXSW_REG_SBPM_LEN];
232 struct mlxsw_sp_sb_pm *pm;
235 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
237 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
241 pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
242 pm->min_buff = min_buff;
243 pm->max_buff = max_buff;
/* Queue an SBPM transaction that clears the max-occupancy watermark for
 * {port, pool}. Skipped for CPU-port ingress, where quotas are not
 * supported. NOTE(review): lines dropped by extraction (early return body,
 * pack arguments, query callback arguments) — restore from upstream.
 */
247 static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
248 u16 pool_index, struct list_head *bulk_list)
250 const struct mlxsw_sp_sb_pool_des *des =
251 &mlxsw_sp->sb_vals->pool_dess[pool_index];
252 char sbpm_pl[MLXSW_REG_SBPM_LEN];
254 if (local_port == MLXSW_PORT_CPU_PORT &&
255 des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
258 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
260 return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
264 static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
265 char *sbpm_pl, size_t sbpm_pl_len,
266 unsigned long cb_priv)
268 struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
270 mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
/* Queue an SBPM transaction that reads occupancy for {port, pool} into the
 * PM cache via mlxsw_sp_sb_pm_occ_query_cb. Skipped for CPU-port ingress.
 * NOTE(review): lines dropped by extraction (early return body, pack and
 * query trailing arguments) — restore from upstream.
 */
273 static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
274 u16 pool_index, struct list_head *bulk_list)
276 const struct mlxsw_sp_sb_pool_des *des =
277 &mlxsw_sp->sb_vals->pool_dess[pool_index];
278 char sbpm_pl[MLXSW_REG_SBPM_LEN];
279 struct mlxsw_sp_sb_pm *pm;
281 if (local_port == MLXSW_PORT_CPU_PORT &&
282 des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
285 pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
286 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
288 return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
290 mlxsw_sp_sb_pm_occ_query_cb,
294 /* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
295 #define MLXSW_SP_PB_HEADROOM 25632
296 #define MLXSW_SP_PB_UNUSED 8
/* Initialize the port's priority buffers (PBMC): buffer 0 gets headroom
 * scaled by port width, buffer 9 holds one max-MTU packet, buffer 8 is
 * unused and skipped, the shared-buffer index is zeroed.
 * NOTE(review): extraction dropped lines here (pbs[] declaration line,
 * pack continuation, loop body continuation) — restore from upstream.
 */
298 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
301 [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
302 [9] = MLXSW_PORT_MAX_MTU,
304 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
305 char pbmc_pl[MLXSW_REG_PBMC_LEN];
308 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
310 for (i = 0; i < ARRAY_SIZE(pbs); i++) {
311 u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
313 if (i == MLXSW_SP_PB_UNUSED)
315 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
317 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
318 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
319 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
322 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
324 char pptb_pl[MLXSW_REG_PPTB_LEN];
327 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
328 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
329 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
330 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
/* Initialize port headroom: priority buffers first, then the
 * priority-to-buffer mapping.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;

	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
/* Allocate the per-port PM array for one port. NOTE(review): extraction
 * dropped the allocation-failure check and assignment/return lines.
 */
344 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
345 struct mlxsw_sp_sb_port *sb_port)
347 struct mlxsw_sp_sb_pm *pms;
349 pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
/* Free one port's PM array (body elided by extraction). */
357 static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
/* Allocate the per-port array and the pool registry cache, then init each
 * port; on failure unwind already-initialized ports and free both
 * allocations. NOTE(review): several lines (GFP flags, error labels,
 * return statements) were dropped by extraction — restore from upstream.
 */
362 static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
364 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
365 struct mlxsw_sp_sb_pr *prs;
369 mlxsw_sp->sb->ports = kcalloc(max_ports,
370 sizeof(struct mlxsw_sp_sb_port),
372 if (!mlxsw_sp->sb->ports)
375 prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
381 mlxsw_sp->sb->prs = prs;
383 for (i = 0; i < max_ports; i++) {
384 err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
386 goto err_sb_port_init;
/* Error unwind: tear down ports initialized so far, then free caches. */
392 for (i--; i >= 0; i--)
393 mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
394 kfree(mlxsw_sp->sb->prs);
396 kfree(mlxsw_sp->sb->ports);
400 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
402 int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
405 for (i = max_ports - 1; i >= 0; i--)
406 mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
407 kfree(mlxsw_sp->sb->prs);
408 kfree(mlxsw_sp->sb->ports);
/* Pool registry initializers. The _EXT variant additionally marks whether
 * user space may change the pool's mode and/or size via devlink.
 * NOTE(review): macro bodies are partially elided by extraction.
 */
411 #define MLXSW_SP_SB_PR(_mode, _size) \
417 #define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size) \
421 .freeze_mode = _freeze_mode, \
422 .freeze_size = _freeze_size, \
425 #define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)
427 /* Order according to mlxsw_sp1_sb_pool_dess */
428 static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
429 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
430 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
431 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
432 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
433 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
435 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
436 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
437 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
438 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
440 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
441 MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
442 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
443 MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
446 #define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
448 /* Order according to mlxsw_sp2_sb_pool_dess */
449 static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
450 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
451 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
452 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
453 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
454 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
456 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
457 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
458 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
459 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
461 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
462 MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
463 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
464 MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
/* Program all pools. First pass sums the explicitly-sized pools per
 * direction and subtracts from the total to find what the "REST" pools
 * get; second pass writes every pool, handling INFI/REST specially.
 * NOTE(review): some lines (loop locals, error check, trailing return,
 * trailing pr_write arguments) were dropped by extraction.
 */
467 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
468 const struct mlxsw_sp_sb_pr *prs,
469 const struct mlxsw_sp_sb_pool_des *pool_dess,
472 /* Round down, unlike mlxsw_sp_bytes_cells(). */
473 u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
/* One remaining-cells counter per direction (ingress, egress). */
474 u32 rest_cells[2] = {sb_cells, sb_cells};
478 /* Calculate how much space to give to the "REST" pools in either
481 for (i = 0; i < prs_len; i++) {
482 enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
483 u32 size = prs[i].size;
486 if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
489 size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
490 if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
493 rest_cells[dir] -= size_cells;
496 for (i = 0; i < prs_len; i++) {
497 u32 size = prs[i].size;
500 if (size == MLXSW_SP_SB_INFI) {
501 err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
503 } else if (size == MLXSW_SP_SB_REST) {
504 size_cells = rest_cells[pool_dess[i].dir];
505 err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
508 size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
509 err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
/* CM (per-PG/TC quota) initializers. The _ING/_EGR variants bind to the
 * default ingress/egress pool; _EGR_MC binds to the multicast pool and
 * forbids user-space changes to both the pool binding and the threshold.
 */
518 #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
520 .min_buff = _min_buff, \
521 .max_buff = _max_buff, \
522 .pool_index = _pool, \
525 #define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff) \
527 .min_buff = _min_buff, \
528 .max_buff = _max_buff, \
529 .pool_index = MLXSW_SP_SB_POOL_ING, \
532 #define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff) \
534 .min_buff = _min_buff, \
535 .max_buff = _max_buff, \
536 .pool_index = MLXSW_SP_SB_POOL_EGR, \
539 #define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff) \
541 .min_buff = _min_buff, \
542 .max_buff = _max_buff, \
543 .pool_index = MLXSW_SP_SB_POOL_EGR_MC, \
544 .freeze_pool = true, \
545 .freeze_thresh = true, \
/* Spectrum-1 per-port ingress CM defaults: entry index is the PG number;
 * PG 8 does not exist and gets a dummy; the last entry is the CPU pool
 * quota. Closing braces lost in extraction.
 */
548 static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
549 MLXSW_SP_SB_CM_ING(10000, 8),
550 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
551 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
552 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
553 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
554 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
555 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
556 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
557 MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
558 MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
/* Spectrum-2 per-port ingress CM defaults — same layout as Spectrum-1. */
561 static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
562 MLXSW_SP_SB_CM_ING(0, 7),
563 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
564 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
565 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
566 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
567 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
568 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
569 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
570 MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
571 MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
/* Spectrum-1 per-port egress CM defaults: TCs 0-7 unicast, 8-15 bound to
 * the (infinite) multicast pool, last entry is TC 16.
 */
574 static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
575 MLXSW_SP_SB_CM_EGR(1500, 9),
576 MLXSW_SP_SB_CM_EGR(1500, 9),
577 MLXSW_SP_SB_CM_EGR(1500, 9),
578 MLXSW_SP_SB_CM_EGR(1500, 9),
579 MLXSW_SP_SB_CM_EGR(1500, 9),
580 MLXSW_SP_SB_CM_EGR(1500, 9),
581 MLXSW_SP_SB_CM_EGR(1500, 9),
582 MLXSW_SP_SB_CM_EGR(1500, 9),
583 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
584 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
585 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
586 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
587 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
588 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
589 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
590 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
591 MLXSW_SP_SB_CM_EGR(1, 0xff),
/* Spectrum-2 per-port egress CM defaults — same layout as Spectrum-1. */
594 static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
595 MLXSW_SP_SB_CM_EGR(0, 7),
596 MLXSW_SP_SB_CM_EGR(0, 7),
597 MLXSW_SP_SB_CM_EGR(0, 7),
598 MLXSW_SP_SB_CM_EGR(0, 7),
599 MLXSW_SP_SB_CM_EGR(0, 7),
600 MLXSW_SP_SB_CM_EGR(0, 7),
601 MLXSW_SP_SB_CM_EGR(0, 7),
602 MLXSW_SP_SB_CM_EGR(0, 7),
603 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
604 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
605 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
606 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
607 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
608 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
609 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
610 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
611 MLXSW_SP_SB_CM_EGR(1, 0xff),
/* Default CPU-port egress CM: zero quota in the egress CPU pool. */
614 #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)
/* CPU-port egress CM table; a handful of TCs carrying control traffic get
 * a non-zero quota, the rest stay at zero. Closing brace lost in
 * extraction.
 */
616 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
617 MLXSW_SP_CPU_PORT_SB_CM,
618 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
619 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
620 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
621 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
622 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
623 MLXSW_SP_CPU_PORT_SB_CM,
624 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
625 MLXSW_SP_CPU_PORT_SB_CM,
626 MLXSW_SP_CPU_PORT_SB_CM,
627 MLXSW_SP_CPU_PORT_SB_CM,
628 MLXSW_SP_CPU_PORT_SB_CM,
629 MLXSW_SP_CPU_PORT_SB_CM,
630 MLXSW_SP_CPU_PORT_SB_CM,
631 MLXSW_SP_CPU_PORT_SB_CM,
632 MLXSW_SP_CPU_PORT_SB_CM,
633 MLXSW_SP_CPU_PORT_SB_CM,
634 MLXSW_SP_CPU_PORT_SB_CM,
635 MLXSW_SP_CPU_PORT_SB_CM,
636 MLXSW_SP_CPU_PORT_SB_CM,
637 MLXSW_SP_CPU_PORT_SB_CM,
638 MLXSW_SP_CPU_PORT_SB_CM,
639 MLXSW_SP_CPU_PORT_SB_CM,
640 MLXSW_SP_CPU_PORT_SB_CM,
641 MLXSW_SP_CPU_PORT_SB_CM,
642 MLXSW_SP_CPU_PORT_SB_CM,
643 MLXSW_SP_CPU_PORT_SB_CM,
644 MLXSW_SP_CPU_PORT_SB_CM,
645 MLXSW_SP_CPU_PORT_SB_CM,
646 MLXSW_SP_CPU_PORT_SB_CM,
647 MLXSW_SP_CPU_PORT_SB_CM,
648 MLXSW_SP_CPU_PORT_SB_CM,
652 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
654 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
656 return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
/* Program one direction's CM table for a port: skip the nonexistent
 * ingress PG 8, convert min_buff to cells, write INFI quotas with the
 * infi_max flag, and convert max_buff to cells only for static pools
 * (dynamic pools use alpha values, not cells).
 * NOTE(review): several lines (cm assignment, error check, return) were
 * dropped by extraction — restore from upstream before building.
 */
659 static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
660 enum mlxsw_reg_sbxx_dir dir,
661 const struct mlxsw_sp_sb_cm *cms,
664 const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
668 for (i = 0; i < cms_len; i++) {
669 const struct mlxsw_sp_sb_cm *cm;
673 if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
674 continue; /* PG number 8 does not exist, skip it */
676 if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
679 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
680 max_buff = cm->max_buff;
681 if (max_buff == MLXSW_SP_SB_INFI) {
682 err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
684 true, cm->pool_index);
686 if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
688 max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
690 err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
692 false, cm->pool_index);
700 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
702 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
705 err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
706 mlxsw_sp_port->local_port,
707 MLXSW_REG_SBXX_DIR_INGRESS,
708 mlxsw_sp->sb_vals->cms_ingress,
709 mlxsw_sp->sb_vals->cms_ingress_count);
712 return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
713 mlxsw_sp_port->local_port,
714 MLXSW_REG_SBXX_DIR_EGRESS,
715 mlxsw_sp->sb_vals->cms_egress,
716 mlxsw_sp->sb_vals->cms_egress_count);
719 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
721 return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
722 mlxsw_sp->sb_vals->cms_cpu,
723 mlxsw_sp->sb_vals->cms_cpu_count);
/* PM (per-port per-pool quota) initializer. */
726 #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
728 .min_buff = _min_buff, \
729 .max_buff = _max_buff, \
/* Per-port pool quotas, indexed like the matching pool_dess array.
 * Closing braces lost in extraction.
 */
732 /* Order according to mlxsw_sp1_sb_pool_dess */
733 static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
734 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
735 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
736 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
737 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
738 MLXSW_SP_SB_PM(0, 7),
739 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
740 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
741 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
742 MLXSW_SP_SB_PM(10000, 90000),
743 MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
744 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
747 /* Order according to mlxsw_sp2_sb_pool_dess */
748 static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
749 MLXSW_SP_SB_PM(0, 7),
750 MLXSW_SP_SB_PM(0, 0),
751 MLXSW_SP_SB_PM(0, 0),
752 MLXSW_SP_SB_PM(0, 0),
753 MLXSW_SP_SB_PM(0, 7),
754 MLXSW_SP_SB_PM(0, 0),
755 MLXSW_SP_SB_PM(0, 0),
756 MLXSW_SP_SB_PM(0, 0),
757 MLXSW_SP_SB_PM(10000, 90000),
758 MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
759 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
/* CPU-port pool quotas — shared by both ASIC generations. */
762 /* Order according to mlxsw_sp*_sb_pool_dess */
763 static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
764 MLXSW_SP_SB_PM(0, 0),
765 MLXSW_SP_SB_PM(0, 0),
766 MLXSW_SP_SB_PM(0, 0),
767 MLXSW_SP_SB_PM(0, 0),
768 MLXSW_SP_SB_PM(0, 0),
769 MLXSW_SP_SB_PM(0, 0),
770 MLXSW_SP_SB_PM(0, 0),
771 MLXSW_SP_SB_PM(0, 0),
772 MLXSW_SP_SB_PM(0, 90000),
773 MLXSW_SP_SB_PM(0, 0),
774 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
/* Program all per-pool quotas for one port. Ingress pools can be skipped
 * (used for the CPU port, which does not support ingress quotas); static
 * pools need their max_buff converted to cells, dynamic pools keep it as
 * an alpha value. NOTE(review): extraction dropped lines here (locals,
 * continue statement, error check, return) — restore from upstream.
 */
777 static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
778 const struct mlxsw_sp_sb_pm *pms,
783 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
784 const struct mlxsw_sp_sb_pm *pm = &pms[i];
785 const struct mlxsw_sp_sb_pool_des *des;
789 des = &mlxsw_sp->sb_vals->pool_dess[i];
790 if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
793 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
794 max_buff = pm->max_buff;
795 if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
796 max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
797 err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
805 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
807 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
809 return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
810 mlxsw_sp->sb_vals->pms, false);
813 static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
815 return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
/* MM (multicast quota) initializer, bound to the default egress pool. */
819 #define MLXSW_SP_SB_MM(_min_buff, _max_buff) \
821 .min_buff = _min_buff, \
822 .max_buff = _max_buff, \
823 .pool_index = MLXSW_SP_SB_POOL_EGR, \
/* One entry per switch priority; closing brace lost in extraction. */
826 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
827 MLXSW_SP_SB_MM(0, 6),
828 MLXSW_SP_SB_MM(0, 6),
829 MLXSW_SP_SB_MM(0, 6),
830 MLXSW_SP_SB_MM(0, 6),
831 MLXSW_SP_SB_MM(0, 6),
832 MLXSW_SP_SB_MM(0, 6),
833 MLXSW_SP_SB_MM(0, 6),
834 MLXSW_SP_SB_MM(0, 6),
835 MLXSW_SP_SB_MM(0, 6),
836 MLXSW_SP_SB_MM(0, 6),
837 MLXSW_SP_SB_MM(0, 6),
838 MLXSW_SP_SB_MM(0, 6),
839 MLXSW_SP_SB_MM(0, 6),
840 MLXSW_SP_SB_MM(0, 6),
841 MLXSW_SP_SB_MM(0, 6),
/* Program all multicast quotas via SBMM. NOTE(review): extraction dropped
 * lines (locals, pack continuation, error check, return).
 */
844 static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
846 char sbmm_pl[MLXSW_REG_SBMM_LEN];
850 for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
851 const struct mlxsw_sp_sb_pool_des *des;
852 const struct mlxsw_sp_sb_mm *mc;
855 mc = &mlxsw_sp->sb_vals->mms[i];
856 des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
857 /* All pools used by sb_mm's are initialized using dynamic
858 * thresholds, therefore 'max_buff' isn't specified in cells.
860 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
861 mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
863 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
/* Count ingress vs. egress pools for devlink_sb_register(). NOTE(review):
 * extraction dropped the counter increments and the ingress WARN —
 * restore from upstream.
 */
870 static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
871 u16 *p_ingress_len, u16 *p_egress_len)
875 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
876 if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
877 MLXSW_REG_SBXX_DIR_INGRESS)
883 WARN(*p_egress_len == 0, "No egress pools\n");
/* Spectrum-1 shared-buffer defaults bundle, referenced from the core
 * driver at probe time. Closing brace lost in extraction.
 */
886 const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
887 .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
888 .pool_dess = mlxsw_sp1_sb_pool_dess,
889 .pms = mlxsw_sp1_sb_pms,
890 .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
891 .prs = mlxsw_sp1_sb_prs,
892 .mms = mlxsw_sp_sb_mms,
893 .cms_ingress = mlxsw_sp1_sb_cms_ingress,
894 .cms_egress = mlxsw_sp1_sb_cms_egress,
895 .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
896 .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
897 .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
898 .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
899 .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
/* Spectrum-2 counterpart — same shape, generation-specific tables. */
902 const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
903 .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
904 .pool_dess = mlxsw_sp2_sb_pool_dess,
905 .pms = mlxsw_sp2_sb_pms,
906 .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
907 .prs = mlxsw_sp2_sb_prs,
908 .mms = mlxsw_sp_sb_mms,
909 .cms_ingress = mlxsw_sp2_sb_cms_ingress,
910 .cms_egress = mlxsw_sp2_sb_cms_egress,
911 .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
912 .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
913 .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
914 .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
915 .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
/* Top-level shared-buffer init: validate and cache device resources
 * (cell size, buffer size, headroom limit), allocate state, program
 * pools/quotas, and register the buffer with devlink. Unwinds via goto
 * labels on failure. NOTE(review): extraction dropped lines throughout
 * (resource-invalid returns, allocation failure check, error checks,
 * devlink argument lines, kfree in the error path) — restore from
 * upstream before building.
 */
918 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
920 u32 max_headroom_size;
921 u16 ing_pool_count = 0;
922 u16 eg_pool_count = 0;
925 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
928 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
931 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
934 mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
937 mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
938 mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
939 GUARANTEED_SHARED_BUFFER);
940 max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
942 /* Round down, because this limit must not be overstepped. */
943 mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
944 mlxsw_sp->sb->cell_size;
946 err = mlxsw_sp_sb_ports_init(mlxsw_sp);
948 goto err_sb_ports_init;
949 err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
950 mlxsw_sp->sb_vals->pool_dess,
951 mlxsw_sp->sb_vals->pool_count);
953 goto err_sb_prs_init;
954 err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
956 goto err_sb_cpu_port_sb_cms_init;
957 err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
959 goto err_sb_cpu_port_pms_init;
960 err = mlxsw_sp_sb_mms_init(mlxsw_sp);
962 goto err_sb_mms_init;
963 mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
964 err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
965 mlxsw_sp->sb->sb_size,
968 MLXSW_SP_SB_ING_TC_COUNT,
969 MLXSW_SP_SB_EG_TC_COUNT);
971 goto err_devlink_sb_register;
975 err_devlink_sb_register:
977 err_sb_cpu_port_pms_init:
978 err_sb_cpu_port_sb_cms_init:
980 mlxsw_sp_sb_ports_fini(mlxsw_sp);
986 void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
988 devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
989 mlxsw_sp_sb_ports_fini(mlxsw_sp);
/* Per-port shared-buffer init: headroom, then CM quotas, then PM quotas. */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);

	return err;
}
/* devlink op: report one pool's type, size (converted to bytes),
 * threshold type, and cell size from the cached PR entry.
 * NOTE(review): the trailing return was dropped by extraction.
 */
1008 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
1009 unsigned int sb_index, u16 pool_index,
1010 struct devlink_sb_pool_info *pool_info)
1012 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1013 enum mlxsw_reg_sbxx_dir dir;
1014 struct mlxsw_sp_sb_pr *pr;
1016 dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
1017 pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1018 pool_info->pool_type = (enum devlink_sb_pool_type) dir;
1019 pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
1020 pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
1021 pool_info->cell_size = mlxsw_sp->sb->cell_size;
/* devlink op: reconfigure a pool, rejecting oversize requests and changes
 * to frozen mode/size. NOTE(review): the error returns inside the checks
 * and the trailing pr_write arguments were dropped by extraction.
 */
1025 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
1026 unsigned int sb_index, u16 pool_index, u32 size,
1027 enum devlink_sb_threshold_type threshold_type,
1028 struct netlink_ext_ack *extack)
1030 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1031 u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
1032 const struct mlxsw_sp_sb_pr *pr;
1033 enum mlxsw_reg_sbpr_mode mode;
1035 mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
1036 pr = &mlxsw_sp->sb_vals->prs[pool_index];
1038 if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
1039 GUARANTEED_SHARED_BUFFER)) {
1040 NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
1044 if (pr->freeze_mode && pr->mode != mode) {
1045 NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
1049 if (pr->freeze_size && pr->size != size) {
1050 NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
1054 return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
1058 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
1060 static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
1063 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1065 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
1066 return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
1067 return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
/* Translate a devlink threshold into register form: for dynamic pools
 * apply the alpha offset and range-check it, for static pools convert
 * bytes to cells. NOTE(review): the assignment/returns after validation
 * were dropped by extraction — restore from upstream.
 */
1070 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
1071 u32 threshold, u32 *p_max_buff,
1072 struct netlink_ext_ack *extack)
1074 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1076 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
1079 val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
1080 if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
1081 val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
1082 NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
1087 *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
/* devlink op: report a port/pool threshold from the PM cache.
 * NOTE(review): the pm_get trailing argument and return were dropped by
 * extraction.
 */
1092 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1093 unsigned int sb_index, u16 pool_index,
1096 struct mlxsw_sp_port *mlxsw_sp_port =
1097 mlxsw_core_port_driver_priv(mlxsw_core_port);
1098 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1099 u8 local_port = mlxsw_sp_port->local_port;
1100 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1103 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
/* devlink op: set a port/pool threshold; forbidden for the CPU port.
 * NOTE(review): error returns and the pm_write trailing arguments were
 * dropped by extraction.
 */
1108 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
1109 unsigned int sb_index, u16 pool_index,
1110 u32 threshold, struct netlink_ext_ack *extack)
1112 struct mlxsw_sp_port *mlxsw_sp_port =
1113 mlxsw_core_port_driver_priv(mlxsw_core_port);
1114 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1115 u8 local_port = mlxsw_sp_port->local_port;
1119 if (local_port == MLXSW_PORT_CPU_PORT) {
1120 NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
1124 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1125 threshold, &max_buff, extack);
1129 return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
/* devlink op: report a TC's pool binding and threshold from the CM cache.
 * NOTE(review): cm_get trailing argument and return were dropped by
 * extraction.
 */
1133 int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1134 unsigned int sb_index, u16 tc_index,
1135 enum devlink_sb_pool_type pool_type,
1136 u16 *p_pool_index, u32 *p_threshold)
1138 struct mlxsw_sp_port *mlxsw_sp_port =
1139 mlxsw_core_port_driver_priv(mlxsw_core_port);
1140 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1141 u8 local_port = mlxsw_sp_port->local_port;
1142 u8 pg_buff = tc_index;
1143 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1144 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1147 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
1149 *p_pool_index = cm->pool_index;
/* devlink op: rebind a TC to a pool / change its threshold, rejecting:
 * CPU port, cross-direction bindings, and frozen pool/threshold entries.
 * NOTE(review): error returns inside the checks were dropped by
 * extraction.
 */
1153 int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
1154 unsigned int sb_index, u16 tc_index,
1155 enum devlink_sb_pool_type pool_type,
1156 u16 pool_index, u32 threshold,
1157 struct netlink_ext_ack *extack)
1159 struct mlxsw_sp_port *mlxsw_sp_port =
1160 mlxsw_core_port_driver_priv(mlxsw_core_port);
1161 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1162 u8 local_port = mlxsw_sp_port->local_port;
1163 const struct mlxsw_sp_sb_cm *cm;
1164 u8 pg_buff = tc_index;
1165 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1169 if (local_port == MLXSW_PORT_CPU_PORT) {
1170 NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
1174 if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
1175 NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
1179 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
1180 cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
1182 cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];
1184 if (cm->freeze_pool && cm->pool_index != pool_index) {
1185 NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
1189 if (cm->freeze_thresh && cm->max_buff != threshold) {
1190 NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
1194 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1195 threshold, &max_buff, extack);
1199 return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
1200 0, max_buff, false, pool_index);
/* Maximum number of ports that can be masked into a single SBSR query:
 * each masked port yields one occupancy record per ingress TC and one
 * per egress TC, and the register holds at most
 * MLXSW_REG_SBSR_REC_MAX_COUNT records.
 */
1203 #define MASKED_COUNT_MAX \
1204 (MLXSW_REG_SBSR_REC_MAX_COUNT / \
1205 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
1207 struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
/* Completion callback for the SBSR occupancy query issued by
 * mlxsw_sp_sb_occ_snapshot().  Decodes the query context that was
 * smuggled through the unsigned long cb_priv, then unpacks the
 * returned occupancy records into the per-port CM cache.  Record
 * layout in the payload: all ingress-TC records for the batch of
 * masked ports first, then all egress-TC records, in the same port
 * order the query used — so both loops below walk ports starting at
 * cb_ctx.local_port_1 and stop after cb_ctx.masked_count ports.
 */
1212 static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
1213 char *sbsr_pl, size_t sbsr_pl_len,
1214 unsigned long cb_priv)
1216 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1217 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
1221 struct mlxsw_sp_sb_cm *cm;
/* cb_priv is the cb_ctx struct packed into an unsigned long by the
 * issuer; recover it byte-for-byte.
 */
1224 memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
/* Pass 1: ingress records. */
1227 for (local_port = cb_ctx.local_port_1;
1228 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1229 if (!mlxsw_sp->ports[local_port])
1231 if (local_port == MLXSW_PORT_CPU_PORT) {
1232 /* Ingress quotas are not supported for the CPU port */
1236 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
1237 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
1238 MLXSW_REG_SBXX_DIR_INGRESS);
1239 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
1240 &cm->occ.cur, &cm->occ.max);
1242 if (++masked_count == cb_ctx.masked_count)
/* Pass 2: egress records, same port walk (CPU port included here). */
1246 for (local_port = cb_ctx.local_port_1;
1247 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1248 if (!mlxsw_sp->ports[local_port])
1250 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
1251 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
1252 MLXSW_REG_SBXX_DIR_EGRESS);
1253 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
1254 &cm->occ.cur, &cm->occ.max);
1256 if (++masked_count == cb_ctx.masked_count)
/* devlink .sb_occ_snapshot callback: capture a point-in-time occupancy
 * snapshot for every present port.  Ports are processed in batches of
 * at most MASKED_COUNT_MAX (one SBSR register query per batch, since
 * the register's record area is bounded); per-pool PM occupancy is
 * queried separately for each port.  Results are collected
 * asynchronously: each SBSR query carries
 * mlxsw_sp_sb_sr_occ_query_cb() plus a packed context, and the
 * function waits on the transaction bulk list before returning.
 */
1261 int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
1262 unsigned int sb_index)
1264 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1265 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
1266 unsigned long cb_priv;
1267 LIST_HEAD(bulk_list);
1276 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
/* Start the walk at the CPU port (lowest local port number). */
1280 local_port = MLXSW_PORT_CPU_PORT;
/* Remember the first port of this batch for the callback's context. */
1282 local_port_1 = local_port;
/* Fresh SBSR payload per batch; false = read without clearing maxima. */
1284 mlxsw_reg_sbsr_pack(sbsr_pl, false);
/* Request records for every ingress PG buffer and egress TC. */
1285 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
1286 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
1287 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
1288 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
1289 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1290 if (!mlxsw_sp->ports[local_port])
1292 if (local_port != MLXSW_PORT_CPU_PORT) {
1293 /* Ingress quotas are not supported for the CPU port */
1294 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
1297 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
/* Per-pool (PM) occupancy is fetched with dedicated queries. */
1298 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
1299 err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
/* Batch full: fire the SBSR query below and start a new batch. */
1304 if (++masked_count == MASKED_COUNT_MAX)
/* Pack the batch context into an unsigned long for the callback. */
1309 cb_ctx.masked_count = masked_count;
1310 cb_ctx.local_port_1 = local_port_1;
1311 memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
1312 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
1313 &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
/* More ports remain: loop back for the next batch. */
1317 if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
/* Wait for all queued transactions even on error (err2 must not
 * clobber an earlier failure).
 */
1323 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* devlink .sb_occ_max_clear callback: reset the watermark (max
 * occupancy) tracking for every present port.  Mirrors the batching
 * scheme of mlxsw_sp_sb_occ_snapshot(), but packs SBSR with the clear
 * flag set (true), clears per-pool PM maxima instead of querying them,
 * and needs no completion callback since no records are consumed.
 */
1330 int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
1331 unsigned int sb_index)
1333 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1334 LIST_HEAD(bulk_list);
1336 unsigned int masked_count;
1342 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
/* Start the walk at the CPU port (lowest local port number). */
1346 local_port = MLXSW_PORT_CPU_PORT;
/* true = clear max-occupancy values as part of the read. */
1349 mlxsw_reg_sbsr_pack(sbsr_pl, true);
/* Clear every ingress PG buffer and egress TC watermark. */
1350 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
1351 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
1352 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
1353 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
1354 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1355 if (!mlxsw_sp->ports[local_port])
1357 if (local_port != MLXSW_PORT_CPU_PORT) {
1358 /* Ingress quotas are not supported for the CPU port */
1359 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
1362 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
/* Per-pool (PM) watermarks are cleared with dedicated requests. */
1363 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
1364 err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
/* Batch full: issue the SBSR below and start a new batch. */
1369 if (++masked_count == MASKED_COUNT_MAX)
/* No callback needed — the clear has no records to consume. */
1374 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
1375 &bulk_list, NULL, 0);
/* More ports remain: loop back for the next batch. */
1378 if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
/* Wait for all queued transactions even on error (err2 must not
 * clobber an earlier failure).
 */
1384 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* devlink .sb_occ_port_pool_get callback: report current and maximum
 * occupancy of one pool on one port, converted from device cells to
 * bytes.  Values come from the cached PM state populated by the last
 * mlxsw_sp_sb_occ_snapshot(); no device access is performed here.
 */
1391 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1392 unsigned int sb_index, u16 pool_index,
1393 u32 *p_cur, u32 *p_max)
1395 struct mlxsw_sp_port *mlxsw_sp_port =
1396 mlxsw_core_port_driver_priv(mlxsw_core_port);
1397 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1398 u8 local_port = mlxsw_sp_port->local_port;
1399 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
/* Cached occupancy is held in cells; devlink expects bytes. */
1402 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1403 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1407 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1408 unsigned int sb_index, u16 tc_index,
1409 enum devlink_sb_pool_type pool_type,
1410 u32 *p_cur, u32 *p_max)
1412 struct mlxsw_sp_port *mlxsw_sp_port =
1413 mlxsw_core_port_driver_priv(mlxsw_core_port);
1414 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1415 u8 local_port = mlxsw_sp_port->local_port;
1416 u8 pg_buff = tc_index;
1417 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1418 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1421 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1422 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);