1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 #include <linux/netlink.h>
/* Software shadow state for the device's shared buffer (SB).
 * NOTE(review): this extracted listing has gaps -- several struct members
 * and closing braces are missing between the visible lines.
 */

/* Per-pool settings; 'mode' mirrors the SBPR register's threshold mode. */
16 struct mlxsw_sp_sb_pr {
17 enum mlxsw_reg_sbpr_mode mode;
/* Occupancy sample read back from hardware: current value and watermark. */
23 struct mlxsw_cp_sb_occ {
/* Per-{port, PG/TC} quota; written to HW via the SBCM register below. */
28 struct mlxsw_sp_sb_cm {
32 struct mlxsw_cp_sb_occ occ;
/* Sentinel meaning "infinite" size/quota (all-ones as u32). */
37 #define MLXSW_SP_SB_INFI -1U
/* Per-{port, pool} quota; written to HW via the SBPM register below. */
39 struct mlxsw_sp_sb_pm {
42 struct mlxsw_cp_sb_occ occ;
/* Per-entry multicast quota; written to HW via the SBMM register below. */
45 struct mlxsw_sp_sb_mm {
/* Maps a driver-level pool index to a HW direction + FW pool number. */
51 struct mlxsw_sp_sb_pool_des {
52 enum mlxsw_reg_sbxx_dir dir;
/* Well-known driver pool indices into the descriptor tables below. */
56 #define MLXSW_SP_SB_POOL_ING 0
57 #define MLXSW_SP_SB_POOL_ING_MNG 3
58 #define MLXSW_SP_SB_POOL_EGR 4
59 #define MLXSW_SP_SB_POOL_EGR_MC 8
61 /* Order ingress pools before egress pools. */
/* Spectrum-1: driver pool index -> {direction, FW pool number}.
 * Indices 0-3 are ingress, 4-8 egress, matching MLXSW_SP_SB_POOL_*.
 * FW pool 15 on egress backs the multicast pool (MLXSW_SP_SB_POOL_EGR_MC).
 */
62 static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
63 {MLXSW_REG_SBXX_DIR_INGRESS, 0},
64 {MLXSW_REG_SBXX_DIR_INGRESS, 1},
65 {MLXSW_REG_SBXX_DIR_INGRESS, 2},
66 {MLXSW_REG_SBXX_DIR_INGRESS, 3},
67 {MLXSW_REG_SBXX_DIR_EGRESS, 0},
68 {MLXSW_REG_SBXX_DIR_EGRESS, 1},
69 {MLXSW_REG_SBXX_DIR_EGRESS, 2},
70 {MLXSW_REG_SBXX_DIR_EGRESS, 3},
71 {MLXSW_REG_SBXX_DIR_EGRESS, 15},
/* Spectrum-2: identical layout to the Spectrum-1 table above. */
74 static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
75 {MLXSW_REG_SBXX_DIR_INGRESS, 0},
76 {MLXSW_REG_SBXX_DIR_INGRESS, 1},
77 {MLXSW_REG_SBXX_DIR_INGRESS, 2},
78 {MLXSW_REG_SBXX_DIR_INGRESS, 3},
79 {MLXSW_REG_SBXX_DIR_EGRESS, 0},
80 {MLXSW_REG_SBXX_DIR_EGRESS, 1},
81 {MLXSW_REG_SBXX_DIR_EGRESS, 2},
82 {MLXSW_REG_SBXX_DIR_EGRESS, 3},
83 {MLXSW_REG_SBXX_DIR_EGRESS, 15},
/* Number of ingress priority groups and egress traffic classes tracked. */
86 #define MLXSW_SP_SB_ING_TC_COUNT 8
87 #define MLXSW_SP_SB_EG_TC_COUNT 16
/* Per-port shadow: fixed cm arrays per direction, pms allocated per pool. */
89 struct mlxsw_sp_sb_port {
90 struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
91 struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
92 struct mlxsw_sp_sb_pm *pms;
/* (fragment of struct mlxsw_sp_sb) global pool state + per-port array. */
96 struct mlxsw_sp_sb_pr *prs;
97 struct mlxsw_sp_sb_port *ports;
99 u32 max_headroom_cells;
/* Per-ASIC-generation configuration: default tables and their lengths.
 * Instantiated below as mlxsw_sp1_sb_vals / mlxsw_sp2_sb_vals.
 */
103 struct mlxsw_sp_sb_vals {
104 unsigned int pool_count;
105 const struct mlxsw_sp_sb_pool_des *pool_dess;
106 const struct mlxsw_sp_sb_pm *pms;
107 const struct mlxsw_sp_sb_pr *prs;
108 const struct mlxsw_sp_sb_mm *mms;
109 const struct mlxsw_sp_sb_cm *cms_ingress;
110 const struct mlxsw_sp_sb_cm *cms_egress;
111 const struct mlxsw_sp_sb_cm *cms_cpu;
112 unsigned int mms_count;
113 unsigned int cms_ingress_count;
114 unsigned int cms_egress_count;
115 unsigned int cms_cpu_count;
/* Convert a cell count to bytes using the device's probed cell size. */
118 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
120 return mlxsw_sp->sb->cell_size * cells;
/* Convert bytes to cells, rounding up so the requested size always fits. */
123 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
125 return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
/* Headroom budget in cells, computed once at init from MAX_HEADROOM_SIZE. */
128 u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
130 return mlxsw_sp->sb->max_headroom_cells;
/* Look up the shadow pool-record for a driver pool index. */
133 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
136 return &mlxsw_sp->sb->prs[pool_index];
/* True iff 'pg_buff' is a valid PG/TC for the given direction; ingress
 * has 8 PGs, egress has 16 TCs.
 */
139 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
141 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
142 return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
144 return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
/* Look up the shadow cm-record for {port, PG/TC, direction}; warns (but
 * still indexes) when the PG/TC is out of range for the direction.
 */
147 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
148 u8 local_port, u8 pg_buff,
149 enum mlxsw_reg_sbxx_dir dir)
151 struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
153 WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
154 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
155 return &sb_port->ing_cms[pg_buff];
157 return &sb_port->eg_cms[pg_buff];
/* Look up the shadow pm-record for {port, pool}. */
160 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
161 u8 local_port, u16 pool_index)
163 return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
/* Program a pool's mode/size via SBPR and, on success, cache the result
 * in the shadow record. With infi_size, the cached size is the whole SB
 * converted to cells (see line 183).
 */
166 static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
167 enum mlxsw_reg_sbpr_mode mode,
168 u32 size, bool infi_size)
170 const struct mlxsw_sp_sb_pool_des *des =
171 &mlxsw_sp->sb_vals->pool_dess[pool_index];
172 char sbpr_pl[MLXSW_REG_SBPR_LEN];
173 struct mlxsw_sp_sb_pr *pr;
176 mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
178 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
183 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
184 pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
/* Program a per-{port, PG/TC} quota via SBCM; cache it in the shadow cm
 * when the PG/TC exists for the pool's direction (ingress PG 8 does not).
 */
190 static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
191 u8 pg_buff, u32 min_buff, u32 max_buff,
192 bool infi_max, u16 pool_index)
194 const struct mlxsw_sp_sb_pool_des *des =
195 &mlxsw_sp->sb_vals->pool_dess[pool_index];
196 char sbcm_pl[MLXSW_REG_SBCM_LEN];
197 struct mlxsw_sp_sb_cm *cm;
200 mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
201 min_buff, max_buff, infi_max, des->pool);
202 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
206 if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
/* infi_max case: cache the full SB size in cells instead. */
208 max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
209 mlxsw_sp->sb->sb_size);
211 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
213 cm->min_buff = min_buff;
214 cm->max_buff = max_buff;
215 cm->pool_index = pool_index;
/* Program a per-{port, pool} quota via SBPM and cache it on success. */
220 static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
221 u16 pool_index, u32 min_buff, u32 max_buff)
223 const struct mlxsw_sp_sb_pool_des *des =
224 &mlxsw_sp->sb_vals->pool_dess[pool_index];
225 char sbpm_pl[MLXSW_REG_SBPM_LEN];
226 struct mlxsw_sp_sb_pm *pm;
229 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
231 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
235 pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
236 pm->min_buff = min_buff;
237 pm->max_buff = max_buff;
/* Queue an SBPM query that clears the max-occupancy watermark; results
 * are collected later via the bulk_list transaction mechanism.
 */
241 static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
242 u16 pool_index, struct list_head *bulk_list)
244 const struct mlxsw_sp_sb_pool_des *des =
245 &mlxsw_sp->sb_vals->pool_dess[pool_index];
246 char sbpm_pl[MLXSW_REG_SBPM_LEN];
248 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
250 return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
/* Completion callback: unpack current/max occupancy into the shadow pm
 * record passed via cb_priv.
 */
254 static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
255 char *sbpm_pl, size_t sbpm_pl_len,
256 unsigned long cb_priv)
258 struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
260 mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
/* Queue an SBPM occupancy query whose callback fills the shadow record. */
263 static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
264 u16 pool_index, struct list_head *bulk_list)
266 const struct mlxsw_sp_sb_pool_des *des =
267 &mlxsw_sp->sb_vals->pool_dess[pool_index];
268 char sbpm_pl[MLXSW_REG_SBPM_LEN];
269 struct mlxsw_sp_sb_pm *pm;
271 pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
272 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
274 return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
276 mlxsw_sp_sb_pm_occ_query_cb,
280 /* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
281 #define MLXSW_SP_PB_HEADROOM 25632
282 #define MLXSW_SP_PB_UNUSED 8
/* Program the port's priority buffers (PBMC): buffer 0 is headroom scaled
 * by port width, buffer 9 holds two max-MTU frames, buffer 8 is skipped
 * as unused, and the port shared buffer is forced to 0.
 */
284 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
287 [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
288 [9] = 2 * MLXSW_PORT_MAX_MTU,
290 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
291 char pbmc_pl[MLXSW_REG_PBMC_LEN];
294 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
296 for (i = 0; i < ARRAY_SIZE(pbs); i++) {
297 u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
299 if (i == MLXSW_SP_PB_UNUSED)
301 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
303 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
304 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
305 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
/* Map all IEEE priorities to buffer 0 via PPTB. */
308 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
310 char pptb_pl[MLXSW_REG_PPTB_LEN];
313 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
314 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
315 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
316 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
/* Headroom init = priority buffers + priority-to-buffer mapping. */
320 static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
324 err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
327 return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
/* Allocate the per-port pm array, one entry per configured pool. */
330 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
331 struct mlxsw_sp_sb_port *sb_port)
333 struct mlxsw_sp_sb_pm *pms;
335 pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
/* Free what mlxsw_sp_sb_port_init() allocated. */
343 static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
/* Allocate the global prs array and all per-port shadow state; on partial
 * failure, unwinds the ports initialized so far (lines 378-382).
 */
348 static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
350 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
351 struct mlxsw_sp_sb_pr *prs;
355 mlxsw_sp->sb->ports = kcalloc(max_ports,
356 sizeof(struct mlxsw_sp_sb_port),
358 if (!mlxsw_sp->sb->ports)
361 prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
367 mlxsw_sp->sb->prs = prs;
369 for (i = 0; i < max_ports; i++) {
370 err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
372 goto err_sb_port_init;
/* Error unwind: free ports initialized before the failure, then arrays. */
378 for (i--; i >= 0; i--)
379 mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
380 kfree(mlxsw_sp->sb->prs);
382 kfree(mlxsw_sp->sb->ports);
/* Tear down all per-port state and the global arrays, in reverse order. */
386 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
388 int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
391 for (i = max_ports - 1; i >= 0; i--)
392 mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
393 kfree(mlxsw_sp->sb->prs);
394 kfree(mlxsw_sp->sb->ports);
/* Initializer for a default pool record (mode + size). */
397 #define MLXSW_SP_SB_PR(_mode, _size) \
/* Like MLXSW_SP_SB_PR but also marks whether mode/size may be changed
 * later through devlink (see the freeze checks in mlxsw_sp_sb_pool_set).
 */
403 #define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size) \
407 .freeze_mode = _freeze_mode, \
408 .freeze_size = _freeze_size, \
/* Spectrum-1 default pool sizes, in bytes. */
411 #define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
412 #define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
413 #define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
/* Spectrum-1 default pool records; order matches mlxsw_sp1_sb_pool_dess
 * (4 ingress then 5 egress, last one the infinite multicast pool).
 */
415 static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
417 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
418 MLXSW_SP1_SB_PR_INGRESS_SIZE),
419 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
420 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
421 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
422 MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE),
424 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
425 MLXSW_SP1_SB_PR_EGRESS_SIZE),
426 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
427 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
428 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
429 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
/* Spectrum-2 default pool sizes, in bytes. */
433 #define MLXSW_SP2_SB_PR_INGRESS_SIZE 40960000
434 #define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
435 #define MLXSW_SP2_SB_PR_EGRESS_SIZE 40960000
/* Spectrum-2 default pool records; same layout as the SP1 table, but the
 * unused pools are STATIC here rather than DYNAMIC.
 */
437 static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
439 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
440 MLXSW_SP2_SB_PR_INGRESS_SIZE),
441 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
442 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
443 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
444 MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE),
446 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
447 MLXSW_SP2_SB_PR_EGRESS_SIZE),
448 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
449 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
450 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
451 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
/* Program every default pool record to HW; MLXSW_SP_SB_INFI sizes are
 * written with the infi flag instead of a cell count.
 */
455 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
456 const struct mlxsw_sp_sb_pr *prs,
462 for (i = 0; i < prs_len; i++) {
463 u32 size = prs[i].size;
466 if (size == MLXSW_SP_SB_INFI) {
467 err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
470 size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
471 err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
/* Initializer for a cm record bound to an explicit pool index. */
480 #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
482 .min_buff = _min_buff, \
483 .max_buff = _max_buff, \
484 .pool_index = _pool, \
/* Shorthands binding a cm to the main ingress / egress / egress-mc pool. */
487 #define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff) \
489 .min_buff = _min_buff, \
490 .max_buff = _max_buff, \
491 .pool_index = MLXSW_SP_SB_POOL_ING, \
494 #define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff) \
496 .min_buff = _min_buff, \
497 .max_buff = _max_buff, \
498 .pool_index = MLXSW_SP_SB_POOL_EGR, \
501 #define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff) \
503 .min_buff = _min_buff, \
504 .max_buff = _max_buff, \
505 .pool_index = MLXSW_SP_SB_POOL_EGR_MC, \
/* Spectrum-1 ingress defaults, indexed by PG; entry 8 is a placeholder
 * (that PG does not exist) and entry 9 is the management-pool quota.
 */
508 static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
509 MLXSW_SP_SB_CM_ING(10000, 8),
510 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
511 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
512 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
513 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
514 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
515 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
516 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
517 MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
518 MLXSW_SP_SB_CM(20000, 1, MLXSW_SP_SB_POOL_ING_MNG),
/* Spectrum-2 ingress defaults; same layout, different PG0 threshold. */
521 static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
522 MLXSW_SP_SB_CM_ING(0, 7),
523 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
524 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
525 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
526 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
527 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
528 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
529 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
530 MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
531 MLXSW_SP_SB_CM(20000, 1, MLXSW_SP_SB_POOL_ING_MNG),
/* Spectrum-1 egress defaults: 8 unicast TCs, 8 infinite multicast TCs,
 * and a final control entry.
 */
534 static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
535 MLXSW_SP_SB_CM_EGR(1500, 9),
536 MLXSW_SP_SB_CM_EGR(1500, 9),
537 MLXSW_SP_SB_CM_EGR(1500, 9),
538 MLXSW_SP_SB_CM_EGR(1500, 9),
539 MLXSW_SP_SB_CM_EGR(1500, 9),
540 MLXSW_SP_SB_CM_EGR(1500, 9),
541 MLXSW_SP_SB_CM_EGR(1500, 9),
542 MLXSW_SP_SB_CM_EGR(1500, 9),
543 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
544 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
545 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
546 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
547 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
548 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
549 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
550 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
551 MLXSW_SP_SB_CM_EGR(1, 0xff),
/* Spectrum-2 egress defaults; same shape with smaller unicast quotas. */
554 static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
555 MLXSW_SP_SB_CM_EGR(0, 7),
556 MLXSW_SP_SB_CM_EGR(0, 7),
557 MLXSW_SP_SB_CM_EGR(0, 7),
558 MLXSW_SP_SB_CM_EGR(0, 7),
559 MLXSW_SP_SB_CM_EGR(0, 7),
560 MLXSW_SP_SB_CM_EGR(0, 7),
561 MLXSW_SP_SB_CM_EGR(0, 7),
562 MLXSW_SP_SB_CM_EGR(0, 7),
563 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
564 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
565 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
566 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
567 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
568 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
569 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
570 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
571 MLXSW_SP_SB_CM_EGR(1, 0xff),
/* Default CPU-port cm: zero quota from the main egress pool. */
574 #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR)
/* CPU-port egress defaults: selected TCs get one max-MTU frame of
 * guaranteed space; all others use the zero-quota default above.
 */
576 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
577 MLXSW_SP_CPU_PORT_SB_CM,
578 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
579 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
580 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
581 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
582 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
583 MLXSW_SP_CPU_PORT_SB_CM,
584 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
585 MLXSW_SP_CPU_PORT_SB_CM,
586 MLXSW_SP_CPU_PORT_SB_CM,
587 MLXSW_SP_CPU_PORT_SB_CM,
588 MLXSW_SP_CPU_PORT_SB_CM,
589 MLXSW_SP_CPU_PORT_SB_CM,
590 MLXSW_SP_CPU_PORT_SB_CM,
591 MLXSW_SP_CPU_PORT_SB_CM,
592 MLXSW_SP_CPU_PORT_SB_CM,
593 MLXSW_SP_CPU_PORT_SB_CM,
594 MLXSW_SP_CPU_PORT_SB_CM,
595 MLXSW_SP_CPU_PORT_SB_CM,
596 MLXSW_SP_CPU_PORT_SB_CM,
597 MLXSW_SP_CPU_PORT_SB_CM,
598 MLXSW_SP_CPU_PORT_SB_CM,
599 MLXSW_SP_CPU_PORT_SB_CM,
600 MLXSW_SP_CPU_PORT_SB_CM,
601 MLXSW_SP_CPU_PORT_SB_CM,
602 MLXSW_SP_CPU_PORT_SB_CM,
603 MLXSW_SP_CPU_PORT_SB_CM,
604 MLXSW_SP_CPU_PORT_SB_CM,
605 MLXSW_SP_CPU_PORT_SB_CM,
606 MLXSW_SP_CPU_PORT_SB_CM,
607 MLXSW_SP_CPU_PORT_SB_CM,
608 MLXSW_SP_CPU_PORT_SB_CM,
/* True iff the pool is configured with static (cell-based) thresholds,
 * in which case max_buff values are byte sizes needing cell conversion.
 */
612 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
614 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
616 return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
/* Program a table of default cms for one port/direction. Skips the
 * nonexistent ingress PG 8, warns if a cm references a pool of the wrong
 * direction, and converts max_buff to cells only for static pools
 * (dynamic thresholds are alpha exponents, not sizes).
 */
619 static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
620 enum mlxsw_reg_sbxx_dir dir,
621 const struct mlxsw_sp_sb_cm *cms,
624 const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
628 for (i = 0; i < cms_len; i++) {
629 const struct mlxsw_sp_sb_cm *cm;
633 if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
634 continue; /* PG number 8 does not exist, skip it */
636 if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
639 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
640 max_buff = cm->max_buff;
641 if (max_buff == MLXSW_SP_SB_INFI) {
642 err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
644 true, cm->pool_index);
646 if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
648 max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
650 err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
652 false, cm->pool_index);
/* Program both the ingress and egress default cm tables for one port. */
660 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
662 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
665 err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
666 mlxsw_sp_port->local_port,
667 MLXSW_REG_SBXX_DIR_INGRESS,
668 mlxsw_sp->sb_vals->cms_ingress,
669 mlxsw_sp->sb_vals->cms_ingress_count);
672 return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
673 mlxsw_sp_port->local_port,
674 MLXSW_REG_SBXX_DIR_EGRESS,
675 mlxsw_sp->sb_vals->cms_egress,
676 mlxsw_sp->sb_vals->cms_egress_count);
/* Program the CPU port (local port 0) egress cm table. */
679 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
681 return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
682 mlxsw_sp->sb_vals->cms_cpu,
683 mlxsw_sp->sb_vals->cms_cpu_count);
/* Initializer for a default per-{port, pool} quota record. */
686 #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
688 .min_buff = _min_buff, \
689 .max_buff = _max_buff, \
/* Spectrum-1 per-port pool defaults, indexed like the pool descriptor
 * table (4 ingress entries then 5 egress entries).
 */
692 static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
694 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
695 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
696 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
697 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
699 MLXSW_SP_SB_PM(0, 7),
700 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
701 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
702 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
703 MLXSW_SP_SB_PM(10000, 90000),
/* Spectrum-2 per-port pool defaults; unused pools are pinned to 0/0. */
706 static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
708 MLXSW_SP_SB_PM(0, 7),
709 MLXSW_SP_SB_PM(0, 0),
710 MLXSW_SP_SB_PM(0, 0),
711 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
713 MLXSW_SP_SB_PM(0, 7),
714 MLXSW_SP_SB_PM(0, 0),
715 MLXSW_SP_SB_PM(0, 0),
716 MLXSW_SP_SB_PM(0, 0),
717 MLXSW_SP_SB_PM(10000, 90000),
/* Program the default pm for every pool on one port. As with cms,
 * max_buff is converted to cells only when the pool uses static mode.
 */
720 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
722 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
726 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
727 const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i];
731 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
732 max_buff = pm->max_buff;
733 if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
734 max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
735 err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
736 i, min_buff, max_buff);
/* Initializer for a multicast (SBMM) record; always on the egress pool. */
743 #define MLXSW_SP_SB_MM(_min_buff, _max_buff) \
745 .min_buff = _min_buff, \
746 .max_buff = _max_buff, \
747 .pool_index = MLXSW_SP_SB_POOL_EGR, \
/* Default multicast records, one per SBMM index. */
750 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
751 MLXSW_SP_SB_MM(0, 6),
752 MLXSW_SP_SB_MM(0, 6),
753 MLXSW_SP_SB_MM(0, 6),
754 MLXSW_SP_SB_MM(0, 6),
755 MLXSW_SP_SB_MM(0, 6),
756 MLXSW_SP_SB_MM(0, 6),
757 MLXSW_SP_SB_MM(0, 6),
758 MLXSW_SP_SB_MM(0, 6),
759 MLXSW_SP_SB_MM(0, 6),
760 MLXSW_SP_SB_MM(0, 6),
761 MLXSW_SP_SB_MM(0, 6),
762 MLXSW_SP_SB_MM(0, 6),
763 MLXSW_SP_SB_MM(0, 6),
764 MLXSW_SP_SB_MM(0, 6),
765 MLXSW_SP_SB_MM(0, 6),
/* Program every default multicast record via the SBMM register. */
768 static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
770 char sbmm_pl[MLXSW_REG_SBMM_LEN];
774 for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
775 const struct mlxsw_sp_sb_pool_des *des;
776 const struct mlxsw_sp_sb_mm *mc;
779 mc = &mlxsw_sp->sb_vals->mms[i];
780 des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
781 /* All pools used by sb_mm's are initialized using dynamic
782 * thresholds, therefore 'max_buff' isn't specified in cells.
784 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
785 mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
787 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
/* Split pool_count into ingress/egress counts for devlink registration.
 * Relies on the descriptor tables ordering ingress pools first; warns if
 * no egress pool is found at all.
 */
794 static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
795 u16 *p_ingress_len, u16 *p_egress_len)
799 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i)
800 if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
801 MLXSW_REG_SBXX_DIR_EGRESS)
803 WARN(1, "No egress pools\n");
807 *p_egress_len = mlxsw_sp->sb_vals->pool_count - i;
/* Spectrum-1 bundle of default tables, consumed by the init paths above. */
810 const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
811 .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
812 .pool_dess = mlxsw_sp1_sb_pool_dess,
813 .pms = mlxsw_sp1_sb_pms,
814 .prs = mlxsw_sp1_sb_prs,
815 .mms = mlxsw_sp_sb_mms,
816 .cms_ingress = mlxsw_sp1_sb_cms_ingress,
817 .cms_egress = mlxsw_sp1_sb_cms_egress,
818 .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
819 .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
820 .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
821 .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
822 .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
/* Spectrum-2 bundle; shares the mm and CPU-port cm tables with SP1. */
825 const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
826 .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
827 .pool_dess = mlxsw_sp2_sb_pool_dess,
828 .pms = mlxsw_sp2_sb_pms,
829 .prs = mlxsw_sp2_sb_prs,
830 .mms = mlxsw_sp_sb_mms,
831 .cms_ingress = mlxsw_sp2_sb_cms_ingress,
832 .cms_egress = mlxsw_sp2_sb_cms_egress,
833 .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
834 .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
835 .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
836 .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
837 .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
/* Top-level shared-buffer init: validate FW-reported resources, allocate
 * shadow state, program pools / CPU-port cms / multicast records, then
 * register the buffer with devlink. Unwinds via the err_* labels.
 */
840 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
842 u32 max_headroom_size;
/* Bail out early if FW did not report the resources we depend on. */
847 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
850 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
853 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
856 mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
859 mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
860 mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
862 max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
864 /* Round down, because this limit must not be overstepped. */
865 mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
866 mlxsw_sp->sb->cell_size;
868 err = mlxsw_sp_sb_ports_init(mlxsw_sp);
870 goto err_sb_ports_init;
871 err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
872 mlxsw_sp->sb_vals->pool_count);
874 goto err_sb_prs_init;
875 err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
877 goto err_sb_cpu_port_sb_cms_init;
878 err = mlxsw_sp_sb_mms_init(mlxsw_sp);
880 goto err_sb_mms_init;
881 mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
882 err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
883 mlxsw_sp->sb->sb_size,
886 MLXSW_SP_SB_ING_TC_COUNT,
887 MLXSW_SP_SB_EG_TC_COUNT);
889 goto err_devlink_sb_register;
893 err_devlink_sb_register:
895 err_sb_cpu_port_sb_cms_init:
897 mlxsw_sp_sb_ports_fini(mlxsw_sp);
/* Unregister from devlink and free all shadow state, reverse of init. */
903 void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
905 devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
906 mlxsw_sp_sb_ports_fini(mlxsw_sp);
/* Per-port buffer init: headroom, then default cms, then default pms. */
910 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
914 err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
917 err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
920 err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
/* devlink op: report a pool's type, size (in bytes), threshold type and
 * cell size from the shadow state.
 */
925 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
926 unsigned int sb_index, u16 pool_index,
927 struct devlink_sb_pool_info *pool_info)
929 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
930 enum mlxsw_reg_sbxx_dir dir;
931 struct mlxsw_sp_sb_pr *pr;
933 dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
934 pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
/* devlink pool/threshold types intentionally mirror the HW enums. */
935 pool_info->pool_type = (enum devlink_sb_pool_type) dir;
936 pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
937 pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
938 pool_info->cell_size = mlxsw_sp->sb->cell_size;
/* devlink op: reconfigure a pool. Rejects sizes above the HW maximum and
 * any mode/size change on pools whose defaults marked them frozen.
 */
942 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
943 unsigned int sb_index, u16 pool_index, u32 size,
944 enum devlink_sb_threshold_type threshold_type,
945 struct netlink_ext_ack *extack)
947 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
948 u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
949 const struct mlxsw_sp_sb_pr *pr;
950 enum mlxsw_reg_sbpr_mode mode;
952 mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
953 pr = &mlxsw_sp->sb_vals->prs[pool_index];
955 if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
956 NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
960 if (pr->freeze_mode && pr->mode != mode) {
961 NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
965 if (pr->freeze_size && pr->size != size) {
966 NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
970 return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
974 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
/* Convert an internal max_buff to the devlink threshold representation:
 * shift by the alpha offset for dynamic pools, or cells->bytes for static.
 */
976 static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
979 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
981 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
982 return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
983 return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
/* Inverse of mlxsw_sp_sb_threshold_out: validate and convert a devlink
 * threshold into the internal max_buff for the pool's mode.
 */
986 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
987 u32 threshold, u32 *p_max_buff,
988 struct netlink_ext_ack *extack)
990 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
992 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
995 val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
996 if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
997 val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
998 NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
1003 *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
/* devlink op: report a port's per-pool threshold from the shadow pm. */
1008 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1009 unsigned int sb_index, u16 pool_index,
1012 struct mlxsw_sp_port *mlxsw_sp_port =
1013 mlxsw_core_port_driver_priv(mlxsw_core_port);
1014 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1015 u8 local_port = mlxsw_sp_port->local_port;
1016 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1019 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
/* devlink op: set a port's per-pool threshold (validated/converted by
 * mlxsw_sp_sb_threshold_in, then written via SBPM).
 */
1024 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
1025 unsigned int sb_index, u16 pool_index,
1026 u32 threshold, struct netlink_ext_ack *extack)
1028 struct mlxsw_sp_port *mlxsw_sp_port =
1029 mlxsw_core_port_driver_priv(mlxsw_core_port);
1030 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1031 u8 local_port = mlxsw_sp_port->local_port;
1035 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1036 threshold, &max_buff, extack);
1040 return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
/* devlink op: report which pool a TC is bound to and its threshold. */
1044 int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1045 unsigned int sb_index, u16 tc_index,
1046 enum devlink_sb_pool_type pool_type,
1047 u16 *p_pool_index, u32 *p_threshold)
1049 struct mlxsw_sp_port *mlxsw_sp_port =
1050 mlxsw_core_port_driver_priv(mlxsw_core_port);
1051 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1052 u8 local_port = mlxsw_sp_port->local_port;
1053 u8 pg_buff = tc_index;
1054 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1055 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1058 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
1060 *p_pool_index = cm->pool_index;
/* devlink op: bind a TC to a pool with a threshold. Rejects direction
 * mismatches and changes to cms whose defaults froze the pool/threshold;
 * otherwise writes the new quota via SBCM.
 */
1064 int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
1065 unsigned int sb_index, u16 tc_index,
1066 enum devlink_sb_pool_type pool_type,
1067 u16 pool_index, u32 threshold,
1068 struct netlink_ext_ack *extack)
1070 struct mlxsw_sp_port *mlxsw_sp_port =
1071 mlxsw_core_port_driver_priv(mlxsw_core_port);
1072 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1073 u8 local_port = mlxsw_sp_port->local_port;
1074 const struct mlxsw_sp_sb_cm *cm;
1075 u8 pg_buff = tc_index;
1076 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1080 if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
1081 NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
1085 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
1086 cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
1088 cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];
1090 if (cm->freeze_pool && cm->pool_index != pool_index) {
1091 NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
1095 if (cm->freeze_thresh && cm->max_buff != threshold) {
1096 NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
1100 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1101 threshold, &max_buff, extack);
1105 return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
1106 0, max_buff, false, pool_index);
/* Max ports per SBSR query, bounded by records per port (ING + EG TCs). */
1109 #define MASKED_COUNT_MAX \
1110 (MLXSW_REG_SBSR_REC_MAX_COUNT / \
1111 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
/* Context smuggled through the SBSR callback's cb_priv (memcpy'd in/out):
 * first port covered by this batch and how many ports were masked.
 */
1113 struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
/* SBSR completion: unpack per-{port, TC} occupancy records into the
 * shadow cms -- first all ingress records, then all egress records, for
 * the same span of ports, stopping after cb_ctx.masked_count ports.
 */
1118 static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
1119 char *sbsr_pl, size_t sbsr_pl_len,
1120 unsigned long cb_priv)
1122 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1123 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
1127 struct mlxsw_sp_sb_cm *cm;
1130 memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
1133 for (local_port = cb_ctx.local_port_1;
1134 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1135 if (!mlxsw_sp->ports[local_port])
1137 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
1138 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
1139 MLXSW_REG_SBXX_DIR_INGRESS);
1140 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
1141 &cm->occ.cur, &cm->occ.max);
1143 if (++masked_count == cb_ctx.masked_count)
1147 for (local_port = cb_ctx.local_port_1;
1148 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1149 if (!mlxsw_sp->ports[local_port])
1151 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
1152 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
1153 MLXSW_REG_SBXX_DIR_EGRESS);
1154 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
1155 &cm->occ.cur, &cm->occ.max);
1157 if (++masked_count == cb_ctx.masked_count)
/* devlink op: snapshot occupancy for all ports. Batches ports into SBSR
 * queries of at most MASKED_COUNT_MAX each, queues per-pool SBPM queries
 * alongside, then waits for the whole bulk list.
 */
1162 int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
1163 unsigned int sb_index)
1165 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1166 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
1167 unsigned long cb_priv;
1168 LIST_HEAD(bulk_list);
1177 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
1183 local_port_1 = local_port;
1185 mlxsw_reg_sbsr_pack(sbsr_pl, false);
1186 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
1187 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
1188 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
1189 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
1190 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1191 if (!mlxsw_sp->ports[local_port])
1193 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
1194 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
1195 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
1196 err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
1201 if (++masked_count == MASKED_COUNT_MAX)
1206 cb_ctx.masked_count = masked_count;
1207 cb_ctx.local_port_1 = local_port_1;
1208 memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
1209 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
1210 &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
/* More ports remain: loop back for another batch. */
1214 if (local_port < mlxsw_core_max_ports(mlxsw_core))
1218 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* devlink op: clear the max-occupancy watermarks for all ports. Same
 * batched SBSR structure as the snapshot path, but packed with the
 * clear flag and no unpack callback.
 */
1225 int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
1226 unsigned int sb_index)
1228 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1229 LIST_HEAD(bulk_list);
1231 unsigned int masked_count;
1237 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
1244 mlxsw_reg_sbsr_pack(sbsr_pl, true);
1245 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
1246 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
1247 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
1248 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
1249 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1250 if (!mlxsw_sp->ports[local_port])
1252 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
1253 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
1254 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
1255 err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
1260 if (++masked_count == MASKED_COUNT_MAX)
1265 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
1266 &bulk_list, NULL, 0);
1269 if (local_port < mlxsw_core_max_ports(mlxsw_core))
1273 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* devlink op: report a port-pool's current/max occupancy, in bytes,
 * from the shadow state filled by the snapshot path.
 */
1280 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1281 unsigned int sb_index, u16 pool_index,
1282 u32 *p_cur, u32 *p_max)
1284 struct mlxsw_sp_port *mlxsw_sp_port =
1285 mlxsw_core_port_driver_priv(mlxsw_core_port);
1286 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1287 u8 local_port = mlxsw_sp_port->local_port;
1288 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1291 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1292 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1296 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1297 unsigned int sb_index, u16 tc_index,
1298 enum devlink_sb_pool_type pool_type,
1299 u32 *p_cur, u32 *p_max)
1301 struct mlxsw_sp_port *mlxsw_sp_port =
1302 mlxsw_core_port_driver_priv(mlxsw_core_port);
1303 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1304 u8 local_port = mlxsw_sp_port->local_port;
1305 u8 pg_buff = tc_index;
1306 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1307 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1310 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1311 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);