mlxsw: spectrum_buffers: Forbid configuration of multicast pool
[linux-2.6-microblaze.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_buffers.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 #include <linux/netlink.h>
10
11 #include "spectrum.h"
12 #include "core.h"
13 #include "port.h"
14 #include "reg.h"
15
/* Cached configuration of one shared-buffer pool (SBPR register). */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
	/* NOTE(review): freeze bits presumably forbid later runtime
	 * reconfiguration of mode/size -- confirm against devlink sb ops.
	 */
	u8 freeze_mode:1,
	   freeze_size:1;
};
22
/* Occupancy sample: current fill level and maximum watermark, as unpacked
 * from the SBPM register. NOTE(review): the "cp" in the tag looks like a
 * typo for "sp", but it is used consistently throughout this file.
 */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
27
/* Cached per-{port, PG/TC} shared-buffer binding (SBCM register). */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;		/* index into sb_vals->pool_dess */
	struct mlxsw_cp_sb_occ occ;
	/* NOTE(review): freeze bits presumably forbid later runtime
	 * reconfiguration of pool/threshold -- confirm against devlink sb ops.
	 */
	u8 freeze_pool:1,
	   freeze_thresh:1;
};
36
/* Sentinel size / quota meaning "infinite"; translated to the infi_size /
 * infi_max flag when the corresponding register is packed.
 */
#define MLXSW_SP_SB_INFI -1U

/* Cached per-{port, pool} quota (SBPM register) plus its occupancy. */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};
44
/* Default multicast quota entry, written through the SBMM register. */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;		/* index into sb_vals->pool_dess */
};
50
/* Descriptor of one shared-buffer pool: traffic direction plus the
 * firmware pool number within that direction.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};

/* Well-known indices into the pool descriptor arrays below. */
#define MLXSW_SP_SB_POOL_ING		0
#define MLXSW_SP_SB_POOL_ING_MNG	3
#define MLXSW_SP_SB_POOL_EGR		4
#define MLXSW_SP_SB_POOL_EGR_MC		8
60
/* Order ingress pools before egress pools.
 * Entry order must agree with the MLXSW_SP_SB_POOL_* indices above; the
 * last entry is the egress multicast pool (firmware pool 15).
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
};
73
/* Spectrum-2 pool layout; currently identical to the Spectrum-1 one, kept
 * separate so the two ASICs can diverge independently.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
};
85
/* Number of ingress priority groups and egress traffic classes cached. */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port cache of shared-buffer state. */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;	/* one entry per pool */
};
94
/* Top-level shared-buffer state for one ASIC instance. */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;		/* one entry per pool */
	struct mlxsw_sp_sb_port *ports;		/* indexed by local port */
	u32 cell_size;				/* bytes per buffer cell */
	u32 max_headroom_cells;
	u64 sb_size;				/* total buffer, in bytes */
};
102
/* Per-ASIC tables of default shared-buffer values; instantiated once for
 * Spectrum-1 and once for Spectrum-2 at the bottom of this file.
 */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;	/* per-port pool quotas */
	const struct mlxsw_sp_sb_pr *prs;	/* pool profiles */
	const struct mlxsw_sp_sb_mm *mms;	/* multicast defaults */
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;	/* CPU port egress quotas */
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
117
118 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
119 {
120         return mlxsw_sp->sb->cell_size * cells;
121 }
122
123 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
124 {
125         return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
126 }
127
128 u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
129 {
130         return mlxsw_sp->sb->max_headroom_cells;
131 }
132
133 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
134                                                  u16 pool_index)
135 {
136         return &mlxsw_sp->sb->prs[pool_index];
137 }
138
139 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
140 {
141         if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
142                 return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
143         else
144                 return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
145 }
146
147 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
148                                                  u8 local_port, u8 pg_buff,
149                                                  enum mlxsw_reg_sbxx_dir dir)
150 {
151         struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
152
153         WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
154         if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
155                 return &sb_port->ing_cms[pg_buff];
156         else
157                 return &sb_port->eg_cms[pg_buff];
158 }
159
160 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
161                                                  u8 local_port, u16 pool_index)
162 {
163         return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
164 }
165
/* Configure pool @pool_index through the SBPR register and, on success,
 * update the cached pool record. When @infi_size is set the device is asked
 * for an infinite pool and the cache records the whole shared buffer,
 * converted to cells, instead.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	/* Update the cache only after the device accepted the write. */
	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
189
/* Bind {port, PG/TC} to a pool with the given quotas through the SBCM
 * register and mirror the result in the cache. Non-existent PGs/TCs (e.g.
 * ingress PG 8) are still written to the device but not cached. When
 * @infi_max is set, the cached max_buff becomes the whole shared buffer,
 * in cells.
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}
219
/* Set the per-{port, pool} quotas through the SBPM register and, on
 * success, update the cached PM record.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
240
241 static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
242                                     u16 pool_index, struct list_head *bulk_list)
243 {
244         const struct mlxsw_sp_sb_pool_des *des =
245                 &mlxsw_sp->sb_vals->pool_dess[pool_index];
246         char sbpm_pl[MLXSW_REG_SBPM_LEN];
247
248         mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
249                             true, 0, 0);
250         return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
251                                      bulk_list, NULL, 0);
252 }
253
254 static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
255                                         char *sbpm_pl, size_t sbpm_pl_len,
256                                         unsigned long cb_priv)
257 {
258         struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
259
260         mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
261 }
262
/* Queue an SBPM occupancy query for the given {port, pool}. The result is
 * delivered asynchronously to mlxsw_sp_sb_pm_occ_query_cb(), which fills
 * the cached PM record's occupancy fields.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
279
/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
#define MLXSW_SP_PB_HEADROOM 25632
#define MLXSW_SP_PB_UNUSED 8

/* Size the port's headroom buffers via the PBMC register: buffer 0 scales
 * with the port width, buffer 9 holds two maximum-MTU frames, buffer 8 is
 * skipped as unused, the remaining entries are zero (unset in pbs[]), and
 * the port's shared-buffer entry is explicitly set to zero.
 */
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	const u32 pbs[] = {
		[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
		[9] = 2 * MLXSW_PORT_MAX_MTU,
	};
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < ARRAY_SIZE(pbs); i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
307
308 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
309 {
310         char pptb_pl[MLXSW_REG_PPTB_LEN];
311         int i;
312
313         mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
314         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
315                 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
316         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
317                                pptb_pl);
318 }
319
/* Initialize the port headroom: size the buffers, then direct all
 * priorities at buffer 0.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;

	err = mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
	if (err)
		return err;

	return 0;
}
329
330 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
331                                  struct mlxsw_sp_sb_port *sb_port)
332 {
333         struct mlxsw_sp_sb_pm *pms;
334
335         pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
336                       GFP_KERNEL);
337         if (!pms)
338                 return -ENOMEM;
339         sb_port->pms = pms;
340         return 0;
341 }
342
343 static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
344 {
345         kfree(sb_port->pms);
346 }
347
/* Allocate the shared-buffer caches: the per-port array, the per-pool
 * record array, and each port's per-pool quota array. On failure, ports
 * initialized so far are torn down in reverse order and all allocations
 * are released.
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}
385
386 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
387 {
388         int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
389         int i;
390
391         for (i = max_ports - 1; i >= 0; i--)
392                 mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
393         kfree(mlxsw_sp->sb->prs);
394         kfree(mlxsw_sp->sb->ports);
395 }
396
/* Initializer for a pool record; the _EXT variant also sets the freeze
 * bits that mark mode and/or size as fixed.
 */
#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)	\
	{								\
		.mode = _mode,						\
		.size = _size,						\
		.freeze_mode = _freeze_mode,				\
		.freeze_size = _freeze_size,				\
	}
410
/* Default pool sizes for Spectrum-1, in bytes. */
#define MLXSW_SP1_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP1_SB_PR_EGRESS_SIZE	13232000

/* Default pool profiles for Spectrum-1; entry order must match
 * mlxsw_sp1_sb_pool_dess. The last (multicast) pool is static, infinite
 * and frozen in both mode and size.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE),
	/* Egress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP1_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
};
432
/* Default pool sizes for Spectrum-2, in bytes. */
#define MLXSW_SP2_SB_PR_INGRESS_SIZE	40960000
#define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP2_SB_PR_EGRESS_SIZE	40960000

/* Default pool profiles for Spectrum-2; entry order must match
 * mlxsw_sp2_sb_pool_dess. Unused pools are static/zero here (dynamic on
 * Spectrum-1); the multicast pool is static, infinite and frozen.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE),
	/* Egress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP2_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
};
454
455 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
456                                 const struct mlxsw_sp_sb_pr *prs,
457                                 size_t prs_len)
458 {
459         int i;
460         int err;
461
462         for (i = 0; i < prs_len; i++) {
463                 u32 size = prs[i].size;
464                 u32 size_cells;
465
466                 if (size == MLXSW_SP_SB_INFI) {
467                         err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
468                                                    0, true);
469                 } else {
470                         size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
471                         err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
472                                                    size_cells, false);
473                 }
474                 if (err)
475                         return err;
476         }
477         return 0;
478 }
479
/* Initializers for per-{port, PG/TC} quota entries; the _ING/_EGR/_EGR_MC
 * variants pre-select the corresponding well-known pool index.
 */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_ING,	\
	}

#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR_MC,	\
	}
507
/* Default ingress per-PG quotas for Spectrum-1: PG 0 carries data traffic,
 * PG 8 is a dummy (does not exist in hardware) and the last entry is the
 * management PG bound to the management pool.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(10000, 8),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, MLXSW_SP_SB_POOL_ING_MNG),
};
520
/* Default ingress per-PG quotas for Spectrum-2; same layout as the
 * Spectrum-1 table but with different PG 0 values.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(0, 7),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, MLXSW_SP_SB_POOL_ING_MNG),
};
533
/* Default egress per-TC quotas for Spectrum-1: TCs 0-7 are unicast, TCs
 * 8-15 are multicast and draw an infinite quota from the MC pool; the
 * last entry is TC 16.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
553
/* Default egress per-TC quotas for Spectrum-2; same layout as the
 * Spectrum-1 table but with different unicast TC values.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
573
/* Default CPU port egress quota: zero thresholds on the egress pool. */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR)

/* CPU port egress quotas; a few TCs reserve one maximum-MTU frame each,
 * the rest use the zeroed default above.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, MLXSW_SP_SB_POOL_EGR),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};
610
611 static bool
612 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
613 {
614         struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
615
616         return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
617 }
618
/* Program the per-{port, PG/TC} quotas of one direction from @cms.
 *
 * Ingress PG 8 does not exist in hardware and is skipped; an entry whose
 * pool belongs to the opposite direction is a table bug and is skipped
 * with a warning. min_buff is always given in bytes and converted here;
 * max_buff is converted only for static pools (dynamic pools take it
 * untranslated), and MLXSW_SP_SB_INFI requests the infinite quota.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}
659
660 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
661 {
662         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
663         int err;
664
665         err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
666                                      mlxsw_sp_port->local_port,
667                                      MLXSW_REG_SBXX_DIR_INGRESS,
668                                      mlxsw_sp->sb_vals->cms_ingress,
669                                      mlxsw_sp->sb_vals->cms_ingress_count);
670         if (err)
671                 return err;
672         return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
673                                       mlxsw_sp_port->local_port,
674                                       MLXSW_REG_SBXX_DIR_EGRESS,
675                                       mlxsw_sp->sb_vals->cms_egress,
676                                       mlxsw_sp->sb_vals->cms_egress_count);
677 }
678
679 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
680 {
681         return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
682                                       mlxsw_sp->sb_vals->cms_cpu,
683                                       mlxsw_sp->sb_vals->cms_cpu_count);
684 }
685
/* Initializer for a per-{port, pool} quota entry. */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

/* Default per-port pool quotas for Spectrum-1; entry order must match
 * mlxsw_sp1_sb_pool_dess.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	/* Egress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(10000, 90000),
};
705
/* Default per-port pool quotas for Spectrum-2; entry order must match
 * mlxsw_sp2_sb_pool_dess.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	/* Egress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(10000, 90000),
};
719
/* Program a port's default quota in each pool. min_buff is given in bytes
 * and converted to cells; max_buff is converted only for static pools
 * (dynamic pools take it untranslated).
 */
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
		const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i];
		u32 max_buff;
		u32 min_buff;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
					   i, min_buff, max_buff);
		if (err)
			return err;
	}
	return 0;
}
742
/* Initializer for a multicast default entry, always on the egress pool. */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff)		\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

/* Multicast defaults, written one SBMM record per table index. */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
};
767
/* Program the multicast defaults from sb_vals->mms via the SBMM register,
 * one record per table index.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
793
794 static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
795                                 u16 *p_ingress_len, u16 *p_egress_len)
796 {
797         int i;
798
799         for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i)
800                 if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
801                     MLXSW_REG_SBXX_DIR_EGRESS)
802                         goto out;
803         WARN(1, "No egress pools\n");
804
805 out:
806         *p_ingress_len = i;
807         *p_egress_len = mlxsw_sp->sb_vals->pool_count - i;
808 }
809
/* Spectrum-1 shared buffer description: pool descriptors, pool/CM/PM
 * defaults and multicast mappings, consumed via mlxsw_sp->sb_vals.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
824
/* Spectrum-2 shared buffer description; multicast mappings and CPU port
 * CMs are shared with Spectrum-1, the rest is ASIC-specific.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
839
/* Initialize the shared buffer infrastructure: validate and read the
 * cell size, maximum buffer size and maximum headroom resources,
 * allocate the driver's sb state, program pools, CPU port CMs and
 * multicast mappings, and register the shared buffer with devlink.
 * Returns 0 or a negative errno; on failure everything acquired so far
 * is released.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u32 max_headroom_size;
	u16 ing_pool_count;
	u16 eg_pool_count;
	int err;

	/* All three device resources are required to size the buffer. */
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MAX_BUFFER_SIZE);
	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					       MAX_HEADROOM_SIZE);
	/* Round down, because this limit must not be overstepped. */
	mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
						mlxsw_sp->sb->cell_size;

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
				   mlxsw_sp->sb_vals->pool_count);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

	/* The PR/CM/MM register writes need no explicit rollback; only
	 * the port array and the sb state are torn down here.
	 */
err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}
902
/* Tear down the shared buffer infrastructure in reverse order of
 * mlxsw_sp_buffers_init().
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}
909
/* Per-port shared buffer setup: headroom, then CM (per-TC) and PM
 * (per-pool) defaults. Returns 0 or the first failing step's errno.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}
924
925 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
926                          unsigned int sb_index, u16 pool_index,
927                          struct devlink_sb_pool_info *pool_info)
928 {
929         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
930         enum mlxsw_reg_sbxx_dir dir;
931         struct mlxsw_sp_sb_pr *pr;
932
933         dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
934         pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
935         pool_info->pool_type = (enum devlink_sb_pool_type) dir;
936         pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
937         pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
938         pool_info->cell_size = mlxsw_sp->sb->cell_size;
939         return 0;
940 }
941
942 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
943                          unsigned int sb_index, u16 pool_index, u32 size,
944                          enum devlink_sb_threshold_type threshold_type,
945                          struct netlink_ext_ack *extack)
946 {
947         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
948         u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
949         const struct mlxsw_sp_sb_pr *pr;
950         enum mlxsw_reg_sbpr_mode mode;
951
952         mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
953         pr = &mlxsw_sp->sb_vals->prs[pool_index];
954
955         if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
956                 NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
957                 return -EINVAL;
958         }
959
960         if (pr->freeze_mode && pr->mode != mode) {
961                 NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
962                 return -EINVAL;
963         };
964
965         if (pr->freeze_size && pr->size != size) {
966                 NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
967                 return -EINVAL;
968         };
969
970         return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
971                                     pool_size, false);
972 }
973
974 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
975
976 static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
977                                      u32 max_buff)
978 {
979         struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
980
981         if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
982                 return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
983         return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
984 }
985
986 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
987                                     u32 threshold, u32 *p_max_buff,
988                                     struct netlink_ext_ack *extack)
989 {
990         struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
991
992         if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
993                 int val;
994
995                 val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
996                 if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
997                     val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
998                         NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
999                         return -EINVAL;
1000                 }
1001                 *p_max_buff = val;
1002         } else {
1003                 *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
1004         }
1005         return 0;
1006 }
1007
1008 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1009                               unsigned int sb_index, u16 pool_index,
1010                               u32 *p_threshold)
1011 {
1012         struct mlxsw_sp_port *mlxsw_sp_port =
1013                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1014         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1015         u8 local_port = mlxsw_sp_port->local_port;
1016         struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1017                                                        pool_index);
1018
1019         *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
1020                                                  pm->max_buff);
1021         return 0;
1022 }
1023
1024 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
1025                               unsigned int sb_index, u16 pool_index,
1026                               u32 threshold, struct netlink_ext_ack *extack)
1027 {
1028         struct mlxsw_sp_port *mlxsw_sp_port =
1029                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1030         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1031         u8 local_port = mlxsw_sp_port->local_port;
1032         u32 max_buff;
1033         int err;
1034
1035         err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1036                                        threshold, &max_buff, extack);
1037         if (err)
1038                 return err;
1039
1040         return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
1041                                     0, max_buff);
1042 }
1043
1044 int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1045                                  unsigned int sb_index, u16 tc_index,
1046                                  enum devlink_sb_pool_type pool_type,
1047                                  u16 *p_pool_index, u32 *p_threshold)
1048 {
1049         struct mlxsw_sp_port *mlxsw_sp_port =
1050                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1051         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1052         u8 local_port = mlxsw_sp_port->local_port;
1053         u8 pg_buff = tc_index;
1054         enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1055         struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1056                                                        pg_buff, dir);
1057
1058         *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
1059                                                  cm->max_buff);
1060         *p_pool_index = cm->pool_index;
1061         return 0;
1062 }
1063
/* devlink op: bind a port TC to a pool with a given threshold. Rejects
 * cross-direction bindings and, for TCs whose CM entry is frozen in the
 * per-ASIC table, any change of pool or threshold. Returns 0, -EINVAL,
 * or a register-write errno.
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	const struct mlxsw_sp_sb_cm *cm;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u32 max_buff;
	int err;

	/* The TC's direction must match the target pool's direction. */
	if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
		NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
		return -EINVAL;
	}

	/* Look up the default CM entry to check for frozen attributes. */
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
	else
		cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];

	if (cm->freeze_pool && cm->pool_index != pool_index) {
		NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
		return -EINVAL;
	}

	if (cm->freeze_thresh && cm->max_buff != threshold) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
		return -EINVAL;
	}

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff, extack);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
				    0, max_buff, false, pool_index);
}
1108
/* Maximum number of ports per SBSR query: each port contributes one
 * record per ingress TC and one per egress TC, and a single query can
 * return at most MLXSW_REG_SBSR_REC_MAX_COUNT records.
 */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

/* Per-batch context, packed into the unsigned long cb_priv that
 * mlxsw_sp_sb_occ_snapshot() hands to its SBSR completion callback.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;	/* number of ports included in this batch */
	u8 local_port_1;	/* first local port of this batch */
};
1117
/* Completion callback for an SBSR batch query issued by
 * mlxsw_sp_sb_occ_snapshot(). Unpacks the returned occupancy records
 * into the cached CM entries. Records are laid out as all ingress TCs
 * per port followed by all egress TCs per port, so the same port range
 * (starting at local_port_1, skipping unused ports, stopping after
 * masked_count ports) is walked twice in the order the request was
 * packed.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* cb_priv is the ctx struct bit-copied into an unsigned long. */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
1161
/* devlink op: take an occupancy snapshot of the whole shared buffer.
 * Ports are queried in batches of at most MASKED_COUNT_MAX each (the
 * SBSR record limit); per-port/per-pool occupancy (SBPM) queries are
 * issued alongside and all transactions are awaited via bulk_list. The
 * SBSR completion callback caches per-TC occupancy in the CM entries.
 * Returns 0 or the first error encountered.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	/* Resume one past the last port of the previous batch; on the
	 * first pass this skips local port 0 — presumably the CPU port,
	 * confirm against port numbering.
	 */
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

	/* Port range exhausted before a full batch — fall through and
	 * issue the final (possibly partial) query.
	 */
do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	/* Always wait for outstanding transactions before returning. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1224
/* devlink op: clear the watermark (max occupancy) records of the whole
 * shared buffer. Mirrors mlxsw_sp_sb_occ_snapshot()'s batching, but
 * packs SBSR with 'true' and issues SBPM clear (rather than query)
 * transactions; no completion callback is needed. Returns 0 or the
 * first error encountered.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	/* Resume one past the last port of the previous batch. */
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

	/* Port range exhausted before a full batch — fall through and
	 * issue the final (possibly partial) query.
	 */
do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	/* Always wait for outstanding transactions before returning. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1279
1280 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1281                                   unsigned int sb_index, u16 pool_index,
1282                                   u32 *p_cur, u32 *p_max)
1283 {
1284         struct mlxsw_sp_port *mlxsw_sp_port =
1285                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1286         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1287         u8 local_port = mlxsw_sp_port->local_port;
1288         struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1289                                                        pool_index);
1290
1291         *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1292         *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1293         return 0;
1294 }
1295
1296 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1297                                      unsigned int sb_index, u16 tc_index,
1298                                      enum devlink_sb_pool_type pool_type,
1299                                      u32 *p_cur, u32 *p_max)
1300 {
1301         struct mlxsw_sp_port *mlxsw_sp_port =
1302                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1303         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1304         u8 local_port = mlxsw_sp_port->local_port;
1305         u8 pg_buff = tc_index;
1306         enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1307         struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1308                                                        pg_buff, dir);
1309
1310         *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1311         *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
1312         return 0;
1313 }