// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/netlink.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"

struct mlxsw_sp_sb_pr {
        enum mlxsw_reg_sbpr_mode mode;
        u32 size;
        u8 freeze_mode:1,
           freeze_size:1;
};

struct mlxsw_cp_sb_occ {
        u32 cur;
        u32 max;
};

struct mlxsw_sp_sb_cm {
        u32 min_buff;
        u32 max_buff;
        u16 pool_index;
        struct mlxsw_cp_sb_occ occ;
        u8 freeze_pool:1,
           freeze_thresh:1;
};

#define MLXSW_SP_SB_INFI -1U
#define MLXSW_SP_SB_REST -2U

struct mlxsw_sp_sb_pm {
        u32 min_buff;
        u32 max_buff;
        struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_mm {
        u32 min_buff;
        u32 max_buff;
        u16 pool_index;
};

struct mlxsw_sp_sb_pool_des {
        enum mlxsw_reg_sbxx_dir dir;
        u8 pool;
};

#define MLXSW_SP_SB_POOL_ING            0
#define MLXSW_SP_SB_POOL_EGR            4
#define MLXSW_SP_SB_POOL_EGR_MC         8
#define MLXSW_SP_SB_POOL_ING_CPU        9
#define MLXSW_SP_SB_POOL_EGR_CPU        10

static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
        {MLXSW_REG_SBXX_DIR_INGRESS, 0},
        {MLXSW_REG_SBXX_DIR_INGRESS, 1},
        {MLXSW_REG_SBXX_DIR_INGRESS, 2},
        {MLXSW_REG_SBXX_DIR_INGRESS, 3},
        {MLXSW_REG_SBXX_DIR_EGRESS, 0},
        {MLXSW_REG_SBXX_DIR_EGRESS, 1},
        {MLXSW_REG_SBXX_DIR_EGRESS, 2},
        {MLXSW_REG_SBXX_DIR_EGRESS, 3},
        {MLXSW_REG_SBXX_DIR_EGRESS, 15},
        {MLXSW_REG_SBXX_DIR_INGRESS, 4},
        {MLXSW_REG_SBXX_DIR_EGRESS, 4},
};

static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
        {MLXSW_REG_SBXX_DIR_INGRESS, 0},
        {MLXSW_REG_SBXX_DIR_INGRESS, 1},
        {MLXSW_REG_SBXX_DIR_INGRESS, 2},
        {MLXSW_REG_SBXX_DIR_INGRESS, 3},
        {MLXSW_REG_SBXX_DIR_EGRESS, 0},
        {MLXSW_REG_SBXX_DIR_EGRESS, 1},
        {MLXSW_REG_SBXX_DIR_EGRESS, 2},
        {MLXSW_REG_SBXX_DIR_EGRESS, 3},
        {MLXSW_REG_SBXX_DIR_EGRESS, 15},
        {MLXSW_REG_SBXX_DIR_INGRESS, 4},
        {MLXSW_REG_SBXX_DIR_EGRESS, 4},
};

#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

struct mlxsw_sp_sb_port {
        struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
        struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
        struct mlxsw_sp_sb_pm *pms;
};

struct mlxsw_sp_sb {
        struct mlxsw_sp_sb_pr *prs;
        struct mlxsw_sp_sb_port *ports;
        u32 cell_size;
        u32 max_headroom_cells;
        u64 sb_size;
};

struct mlxsw_sp_sb_vals {
        unsigned int pool_count;
        const struct mlxsw_sp_sb_pool_des *pool_dess;
        const struct mlxsw_sp_sb_pm *pms;
        const struct mlxsw_sp_sb_pm *pms_cpu;
        const struct mlxsw_sp_sb_pr *prs;
        const struct mlxsw_sp_sb_mm *mms;
        const struct mlxsw_sp_sb_cm *cms_ingress;
        const struct mlxsw_sp_sb_cm *cms_egress;
        const struct mlxsw_sp_sb_cm *cms_cpu;
        unsigned int mms_count;
        unsigned int cms_ingress_count;
        unsigned int cms_egress_count;
        unsigned int cms_cpu_count;
};

struct mlxsw_sp_sb_ops {
        u32 (*int_buf_size_get)(int mtu, u32 speed);
};

u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
        return mlxsw_sp->sb->cell_size * cells;
}

u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
        return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
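
/* Worked example of the conversion helpers above (illustrative only; the
 * real cell size is read from the device at init time): assuming a
 * hypothetical 96-byte cell, a 100-byte packet occupies
 * DIV_ROUND_UP(100, 96) = 2 cells, and converting those 2 cells back yields
 * 2 * 96 = 192 bytes. Rounding up on the bytes-to-cells path ensures a
 * buffer sized in cells is never smaller than the requested byte count.
 */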

static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
                                            u32 size_cells)
{
        /* Ports with eight lanes use two headroom buffers between which the
         * configured headroom size is split. Therefore, multiply the calculated
         * headroom size by two.
         */
        return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
}

static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
                                                 u16 pool_index)
{
        return &mlxsw_sp->sb->prs[pool_index];
}

static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
{
        if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
                return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
        else
                return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
                                                 u16 local_port, u8 pg_buff,
                                                 enum mlxsw_reg_sbxx_dir dir)
{
        struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];

        WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
        if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
                return &sb_port->ing_cms[pg_buff];
        else
                return &sb_port->eg_cms[pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
                                                 u16 local_port, u16 pool_index)
{
        return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}

static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
                                enum mlxsw_reg_sbpr_mode mode,
                                u32 size, bool infi_size)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbpr_pl[MLXSW_REG_SBPR_LEN];
        struct mlxsw_sp_sb_pr *pr;
        int err;

        mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
                            size, infi_size);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
        if (err)
                return err;

        if (infi_size)
                size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
        pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
        pr->mode = mode;
        pr->size = size;
        return 0;
}

static int mlxsw_sp_sb_pr_desc_write(struct mlxsw_sp *mlxsw_sp,
                                     enum mlxsw_reg_sbxx_dir dir,
                                     enum mlxsw_reg_sbpr_mode mode,
                                     u32 size, bool infi_size)
{
        char sbpr_pl[MLXSW_REG_SBPR_LEN];

        /* The FW default descriptor buffer configuration uses only pool 14 for
         * descriptors.
         */
        mlxsw_reg_sbpr_pack(sbpr_pl, 14, dir, mode, size, infi_size);
        mlxsw_reg_sbpr_desc_set(sbpr_pl, true);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
}

static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                                u8 pg_buff, u32 min_buff, u32 max_buff,
                                bool infi_max, u16 pool_index)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbcm_pl[MLXSW_REG_SBCM_LEN];
        struct mlxsw_sp_sb_cm *cm;
        int err;

        mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
                            min_buff, max_buff, infi_max, des->pool);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
        if (err)
                return err;

        if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
                if (infi_max)
                        max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
                                                        mlxsw_sp->sb->sb_size);

                cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
                                        des->dir);
                cm->min_buff = min_buff;
                cm->max_buff = max_buff;
                cm->pool_index = pool_index;
        }
        return 0;
}

static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                                u16 pool_index, u32 min_buff, u32 max_buff)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbpm_pl[MLXSW_REG_SBPM_LEN];
        struct mlxsw_sp_sb_pm *pm;
        int err;

        mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
                            min_buff, max_buff);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
        if (err)
                return err;

        pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
        pm->min_buff = min_buff;
        pm->max_buff = max_buff;
        return 0;
}

static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                                    u16 pool_index, struct list_head *bulk_list)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbpm_pl[MLXSW_REG_SBPM_LEN];

        if (local_port == MLXSW_PORT_CPU_PORT &&
            des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
                return 0;

        mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
                            true, 0, 0);
        return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
                                     bulk_list, NULL, 0);
}

static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
                                        char *sbpm_pl, size_t sbpm_pl_len,
                                        unsigned long cb_priv)
{
        struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

        mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                                    u16 pool_index, struct list_head *bulk_list)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbpm_pl[MLXSW_REG_SBPM_LEN];
        struct mlxsw_sp_sb_pm *pm;

        if (local_port == MLXSW_PORT_CPU_PORT &&
            des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
                return 0;

        pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
        mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
                            false, 0, 0);
        return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
                                     bulk_list,
                                     mlxsw_sp_sb_pm_occ_query_cb,
                                     (unsigned long) pm);
}

void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom)
{
        int prio;

        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
                switch (hdroom->mode) {
                case MLXSW_SP_HDROOM_MODE_DCB:
                        hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx;
                        break;
                case MLXSW_SP_HDROOM_MODE_TC:
                        hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].set_buf_idx;
                        break;
                }
        }
}

void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom)
{
        int prio;
        int i;

        for (i = 0; i < DCBX_MAX_BUFFERS; i++)
                hdroom->bufs.buf[i].lossy = true;

        for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) {
                if (!hdroom->prios.prio[prio].lossy)
                        hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false;
        }
}
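
/* For example, if priorities 3 and 4 are lossless and both map to buffer 1,
 * the first loop above marks every buffer lossy and the second loop clears
 * the lossy bit on buffer 1 only; all other buffers stay lossy.
 */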

static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu)
{
        return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy)
{
        if (lossy)
                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
        else
                mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
                                                    thres);
}

static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
                                         const struct mlxsw_sp_hdroom *hdroom)
{
        u16 delay_cells;

        delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);

        /* In the worst case scenario the delay will be made up of packets that
         * are all of size CELL_SIZE + 1, which means each packet will require
         * almost twice its true size when buffered in the switch. We therefore
         * multiply this value by the "cell factor", which is close to 2.
         *
         * Another MTU is added in case the transmitting host already started
         * transmitting a maximum length frame when the PFC packet was received.
         */
        return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
}
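
/* Illustrative arithmetic for the above, assuming a hypothetical 96-byte
 * cell, an MTU of 1500 and hdroom->delay_bytes = 10000: delay_cells =
 * DIV_ROUND_UP(10000, 96) = 105, so the function returns
 * 2 * 105 + DIV_ROUND_UP(1500, 96) = 210 + 16 = 226 cells.
 */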

static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
{
        u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(mtu, speed);

        return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
}

static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
{
        int prio;

        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
                if (hdroom->prios.prio[prio].buf_idx == buf)
                        return true;
        }
        return false;
}

void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct mlxsw_sp_hdroom *hdroom)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        unsigned int max_mtu = mlxsw_sp_port->dev->max_mtu;
        u16 reserve_cells;
        int i;

        max_mtu += MLXSW_PORT_ETH_FRAME_HDR;
        /* Internal buffer. */
        reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, max_mtu,
                                                         mlxsw_sp_port->max_speed);
        reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
        hdroom->int_buf.reserve_cells = reserve_cells;

        if (hdroom->int_buf.enable)
                hdroom->int_buf.size_cells = reserve_cells;
        else
                hdroom->int_buf.size_cells = 0;

        /* PG buffers. */
        for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
                struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
                u16 thres_cells;
                u16 delay_cells;

                if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
                        thres_cells = 0;
                        delay_cells = 0;
                } else if (buf->lossy) {
                        thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
                        delay_cells = 0;
                } else {
                        thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
                        delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
                }

                thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
                delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);

                buf->thres_cells = thres_cells;
                if (hdroom->mode == MLXSW_SP_HDROOM_MODE_DCB) {
                        buf->size_cells = thres_cells + delay_cells;
                } else {
                        /* Do not allow going below the minimum size, even if
                         * the user requested it.
                         */
                        buf->size_cells = max(buf->set_size_cells, buf->thres_cells);
                }
        }
}
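
/* Worked example for a lossless PG buffer, continuing the hypothetical
 * numbers above (96-byte cell, MTU 1500, 226 delay cells): thres_cells =
 * 2 * DIV_ROUND_UP(1500, 96) = 32 and, in DCB mode, size_cells =
 * 32 + 226 = 258 cells. On an eight-lane port both values are doubled
 * before being stored.
 */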

#define MLXSW_SP_PB_UNUSED 8

static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
                                             const struct mlxsw_sp_hdroom *hdroom, bool force)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pbmc_pl[MLXSW_REG_PBMC_LEN];
        bool dirty;
        int err;
        int i;

        dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
        if (!dirty && !force)
                return 0;

        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2);
        for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
                const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];

                if (i == MLXSW_SP_PB_UNUSED)
                        continue;

                mlxsw_sp_hdroom_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
        }

        mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
        if (err)
                return err;

        mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
        return 0;
}

static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
                                             const struct mlxsw_sp_hdroom *hdroom, bool force)
{
        char pptb_pl[MLXSW_REG_PPTB_LEN];
        bool dirty;
        int prio;
        int err;

        dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
        if (!dirty && !force)
                return 0;

        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
                mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);

        err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
        if (err)
                return err;

        mlxsw_sp_port->hdroom->prios = hdroom->prios;
        return 0;
}

static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
                                             const struct mlxsw_sp_hdroom *hdroom, bool force)
{
        char sbib_pl[MLXSW_REG_SBIB_LEN];
        bool dirty;
        int err;

        dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
        if (!dirty && !force)
                return 0;

        mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
        err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
        if (err)
                return err;

        mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
        return 0;
}

static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
                                     const struct mlxsw_sp_hdroom *hdroom)
{
        u32 taken_headroom_cells = 0;
        int i;

        for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
                taken_headroom_cells += hdroom->bufs.buf[i].size_cells;

        taken_headroom_cells += hdroom->int_buf.reserve_cells;
        return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
}

static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
                                       const struct mlxsw_sp_hdroom *hdroom, bool force)
{
        struct mlxsw_sp_hdroom orig_hdroom;
        struct mlxsw_sp_hdroom tmp_hdroom;
        int err;
        int i;

        /* Port buffers need to be configured in three steps. First, all buffers
         * with non-zero size are configured. Then, prio-to-buffer map is
         * updated, allowing traffic to flow to the now non-zero buffers.
         * Finally, zero-sized buffers are configured, because now no traffic
         * should be directed to them anymore. This way, in a non-congested
         * system, no packet drops are introduced by the reconfiguration.
         */
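
        /* For example, when traffic moves from a buffer being shrunk to zero
         * to a buffer being grown: the growing buffer is enlarged first while
         * the shrinking buffer keeps its old size, the prio-to-buffer map is
         * then switched over, and only afterwards is the old buffer shrunk to
         * zero, so traffic is never directed at a buffer that cannot hold it.
         */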

        orig_hdroom = *mlxsw_sp_port->hdroom;
        tmp_hdroom = orig_hdroom;
        for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
                if (hdroom->bufs.buf[i].size_cells)
                        tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
        }

        if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
            !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
                return -ENOBUFS;

        err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
        if (err)
                return err;

        err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
        if (err)
                goto err_configure_priomap;

        err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
        if (err)
                goto err_configure_buffers;

        err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
        if (err)
                goto err_configure_int_buf;

        *mlxsw_sp_port->hdroom = *hdroom;
        return 0;

err_configure_int_buf:
        mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
err_configure_buffers:
        mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
err_configure_priomap:
        mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
        return err;
}

int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
                              const struct mlxsw_sp_hdroom *hdroom)
{
        return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
}

static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_hdroom hdroom = {};
        u32 size9;
        int prio;

        hdroom.mtu = mlxsw_sp_port->dev->mtu;
        hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
                hdroom.prios.prio[prio].lossy = true;

        mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
        mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

        /* Buffer 9 is used for control traffic. */
        size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port,
                                                 mlxsw_sp_port->dev->max_mtu +
                                                 MLXSW_PORT_ETH_FRAME_HDR);
        hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);

        return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
}

static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_sb_port *sb_port)
{
        struct mlxsw_sp_sb_pm *pms;

        pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
                      GFP_KERNEL);
        if (!pms)
                return -ENOMEM;
        sb_port->pms = pms;
        return 0;
}

static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
        kfree(sb_port->pms);
}

static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
        unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
        struct mlxsw_sp_sb_pr *prs;
        int i;
        int err;

        mlxsw_sp->sb->ports = kcalloc(max_ports,
                                      sizeof(struct mlxsw_sp_sb_port),
                                      GFP_KERNEL);
        if (!mlxsw_sp->sb->ports)
                return -ENOMEM;

        prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
                      GFP_KERNEL);
        if (!prs) {
                err = -ENOMEM;
                goto err_alloc_prs;
        }
        mlxsw_sp->sb->prs = prs;

        for (i = 0; i < max_ports; i++) {
                err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
                if (err)
                        goto err_sb_port_init;
        }

        return 0;

err_sb_port_init:
        for (i--; i >= 0; i--)
                mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
        kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
        kfree(mlxsw_sp->sb->ports);
        return err;
}

static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
        int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
        int i;

        for (i = max_ports - 1; i >= 0; i--)
                mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
        kfree(mlxsw_sp->sb->prs);
        kfree(mlxsw_sp->sb->ports);
}

#define MLXSW_SP_SB_PR(_mode, _size)    \
        {                               \
                .mode = _mode,          \
                .size = _size,          \
        }

#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)    \
        {                                                               \
                .mode = _mode,                                          \
                .size = _size,                                          \
                .freeze_mode = _freeze_mode,                            \
                .freeze_size = _freeze_size,                            \
        }

#define MLXSW_SP1_SB_PR_CPU_SIZE        (256 * 1000)

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
                           true, false),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
                           true, true),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
                           MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
                           MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};

#define MLXSW_SP2_SB_PR_CPU_SIZE        (256 * 1000)

/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
                           true, false),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
                           true, true),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
                           MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
                           MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};

static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
                                const struct mlxsw_sp_sb_pr *prs,
                                const struct mlxsw_sp_sb_pool_des *pool_dess,
                                size_t prs_len)
{
        /* Round down, unlike mlxsw_sp_bytes_cells(). */
        u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
        u32 rest_cells[2] = {sb_cells, sb_cells};
        int i;
        int err;

        /* Calculate how much space to give to the "REST" pools in either
         * direction.
         */
        for (i = 0; i < prs_len; i++) {
                enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
                u32 size = prs[i].size;
                u32 size_cells;

                if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
                        continue;

                size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
                if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
                        continue;

                rest_cells[dir] -= size_cells;
        }

        for (i = 0; i < prs_len; i++) {
                u32 size = prs[i].size;
                u32 size_cells;

                if (size == MLXSW_SP_SB_INFI) {
                        err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
                                                   0, true);
                } else if (size == MLXSW_SP_SB_REST) {
                        size_cells = rest_cells[pool_dess[i].dir];
                        err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
                                                   size_cells, false);
                } else {
                        size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
                        err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
                                                   size_cells, false);
                }
                if (err)
                        return err;
        }

        err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
                                        MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
        if (err)
                return err;

        err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
                                        MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
        if (err)
                return err;

        return 0;
}
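
/* Illustrative REST accounting for the above, assuming a hypothetical
 * 1000-cell shared buffer: if the fixed-size egress pools sum to 300 cells,
 * the egress pool marked MLXSW_SP_SB_REST receives the remaining 700 cells.
 * Ingress is accounted separately with its own running total, and pools
 * marked MLXSW_SP_SB_INFI do not take part in the accounting at all.
 */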

#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)     \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = _pool,                    \
        }

#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)        \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = MLXSW_SP_SB_POOL_ING,     \
        }

#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)        \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = MLXSW_SP_SB_POOL_EGR,     \
        }

#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)     \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = MLXSW_SP_SB_POOL_EGR_MC,  \
                .freeze_pool = true,                    \
                .freeze_thresh = true,                  \
        }

static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
        MLXSW_SP_SB_CM_ING(10000, 8),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
        MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};

static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
        MLXSW_SP_SB_CM_ING(0, 7),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
        MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};

static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR(1, 0xff),
};

static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR(1, 0xff),
};

#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
};

static bool
mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
{
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

        return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
}

static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                                  enum mlxsw_reg_sbxx_dir dir,
                                  const struct mlxsw_sp_sb_cm *cms,
                                  size_t cms_len)
{
        const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
        int i;
        int err;

        for (i = 0; i < cms_len; i++) {
                const struct mlxsw_sp_sb_cm *cm;
                u32 min_buff;
                u32 max_buff;

                if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
                        continue; /* PG number 8 does not exist, skip it */
                cm = &cms[i];
                if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
                        continue;

                min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
                max_buff = cm->max_buff;
                if (max_buff == MLXSW_SP_SB_INFI) {
                        err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
                                                   min_buff, 0,
                                                   true, cm->pool_index);
                } else {
                        if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
                                                       cm->pool_index))
                                max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
                                                                max_buff);
                        err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
                                                   min_buff, max_buff,
                                                   false, cm->pool_index);
                }
                if (err)
                        return err;
        }
        return 0;
}

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        int err;

        err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
                                     mlxsw_sp_port->local_port,
                                     MLXSW_REG_SBXX_DIR_INGRESS,
                                     mlxsw_sp->sb_vals->cms_ingress,
                                     mlxsw_sp->sb_vals->cms_ingress_count);
        if (err)
                return err;
        return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
                                      mlxsw_sp_port->local_port,
                                      MLXSW_REG_SBXX_DIR_EGRESS,
                                      mlxsw_sp->sb_vals->cms_egress,
                                      mlxsw_sp->sb_vals->cms_egress_count);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
        return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
                                      mlxsw_sp->sb_vals->cms_cpu,
                                      mlxsw_sp->sb_vals->cms_cpu_count);
}

#define MLXSW_SP_SB_PM(_min_buff, _max_buff)    \
        {                                       \
                .min_buff = _min_buff,          \
                .max_buff = _max_buff,          \
        }

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, 7),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(10000, 90000),
        MLXSW_SP_SB_PM(0, 8),   /* 50% occupancy */
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
        MLXSW_SP_SB_PM(0, 7),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 7),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(10000, 90000),
        MLXSW_SP_SB_PM(0, 8),   /* 50% occupancy */
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

/* Order according to mlxsw_sp*_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 90000),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
                                const struct mlxsw_sp_sb_pm *pms,
                                bool skip_ingress)
{
        int i, err;

        for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
                const struct mlxsw_sp_sb_pm *pm = &pms[i];
                const struct mlxsw_sp_sb_pool_des *des;
                u32 max_buff;
                u32 min_buff;

                des = &mlxsw_sp->sb_vals->pool_dess[i];
                if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
                        continue;

                min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
                max_buff = pm->max_buff;
                if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
                        max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
                err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
                                           max_buff);
                if (err)
                        return err;
        }
        return 0;
}

static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

        return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
                                    mlxsw_sp->sb_vals->pms, false);
}

static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
{
        return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
                                    true);
}

#define MLXSW_SP_SB_MM(_min_buff, _max_buff)            \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = MLXSW_SP_SB_POOL_EGR,     \
        }

static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
};

static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
        char sbmm_pl[MLXSW_REG_SBMM_LEN];
        int i;
        int err;

        for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
                const struct mlxsw_sp_sb_pool_des *des;
                const struct mlxsw_sp_sb_mm *mc;
                u32 min_buff;

                mc = &mlxsw_sp->sb_vals->mms[i];
                des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
                /* All pools used by sb_mm's are initialized using dynamic
                 * thresholds, therefore 'max_buff' isn't specified in cells.
                 */
                min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
                mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
                                    des->pool);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
                if (err)
                        return err;
        }
        return 0;
}

static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
                                u16 *p_ingress_len, u16 *p_egress_len)
{
        int i;

        for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
                if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
                    MLXSW_REG_SBXX_DIR_INGRESS)
                        (*p_ingress_len)++;
                else
                        (*p_egress_len)++;
        }

        WARN(*p_egress_len == 0, "No egress pools\n");
}

const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
        .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
        .pool_dess = mlxsw_sp1_sb_pool_dess,
        .pms = mlxsw_sp1_sb_pms,
        .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
        .prs = mlxsw_sp1_sb_prs,
        .mms = mlxsw_sp_sb_mms,
        .cms_ingress = mlxsw_sp1_sb_cms_ingress,
        .cms_egress = mlxsw_sp1_sb_cms_egress,
        .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
        .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
        .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
        .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
        .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};

const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
        .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
        .pool_dess = mlxsw_sp2_sb_pool_dess,
        .pms = mlxsw_sp2_sb_pms,
        .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
        .prs = mlxsw_sp2_sb_prs,
        .mms = mlxsw_sp_sb_mms,
        .cms_ingress = mlxsw_sp2_sb_cms_ingress,
        .cms_egress = mlxsw_sp2_sb_cms_egress,
        .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
        .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
        .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
        .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
        .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};

static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
{
        return mtu * 5 / 2;
}

static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
{
        return 3 * mtu + buffer_factor * speed / 1000;
}
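
/* Illustrative arithmetic, assuming the speed argument is given in Mb/s:
 * for a 100 Gb/s port (speed = 100000), an MTU of 9216 and the Spectrum-2
 * buffer factor of 38 defined below, the result is
 * 3 * 9216 + 38 * 100000 / 1000 = 27648 + 3800 = 31448 bytes.
 */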

#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38

static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
{
        int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;

        return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
}

#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
{
        int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;

        return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
}

const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
        .int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
};

const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
        .int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
};

const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
        .int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
        u32 max_headroom_size;
        u16 ing_pool_count = 0;
        u16 eg_pool_count = 0;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
                return -EIO;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
                return -EIO;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
                return -EIO;

        mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
        if (!mlxsw_sp->sb)
                return -ENOMEM;
        mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
        mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                   GUARANTEED_SHARED_BUFFER);
        max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                               MAX_HEADROOM_SIZE);
        /* Round down, because this limit must not be overstepped. */
        mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
                                                mlxsw_sp->sb->cell_size;

        err = mlxsw_sp_sb_ports_init(mlxsw_sp);
        if (err)
                goto err_sb_ports_init;
        err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
                                   mlxsw_sp->sb_vals->pool_dess,
                                   mlxsw_sp->sb_vals->pool_count);
        if (err)
                goto err_sb_prs_init;
        err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
        if (err)
                goto err_sb_cpu_port_sb_cms_init;
        err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
        if (err)
                goto err_sb_cpu_port_pms_init;
        err = mlxsw_sp_sb_mms_init(mlxsw_sp);
        if (err)
                goto err_sb_mms_init;
        mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
        err = devl_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
                               mlxsw_sp->sb->sb_size,
                               ing_pool_count,
                               eg_pool_count,
                               MLXSW_SP_SB_ING_TC_COUNT,
                               MLXSW_SP_SB_EG_TC_COUNT);
        if (err)
                goto err_devlink_sb_register;

        return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
        mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
        kfree(mlxsw_sp->sb);
        return err;
}
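
/* Once registered, the shared buffer is visible to user space through the
 * devlink shared-buffer API, e.g. (illustrative shell session with a
 * hypothetical PCI address):
 *
 *   $ devlink sb show pci/0000:03:00.0
 *   $ devlink sb pool show pci/0000:03:00.0 sb 0 pool 4
 */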
1318
1319 void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
1320 {
1321         devl_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
1322         mlxsw_sp_sb_ports_fini(mlxsw_sp);
1323         kfree(mlxsw_sp->sb);
1324 }
1325
1326 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
1327 {
1328         int err;
1329
1330         mlxsw_sp_port->hdroom = kzalloc(sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
1331         if (!mlxsw_sp_port->hdroom)
1332                 return -ENOMEM;
1333         mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;
1334
1335         err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
1336         if (err)
1337                 goto err_headroom_init;
1338         err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
1339         if (err)
1340                 goto err_port_sb_cms_init;
1341         err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
1342         if (err)
1343                 goto err_port_sb_pms_init;
1344         return 0;
1345
1346 err_port_sb_pms_init:
1347 err_port_sb_cms_init:
1348 err_headroom_init:
1349         kfree(mlxsw_sp_port->hdroom);
1350         return err;
1351 }
1352
1353 void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1354 {
1355         kfree(mlxsw_sp_port->hdroom);
1356 }
1357
1358 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
1359                          unsigned int sb_index, u16 pool_index,
1360                          struct devlink_sb_pool_info *pool_info)
1361 {
1362         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1363         enum mlxsw_reg_sbxx_dir dir;
1364         struct mlxsw_sp_sb_pr *pr;
1365
1366         dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
1367         pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1368         pool_info->pool_type = (enum devlink_sb_pool_type) dir;
1369         pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
1370         pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
1371         pool_info->cell_size = mlxsw_sp->sb->cell_size;
1372         return 0;
1373 }
1374
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
                         unsigned int sb_index, u16 pool_index, u32 size,
                         enum devlink_sb_threshold_type threshold_type,
                         struct netlink_ext_ack *extack)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
        const struct mlxsw_sp_sb_pr *pr;
        enum mlxsw_reg_sbpr_mode mode;

        mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
        pr = &mlxsw_sp->sb_vals->prs[pool_index];

        if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                      GUARANTEED_SHARED_BUFFER)) {
                NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
                return -EINVAL;
        }

        if (pr->freeze_mode && pr->mode != mode) {
                NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
                return -EINVAL;
        }

        if (pr->freeze_size && pr->size != size) {
                NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
                return -EINVAL;
        }

        return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
                                    pool_size, false);
}

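/* For pools with a dynamic threshold type, devlink threshold values are
 * simply the hardware max_buff codes shifted up by two, so the devlink
 * range [3, 16] maps onto [MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN,
 * MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX]. For static pools the threshold is a
 * byte count, converted to and from cells by the helpers below.
 */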
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
                                     u32 max_buff)
{
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

        if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
                return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
        return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}

static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
                                    u32 threshold, u32 *p_max_buff,
                                    struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

        if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
                int val;

                val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
                if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
                    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
                        NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
                        return -EINVAL;
                }
                *p_max_buff = val;
        } else {
                *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
        }
        return 0;
}

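/* devlink per-port pool threshold ops, backed by the per-(port, pool)
 * quota (SBPM). The CPU port quotas are configured by the driver and
 * may not be changed from user space.
 */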
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 *p_threshold)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
        struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
                                                       pool_index);

        *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
                                                 pm->max_buff);
        return 0;
}

int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 threshold, struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
        u32 max_buff;
        int err;

        if (local_port == MLXSW_PORT_CPU_PORT) {
                NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
                return -EINVAL;
        }

        err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
                                       threshold, &max_buff, extack);
        if (err)
                return err;

        return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
                                    0, max_buff);
}

int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 *p_pool_index, u32 *p_threshold)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
        struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
                                                       pg_buff, dir);

        *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
                                                 cm->max_buff);
        *p_pool_index = cm->pool_index;
        return 0;
}

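/* devlink .sb_tc_pool_bind_set op. Since devlink pool types and
 * mlxsw_reg_sbxx_dir use the same values, the cast below suffices to
 * compare the requested direction against the pool's descriptor, and
 * cross-direction bindings are rejected. Bindings that the driver
 * depends on (freeze_pool / freeze_thresh) may not be changed either.
 */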
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 pool_index, u32 threshold,
                                 struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
        const struct mlxsw_sp_sb_cm *cm;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
        u32 max_buff;
        int err;

        if (local_port == MLXSW_PORT_CPU_PORT) {
                NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
                return -EINVAL;
        }

        if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
                NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
                return -EINVAL;
        }

        if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
                cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
        else
                cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];

        if (cm->freeze_pool && cm->pool_index != pool_index) {
                NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
                return -EINVAL;
        }

        if (cm->freeze_thresh && cm->max_buff != threshold) {
                NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
                return -EINVAL;
        }

        err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
                                       threshold, &max_buff, extack);
        if (err)
                return err;

        return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
                                    0, max_buff, false, pool_index);
}

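/* A single SBSR query returns one occupancy record per ingress PG and
 * per egress TC for every port included in the query, so the number of
 * ports that can be covered by one transaction is bounded by the
 * register's record capacity:
 */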
#define MASKED_COUNT_MAX \
        (MLXSW_REG_SBSR_REC_MAX_COUNT / \
         (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

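/* The query context travels to the response handler packed into the
 * unsigned long cb_priv of mlxsw_reg_trans_query(), so this structure
 * must stay no larger than sizeof(unsigned long).
 */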
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
        u8 masked_count;
        u16 local_port_1;
};

static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
                                        char *sbsr_pl, size_t sbsr_pl_len,
                                        unsigned long cb_priv)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
        u8 masked_count;
        u16 local_port;
        int rec_index = 0;
        struct mlxsw_sp_sb_cm *cm;
        int i;

        memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

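        /* Records are laid out ingress first, then egress, iterating
         * over the same ports in the same order used when the query was
         * built, so the ports are walked twice below with rec_index
         * running across both passes.
         */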
        masked_count = 0;
        for (local_port = cb_ctx.local_port_1;
             local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                if (local_port == MLXSW_PORT_CPU_PORT) {
                        /* Ingress quotas are not supported for the CPU port */
                        masked_count++;
                        continue;
                }
                for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
                        cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
                                                MLXSW_REG_SBXX_DIR_INGRESS);
                        mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
                                                  &cm->occ.cur, &cm->occ.max);
                }
                if (++masked_count == cb_ctx.masked_count)
                        break;
        }
        masked_count = 0;
        for (local_port = cb_ctx.local_port_1;
             local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
                        cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
                                                MLXSW_REG_SBXX_DIR_EGRESS);
                        mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
                                                  &cm->occ.cur, &cm->occ.max);
                }
                if (++masked_count == cb_ctx.masked_count)
                        break;
        }
}

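/* devlink .sb_occ_snapshot op, typically reached via
 * "devlink sb occupancy snapshot DEV". Ports are queried in batches:
 * a batch ends once MASKED_COUNT_MAX ports have been added, or when
 * the next port falls outside the SBSR port page currently being
 * addressed. The occupancy values land in the driver's cache through
 * mlxsw_sp_sb_sr_occ_query_cb() once the bulk transaction completes.
 */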
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
                             unsigned int sb_index)
{
        u16 local_port, local_port_1, first_local_port, last_local_port;
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
        u8 masked_count, current_page = 0;
        unsigned long cb_priv = 0;
        LIST_HEAD(bulk_list);
        char *sbsr_pl;
        int i;
        int err;
        int err2;

        sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
        if (!sbsr_pl)
                return -ENOMEM;

        local_port = MLXSW_PORT_CPU_PORT;
next_batch:
        local_port_1 = local_port;
        masked_count = 0;
        mlxsw_reg_sbsr_pack(sbsr_pl, false);
        mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
        first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
        last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
                          MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;

        for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
                mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
        for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
                mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
        for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                if (local_port > last_local_port) {
                        current_page++;
                        goto do_query;
                }
                if (local_port != MLXSW_PORT_CPU_PORT) {
                        /* Ingress quotas are not supported for the CPU port */
                        mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
                                                             local_port - first_local_port,
                                                             1);
                }
                mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
                                                    local_port - first_local_port,
                                                    1);
                for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
                        err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
                                                       &bulk_list);
                        if (err)
                                goto out;
                }
                if (++masked_count == MASKED_COUNT_MAX)
                        goto do_query;
        }

do_query:
        cb_ctx.masked_count = masked_count;
        cb_ctx.local_port_1 = local_port_1;
        memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
        err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
                                    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
                                    cb_priv);
        if (err)
                goto out;
        if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
                local_port++;
                goto next_batch;
        }

out:
        err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
        if (!err)
                err = err2;
        kfree(sbsr_pl);
        return err;
}

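/* devlink .sb_occ_max_clear op. Mirrors the snapshot flow above, but
 * packs SBSR with the clear bit set and registers no response
 * callback, since nothing needs to be cached when the watermarks are
 * being reset.
 */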
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
                              unsigned int sb_index)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        u16 local_port, first_local_port, last_local_port;
        LIST_HEAD(bulk_list);
        unsigned int masked_count;
        u8 current_page = 0;
        char *sbsr_pl;
        int i;
        int err;
        int err2;

        sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
        if (!sbsr_pl)
                return -ENOMEM;

        local_port = MLXSW_PORT_CPU_PORT;
next_batch:
        masked_count = 0;
        mlxsw_reg_sbsr_pack(sbsr_pl, true);
        mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
        first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
        last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
                          MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;

        for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
                mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
        for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
                mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
        for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                if (local_port > last_local_port) {
                        current_page++;
                        goto do_query;
                }
                if (local_port != MLXSW_PORT_CPU_PORT) {
                        /* Ingress quotas are not supported for the CPU port */
                        mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
                                                             local_port - first_local_port,
                                                             1);
                }
                mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
                                                    local_port - first_local_port,
                                                    1);
                for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
                        err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
                                                       &bulk_list);
                        if (err)
                                goto out;
                }
                if (++masked_count == MASKED_COUNT_MAX)
                        goto do_query;
        }

do_query:
        err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
                                    &bulk_list, NULL, 0);
        if (err)
                goto out;
        if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
                local_port++;
                goto next_batch;
        }

out:
        err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
        if (!err)
                err = err2;
        kfree(sbsr_pl);
        return err;
}

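/* The occupancy getters below do not talk to the device; they report
 * the values cached by the last snapshot, converted from cells to
 * bytes for devlink.
 */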
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
                                  unsigned int sb_index, u16 pool_index,
                                  u32 *p_cur, u32 *p_max)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
        struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
                                                       pool_index);

        *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
        *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
        return 0;
}

int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
                                     unsigned int sb_index, u16 tc_index,
                                     enum devlink_sb_pool_type pool_type,
                                     u32 *p_cur, u32 *p_max)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
        struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
                                                       pg_buff, dir);

        *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
        *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
        return 0;
}