Merge branch 'for-v5.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/ebieder...
[linux-2.6-microblaze.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_buffers.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 #include <linux/netlink.h>
10
11 #include "spectrum.h"
12 #include "core.h"
13 #include "port.h"
14 #include "reg.h"
15
/* Cached per-pool configuration, mirroring what was programmed via the
 * SBPR register.
 */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;	/* Pool size, in cells (see mlxsw_sp_sb_pr_write()). */
	/* NOTE(review): the freeze bits presumably forbid runtime changes
	 * of mode/size via devlink-sb; enforcement is outside this chunk.
	 */
	u8 freeze_mode:1,
	   freeze_size:1;
};
22
/* Current and high-watermark occupancy as reported by SBPM/SBCM queries.
 * NOTE(review): the "mlxsw_cp_" prefix looks like a typo for "mlxsw_sp_",
 * but renaming would touch every user of the struct; left as-is.
 */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
27
/* Cached configuration and occupancy of one {port, PG/TC} quota,
 * mirroring the SBCM register.
 */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;		/* Index into mlxsw_sp_sb_vals::pool_dess. */
	struct mlxsw_cp_sb_occ occ;
	/* NOTE(review): the freeze bits presumably forbid changing the
	 * bound pool / threshold at runtime; enforcement is outside this
	 * chunk.
	 */
	u8 freeze_pool:1,
	   freeze_thresh:1;
};
36
/* Magic pool sizes: "infinite" (the whole shared buffer) and "rest"
 * (whatever remains after all explicitly sized pools are accounted for;
 * see mlxsw_sp_sb_prs_init()).
 */
#define MLXSW_SP_SB_INFI -1U
#define MLXSW_SP_SB_REST -2U
39
/* Cached configuration and occupancy of one {port, pool} quota,
 * mirroring the SBPM register.
 */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};
45
/* Quota description consumed through the mlxsw_sp_sb_vals::mms table.
 * NOTE(review): presumably programmed via the SBMM register; the user of
 * this table is outside the visible chunk.
 */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;		/* Index into mlxsw_sp_sb_vals::pool_dess. */
};
51
/* Descriptor mapping a logical pool index to the hardware's
 * {direction, per-direction pool number} pair.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};
56
/* Well-known logical pool indices into the pool_dess tables below. */
#define MLXSW_SP_SB_POOL_ING            0	/* Ingress pool 0. */
#define MLXSW_SP_SB_POOL_EGR            4	/* Egress pool 0. */
#define MLXSW_SP_SB_POOL_EGR_MC         8	/* Egress pool 15 (multicast). */
#define MLXSW_SP_SB_POOL_ING_CPU        9	/* Ingress pool 4 (CPU traffic). */
#define MLXSW_SP_SB_POOL_EGR_CPU        10	/* Egress pool 4 (CPU traffic). */
62
/* Logical pool index -> {direction, HW pool} mapping for Spectrum-1.
 * Order must match mlxsw_sp1_sb_prs and the MLXSW_SP_SB_POOL_* indices.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},	/* Multicast. */
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},	/* CPU. */
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},		/* CPU. */
};
76
/* Logical pool index -> {direction, HW pool} mapping for Spectrum-2/3.
 * Currently identical to the Spectrum-1 table, but kept separate so the
 * generations can diverge. Order must match mlxsw_sp2_sb_prs and the
 * MLXSW_SP_SB_POOL_* indices.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},	/* Multicast. */
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},	/* CPU. */
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},		/* CPU. */
};
90
/* Number of PG/TC quota slots cached per port, per direction. */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shared-buffer state: per-TC quotas and per-pool quotas. */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;	/* One entry per pool (pool_count). */
};
99
/* Top-level shared-buffer state for one ASIC instance. */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;	/* One entry per pool. */
	struct mlxsw_sp_sb_port *ports;	/* Indexed by local port. */
	u32 cell_size;			/* Bytes per buffer cell. */
	u32 max_headroom_cells;		/* Per-port headroom budget. */
	u64 sb_size;			/* Total shared buffer, in bytes. */
};
107
/* Per-ASIC-generation table of default shared-buffer settings. The
 * *_count fields give the lengths of the correspondingly named arrays;
 * pool-indexed arrays (pms, pms_cpu, prs) have pool_count entries.
 */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;	/* Per-port pool quotas. */
	const struct mlxsw_sp_sb_pm *pms_cpu;	/* CPU-port pool quotas. */
	const struct mlxsw_sp_sb_pr *prs;	/* Pool sizes/modes. */
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
123
/* Per-ASIC-generation shared-buffer operations. */
struct mlxsw_sp_sb_ops {
	/* Recommended internal buffer size, in bytes, for a given MTU and
	 * port speed (converted to cells by the caller).
	 */
	u32 (*int_buf_size_get)(int mtu, u32 speed);
};
127
128 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
129 {
130         return mlxsw_sp->sb->cell_size * cells;
131 }
132
133 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
134 {
135         return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
136 }
137
138 static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
139                                             u32 size_cells)
140 {
141         /* Ports with eight lanes use two headroom buffers between which the
142          * configured headroom size is split. Therefore, multiply the calculated
143          * headroom size by two.
144          */
145         return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
146 }
147
148 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
149                                                  u16 pool_index)
150 {
151         return &mlxsw_sp->sb->prs[pool_index];
152 }
153
154 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
155 {
156         if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
157                 return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
158         else
159                 return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
160 }
161
162 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
163                                                  u8 local_port, u8 pg_buff,
164                                                  enum mlxsw_reg_sbxx_dir dir)
165 {
166         struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
167
168         WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
169         if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
170                 return &sb_port->ing_cms[pg_buff];
171         else
172                 return &sb_port->eg_cms[pg_buff];
173 }
174
175 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
176                                                  u8 local_port, u16 pool_index)
177 {
178         return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
179 }
180
/* Program pool @pool_index via the SBPR register and, on success, update
 * the driver cache.
 *
 * @size: pool size, in cells. With @infi_size the device presumably
 * sizes the pool itself (TODO confirm against mlxsw_reg_sbpr_pack());
 * the cache then records the whole shared buffer instead.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
                                enum mlxsw_reg_sbpr_mode mode,
                                u32 size, bool infi_size)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbpr_pl[MLXSW_REG_SBPR_LEN];
        struct mlxsw_sp_sb_pr *pr;
        int err;

        mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
                            size, infi_size);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
        if (err)
                return err;

        /* The cache is only updated after the device accepted the write. */
        if (infi_size)
                size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
        pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
        pr->mode = mode;
        pr->size = size;
        return 0;
}
204
/* Program one {port, PG/TC} quota via the SBCM register and, on success,
 * update the cache. Some table entries program PGs beyond the cached
 * range (see mlxsw_sp_sb_cm_exists()); those are written to the device
 * but not cached.
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                u8 pg_buff, u32 min_buff, u32 max_buff,
                                bool infi_max, u16 pool_index)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbcm_pl[MLXSW_REG_SBCM_LEN];
        struct mlxsw_sp_sb_cm *cm;
        int err;

        mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
                            min_buff, max_buff, infi_max, des->pool);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
        if (err)
                return err;

        if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
                /* With @infi_max the cache records the whole shared
                 * buffer as the maximum, mirroring mlxsw_sp_sb_pr_write().
                 */
                if (infi_max)
                        max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
                                                        mlxsw_sp->sb->sb_size);

                cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
                                        des->dir);
                cm->min_buff = min_buff;
                cm->max_buff = max_buff;
                cm->pool_index = pool_index;
        }
        return 0;
}
234
/* Program one {port, pool} quota via the SBPM register and, on success,
 * update the cache.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                u16 pool_index, u32 min_buff, u32 max_buff)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbpm_pl[MLXSW_REG_SBPM_LEN];
        struct mlxsw_sp_sb_pm *pm;
        int err;

        mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
                            min_buff, max_buff);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
        if (err)
                return err;

        pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
        pm->min_buff = min_buff;
        pm->max_buff = max_buff;
        return 0;
}
255
/* Queue (on @bulk_list) an SBPM query for one {port, pool}; the "true"
 * argument presumably requests clear-on-read of the max-occupancy
 * watermark (TODO confirm against mlxsw_reg_sbpm_pack()). No completion
 * callback is needed since the result is discarded.
 *
 * The CPU port / ingress combination is skipped — NOTE(review):
 * presumably unsupported by the device; confirm.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u16 pool_index, struct list_head *bulk_list)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbpm_pl[MLXSW_REG_SBPM_LEN];

        if (local_port == MLXSW_PORT_CPU_PORT &&
            des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
                return 0;

        mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
                            true, 0, 0);
        return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
                                     bulk_list, NULL, 0);
}
272
273 static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
274                                         char *sbpm_pl, size_t sbpm_pl_len,
275                                         unsigned long cb_priv)
276 {
277         struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
278
279         mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
280 }
281
/* Queue (on @bulk_list) an occupancy query for one {port, pool}; the
 * result lands in the cache via mlxsw_sp_sb_pm_occ_query_cb().
 *
 * The CPU port / ingress combination is skipped like in
 * mlxsw_sp_sb_pm_occ_clear(), so the cached occupancy for that
 * combination is never refreshed.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u16 pool_index, struct list_head *bulk_list)
{
        const struct mlxsw_sp_sb_pool_des *des =
                &mlxsw_sp->sb_vals->pool_dess[pool_index];
        char sbpm_pl[MLXSW_REG_SBPM_LEN];
        struct mlxsw_sp_sb_pm *pm;

        if (local_port == MLXSW_PORT_CPU_PORT &&
            des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
                return 0;

        pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
        mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
                            false, 0, 0);
        return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
                                     bulk_list,
                                     mlxsw_sp_sb_pm_occ_query_cb,
                                     (unsigned long) pm);
}
302
303 void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom)
304 {
305         int prio;
306
307         for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
308                 switch (hdroom->mode) {
309                 case MLXSW_SP_HDROOM_MODE_DCB:
310                         hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx;
311                         break;
312                 case MLXSW_SP_HDROOM_MODE_TC:
313                         hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].set_buf_idx;
314                         break;
315                 }
316         }
317 }
318
319 void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom)
320 {
321         int prio;
322         int i;
323
324         for (i = 0; i < DCBX_MAX_BUFFERS; i++)
325                 hdroom->bufs.buf[i].lossy = true;
326
327         for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) {
328                 if (!hdroom->prios.prio[prio].lossy)
329                         hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false;
330         }
331 }
332
333 static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu)
334 {
335         return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
336 }
337
338 static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy)
339 {
340         if (lossy)
341                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
342         else
343                 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
344                                                     thres);
345 }
346
347 static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
348                                          const struct mlxsw_sp_hdroom *hdroom)
349 {
350         u16 delay_cells;
351
352         delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);
353
354         /* In the worst case scenario the delay will be made up of packets that
355          * are all of size CELL_SIZE + 1, which means each packet will require
356          * almost twice its true size when buffered in the switch. We therefore
357          * multiply this value by the "cell factor", which is close to 2.
358          *
359          * Another MTU is added in case the transmitting host already started
360          * transmitting a maximum length frame when the PFC packet was received.
361          */
362         return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
363 }
364
365 static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
366 {
367         u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(speed, mtu);
368
369         return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
370 }
371
372 static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
373 {
374         int prio;
375
376         for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
377                 if (hdroom->prios.prio[prio].buf_idx == buf)
378                         return true;
379         }
380         return false;
381 }
382
/* Recompute the internal-buffer reserve/size and each PG buffer's
 * threshold/size from the current MTU, speed, lossiness and mode. This
 * is pure computation on @hdroom; nothing is written to the device here.
 */
void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
                                      struct mlxsw_sp_hdroom *hdroom)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 reserve_cells;
        int i;

        /* Internal buffer. */
        reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_speed,
                                                         mlxsw_sp_port->max_mtu);
        reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
        hdroom->int_buf.reserve_cells = reserve_cells;

        /* The reserve is always accounted for; the size is only non-zero
         * when the internal buffer is actually enabled.
         */
        if (hdroom->int_buf.enable)
                hdroom->int_buf.size_cells = reserve_cells;
        else
                hdroom->int_buf.size_cells = 0;

        /* PG buffers. */
        for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
                struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
                u16 thres_cells;
                u16 delay_cells;

                /* Unused PGs get no headroom; lossy PGs need no PFC delay
                 * allowance on top of the threshold.
                 */
                if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
                        thres_cells = 0;
                        delay_cells = 0;
                } else if (buf->lossy) {
                        thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
                        delay_cells = 0;
                } else {
                        thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
                        delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
                }

                thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
                delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);

                buf->thres_cells = thres_cells;
                if (hdroom->mode == MLXSW_SP_HDROOM_MODE_DCB) {
                        buf->size_cells = thres_cells + delay_cells;
                } else {
                        /* Do not allow going below the minimum size, even if
                         * the user requested it.
                         */
                        buf->size_cells = max(buf->set_size_cells, buf->thres_cells);
                }
        }
}
432
/* Index of the one port buffer (PG) deliberately left unconfigured by
 * mlxsw_sp_hdroom_configure_buffers().
 */
#define MLXSW_SP_PB_UNUSED 8
434
/* Program the per-PG headroom buffers via the PBMC register. The write
 * is skipped when the cached configuration already matches, unless
 * @force is set; on success, the cache is updated.
 */
static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
                                             const struct mlxsw_sp_hdroom *hdroom, bool force)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pbmc_pl[MLXSW_REG_PBMC_LEN];
        bool dirty;
        int err;
        int i;

        dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
        if (!dirty && !force)
                return 0;

        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2);
        for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
                const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];

                /* PG 8 is deliberately never configured. */
                if (i == MLXSW_SP_PB_UNUSED)
                        continue;

                mlxsw_sp_hdroom_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
        }

        /* The port's shared-headroom buffer is configured to zero size. */
        mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
        if (err)
                return err;

        mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
        return 0;
}
466
/* Program the priority-to-buffer map via the PPTB register. The write is
 * skipped when the cached map already matches, unless @force is set; on
 * success, the cache is updated.
 */
static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
                                             const struct mlxsw_sp_hdroom *hdroom, bool force)
{
        char pptb_pl[MLXSW_REG_PPTB_LEN];
        bool dirty;
        int prio;
        int err;

        dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
        if (!dirty && !force)
                return 0;

        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
                mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);

        err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
        if (err)
                return err;

        mlxsw_sp_port->hdroom->prios = hdroom->prios;
        return 0;
}
490
/* Program the internal buffer size via the SBIB register. The write is
 * skipped when the cached configuration already matches, unless @force
 * is set; on success, the cache is updated.
 */
static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
                                             const struct mlxsw_sp_hdroom *hdroom, bool force)
{
        char sbib_pl[MLXSW_REG_SBIB_LEN];
        bool dirty;
        int err;

        dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
        if (!dirty && !force)
                return 0;

        mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
        err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
        if (err)
                return err;

        mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
        return 0;
}
510
511 static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
512                                      const struct mlxsw_sp_hdroom *hdroom)
513 {
514         u32 taken_headroom_cells = 0;
515         int i;
516
517         for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
518                 taken_headroom_cells += hdroom->bufs.buf[i].size_cells;
519
520         taken_headroom_cells += hdroom->int_buf.reserve_cells;
521         return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
522 }
523
/* Apply @hdroom to the port's hardware configuration, validating that
 * both the intermediate and the final configuration fit within the
 * headroom budget. On failure, partially applied steps are rolled back
 * to the previously cached configuration.
 */
static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
                                       const struct mlxsw_sp_hdroom *hdroom, bool force)
{
        struct mlxsw_sp_hdroom orig_hdroom;
        struct mlxsw_sp_hdroom tmp_hdroom;
        int err;
        int i;

        /* Port buffers need to be configured in three steps. First, all buffers
         * with non-zero size are configured. Then, prio-to-buffer map is
         * updated, allowing traffic to flow to the now non-zero buffers.
         * Finally, zero-sized buffers are configured, because now no traffic
         * should be directed to them anymore. This way, in a non-congested
         * system, no packet drops are introduced by the reconfiguration.
         */

        orig_hdroom = *mlxsw_sp_port->hdroom;
        tmp_hdroom = orig_hdroom;
        /* tmp_hdroom: old config with the new non-zero buffer sizes
         * overlaid — the "step one" state described above.
         */
        for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
                if (hdroom->bufs.buf[i].size_cells)
                        tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
        }

        if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
            !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
                return -ENOBUFS;

        err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
        if (err)
                return err;

        err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
        if (err)
                goto err_configure_priomap;

        err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
        if (err)
                goto err_configure_buffers;

        err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
        if (err)
                goto err_configure_int_buf;

        *mlxsw_sp_port->hdroom = *hdroom;
        return 0;

        /* Roll back in reverse order of application; errors during
         * rollback are ignored (best effort).
         */
err_configure_int_buf:
        mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
err_configure_buffers:
        mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
err_configure_priomap:
        mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
        return err;
}
578
/* Public entry point: apply @hdroom to the port, writing to the device
 * only the parts that differ from the cached configuration.
 */
int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
                              const struct mlxsw_sp_hdroom *hdroom)
{
        return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
}
584
/* Install the default (DCB mode, all priorities lossy) headroom
 * configuration on a port at init time, forcing the writes to hardware.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_hdroom hdroom = {};
        u32 size9;
        int prio;

        hdroom.mtu = mlxsw_sp_port->dev->mtu;
        hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
                hdroom.prios.prio[prio].lossy = true;

        mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
        mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

        /* Buffer 9 is used for control traffic. */
        /* NOTE(review): the 8x adjustment is applied to bytes here before
         * converting to cells, whereas elsewhere it is applied to cells;
         * the results differ only in rounding — confirm this is intended.
         */
        size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
        hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);

        return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
}
606
607 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
608                                  struct mlxsw_sp_sb_port *sb_port)
609 {
610         struct mlxsw_sp_sb_pm *pms;
611
612         pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
613                       GFP_KERNEL);
614         if (!pms)
615                 return -ENOMEM;
616         sb_port->pms = pms;
617         return 0;
618 }
619
/* Free the per-pool quota cache allocated by mlxsw_sp_sb_port_init(). */
static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
        kfree(sb_port->pms);
}
624
/* Allocate the per-port array, the pool descriptor cache, and each
 * port's per-pool quota cache. On failure, everything allocated so far
 * is unwound via the goto ladder (most recent first).
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
        unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
        struct mlxsw_sp_sb_pr *prs;
        int i;
        int err;

        mlxsw_sp->sb->ports = kcalloc(max_ports,
                                      sizeof(struct mlxsw_sp_sb_port),
                                      GFP_KERNEL);
        if (!mlxsw_sp->sb->ports)
                return -ENOMEM;

        prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
                      GFP_KERNEL);
        if (!prs) {
                err = -ENOMEM;
                goto err_alloc_prs;
        }
        mlxsw_sp->sb->prs = prs;

        for (i = 0; i < max_ports; i++) {
                err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
                if (err)
                        goto err_sb_port_init;
        }

        return 0;

err_sb_port_init:
        /* Only ports [0, i) were initialized; free them in reverse. */
        for (i--; i >= 0; i--)
                mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
        kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
        kfree(mlxsw_sp->sb->ports);
        return err;
}
662
663 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
664 {
665         int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
666         int i;
667
668         for (i = max_ports - 1; i >= 0; i--)
669                 mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
670         kfree(mlxsw_sp->sb->prs);
671         kfree(mlxsw_sp->sb->ports);
672 }
673
/* Initializer for a plain pool entry in the sb_prs tables. */
#define MLXSW_SP_SB_PR(_mode, _size)    \
        {                               \
                .mode = _mode,          \
                .size = _size,          \
        }

/* Like MLXSW_SP_SB_PR, but also sets the freeze flags. */
#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)    \
        {                                                               \
                .mode = _mode,                                          \
                .size = _size,                                          \
                .freeze_mode = _freeze_mode,                            \
                .freeze_size = _freeze_size,                            \
        }
687
/* Size, in bytes, of each CPU-traffic pool on Spectrum-1. */
#define MLXSW_SP1_SB_PR_CPU_SIZE        (256 * 1000)

/* Default pool configuration for Spectrum-1; order according to
 * mlxsw_sp1_sb_pool_dess. Pools 0 (ingress) and 4 (egress) take whatever
 * space remains after all fixed-size pools (MLXSW_SP_SB_REST); pool 8
 * (egress multicast) is infinite.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
                           true, false),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
                           true, true),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
                           MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
                           MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
708
/* Size, in bytes, of each CPU-traffic pool on Spectrum-2/3. */
#define MLXSW_SP2_SB_PR_CPU_SIZE        (256 * 1000)

/* Default pool configuration for Spectrum-2/3; order according to
 * mlxsw_sp2_sb_pool_dess. Differs from Spectrum-1 in that the zero-sized
 * pools are static rather than dynamic.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
                           true, false),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
                           true, true),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
                           MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
        MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
                           MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
729
/* Program all pool sizes. Pools marked MLXSW_SP_SB_REST share, per
 * direction, whatever is left of the shared buffer after all explicitly
 * sized pools are subtracted; MLXSW_SP_SB_INFI pools are programmed as
 * infinite and do not consume from the REST budget.
 */
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
                                const struct mlxsw_sp_sb_pr *prs,
                                const struct mlxsw_sp_sb_pool_des *pool_dess,
                                size_t prs_len)
{
        /* Round down, unlike mlxsw_sp_bytes_cells(). */
        u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
        /* Remaining cells per direction, indexed by enum mlxsw_reg_sbxx_dir.
         * NOTE(review): assumes DIR_INGRESS/DIR_EGRESS are 0/1 — confirm
         * against reg.h.
         */
        u32 rest_cells[2] = {sb_cells, sb_cells};
        int i;
        int err;

        /* Calculate how much space to give to the "REST" pools in either
         * direction.
         */
        for (i = 0; i < prs_len; i++) {
                enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
                u32 size = prs[i].size;
                u32 size_cells;

                if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
                        continue;

                size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
                /* An over-budget table is a driver bug; warn once and skip
                 * the entry rather than underflowing the budget.
                 */
                if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
                        continue;

                rest_cells[dir] -= size_cells;
        }

        for (i = 0; i < prs_len; i++) {
                u32 size = prs[i].size;
                u32 size_cells;

                if (size == MLXSW_SP_SB_INFI) {
                        err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
                                                   0, true);
                } else if (size == MLXSW_SP_SB_REST) {
                        size_cells = rest_cells[pool_dess[i].dir];
                        err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
                                                   size_cells, false);
                } else {
                        size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
                        err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
                                                   size_cells, false);
                }
                if (err)
                        return err;
        }
        return 0;
}
780
/* Initializer for a {PG/TC} quota bound to an arbitrary pool. */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)     \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = _pool,                    \
        }

/* Quota bound to the default ingress pool. */
#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)        \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = MLXSW_SP_SB_POOL_ING,     \
        }

/* Quota bound to the default egress pool. */
#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)        \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = MLXSW_SP_SB_POOL_EGR,     \
        }

/* Quota bound to the egress multicast pool; its binding and threshold
 * are frozen.
 */
#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)     \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = MLXSW_SP_SB_POOL_EGR_MC,  \
                .freeze_pool = true,                    \
                .freeze_thresh = true,                  \
        }
810
/* Default per-port ingress quotas for Spectrum-1, indexed by priority group
 * (PG). Entry 8 is a placeholder (the PG does not exist and is skipped during
 * initialization); the final entry is served by the dedicated ingress CPU
 * pool.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
        MLXSW_SP_SB_CM_ING(10000, 8),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
        MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
823
/* Default per-port ingress quotas for Spectrum-2, indexed by priority group
 * (PG). Same layout as the Spectrum-1 table, but with different PG 0 values.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
        MLXSW_SP_SB_CM_ING(0, 7),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
        MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
836
/* Default per-port egress quotas for Spectrum-1, indexed by traffic class.
 * The first eight entries use the default egress pool; the next eight are
 * bound to the multicast egress pool with an infinite (unlimited) quota.
 * The exact role of the final (1, 0xff) entry is not evident from this file
 * -- presumably a control TC; confirm against the register documentation.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR(1500, 9),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR(1, 0xff),
};
856
/* Default per-port egress quotas for Spectrum-2, indexed by traffic class.
 * Same layout as the Spectrum-1 table, but with different unicast TC values.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR(0, 7),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
        MLXSW_SP_SB_CM_EGR(1, 0xff),
};
876
/* Zero-quota entry for the CPU port, bound to the egress CPU pool. */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)

/* Default egress quotas for the CPU port, indexed by traffic class. All
 * entries use the dedicated egress CPU pool; a subset of TCs gets a non-zero
 * (10000 bytes, dynamic threshold 8) quota, the rest are zeroed.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
};
913
914 static bool
915 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
916 {
917         struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
918
919         return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
920 }
921
/* Program per-{port, PG/TC} quotas (SBCM) for one direction of one port from
 * the given quota table. min_buff is always given in bytes and converted to
 * cells here; max_buff is converted only for pools with static thresholds,
 * otherwise it is a dynamic-threshold (alpha) value and passed through as-is.
 * Returns 0 on success or a negative errno from the register write.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                  enum mlxsw_reg_sbxx_dir dir,
                                  const struct mlxsw_sp_sb_cm *cms,
                                  size_t cms_len)
{
        const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
        int i;
        int err;

        for (i = 0; i < cms_len; i++) {
                const struct mlxsw_sp_sb_cm *cm;
                u32 min_buff;
                u32 max_buff;

                if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
                        continue; /* PG number 8 does not exist, skip it */
                cm = &cms[i];
                /* A quota entry must point at a pool of the same direction;
                 * a mismatch indicates a broken table, so skip the entry.
                 */
                if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
                        continue;

                min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
                max_buff = cm->max_buff;
                if (max_buff == MLXSW_SP_SB_INFI) {
                        /* Infinite quota: written with the infi flag set and
                         * a zero max_buff.
                         */
                        err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
                                                   min_buff, 0,
                                                   true, cm->pool_index);
                } else {
                        if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
                                                       cm->pool_index))
                                max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
                                                                max_buff);
                        err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
                                                   min_buff, max_buff,
                                                   false, cm->pool_index);
                }
                if (err)
                        return err;
        }
        return 0;
}
962
963 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
964 {
965         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
966         int err;
967
968         err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
969                                      mlxsw_sp_port->local_port,
970                                      MLXSW_REG_SBXX_DIR_INGRESS,
971                                      mlxsw_sp->sb_vals->cms_ingress,
972                                      mlxsw_sp->sb_vals->cms_ingress_count);
973         if (err)
974                 return err;
975         return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
976                                       mlxsw_sp_port->local_port,
977                                       MLXSW_REG_SBXX_DIR_EGRESS,
978                                       mlxsw_sp->sb_vals->cms_egress,
979                                       mlxsw_sp->sb_vals->cms_egress_count);
980 }
981
/* Program the default quotas for the CPU port (local port 0). Only egress
 * quotas are configured for it.
 */
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
        return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
                                      mlxsw_sp->sb_vals->cms_cpu,
                                      mlxsw_sp->sb_vals->cms_cpu_count);
}
988
/* Initializer for a struct mlxsw_sp_sb_pm per-{port, pool} quota entry. */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)    \
        {                                       \
                .min_buff = _min_buff,          \
                .max_buff = _max_buff,          \
        }
994
/* Default per-{port, pool} quotas for Spectrum-1 front panel ports.
 * Order according to mlxsw_sp1_sb_pool_dess.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, 7),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(10000, 90000),
        MLXSW_SP_SB_PM(0, 8),   /* 50% occupancy */
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
1009
/* Default per-{port, pool} quotas for Spectrum-2 front panel ports.
 * Order according to mlxsw_sp2_sb_pool_dess.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
        MLXSW_SP_SB_PM(0, 7),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 7),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(10000, 90000),
        MLXSW_SP_SB_PM(0, 8),   /* 50% occupancy */
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
1024
/* Default per-{port, pool} quotas for the CPU port; shared by both ASIC
 * generations. Order according to mlxsw_sp*_sb_pool_dess. Ingress entries
 * are present for layout but skipped at init time (skip_ingress == true).
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 90000),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
1039
/* Program per-{port, pool} quotas (SBPM) for one port from the given table.
 * min_buff is always given in bytes and converted to cells; max_buff is
 * converted only for pools with static thresholds. With skip_ingress set,
 * ingress pools are left untouched (used for the CPU port). Returns 0 on
 * success or a negative errno from the register write.
 */
static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                const struct mlxsw_sp_sb_pm *pms,
                                bool skip_ingress)
{
        int i, err;

        for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
                const struct mlxsw_sp_sb_pm *pm = &pms[i];
                const struct mlxsw_sp_sb_pool_des *des;
                u32 max_buff;
                u32 min_buff;

                des = &mlxsw_sp->sb_vals->pool_dess[i];
                if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
                        continue;

                min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
                max_buff = pm->max_buff;
                if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
                        max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
                err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
                                           max_buff);
                if (err)
                        return err;
        }
        return 0;
}
1067
1068 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
1069 {
1070         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1071
1072         return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
1073                                     mlxsw_sp->sb_vals->pms, false);
1074 }
1075
/* Program the default per-pool quotas for the CPU port (local port 0),
 * skipping the ingress pools.
 */
static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
{
        return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
                                    true);
}
1081
/* Initializer for a struct mlxsw_sp_sb_mm entry, always bound to the default
 * egress pool.
 */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff)            \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool_index = MLXSW_SP_SB_POOL_EGR,     \
        }

/* Default SBMM entries, one per table index, shared by both ASIC
 * generations. max_buff is a dynamic threshold value (the bound pools use
 * dynamic thresholds -- see mlxsw_sp_sb_mms_init()).
 */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
        MLXSW_SP_SB_MM(0, 6),
};
1106
/* Program the SBMM register from the sb_vals->mms table. min_buff is
 * converted from bytes to cells; max_buff is passed through unchanged.
 * Returns 0 on success or a negative errno from the register write.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
        char sbmm_pl[MLXSW_REG_SBMM_LEN];
        int i;
        int err;

        for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
                const struct mlxsw_sp_sb_pool_des *des;
                const struct mlxsw_sp_sb_mm *mc;
                u32 min_buff;

                mc = &mlxsw_sp->sb_vals->mms[i];
                des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
                /* All pools used by sb_mm's are initialized using dynamic
                 * thresholds, therefore 'max_buff' isn't specified in cells.
                 */
                min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
                mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
                                    des->pool);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
                if (err)
                        return err;
        }
        return 0;
}
1132
1133 static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
1134                                 u16 *p_ingress_len, u16 *p_egress_len)
1135 {
1136         int i;
1137
1138         for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
1139                 if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
1140                     MLXSW_REG_SBXX_DIR_INGRESS)
1141                         (*p_ingress_len)++;
1142                 else
1143                         (*p_egress_len)++;
1144         }
1145
1146         WARN(*p_egress_len == 0, "No egress pools\n");
1147 }
1148
/* Bundle of all Spectrum-1 default shared-buffer tables and their sizes. */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
        .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
        .pool_dess = mlxsw_sp1_sb_pool_dess,
        .pms = mlxsw_sp1_sb_pms,
        .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
        .prs = mlxsw_sp1_sb_prs,
        .mms = mlxsw_sp_sb_mms,
        .cms_ingress = mlxsw_sp1_sb_cms_ingress,
        .cms_egress = mlxsw_sp1_sb_cms_egress,
        .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
        .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
        .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
        .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
        .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
1164
/* Bundle of all Spectrum-2 default shared-buffer tables and their sizes. */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
        .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
        .pool_dess = mlxsw_sp2_sb_pool_dess,
        .pms = mlxsw_sp2_sb_pms,
        .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
        .prs = mlxsw_sp2_sb_prs,
        .mms = mlxsw_sp_sb_mms,
        .cms_ingress = mlxsw_sp2_sb_cms_ingress,
        .cms_egress = mlxsw_sp2_sb_cms_egress,
        .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
        .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
        .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
        .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
        .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
1180
/* Internal mirroring buffer size for Spectrum-1: 2.5 * MTU. The port speed
 * is not taken into account on this generation.
 */
static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
{
        return (mtu * 5) / 2;
}
1185
/* Internal mirroring buffer size: a fixed 3-MTU part plus a speed-dependent
 * part scaled by the per-generation buffer factor (speed in units of 1000).
 */
static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
{
        u32 speed_part = buffer_factor * speed / 1000;

        return 3 * mtu + speed_part;
}
1190
1191 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
1192
1193 static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
1194 {
1195         int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
1196
1197         return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
1198 }
1199
1200 #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
1201
1202 static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
1203 {
1204         int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
1205
1206         return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
1207 }
1208
/* Per-generation shared-buffer operations: each selects the matching
 * internal mirroring buffer size calculation.
 */
const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
        .int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
};

const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
        .int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
};

const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
        .int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
};
1220
/* Top-level shared-buffer initialization: validate the required device
 * resources, allocate the driver-side state, program the default pool,
 * quota and SBMM tables, and register the shared buffer with devlink.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the goto-cleanup chain.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
        u32 max_headroom_size;
        u16 ing_pool_count = 0;
        u16 eg_pool_count = 0;
        int err;

        /* Without these resources the cell/byte conversions and size limits
         * below are meaningless, so bail out early.
         */
        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
                return -EIO;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
                return -EIO;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
                return -EIO;

        mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
        if (!mlxsw_sp->sb)
                return -ENOMEM;
        mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
        mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                   GUARANTEED_SHARED_BUFFER);
        max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                               MAX_HEADROOM_SIZE);
        /* Round down, because this limit must not be overstepped. */
        mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
                                                mlxsw_sp->sb->cell_size;

        err = mlxsw_sp_sb_ports_init(mlxsw_sp);
        if (err)
                goto err_sb_ports_init;
        err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
                                   mlxsw_sp->sb_vals->pool_dess,
                                   mlxsw_sp->sb_vals->pool_count);
        if (err)
                goto err_sb_prs_init;
        err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
        if (err)
                goto err_sb_cpu_port_sb_cms_init;
        err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
        if (err)
                goto err_sb_cpu_port_pms_init;
        err = mlxsw_sp_sb_mms_init(mlxsw_sp);
        if (err)
                goto err_sb_mms_init;
        mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
        err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
                                  mlxsw_sp->sb->sb_size,
                                  ing_pool_count,
                                  eg_pool_count,
                                  MLXSW_SP_SB_ING_TC_COUNT,
                                  MLXSW_SP_SB_EG_TC_COUNT);
        if (err)
                goto err_devlink_sb_register;

        return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
        mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
        kfree(mlxsw_sp->sb);
        return err;
}
1288
/* Tear down in reverse order of mlxsw_sp_buffers_init(): unregister from
 * devlink, release per-port state, then free the driver-side state.
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
        devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
        mlxsw_sp_sb_ports_fini(mlxsw_sp);
        kfree(mlxsw_sp->sb);
}
1295
/* Per-port buffer initialization: allocate headroom state seeded with the
 * current netdev MTU, then program headroom, quota (SBCM) and per-pool
 * (SBPM) defaults. Returns 0 on success or a negative errno; the headroom
 * state is freed on any failure.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        int err;

        mlxsw_sp_port->hdroom = kzalloc(sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
        if (!mlxsw_sp_port->hdroom)
                return -ENOMEM;
        mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;

        err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
        if (err)
                goto err_headroom_init;
        err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
        if (err)
                goto err_port_sb_cms_init;
        err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
        if (err)
                goto err_port_sb_pms_init;
        return 0;

err_port_sb_pms_init:
err_port_sb_cms_init:
err_headroom_init:
        kfree(mlxsw_sp_port->hdroom);
        return err;
}
1322
/* Release the per-port headroom state allocated in
 * mlxsw_sp_port_buffers_init().
 */
void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
        kfree(mlxsw_sp_port->hdroom);
}
1327
/* devlink .sb_pool_get callback: report a pool's type, size (converted from
 * cells to bytes), threshold type and cell size from the cached state.
 * Always returns 0.
 */
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
                         unsigned int sb_index, u16 pool_index,
                         struct devlink_sb_pool_info *pool_info)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        enum mlxsw_reg_sbxx_dir dir;
        struct mlxsw_sp_sb_pr *pr;

        dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
        pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
        /* The driver's direction and mode enums intentionally mirror the
         * devlink pool type and threshold type values.
         */
        pool_info->pool_type = (enum devlink_sb_pool_type) dir;
        pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
        pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
        pool_info->cell_size = mlxsw_sp->sb->cell_size;
        return 0;
}
1344
/* devlink .sb_pool_set callback: reconfigure a pool's size and threshold
 * type. Rejects sizes beyond the guaranteed shared buffer and any change to
 * a frozen mode or size (per the default-table freeze flags), reporting the
 * reason via extack. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
                         unsigned int sb_index, u16 pool_index, u32 size,
                         enum devlink_sb_threshold_type threshold_type,
                         struct netlink_ext_ack *extack)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
        const struct mlxsw_sp_sb_pr *pr;
        enum mlxsw_reg_sbpr_mode mode;

        mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
        pr = &mlxsw_sp->sb_vals->prs[pool_index];

        if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                      GUARANTEED_SHARED_BUFFER)) {
                NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
                return -EINVAL;
        }

        if (pr->freeze_mode && pr->mode != mode) {
                NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
                return -EINVAL;
        }

        if (pr->freeze_size && pr->size != size) {
                NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
                return -EINVAL;
        }

        return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
                                    pool_size, false);
}
1377
/* Offset between devlink threshold values and hardware alpha values for
 * dynamic pools.
 */
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

/* Convert a hardware max_buff into the value exposed to devlink: shift by
 * the alpha offset for dynamic pools, or convert cells to bytes for static
 * ones.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
                                     u32 max_buff)
{
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

        if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
                return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
        return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
1389
/* Convert a devlink threshold into a hardware max_buff (the inverse of
 * mlxsw_sp_sb_threshold_out()): for dynamic pools apply the alpha offset and
 * range-check the result, for static pools convert bytes to cells. Returns
 * 0 and fills *p_max_buff, or -EINVAL with an extack message.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
                                    u32 threshold, u32 *p_max_buff,
                                    struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

        if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
                int val;

                val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
                if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
                    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
                        NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
                        return -EINVAL;
                }
                *p_max_buff = val;
        } else {
                *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
        }
        return 0;
}
1411
/* devlink .sb_port_pool_get callback: report a port's per-pool threshold
 * from the cached SBPM state. Always returns 0.
 */
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 *p_threshold)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
                                                       pool_index);

        *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
                                                 pm->max_buff);
        return 0;
}
1427
/* devlink .sb_port_pool_set callback: set a port's per-pool threshold
 * (SBPM), with a zero min_buff. The CPU port is read-only. Returns 0 on
 * success or a negative errno with an extack message.
 */
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 threshold, struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u32 max_buff;
        int err;

        if (local_port == MLXSW_PORT_CPU_PORT) {
                NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
                return -EINVAL;
        }

        err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
                                       threshold, &max_buff, extack);
        if (err)
                return err;

        return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
                                    0, max_buff);
}
1452
/* devlink .sb_tc_pool_bind_get callback: report the pool a TC/PG is bound to
 * and its threshold, from the cached SBCM state. Always returns 0.
 */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 *p_pool_index, u32 *p_threshold)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
        struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
                                                       pg_buff, dir);

        *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
                                                 cm->max_buff);
        *p_pool_index = cm->pool_index;
        return 0;
}
1472
/* devlink .sb_tc_pool_bind_set callback: bind a TC/PG to a pool and set its
 * threshold (SBCM, with a zero min_buff). Rejects changes to the CPU port,
 * cross-direction bindings, and any change forbidden by the default table's
 * freeze flags. Returns 0 on success or a negative errno with an extack
 * message.
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 pool_index, u32 threshold,
                                 struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        const struct mlxsw_sp_sb_cm *cm;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
        u32 max_buff;
        int err;

        if (local_port == MLXSW_PORT_CPU_PORT) {
                NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
                return -EINVAL;
        }

        if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
                NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
                return -EINVAL;
        }

        /* Look up the default entry for this TC to consult its freeze
         * flags.
         */
        if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
                cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
        else
                cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];

        if (cm->freeze_pool && cm->pool_index != pool_index) {
                NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
                return -EINVAL;
        }

        if (cm->freeze_thresh && cm->max_buff != threshold) {
                NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
                return -EINVAL;
        }

        err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
                                       threshold, &max_buff, extack);
        if (err)
                return err;

        return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
                                    0, max_buff, false, pool_index);
}
1522
/* Maximum number of ports whose ingress + egress TC records fit into one
 * SBSR response.
 */
#define MASKED_COUNT_MAX \
        (MLXSW_REG_SBSR_REC_MAX_COUNT / \
         (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

/* Context passed (packed into an unsigned long) to the SBSR query
 * completion callback: how many ports were masked in the query and the
 * first local port covered.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
        u8 masked_count;
        u8 local_port_1;
};
1531
/* Completion callback for the SBSR query issued by mlxsw_sp_sb_occ_snapshot().
 * Unpacks the occupancy records from the response payload into the driver's
 * per-port, per-TC CM cache (cm->occ), from where the devlink occupancy
 * getters later read them.
 *
 * NOTE(review): the unpack order here must mirror the request built in
 * mlxsw_sp_sb_occ_snapshot() — all ingress-TC records for each port of the
 * batch first, then all egress-TC records — starting at
 * cb_ctx.local_port_1 and covering cb_ctx.masked_count ports. rec_index
 * advances monotonically across both passes.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* cb_priv carries the batch context by value, not by pointer. */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	/* First pass: ingress records. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port == MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port.
			 * It still counts toward the batch size, but carries
			 * no ingress records to unpack.
			 */
			masked_count++;
			continue;
		}
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	/* Second pass: egress records for the same set of ports. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
1580
/* devlink op: take a snapshot of shared-buffer occupancy for all ports.
 *
 * A single SBSR query can only carry MASKED_COUNT_MAX ports' worth of
 * per-TC records, so ports are processed in batches: each pass through
 * next_batch builds one SBSR request (unmasking up to MASKED_COUNT_MAX
 * ports), queues per-pool PM occupancy queries on bulk_list, and issues
 * the SBSR with mlxsw_sp_sb_sr_occ_query_cb() registered to unpack the
 * per-TC results into the CM cache. All queued transactions are awaited
 * at "out" before returning.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	/* Start at the first local port; each later batch resumes from the
	 * port after the last one processed.
	 */
	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	local_port_1 = local_port;
	masked_count = 0;
	/* clr=false: read occupancy without resetting the maximum watermark
	 * (contrast mlxsw_sp_sb_occ_max_clear()).
	 */
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}
	/* Fewer than MASKED_COUNT_MAX ports remained; fall through and issue
	 * the final (possibly partial) batch.
	 */

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		/* Batch was cut short by MASKED_COUNT_MAX; continue with the
		 * next port.
		 */
		local_port++;
		goto next_batch;
	}

out:
	/* Always drain the bulk list, even on error, so no transaction is
	 * left pending; preserve the first error encountered.
	 */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1649
/* devlink op: clear the maximum-occupancy watermarks for all ports.
 *
 * Mirrors the batching scheme of mlxsw_sp_sb_occ_snapshot(): ports are
 * processed MASKED_COUNT_MAX at a time per SBSR transaction. Differences
 * from the snapshot path: SBSR is packed with clr=true (reset the maximum
 * watermark), per-pool PM maxima are cleared via
 * mlxsw_sp_sb_pm_occ_clear(), and no unpack callback is registered since
 * the results are not cached.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	/* Start at the first local port; each later batch resumes from the
	 * port after the last one processed.
	 */
	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	masked_count = 0;
	/* clr=true: reading also resets the maximum watermark. */
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}
	/* Fewer than MASKED_COUNT_MAX ports remained; fall through and issue
	 * the final (possibly partial) batch.
	 */

do_query:
	/* No unpack callback: the clear operation discards the results. */
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		/* Batch was cut short by MASKED_COUNT_MAX; continue with the
		 * next port.
		 */
		local_port++;
		goto next_batch;
	}

out:
	/* Always drain the bulk list, even on error, so no transaction is
	 * left pending; preserve the first error encountered.
	 */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1710
1711 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1712                                   unsigned int sb_index, u16 pool_index,
1713                                   u32 *p_cur, u32 *p_max)
1714 {
1715         struct mlxsw_sp_port *mlxsw_sp_port =
1716                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1717         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1718         u8 local_port = mlxsw_sp_port->local_port;
1719         struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1720                                                        pool_index);
1721
1722         *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1723         *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1724         return 0;
1725 }
1726
1727 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1728                                      unsigned int sb_index, u16 tc_index,
1729                                      enum devlink_sb_pool_type pool_type,
1730                                      u32 *p_cur, u32 *p_max)
1731 {
1732         struct mlxsw_sp_port *mlxsw_sp_port =
1733                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1734         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1735         u8 local_port = mlxsw_sp_port->local_port;
1736         u8 pg_buff = tc_index;
1737         enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1738         struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1739                                                        pg_buff, dir);
1740
1741         *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1742         *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
1743         return 0;
1744 }