/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 #include <linux/device.h>
33 #include <linux/netdevice.h>
36 #define MLX5E_MAX_PRIORITY 8
38 #define MLX5E_100MB (100000)
39 #define MLX5E_1GB (1000000)
41 #define MLX5E_CEE_STATE_UP 1
42 #define MLX5E_CEE_STATE_DOWN 0
45 MLX5E_VENDOR_TC_GROUP_NUM = 7,
46 MLX5E_LOWEST_PRIO_GROUP = 0,
49 /* If dcbx mode is non-host set the dcbx mode to host.
/* Program the port DCBX admin mode (host- vs. firmware-controlled).
 * Reads the current dcbx_param register, updates version_admin, and in
 * non-host (auto) mode also sets willing_admin so firmware negotiates.
 * NOTE(review): this chunk looks truncated (braces and the err check
 * after the query are not visible) — comments only, code untouched.
 */
51 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
52 enum mlx5_dcbx_oper_mode mode)
54 struct mlx5_core_dev *mdev = priv->mdev;
55 u32 param[MLX5_ST_SZ_DW(dcbx_param)];
/* Fetch current DCBX parameters so only admin fields are modified. */
58 err = mlx5_query_port_dcbx_param(mdev, param);
62 MLX5_SET(dcbx_param, param, version_admin, mode);
/* Firmware-controlled mode must advertise the "willing" bit. */
63 if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
64 MLX5_SET(dcbx_param, param, willing_admin, 1);
66 return mlx5_set_port_dcbx_param(mdev, param);
/* Switch DCBX to host-controlled mode if the device supports DCBX and
 * is not already in host mode; caches the new mode in priv->dcbx.
 */
69 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
71 struct mlx5e_dcbx *dcbx = &priv->dcbx;
/* Without the dcbx capability there is no mode to switch. */
74 if (!MLX5_CAP_GEN(priv->mdev, dcbx))
/* Already host-controlled: nothing to do. */
77 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
80 err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
/* Record the mode only after the firmware call (truncated check above). */
84 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
/* dcbnl .ieee_getets: report the port's ETS configuration (prio->tc map,
 * per-tc group and bandwidth) queried from firmware into @ets, and derive
 * tc_tsa (ETS vs. VENDOR) from the bandwidth/group values.
 */
88 static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
91 struct mlx5e_priv *priv = netdev_priv(netdev);
92 struct mlx5_core_dev *mdev = priv->mdev;
93 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
94 bool is_tc_group_6_exist = false;
95 bool is_zero_bw_ets_tc = false;
/* ETS queries require the ets capability. */
99 if (!MLX5_CAP_GEN(priv->mdev, ets))
102 ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
/* Pull prio->tc, tc group and tc bw for every supported tc. */
103 for (i = 0; i < ets->ets_cap; i++) {
104 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
108 err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
112 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
/* Group LOWEST+1 holds ETS tcs whose user-requested BW was 0%. */
116 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
117 tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
118 is_zero_bw_ets_tc = true;
/* Group 6 (VENDOR - 1) in use changes how VENDOR tcs are reported. */
120 if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
121 is_tc_group_6_exist = true;
124 /* Report 0% ets tc if it exists */
125 if (is_zero_bw_ets_tc) {
126 for (i = 0; i < ets->ets_cap; i++)
127 if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
128 ets->tc_tx_bw[i] = 0;
131 /* Update tc_tsa based on fw setting */
132 for (i = 0; i < ets->ets_cap; i++) {
/* BW below 100% means the tc participates in ETS arbitration. */
133 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
134 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
135 else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
136 !is_tc_group_6_exist)
137 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
/* Cached tsa values are the source of truth reported to dcbnl. */
139 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
/* Translate ieee_ets tc_tsa[] into firmware tc group numbers:
 * VENDOR tcs get the vendor group, STRICT tcs get ascending groups above
 * the ETS group(s), and ETS tcs land in the lowest group (or lowest+1
 * when zero-BW ETS tcs exist and this tc has non-zero BW).
 */
144 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
146 bool any_tc_mapped_to_ets = false;
147 bool ets_zero_bw = false;
/* First pass: detect ETS usage and zero-BW ETS tcs. */
151 for (i = 0; i <= max_tc; i++) {
152 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
153 any_tc_mapped_to_ets = true;
154 if (!ets->tc_tx_bw[i])
159 /* strict group has higher priority than ets group */
160 strict_group = MLX5E_LOWEST_PRIO_GROUP;
161 if (any_tc_mapped_to_ets)
/* Second pass: assign a group per tc based on its tsa. */
166 for (i = 0; i <= max_tc; i++) {
167 switch (ets->tc_tsa[i]) {
168 case IEEE_8021QAZ_TSA_VENDOR:
169 tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
171 case IEEE_8021QAZ_TSA_STRICT:
/* Each strict tc gets its own, increasingly-prioritized group. */
172 tc_group[i] = strict_group++;
174 case IEEE_8021QAZ_TSA_ETS:
175 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
/* Non-zero-BW ETS tcs move up one group when zero-BW tcs exist. */
176 if (ets->tc_tx_bw[i] && ets_zero_bw)
177 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
/* Translate ieee_ets tc_tx_bw[] into firmware per-tc bandwidth values.
 * VENDOR/STRICT tcs get 100%; zero-BW ETS tcs split 100% equally among
 * themselves (the last one absorbs the remainder so the group sums to
 * exactly MLX5E_MAX_BW_ALLOC).
 */
183 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
184 u8 *tc_group, int max_tc)
186 int bw_for_ets_zero_bw_tc = 0;
187 int last_ets_zero_bw_tc = -1;
188 int num_ets_zero_bw = 0;
/* Count zero-BW ETS tcs and remember the last one seen. */
191 for (i = 0; i <= max_tc; i++) {
192 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
195 last_ets_zero_bw_tc = i;
/* Even split of 100% across the zero-BW ETS tcs (guard truncated). */
200 bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
202 for (i = 0; i <= max_tc; i++) {
203 switch (ets->tc_tsa[i]) {
204 case IEEE_8021QAZ_TSA_VENDOR:
205 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
207 case IEEE_8021QAZ_TSA_STRICT:
208 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
210 case IEEE_8021QAZ_TSA_ETS:
/* Keep the requested BW, or the computed even split for 0% tcs. */
211 tc_tx_bw[i] = ets->tc_tx_bw[i] ?
213 bw_for_ets_zero_bw_tc;
218 /* Make sure the total bw for ets zero bw group is 100% */
219 if (last_ets_zero_bw_tc != -1)
220 tc_tx_bw[last_ets_zero_bw_tc] +=
221 MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
224 /* If there are ETS BW 0,
225 * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
226 * Set group #0 to all the ETS BW 0 tcs and
227 * equally splits the 100% BW between them
228 * Report both group #0 and #1 as ETS type.
229 * All the tcs in group #0 will be reported with 0% BW.
 *
 * Apply an ETS configuration: build the firmware group/bw tables from
 * @ets, program prio->tc, tc-group and tc-bw, then cache tc_tsa.
 */
231 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
233 struct mlx5_core_dev *mdev = priv->mdev;
234 u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
235 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
236 int max_tc = mlx5_max_tc(mdev);
/* Translate the IEEE view into firmware group/bandwidth tables. */
239 mlx5e_build_tc_group(ets, tc_group, max_tc);
240 mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
/* Three firmware writes; error checks between them are truncated here. */
242 err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
246 err = mlx5_set_port_tc_group(mdev, tc_group);
250 err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
/* Remember the applied tsa so ieee_getets can report it back. */
255 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
/* Validate a user-supplied ETS config: every prio_tc entry must be below
 * MLX5E_MAX_PRIORITY, and the BW of all ETS tcs must sum to 0 or 100.
 */
259 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
260 struct ieee_ets *ets)
265 /* Validate Priority */
266 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
267 if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
269 "Failed to validate ETS: priority value greater than max(%d)\n",
275 /* Validate Bandwidth Sum */
276 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
277 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
278 bw_sum += ets->tc_tx_bw[i];
/* 0 means "no ETS tcs configured"; otherwise the split must be total. */
280 if (bw_sum != 0 && bw_sum != 100) {
282 "Failed to validate ETS: BW sum is illegal\n");
/* dcbnl .ieee_setets: validate then apply an ETS configuration. */
288 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
289 struct ieee_ets *ets)
291 struct mlx5e_priv *priv = netdev_priv(netdev);
/* ETS requires the ets device capability. */
294 if (!MLX5_CAP_GEN(priv->mdev, ets))
297 err = mlx5e_dbcnl_validate_ets(netdev, ets);
301 err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
/* dcbnl .ieee_getpfc: report PFC capability, per-priority pause counters
 * (tx as requests, rx as indications) and the enabled-priorities bitmap.
 */
308 static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
309 struct ieee_pfc *pfc)
311 struct mlx5e_priv *priv = netdev_priv(dev);
312 struct mlx5_core_dev *mdev = priv->mdev;
313 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
316 pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
/* Per-priority pause counters come from the cached pport stats. */
317 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
318 pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
319 pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
/* The enabled bitmap is read live from firmware. */
322 return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
/* dcbnl .ieee_setpfc: program the PFC enable bitmap if it changed, then
 * toggle the port link so the new PFC setting takes effect.
 */
325 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
326 struct ieee_pfc *pfc)
328 struct mlx5e_priv *priv = netdev_priv(dev);
329 struct mlx5_core_dev *mdev = priv->mdev;
/* NOTE(review): query return value is ignored here — curr_pfc_en may be
 * stale on failure; confirm against the full source. */
333 mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
/* No change requested: skip the firmware write and link toggle. */
335 if (pfc->pfc_en == curr_pfc_en)
/* Same bitmap is applied for both rx and tx PFC. */
338 ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
339 mlx5_toggle_port_link(mdev);
/* dcbnl .getdcbx: return the cached DCB_CAP_DCBX_* capability flags. */
344 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
346 struct mlx5e_priv *priv = netdev_priv(dev);
348 return priv->dcbx.cap;
/* dcbnl .setdcbx: LLD-managed mode is rejected; mode==0 hands DCBX back
 * to firmware (auto) when the device supports it; otherwise a host-mode
 * request switches the device to host-controlled DCBX.
 */
351 static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
353 struct mlx5e_priv *priv = netdev_priv(dev);
354 struct mlx5e_dcbx *dcbx = &priv->dcbx;
/* This driver does not support externally (LLD) managed DCBX. */
356 if (mode & DCB_CAP_DCBX_LLD_MANAGED)
/* mode==0: relinquish control to firmware if the cap exists. */
359 if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
360 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
363 /* set dcbx to fw controlled */
364 if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
365 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
366 dcbx->cap &= ~DCB_CAP_DCBX_HOST;
/* Anything else must at least request host mode. */
373 if (!(mode & DCB_CAP_DCBX_HOST))
376 if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
/* dcbnl .ieee_getmaxrate: read per-tc rate limits from firmware and
 * convert (value, unit) pairs into absolute rates in maxrate->tc_maxrate.
 */
384 static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
385 struct ieee_maxrate *maxrate)
387 struct mlx5e_priv *priv = netdev_priv(netdev);
388 struct mlx5_core_dev *mdev = priv->mdev;
389 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
390 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
394 err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
/* Zeroed entries mean "no limit" for tcs not touched below. */
398 memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
400 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
401 switch (max_bw_unit[i]) {
402 case MLX5_100_MBPS_UNIT:
403 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
/* (case label truncated) 1 Gbps unit: scale by MLX5E_1GB. */
406 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
408 case MLX5_BW_NO_LIMIT:
/* Unknown unit from firmware is a driver bug — warn loudly. */
411 WARN(true, "non-supported BW unit");
/* dcbnl .ieee_setmaxrate: convert requested per-tc rates into the
 * firmware (value, unit) encoding — 100 Mbps units below the 8-bit
 * 100 Mbps range limit, Gbps units above it — and program the limits.
 */
419 static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
420 struct ieee_maxrate *maxrate)
422 struct mlx5e_priv *priv = netdev_priv(netdev);
423 struct mlx5_core_dev *mdev = priv->mdev;
424 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
425 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
/* Largest rate expressible as an 8-bit count of 100 Mbps units,
 * rounded up to a whole Gbps boundary. */
426 __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
429 memset(max_bw_value, 0, sizeof(max_bw_value));
430 memset(max_bw_unit, 0, sizeof(max_bw_unit));
432 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
/* Rate 0 means unlimited for this tc. */
433 if (!maxrate->tc_maxrate[i]) {
434 max_bw_unit[i] = MLX5_BW_NO_LIMIT;
437 if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
438 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
/* Round a nonzero request up to at least one 100 Mbps unit. */
440 max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
441 max_bw_unit[i] = MLX5_100_MBPS_UNIT;
443 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
445 max_bw_unit[i] = MLX5_GBPS_UNIT;
449 return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
/* dcbnl .setall (CEE): translate the cached CEE configuration into IEEE
 * ets/pfc structures and apply them via the IEEE set paths. Returns
 * MLX5_DCB_CHG_RESET on success, MLX5_DCB_NO_CHG on any failure.
 */
452 static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
454 struct mlx5e_priv *priv = netdev_priv(netdev);
455 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
456 struct mlx5_core_dev *mdev = priv->mdev;
459 int err = -EOPNOTSUPP;
462 if (!MLX5_CAP_GEN(mdev, ets))
465 memset(&ets, 0, sizeof(ets));
466 memset(&pfc, 0, sizeof(pfc));
/* Map CEE priority groups onto IEEE ETS tcs, all as TSA_ETS. */
468 ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
469 for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
470 ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
471 ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
472 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
473 ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
476 err = mlx5e_dbcnl_validate_ets(netdev, &ets);
479 "%s, Failed to validate ETS: %d\n", __func__, err);
483 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
486 "%s, Failed to set ETS: %d\n", __func__, err);
/* Build and apply PFC only when CEE PFC is enabled. */
491 pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
492 if (!cee_cfg->pfc_enable)
495 for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
496 pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
498 err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
501 "%s, Failed to set PFC: %d\n", __func__, err);
/* CEE return convention: NO_CHG on error, CHG_RESET on success. */
505 return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
/* dcbnl .getstate (CEE): DCB is always reported as enabled. */
508 static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
510 return MLX5E_CEE_STATE_UP;
/* dcbnl .getpermhwaddr (CEE): fill @perm_addr with the NIC vport MAC;
 * the buffer is pre-set to 0xff so unused trailing bytes are all-ones.
 */
513 static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
516 struct mlx5e_priv *priv = netdev_priv(netdev);
521 memset(perm_addr, 0xff, MAX_ADDR_LEN);
523 mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
/* dcbnl .setpgtccfgtx (CEE): cache a priority -> priority-group mapping;
 * applied later by mlx5e_dcbnl_setall. Out-of-range inputs are rejected
 * with an error log (return truncated in this view).
 */
526 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
527 int priority, u8 prio_type,
528 u8 pgid, u8 bw_pct, u8 up_map)
530 struct mlx5e_priv *priv = netdev_priv(netdev);
531 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
533 if (priority >= CEE_DCBX_MAX_PRIO) {
535 "%s, priority is out of range\n", __func__);
539 if (pgid >= CEE_DCBX_MAX_PGS) {
541 "%s, priority group is out of range\n", __func__);
545 cee_cfg->prio_to_pg_map[priority] = pgid;
/* dcbnl .setpgbwgcfgtx (CEE): cache a priority-group bandwidth percent;
 * applied later by mlx5e_dcbnl_setall.
 */
548 static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
551 struct mlx5e_priv *priv = netdev_priv(netdev);
552 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
554 if (pgid >= CEE_DCBX_MAX_PGS) {
556 "%s, priority group is out of range\n", __func__);
560 cee_cfg->pg_bw_pct[pgid] = bw_pct;
/* dcbnl .getpgtccfgtx (CEE): report the tc (group id) currently mapped
 * to @priority by querying the port prio->tc table.
 */
563 static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
564 int priority, u8 *prio_type,
565 u8 *pgid, u8 *bw_pct, u8 *up_map)
567 struct mlx5e_priv *priv = netdev_priv(netdev);
568 struct mlx5_core_dev *mdev = priv->mdev;
570 if (!MLX5_CAP_GEN(priv->mdev, ets)) {
571 netdev_err(netdev, "%s, ets is not supported\n", __func__);
575 if (priority >= CEE_DCBX_MAX_PRIO) {
577 "%s, priority is out of range\n", __func__);
/* Query failure path truncated; *pgid presumably reset on error. */
585 if (mlx5_query_port_prio_tc(mdev, priority, pgid))
/* dcbnl .getpgbwgcfgtx (CEE): report a group's tx bandwidth percent by
 * reusing the IEEE getets path and picking the requested entry.
 */
589 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
590 int pgid, u8 *bw_pct)
594 if (pgid >= CEE_DCBX_MAX_PGS) {
596 "%s, priority group is out of range\n", __func__);
/* NOTE(review): getets return value is not checked in this view. */
600 mlx5e_dcbnl_ieee_getets(netdev, &ets);
601 *bw_pct = ets.tc_tx_bw[pgid];
/* dcbnl .setpfccfg (CEE): cache a per-priority PFC setting; applied
 * later by mlx5e_dcbnl_setall.
 */
604 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
605 int priority, u8 setting)
607 struct mlx5e_priv *priv = netdev_priv(netdev);
608 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
610 if (priority >= CEE_DCBX_MAX_PRIO) {
612 "%s, priority is out of range\n", __func__);
619 cee_cfg->pfc_setting[priority] = setting;
/* Helper: extract the PFC enable bit for one priority from the IEEE
 * getpfc bitmap. (Return type line is outside this view.)
 */
623 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
624 int priority, u8 *setting)
629 err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
/* Single bit of pfc_en selected by @priority. */
634 *setting = (pfc.pfc_en >> priority) & 0x01;
/* dcbnl .getpfccfg (CEE): range-check @priority then report its PFC bit
 * via mlx5e_dcbnl_get_priority_pfc.
 */
639 static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
640 int priority, u8 *setting)
642 if (priority >= CEE_DCBX_MAX_PRIO) {
644 "%s, priority is out of range\n", __func__);
651 mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
/* dcbnl .getcap (CEE): report DCB capabilities per attribute id; tc
 * counts are encoded as a bitmap (1 << max_tc), and the DCBX attribute
 * combines the cached cap with both CEE and IEEE version flags.
 */
654 static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
657 struct mlx5e_priv *priv = netdev_priv(netdev);
658 struct mlx5_core_dev *mdev = priv->mdev;
/* Boolean attrs (values truncated in this view). */
662 case DCB_CAP_ATTR_PG:
665 case DCB_CAP_ATTR_PFC:
668 case DCB_CAP_ATTR_UP2TC:
671 case DCB_CAP_ATTR_PG_TCS:
672 *cap = 1 << mlx5_max_tc(mdev);
674 case DCB_CAP_ATTR_PFC_TCS:
675 *cap = 1 << mlx5_max_tc(mdev);
677 case DCB_CAP_ATTR_GSP:
680 case DCB_CAP_ATTR_BCN:
683 case DCB_CAP_ATTR_DCBX:
684 *cap = priv->dcbx.cap |
685 DCB_CAP_DCBX_VER_CEE |
686 DCB_CAP_DCBX_VER_IEEE;
/* dcbnl .getnumtcs (CEE): report the number of traffic classes for both
 * the PG and PFC attributes (max_tc is the highest index, hence +1).
 */
697 static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
700 struct mlx5e_priv *priv = netdev_priv(netdev);
701 struct mlx5_core_dev *mdev = priv->mdev;
704 case DCB_NUMTCS_ATTR_PG:
705 case DCB_NUMTCS_ATTR_PFC:
706 *num = mlx5_max_tc(mdev) + 1;
/* dcbnl .getpfcstate (CEE): UP iff any PFC priority is enabled; DOWN on
 * query failure or an all-zero bitmap.
 */
715 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
719 if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
720 return MLX5E_CEE_STATE_DOWN;
722 return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
/* dcbnl .setpfcstate (CEE): cache the PFC enable state (UP/DOWN only);
 * applied later by mlx5e_dcbnl_setall.
 */
725 static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
727 struct mlx5e_priv *priv = netdev_priv(netdev);
728 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
/* Reject anything other than the two defined states. */
730 if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
733 cee_cfg->pfc_enable = state;
/* DCB netlink operations: IEEE 802.1Qaz handlers first, then the CEE
 * handlers used when DCBX runs in CEE mode.
 */
736 const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
737 .ieee_getets = mlx5e_dcbnl_ieee_getets,
738 .ieee_setets = mlx5e_dcbnl_ieee_setets,
739 .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
740 .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
741 .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
742 .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
743 .getdcbx = mlx5e_dcbnl_getdcbx,
744 .setdcbx = mlx5e_dcbnl_setdcbx,
/* CEE interfaces */
747 .setall = mlx5e_dcbnl_setall,
748 .getstate = mlx5e_dcbnl_getstate,
749 .getpermhwaddr = mlx5e_dcbnl_getpermhwaddr,
751 .setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx,
752 .setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx,
753 .getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx,
754 .getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx,
756 .setpfccfg = mlx5e_dcbnl_setpfccfg,
757 .getpfccfg = mlx5e_dcbnl_getpfccfg,
758 .getcap = mlx5e_dcbnl_getcap,
759 .getnumtcs = mlx5e_dcbnl_getnumtcs,
760 .getpfcstate = mlx5e_dcbnl_getpfcstate,
761 .setpfcstate = mlx5e_dcbnl_setpfcstate,
/* Query the operational DCBX version from firmware and collapse it to
 * the only two modes the driver distinguishes: HOST or AUTO. Defaults
 * to HOST when the query fails.
 */
764 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
765 enum mlx5_dcbx_oper_mode *mode)
767 u32 out[MLX5_ST_SZ_DW(dcbx_param)];
769 *mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
771 if (!mlx5_query_port_dcbx_param(priv->mdev, out))
772 *mode = MLX5_GET(dcbx_param, out, version_oper);
774 /* From driver's point of view, we only care if the mode
775 * is host (HOST) or non-host (AUTO)
 */
777 if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
778 *mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
/* Program the default ETS configuration at driver init: every tc gets
 * 100% BW with TSA_VENDOR, and a default prio->tc mapping is applied
 * (mapping lines truncated in this view).
 */
781 static void mlx5e_ets_init(struct mlx5e_priv *priv)
786 if (!MLX5_CAP_GEN(priv->mdev, ets))
789 memset(&ets, 0, sizeof(ets));
790 ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
791 for (i = 0; i < ets.ets_cap; i++) {
792 ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
793 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
797 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
/* Apply the defaults through the common setets path. */
801 mlx5e_dcbnl_ieee_setets_core(priv, &ets);
/* DCB init entry point: query the current DCBX mode (if supported),
 * derive the DCB_CAP_DCBX_* capability flags, and install default ETS.
 * Bails out when the device lacks QoS support.
 */
804 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
806 struct mlx5e_dcbx *dcbx = &priv->dcbx;
808 if (!MLX5_CAP_GEN(priv->mdev, qos))
811 if (MLX5_CAP_GEN(priv->mdev, dcbx))
812 mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
/* Both CEE and IEEE DCBX versions are always advertised. */
814 priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
815 DCB_CAP_DCBX_VER_IEEE;
/* Host mode additionally advertises host-controlled DCBX. */
816 if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
817 priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
819 mlx5e_ets_init(priv);