1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include "hclge_main.h"
/* hclge_ieee_ets_to_tm_info() - apply an IEEE 802.1Qaz ETS configuration
 * from dcbnl to the driver's internal traffic-manager (tm_info) state.
 * NOTE(review): this extract elides source lines; the elided code
 * presumably assigns the SP/ETS scheduling-mode constants, breaks out of
 * each case and returns an error for unsupported TSA values -- confirm
 * against the full file.
 */
10 static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
/* Mirror each TC's transmission selection algorithm into tm_info. */
15 	for (i = 0; i < HNAE3_MAX_TC; i++) {
16 		switch (ets->tc_tsa[i]) {
17 		case IEEE_8021QAZ_TSA_STRICT:
18 			hdev->tm_info.tc_info[i].tc_sch_mode =
/* Strict-priority TCs take no DWRR bandwidth share. */
20 			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
22 		case IEEE_8021QAZ_TSA_ETS:
23 			hdev->tm_info.tc_info[i].tc_sch_mode =
25 			hdev->tm_info.pg_info[0].tc_dwrr[i] =
29 			/* Hardware only supports SP (strict priority)
30 			 * or ETS (enhanced transmission selection)
31 			 * algorithms, if we receive some other value
32 			 * from dcbnl, then throw an error.
/* Also refresh the priority-to-TC mapping from the new ETS config. */
38 	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
/* hclge_tm_info_to_ieee_ets() - export the driver's current TM state as
 * an IEEE 802.1Qaz ETS structure (the inverse of ets_to_tm_info). */
43 static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
/* Start from a clean slate so unset fields read back as zero. */
48 	memset(ets, 0, sizeof(*ets));
50 	ets->ets_cap = hdev->tc_max;
52 	for (i = 0; i < HNAE3_MAX_TC; i++) {
53 		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
/* PG 0's DWRR weight is reported as the TC's TX bandwidth share. */
54 		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
/* Map the internal scheduling mode back onto the IEEE TSA constants. */
56 		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
58 			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
60 			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
/* dcbnl .ieee_getets hook: fill @ets from the PF's current TM state. */
65 static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
67 	struct hclge_vport *vport = hclge_get_vport(h);
68 	struct hclge_dev *hdev = vport->back;
70 	hclge_tm_info_to_ieee_ets(hdev, ets);
/* Shared sanity checks for both ETS and mqprio configuration: the
 * requested TC count must fit the hardware maximum, every user priority
 * must map to a valid TC, and the PF must have enough TQPs allocated.
 * NOTE(review): elided lines presumably return an error code after each
 * failed check -- confirm against the full file. */
75 static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
80 	if (num_tc > hdev->tc_max) {
81 		dev_err(&hdev->pdev->dev,
82 			"tc num checking failed, %u > tc_max(%u)\n",
83 			num_tc, hdev->tc_max);
/* Every user priority must reference a TC inside [0, num_tc). */
87 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
88 		if (prio_tc[i] >= num_tc) {
89 			dev_err(&hdev->pdev->dev,
90 				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
91 				i, prio_tc[i], num_tc);
/* Each TC needs at least one TQP on the PF (vport[0]). */
96 	if (num_tc > hdev->vport[0].alloc_tqps) {
97 		dev_err(&hdev->pdev->dev,
98 			"allocated tqp checking failed, %u > tqp(%u)\n",
99 			num_tc, hdev->vport[0].alloc_tqps);
/* Validate a proposed ETS configuration against hardware limits and
 * detect whether it differs from what is currently programmed.
 * On success *tc holds the implied TC count and *changed reports whether
 * a TC remap is required.
 * NOTE(review): elided lines include the max_tc declaration, the
 * *changed assignments, the *tc assignment and the error returns --
 * confirm against the full file. */
106 static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
107 			      u8 *tc, bool *changed)
109 	bool has_ets_tc = false;
110 	u32 total_ets_bw = 0;
/* Find the highest TC referenced by any user priority, and note when
 * the prio->TC map differs from the currently programmed one. */
115 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
116 		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
119 		if (ets->prio_tc[i] > max_tc)
120 			max_tc = ets->prio_tc[i];
/* TC count implied by the map is max_tc + 1. */
123 	ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
/* Per-TC TSA checks: mode flips mark the config as changed; ETS TCs
 * accumulate bandwidth which must total exactly BW_PERCENT. */
127 	for (i = 0; i < hdev->tc_max; i++) {
128 		switch (ets->tc_tsa[i]) {
129 		case IEEE_8021QAZ_TSA_STRICT:
130 			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
134 		case IEEE_8021QAZ_TSA_ETS:
135 			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
139 			total_ets_bw += ets->tc_tx_bw[i];
/* ETS bandwidth shares must sum to 100% when any ETS TC exists. */
147 	if (has_ets_tc && total_ets_bw != BW_PERCENT)
/* A different TC count also counts as a map change. */
151 	if (*tc != hdev->tm_info.num_tc)
/* Re-program scheduler, pause and buffer hardware after a TC map change,
 * then rebuild RSS so the indirection table covers the new queue set.
 * NOTE(review): elided lines presumably return early on each failure --
 * confirm against the full file. */
157 static int hclge_map_update(struct hclge_dev *hdev)
161 	ret = hclge_tm_schd_setup_hw(hdev);
/* Second argument false: reconfiguration rather than first-time init --
 * TODO confirm the flag's meaning against hclge_pause_setup_hw(). */
165 	ret = hclge_pause_setup_hw(hdev, false);
169 	ret = hclge_buffer_alloc(hdev);
/* TC layout changed, so the RSS indirection config must be rebuilt. */
173 	hclge_rss_indir_init_cfg(hdev);
175 	return hclge_rss_init_hw(hdev);
/* Notify every client (PF NIC handle plus VMDq vports) of the new TC
 * count via its setup_tc op; clients without the op are skipped. */
178 static int hclge_client_setup_tc(struct hclge_dev *hdev)
180 	struct hclge_vport *vport = hdev->vport;
181 	struct hnae3_client *client;
182 	struct hnae3_handle *handle;
/* Index 0 is the PF itself; the remaining entries are VMDq vports. */
186 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
187 		handle = &vport[i].nic;
188 		client = handle->client;
/* A client may not be registered yet, or may not implement setup_tc. */
190 		if (!client || !client->ops || !client->ops->setup_tc)
193 		ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
/* Bring the client down and uninitialize it ahead of a reconfiguration
 * (paired with hclge_notify_init_up()). */
201 static int hclge_notify_down_uinit(struct hclge_dev *hdev)
205 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
209 	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
/* Re-initialize the client and bring it back up after a reconfiguration
 * (paired with hclge_notify_down_uinit()). */
212 static int hclge_notify_init_up(struct hclge_dev *hdev)
216 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
220 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
/* dcbnl .ieee_setets hook: validate and apply a new ETS configuration.
 * When the TC map changed, the client is torn down, TM/RSS hardware is
 * reprogrammed and the client is restored; otherwise only DWRR weights
 * are updated. NOTE(review): elided lines include error-path labels and
 * returns -- confirm against the full file. */
223 static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
225 	struct hclge_vport *vport = hclge_get_vport(h);
226 	struct net_device *netdev = h->kinfo.netdev;
227 	struct hclge_dev *hdev = vport->back;
228 	bool map_changed = false;
/* ETS is only honoured in host-managed IEEE DCBX mode, and never while
 * mqprio owns the TC configuration. */
232 	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
233 	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
236 	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
241 	netif_dbg(h, drv, netdev, "set ets\n");
/* Map changed: quiesce the client before touching the hardware. */
243 	ret = hclge_notify_down_uinit(hdev);
248 	hclge_tm_schd_info_update(hdev, num_tc);
250 	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
255 	ret = hclge_map_update(hdev);
259 	ret = hclge_client_setup_tc(hdev);
263 	ret = hclge_notify_init_up(hdev);
/* Map unchanged: only the DWRR weights need re-programming. */
268 	return hclge_tm_dwrr_cfg(hdev);
/* Error path: best-effort attempt to bring the client back up. */
274 	hclge_notify_init_up(hdev);
/* dcbnl .ieee_getpfc hook: report the PFC capability, the per-priority
 * enable bitmap derived from the per-TC hw_pfc_map, and the pause-frame
 * request/indication counters read from hardware. */
279 static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
281 	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
282 	struct hclge_vport *vport = hclge_get_vport(h);
283 	struct hclge_dev *hdev = vport->back;
284 	u8 i, j, pfc_map, *prio_tc;
287 	memset(pfc, 0, sizeof(*pfc));
288 	pfc->pfc_cap = hdev->pfc_max;
289 	prio_tc = hdev->tm_info.prio_tc;
290 	pfc_map = hdev->tm_info.hw_pfc_map;
292 	/* Pfc setting is based on TC */
/* Expand the per-TC PFC bitmap into the per-priority pfc_en bitmap:
 * a priority is enabled if its TC is PFC-enabled. */
293 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
294 		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
295 			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
296 				pfc->pfc_en |= BIT(j);
/* Fetch TX/RX pause statistics; elided lines presumably return on
 * error -- confirm against the full file. */
300 	ret = hclge_pfc_tx_stats_get(hdev, requests);
304 	ret = hclge_pfc_rx_stats_get(hdev, indications);
/* Copy the raw counters into the dcbnl structure. */
308 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
309 		pfc->requests[i] = requests[i];
310 		pfc->indications[i] = indications[i];
/* dcbnl .ieee_setpfc hook: translate the per-priority pfc_en bitmap into
 * a per-TC map and re-program pause and buffer hardware.
 * NOTE(review): elided lines include the pfc_map initialization and the
 * error returns -- confirm against the full file. */
315 static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
317 	struct hclge_vport *vport = hclge_get_vport(h);
318 	struct net_device *netdev = h->kinfo.netdev;
319 	struct hclge_dev *hdev = vport->back;
320 	u8 i, j, pfc_map, *prio_tc;
/* PFC only applies in host-managed IEEE DCBX mode, and not while mqprio
 * owns the TC configuration. */
323 	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
324 	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
/* Nothing to do if the requested bitmap matches the current one. */
327 	if (pfc->pfc_en == hdev->tm_info.pfc_en)
330 	prio_tc = hdev->tm_info.prio_tc;
/* Fold per-priority enables into a per-TC bitmap: a TC is PFC-enabled
 * if any of the priorities mapped to it is enabled. */
333 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
334 		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
335 			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
342 	hdev->tm_info.hw_pfc_map = pfc_map;
343 	hdev->tm_info.pfc_en = pfc->pfc_en;
345 	netif_dbg(h, drv, netdev,
346 		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
347 		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
349 	hclge_tm_pfc_info_update(hdev);
351 	ret = hclge_pause_setup_hw(hdev, false);
/* Buffer reallocation requires the client to be quiesced first. */
355 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
359 	ret = hclge_buffer_alloc(hdev);
/* If buffer allocation failed, still try to bring the client back up
 * before propagating the error (elided) -- confirm in full file. */
361 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
365 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
368 /* DCBX configuration */
/* dcbnl .getdcbx hook: report the DCBX capability bits. The elided line
 * presumably returns 0 while mqprio is active -- confirm in full file. */
369 static u8 hclge_getdcbx(struct hnae3_handle *h)
371 	struct hclge_vport *vport = hclge_get_vport(h);
372 	struct hclge_dev *hdev = vport->back;
374 	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
377 	return hdev->dcbx_cap;
/* dcbnl .setdcbx hook: accept only host-managed IEEE mode; LLD-managed
 * and CEE modes are rejected (elided return) -- confirm in full file. */
380 static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
382 	struct hclge_vport *vport = hclge_get_vport(h);
383 	struct net_device *netdev = h->kinfo.netdev;
384 	struct hclge_dev *hdev = vport->back;
386 	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
388 	/* No support for LLD_MANAGED modes or CEE */
389 	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
390 	    (mode & DCB_CAP_DCBX_VER_CEE) ||
391 	    !(mode & DCB_CAP_DCBX_HOST))
/* Mode accepted: remember it for later get/set ETS/PFC calls. */
394 	hdev->dcbx_cap = mode;
399 /* Set up TC for hardware offloaded mqprio in channel mode */
400 static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
402 	struct hclge_vport *vport = hclge_get_vport(h);
403 	struct hclge_dev *hdev = vport->back;
/* mqprio and DCB (ETS) cannot own the TC configuration at once. */
406 	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
408 	ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
/* Quiesce the client before re-programming the TM hardware. */
413 	ret = hclge_notify_down_uinit(hdev)
417 	hclge_tm_schd_info_update(hdev, tc);
418 	hclge_tm_prio_tc_info_update(hdev, prio_tc);
/* false: re-init of TM hardware, not first-time setup -- TODO confirm
 * the flag's meaning against hclge_tm_init_hw(). */
420 	ret = hclge_tm_init_hw(hdev, false);
424 	ret = hclge_client_setup_tc(hdev);
/* Ownership bookkeeping: DCB off; the MQPRIO flag is presumably set or
 * cleared based on an elided condition (likely tc > 1) -- confirm. */
428 	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
431 	hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
433 	hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
435 	return hclge_notify_init_up(hdev);
/* Error path: best-effort restore of the client. */
438 	hclge_notify_init_up(hdev);
/* dcbnl callbacks exported to the hns3 core via kinfo->dcb_ops. */
443 static const struct hnae3_dcb_ops hns3_dcb_ops = {
444 	.ieee_getets = hclge_ieee_getets,
445 	.ieee_setets = hclge_ieee_setets,
446 	.ieee_getpfc = hclge_ieee_getpfc,
447 	.ieee_setpfc = hclge_ieee_setpfc,
448 	.getdcbx = hclge_getdcbx,
449 	.setdcbx = hclge_setdcbx,
450 	.setup_tc = hclge_setup_tc,
/* Install the DCB ops on the PF's NIC handle and advertise host-managed
 * IEEE DCBX. Non-DCB hardware and non-PF vports get no ops. */
453 void hclge_dcb_ops_set(struct hclge_dev *hdev)
455 	struct hclge_vport *vport = hdev->vport;
456 	struct hnae3_knic_private_info *kinfo;
458 	/* Hdev does not support DCB or vport is
459 	 * not a pf, then dcb_ops is not set.
/* vport_id 0 is the PF; only the PF exposes DCB configuration. */
461 	if (!hnae3_dev_dcb_supported(hdev) ||
462 	    vport->vport_id != 0)
465 	kinfo = &vport->nic.kinfo;
466 	kinfo->dcb_ops = &hns3_dcb_ops;
/* Default capability: IEEE DCBX, managed by the host stack. */
467 	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;