// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"
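
/* Translate a dcbnl ieee_ets configuration into the driver's internal
 * traffic-manager (TM) info: strict-priority TCs get a DWRR weight of 0,
 * ETS TCs carry their configured bandwidth percentage.
 */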
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports SP (strict priority)
			 * or ETS (enhanced transmission selection)
			 * algorithms; if we receive any other value
			 * from dcbnl, return an error.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}
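
/* The inverse translation: report the current TM state back to dcbnl
 * as an ieee_ets structure.
 */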
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}
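
/* Checks shared by the ETS and mqprio paths: the TC count must not exceed
 * the hardware maximum, every user priority must map to a valid TC, and
 * there must be at least one TQP available per TC.
 */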
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	if (num_tc > hdev->vport[0].alloc_tqps) {
		dev_err(&hdev->pdev->dev,
			"allocated tqp checking failed, %u > tqp(%u)\n",
			num_tc, hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}
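
/* Validate an ieee_ets request and derive the TC count it implies;
 * *changed is set when it differs from the active configuration. The
 * bandwidth of all ETS TCs must sum to exactly BW_PERCENT (100%).
 */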
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	int ret;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];
	}

	ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
	if (ret)
		return ret;

	for (i = 0; i < hdev->tc_max; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}
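
/* Re-program the hardware after a TC change: scheduler, pause parameters,
 * packet buffer allocation, and finally the RSS configuration, which is
 * rebuilt since the queue range behind each TC may have moved.
 */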
static int hclge_map_update(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_rss_indir_init_cfg(hdev);

	return hclge_rss_init_hw(hdev);
}
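
/* Propagate the new TC count to every registered client handle (the PF's
 * nic handle plus any VMDq vports).
 */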
static int hclge_client_setup_tc(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_client *client;
	struct hnae3_handle *handle;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &vport[i].nic;
		client = handle->client;

		if (!client || !client->ops || !client->ops->setup_tc)
			continue;

		ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
		if (ret)
			return ret;
	}

	return 0;
}
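
/* Helpers to quiesce the client (down + uninit) before a reconfiguration
 * and bring it back (init + up) afterwards.
 */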
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}
static int hclge_notify_init_up(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
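
/* dcbnl ieee_setets entry point (reached from userspace IEEE DCB tools
 * such as lldptool). A prio->TC map or TC count change requires a full
 * client teardown and re-init; a pure DWRR weight change only needs
 * hclge_tm_dwrr_cfg() at the end.
 */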
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		ret = hclge_client_setup_tc(hdev);
		if (ret)
			goto err_out;

		ret = hclge_notify_init_up(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}
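
/* dcbnl ieee_getpfc: report the PFC capability, the per-priority enable
 * bits derived from the TC-based hardware PFC map, and the PFC frame
 * counters (requests sent, indications received).
 */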
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = hdev->tm_info.hw_pfc_map;

	/* Pfc setting is based on TC */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
				pfc->pfc_en |= BIT(j);
		}
	}

	ret = hclge_pfc_tx_stats_get(hdev, requests);
	if (ret)
		return ret;

	ret = hclge_pfc_rx_stats_get(hdev, indications);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		pfc->requests[i] = requests[i];
		pfc->indications[i] = indications[i];
	}

	return 0;
}
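
/* dcbnl ieee_setpfc: hardware enables PFC per TC rather than per priority,
 * so fold the per-priority enable bits into a per-TC map before rebuilding
 * the pause and buffer configuration.
 */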
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		return ret;
	}

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;

	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}
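
/* Validate a tc-mqprio offload request: each TC's queue count must be a
 * power of 2 and no larger than rss_size_max, the per-TC queue ranges must
 * start at offset 0 and be contiguous, per-TC rate limiting is not
 * supported, and the total queue count must fit in the PF's allocated TQPs.
 */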
static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u16 queue_sum = 0;
	int ret;
	int i;

	if (!mqprio_qopt->qopt.num_tc) {
		mqprio_qopt->qopt.num_tc = 1;
		return 0;
	}

	ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
					mqprio_qopt->qopt.prio_tc_map);
	if (ret)
		return ret;

	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
		if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count must be power of 2\n");
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.count[i] > hdev->rss_size_max) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count should be no more than %u\n",
				hdev->rss_size_max);
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.offset[i] != queue_sum) {
			dev_err(&hdev->pdev->dev,
				"qopt queue offset must start from 0 and be contiguous\n");
			return -EINVAL;
		}

		if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
			dev_err(&hdev->pdev->dev,
				"qopt tx_rate is not supported\n");
			return -EOPNOTSUPP;
		}

		queue_sum = mqprio_qopt->qopt.offset[i];
		queue_sum += mqprio_qopt->qopt.count[i];
	}

	if (hdev->vport[0].alloc_tqps < queue_sum) {
		dev_err(&hdev->pdev->dev,
			"qopt queue count sum should be no more than %u\n",
			hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}
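
/* Copy the validated mqprio parameters into the per-handle hnae3_tc_info
 * and mark each referenced TC in the tc_en bitmap.
 */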
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	int i;

	memset(tc_info, 0, sizeof(*tc_info));
	tc_info->num_tc = mqprio_qopt->qopt.num_tc;
	memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
	memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
	       sizeof_field(struct hnae3_tc_info, tqp_count));
	memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
	       sizeof_field(struct hnae3_tc_info, tqp_offset));

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
}
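
/* Apply a TC configuration: refresh the scheduler info and the global
 * prio->TC map, then rewrite the hardware mapping.
 */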
static int hclge_config_tc(struct hclge_dev *hdev,
			   struct hnae3_tc_info *tc_info)
{
	int i;

	hclge_tm_schd_info_update(hdev, tc_info->num_tc);
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];

	return hclge_map_update(hdev);
}
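
/* Illustrative invocation (device name and queue layout are examples only):
 *   tc qdisc add dev ethX root mqprio num_tc 4 \
 *      map 0 1 2 3 0 1 2 3 queues 4@0 4@4 4@8 4@12 hw 1 mode channel
 */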
/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h,
			  struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info old_tc_info;
	u8 tc = mqprio_qopt->qopt.num_tc;
	int ret;

	/* if client unregistered, it's not allowed to change
	 * mqprio configuration, which may cause uninit ring
	 * fail.
	 */
	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return -EBUSY;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check mqprio qopt params, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
	kinfo->tc_info.mqprio_active = tc > 0;

	ret = hclge_config_tc(hdev, &kinfo->tc_info);
	if (ret)
		goto err_out;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return hclge_notify_init_up(hdev);

err_out:
	/* roll back tc configuration */
	memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
	if (hclge_config_tc(hdev, &kinfo->tc_info))
		dev_err(&hdev->pdev->dev,
			"failed to roll back tc configuration\n");

	hclge_notify_init_up(hdev);

	return ret;
}
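
/* dcbnl ops exposed to the hns3 client through the hnae3 layer */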
static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.setup_tc	= hclge_setup_tc,
};
void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* If hdev does not support DCB or the vport is
	 * not a PF, then dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}