// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
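/* Editorial note, hedged: these 40 bytes appear to match the widely
 * used default Toeplitz RSS key (the Microsoft sample key) that many
 * NIC drivers ship as their initial hash key; the VF programs it when
 * no user-supplied key is present.
 */
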
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}
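/* Each TQP exposes exactly two counters, one TX and one RX packet
 * count, filled in the same order by hclgevf_tqps_get_stats() and
 * named by hclgevf_tqps_get_strings(), hence num_tqps * 2.
 */
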
static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);

		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);

		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}
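/* Usage sketch: zero-init the message, set code/subcode, then post it
 * via hclgevf_send_mbx_msg(). A synchronous query with a one-byte
 * response buffer typically looks like:
 *
 *	struct hclge_vf_to_pf_msg send_msg;
 *	u8 resp;
 *
 *	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
 *	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp,
 *				   sizeof(resp));
 */
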
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}
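/* Response layout sketch (as implied by the offsets above): the 6-byte
 * reply packs three u16 fields,
 *	bytes 0-1: number of TQPs allocated to this VF
 *	bytes 2-3: maximum RSS size
 *	bytes 4-5: RX buffer length
 */
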
static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
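/* Worked example (illustrative values only, not taken from the
 * headers): if HCLGEVF_TQP_MAX_SIZE_DEV_V2 were 1024 and
 * HCLGEVF_TQP_REG_SIZE were 0x200, queue 1026 would map at
 *	io_base + HCLGEVF_TQP_REG_OFFSET + HCLGEVF_TQP_EXT_REG_OFFSET +
 *	2 * 0x200
 * i.e. the extended window indexes queues relative to the first queue
 * past the legacy register window.
 */
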
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int num_tc = 0;
	u16 i;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after initializing the max rss_size and tqps, adjust the default
	 * tqp number and rss size with the actual vector numbers
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}
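/* Worked example (made-up numbers): with 16 TQPs, rss_size_max = 16,
 * two TCs mapped and 9 NIC MSI-X vectors, the first pass gives
 * rss_size = min(16, 16 / 2) = 8 and num_tqps = 16; the vector-based
 * clamp then yields num_tqps = min(9 - 1, 16) = 8 and
 * rss_size = min(8 / 2, 8) = 4.
 */
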
static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int rss_cfg_tbl_num;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGEVF_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index =
			cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	unsigned int i;
	int status;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
			      tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
			      0x1);
		hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}
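/* Worked example: with a 40-byte RSS key and 8-byte mailbox responses,
 * msg_num = (40 + 8 - 1) / 8 = 5, and the final iteration copies
 * 40 - 4 * 8 = 8 bytes; the ceiling division plus the last-chunk
 * special case keep the copy from overrunning the key buffer.
 */
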
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with user specified key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}
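/* Mapping sketch: ethtool's RXH_* request bits translate to device
 * tuple bits one for one, e.g. `ethtool -N ethX rx-flow-hash tcp4
 * sdfn` (src/dst IP plus both ports) would yield HCLGEVF_S_IP_BIT |
 * HCLGEVF_D_IP_BIT | HCLGEVF_S_PORT_BIT | HCLGEVF_D_PORT_BIT; SCTP
 * flows additionally hash the verification tag via HCLGEVF_V_TAG_BIT.
 */
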
static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
				      struct ethtool_rxnfc *nfc,
				      struct hclgevf_rss_input_tuple_cmd *req)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init rss tuple cmd, ret = %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;

	return 0;
}

static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
					      int flow_type, u8 *tuple_sets)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		*tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
{
	u64 tuple_data = 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		tuple_data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		tuple_data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		tuple_data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		tuple_data |= RXH_IP_SRC;

	return tuple_data;
}
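/* This is the inverse of hclgevf_get_rss_hash_bits(): device tuple
 * bits are folded back into the ethtool RXH_* representation so that
 * `ethtool -n ethX rx-flow-hash <flow>` reports what was programmed.
 */
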
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
						 &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclgevf_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}
static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}
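/* Message layout sketch: bytes 0-5 of send_msg.data carry the new MAC
 * and bytes 6-11 the old one, so the PF can validate the change; on
 * the very first set with no PF-assigned MAC, the old-MAC slot is
 * zeroed to tell the PF there is nothing to match against.
 */
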
static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}
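/* State-transition summary (derived from the switch above):
 *
 *	current state	new request	result
 *	TO_DEL		TO_ADD		ACTIVE (pending delete cancelled)
 *	TO_ADD		TO_DEL		node freed (pending add cancelled)
 *	ACTIVE		TO_DEL		TO_DEL
 *	TO_ADD		ACTIVE		ACTIVE (add confirmed)
 */
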
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address was never added, there is no need to delete it */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %pM, state = %d, ret = %d\n",
				mac_node->mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of sending the mac config request
		 * to the PF. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it is removed next time. If it is TO_ADD, the
		 * TO_ADD request failed, so just remove the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received during the time
			 * window of sending the mac addr config request to
			 * the PF, so just change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list and retry next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting or the reset failed, firmware is
	 * unable to handle the mailbox. Just record the vlan id, and remove
	 * it after the reset finishes.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing a hw vlan filter fails, record the vlan id and try
	 * to remove it from hw later, to keep the sw vlan filter consistent
	 * with the hw.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}
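/* Mailbox payload sketch (per the offsets above): data[0] carries the
 * is_kill flag, data[1..2] the 16-bit vlan id and data[3..4] the vlan
 * protocol (802.1Q), so a single message fully describes one add or
 * remove request.
 */
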
static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u8 return_status = 0;
	int ret;
	u16 i;

	/* disable vf queue before sending queue reset msg to PF */
	ret = hclgevf_tqp_enable(handle, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
			ret);
		return ret;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
				   sizeof(return_status));
	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
		return ret;

	for (i = 1; i < handle->kinfo.num_tqps; i++) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
		memcpy(send_msg.data, &i, sizeof(i));
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret)
			return ret;
	}

	return 0;
}
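/* Compatibility sketch: the first message resets queue 0 and, on newer
 * firmware, all queues at once (signalled by an
 * HCLGEVF_RESET_ALL_QUEUE_DONE reply); otherwise the loop above falls
 * back to resetting queues 1..num_tqps-1 one mailbox message at a
 * time.
 */
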
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we wait a bit more to let the reset of the stack complete. This
	 * might happen when the reset was asserted by the PF; it also means
	 * we may end up waiting a bit longer even for a VF reset.
	 */
	msleep(5000);

	return 0;
}
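/* Timeout arithmetic: a 20000 us poll interval times 2000 polls gives
 * an HCLGEVF_RESET_WAIT_TIMEOUT_US of 40,000,000 us, i.e. the VF waits
 * up to 40 seconds for hardware to report reset completion.
 */
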
static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		struct hclge_vf_to_pf_msg send_msg;
		int ret;

		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to assert VF reset, ret = %d\n", ret);
			return ret;
		}
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
		 hdev->reset_type);

	return 0;
}

static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fails */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	/* perform reset of the stack & ae device for a client */
	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;
	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}
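/* Priority sketch: when several reset requests are pending at once,
 * the strongest one wins in the order checked above,
 *	VF_RESET > VF_FULL_RESET > VF_PF_FUNC_RESET > VF_FUNC_RESET >
 *	FLR_RESET
 * which is why each branch also clears the lower-priority bits it
 * subsumes.
 */
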
static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_RETRY_WAIT_MS	500
#define HCLGEVF_FLR_RETRY_CNT		5

	struct hclgevf_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = HNAE3_FLR_RESET;
	ret = hclgevf_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before FLR done */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	hdev->rst_stats.flr_rst_cnt++;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	hclgevf_enable_vector(&hdev->misc_vector, true);

	ret = hclgevf_reset_rebuild(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
			 ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}

static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET)
			hclgevf_reset(hdev);
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of the below happens:
		 * 1. reset was initiated due to a watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in the watchdog reacting and inducing
		 *       a VF reset. This also means our cmdq would be
		 *       unreliable.
		 *    b. a problem in TX due to another lower layer (for
		 *       example the link layer not functioning properly).
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There is no clearer way to detect the above cases
		 * than to react to the response of the PF for this reset
		 * request. PF will ack the 1b and 2 cases, but we will not
		 * get any intimation about 1a from the PF as the cmdq would
		 * be in an unreliable state, i.e. mailbox communication
		 * between PF and VF would be broken.
		 *
		 * if we never get into the pending state it means either:
		 * 1. PF is not receiving our request, which could be due to
		 *    an IMP reset
		 * 2. PF is screwed
		 * We cannot do much for 2., but to check first we can try to
		 * reset our PCIe + stack and see if it alleviates the
		 * problem.
		 */
		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		return;

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);
	struct hnae3_handle *handle = &hdev->nic;

	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		return;

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
		hclgevf_keep_alive(hdev);

	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
		hclgevf_tqps_update_stats(handle);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in the future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	hclgevf_sync_vlan_filter(hdev);

	hclgevf_sync_mac_table(hdev);

	hclgevf_sync_promisc_mode(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclgevf_task_schedule(hdev, delta);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
						service_task.work);

	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
	hclgevf_periodic_service_task(hdev);

	/* Handle reset and mbx again in case periodical task delays the
	 * handling by calling hclgevf_task_schedule() in
	 * hclgevf_periodic_service_task()
	 */
	hclgevf_reset_service_task(hdev);
	hclgevf_mailbox_service_task(hdev);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

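/* Classify a vector 0 interrupt by inspecting the CMDQ state register: a
 * reset interrupt takes priority over a mailbox (CMDQ RX) event, and
 * anything else is reported as "other". *clearval receives the value the
 * caller must write back to acknowledge the event.
 */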
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 val, cmdq_stat_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG);
	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
		hdev->rst_stats.vf_rst_cnt++;
		/* set up VF hardware reset status; the PF will clear
		 * this status once it has finished initializing.
		 */
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
				  val | HCLGEVF_VF_RST_ING_BIT);
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
		/* for revision 0x21, clearing interrupt is writing bit 0
		 * to the clear register, writing bit 1 means to keep the
		 * old value.
		 * for revision 0x20, the clear register is a read & write
		 * register, so we should just write 0 to the bit we are
		 * handling, and keep other bits as cmdq_stat_reg.
		 */
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		else
			*clearval = cmdq_stat_reg &
				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
		 cmdq_stat_reg);

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

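/* Vector 0 ISR: mask the vector, classify the event, then either schedule
 * the reset task or run the mailbox handler directly. The event is acked and
 * the vector re-enabled only for known causes; an unknown source is left
 * masked so a cause we cannot clear does not re-fire indefinitely.
 */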
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get current port based vlan state from PF */
	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
	if (ret)
		return ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	/* get queue depth info from PF */
	ret = hclgevf_get_queue_depth(hdev);
	if (ret)
		return ret;

	ret = hclgevf_get_pf_media_type(hdev);
	if (ret)
		return ret;

	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_tuple_cfg *tuple_sets;
	u32 *rss_ind_tbl;
	u16 i;

	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
	tuple_sets = &rss_cfg->rss_tuple_sets;
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;

	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
				   sizeof(*rss_ind_tbl), GFP_KERNEL);
	if (!rss_ind_tbl)
		return -ENOMEM;

	rss_cfg->rss_indirection_tbl = rss_ind_tbl;
	memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
	       HCLGEVF_RSS_KEY_SIZE);

	tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
	tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
	tuple_sets->ipv6_sctp_en =
		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
		HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
		HCLGEVF_RSS_INPUT_TUPLE_SCTP;
	tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;

	/* Initialize RSS indirect table */
	for (i = 0; i < rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;

	return 0;
}

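/* With the default layout above, flows are spread round-robin across the
 * active queues. Illustrative example: with rss_size = 4 the indirection
 * table holds 0, 1, 2, 3, 0, 1, 2, 3, ... for all rss_ind_tbl_size entries.
 */
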
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
{
#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		hclgevf_task_schedule(hdev, 0);
	} else {
		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclgevf_flush_link_update(hdev);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	hclgevf_update_link_mode(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->reset_type != HNAE3_VF_RESET)
		hclgevf_reset_tqp(handle);

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
#define HCLGEVF_STATE_ALIVE	1
#define HCLGEVF_STATE_NOT_ALIVE	0

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
				   HCLGEVF_STATE_NOT_ALIVE;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);
	sema_init(&hdev->reset_sem, 1);

	spin_lock_init(&hdev->mac_table.mac_list_lock);
	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

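/* hclgevf_init_msi() requests one IRQ per configured vector: a RoCE-capable
 * VF needs pure MSI-X with at least roce_base_msix_offset + 1 vectors, while
 * a NIC-only VF may fall back to plain MSI with the HNAE3 minimum. The
 * tracking arrays allocated here are devm-managed but are still freed
 * explicitly on the partial-failure paths.
 */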
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_dev_roce_supported(hdev))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
						hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGEVF_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "VF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "PF media type of this VF: %u\n",
		 hdev->hw.mac.media_type);

	dev_info(dev, "VF info end.\n");
}

static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					    struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.rst_cnt;
	int ret;

	ret = client->ops->init_instance(&hdev->nic);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.rst_cnt) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		return -EBUSY;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->nic))
		hclgevf_info_show(hdev);

	return 0;
}

static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;
		ret = hclgevf_init_nic_client_instance(ae_dev, client);
		if (ret)
			goto clear_nic;
		ret = hclgevf_init_roce_client_instance(ae_dev,
							hdev->roce_client);
		if (ret)
			goto clear_roce;
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}
		ret = hclgevf_init_roce_client_instance(ae_dev, client);
		if (ret)
			goto clear_roce;
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

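/* Device memory (BAR 4) is optional: hclgevf_dev_mem_map() only maps it when
 * pci_select_bars() reports the BAR as present, and it maps the region
 * write-combined via devm_ioremap_wc().
 */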
static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MEM_BAR		4

	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw = &hdev->hw;

	/* if the device has no device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
		return 0;

	hw->mem_base = devm_ioremap_wc(&pdev->dev,
				       pci_resource_start(pdev,
							  HCLGEVF_MEM_BAR),
				       pci_resource_len(pdev, HCLGEVF_MEM_BAR));
	if (!hw->mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	ret = hclgevf_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	return 0;

err_unmap_io_base:
	pci_iounmap(pdev, hdev->hw.io_base);
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.mem_base);

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* the NIC's MSI-X vector count always equals the RoCE's. */
		hdev->num_nic_msix = hdev->num_roce_msix;

		/* VF should have NIC vectors and RoCE vectors, NIC vectors
		 * are queued before RoCE vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		hdev->num_nic_msix = hdev->num_msi;
	}

	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for vf(min:2).\n",
			hdev->num_nic_msix);
		return -EINVAL;
	}

	return 0;
}

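/* Devices below V3 cannot report their specifications through the
 * HCLGEVF_OPC_QUERY_DEV_SPECS command, so hclgevf_query_dev_specs() falls
 * back to the compile-time defaults set up here.
 */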
static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num =
					HCLGEVF_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}

static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
				    struct hclgevf_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclgevf_dev_specs_0_cmd *req0;
	struct hclgevf_dev_specs_1_cmd *req1;

	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
					le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}

static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}

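/* The dev-specs query spans HCLGEVF_QUERY_DEV_SPECS_BD_NUM buffer
 * descriptors issued as one command: every descriptor except the last
 * carries HCLGEVF_CMD_FLAG_NEXT so the firmware treats the chain as a
 * single multi-BD request.
 */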
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
{
	struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclgevf_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclgevf_cmd_setup_basic_desc(&desc[i],
					     HCLGEVF_OPC_QUERY_DEV_SPECS, true);
		desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
	}
	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
				     true);

	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclgevf_parse_dev_specs(hdev, desc);
	hclgevf_check_dev_specs(hdev);

	return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
			       HCLGE_MBX_VPORT_LIST_CLEAR);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

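/* hclgevf_reset_hdev() rebuilds only what a VF reset tears down: PCI/IRQ
 * resources (conditionally, in hclgevf_pci_reset()), the command queue, RSS,
 * GRO and the VLAN filter. MAC and promiscuous state are reconciled later by
 * the periodic service task once HCLGEVF_STATE_PROMISC_CHANGED is set.
 */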
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret)
		return ret;

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret)
		goto err_cmd_queue_init;

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to query dev specifications, ret = %d\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;
	hdev->reset_type = HNAE3_NONE_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret)
		goto err_misc_irq_init;

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret)
		goto err_config;

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_cfg(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
		goto err_config;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	/* ensure the vport table list is empty before init */
	ret = hclgevf_clear_vport_list(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to clear tbl list configuration, ret = %d.\n",
			ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_state_uninit(hdev);

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_cmd_uninit(hdev);
	hclgevf_pci_uninit(hdev);
	hclgevf_uninit_mac_list(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max,
		     hdev->num_tqps / kinfo->tc_info.num_tc);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static void hclgevf_update_rss_size(struct hnae3_handle *handle,
				    u16 new_tqps_num)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u16 max_rss_size;

	kinfo->req_rss_size = new_tqps_num;

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     hdev->num_tqps / kinfo->tc_info.num_tc);

	/* Use the user's configuration when it is not larger than
	 * max_rss_size, otherwise, use the maximum specification value.
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size)
		kinfo->rss_size = kinfo->req_rss_size;
	else if (kinfo->rss_size > max_rss_size ||
		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
		kinfo->rss_size = max_rss_size;

	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
}

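/* Illustrative example (hypothetical values): with rss_size_max = 16,
 * num_tqps = 16 and num_tc = 2, max_rss_size is min(16, 16 / 2) = 8. A
 * requested size of 6 is applied as-is (6 <= 8); a request of 10 exceeds the
 * limit, so rss_size is only changed if it already violates max_rss_size.
 */
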
static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
				bool rxfh_configured)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	hclgevf_update_rss_size(handle, new_tqps_num);

	ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
			    sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	hdev->rss_cfg.rss_size = kinfo->rss_size;

	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				   u8 *module_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*supported = hdev->hw.mac.supported;
	*advertising = hdev->hw.mac.advertising;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

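/* The register dump is laid out in 4-word (REG_NUM_PER_LINE) lines and each
 * block is padded with SEPARATOR_VALUE words, which is why every *_lines
 * count above reserves one extra line beyond the raw register payload.
 */
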
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF registers values from VF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	rtnl_lock();

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
		dev_warn(&hdev->pdev->dev,
			 "is resetting when updating port based vlan info\n");
		rtnl_unlock();
		return;
	}

	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret) {
		rtnl_unlock();
		return;
	}

	/* send msg to PF and wait for the updated port based vlan info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan_info, data_size);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (!ret) {
		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
			nic->port_base_vlan_state = state;
		else
			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
	.get_cmdq_stat = hclgevf_get_cmdq_stat,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
	if (!hclgevf_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
}

module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);