// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

static const u8 hclgevf_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
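/* Register address tables below; judging from the grouping and naming
 * (an assumption, since the consumer is not shown in this excerpt), these
 * are the register sets walked when dumping registers for debugging:
 * command-queue registers, common/misc-vector registers, per-ring
 * data-path registers and per-TQP interrupt registers.
 */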
static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};
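/* One hclgevf_dev embeds both of its hnae3_handle members ("nic" and
 * "roce"), so a handle is mapped back to its owning device with
 * container_of(); the member picked depends on the client type, e.g.
 *
 *	hdev = container_of(handle, struct hclgevf_dev, roce);
 *
 * for a HNAE3_CLIENT_ROCE handle, as the helper below does.
 */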
static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}
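/* Every VF-to-PF request below travels through a fixed-size mailbox
 * message (struct hclge_vf_to_pf_msg): a command code, an optional
 * subcode and a small data payload. A minimal sketch of the calling
 * pattern used throughout this file:
 *
 *	struct hclge_vf_to_pf_msg send_msg;
 *
 *	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
 *	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 *
 * The bool argument selects whether to wait for the PF's response; the
 * last two arguments describe the response buffer, if any.
 */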
static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
	msg->code = code;
	msg->subcode = subcode;
}

static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = basic_info->mbx_api_version;
	caps = basic_info->pf_caps;
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}
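/* The PF answers HCLGE_MBX_GET_QINFO with a 6-byte payload holding three
 * u16 fields; the offsets defined below decode it: bytes 0-1 carry the
 * number of allocated TQPs, bytes 2-3 the maximum RSS size and bytes 4-5
 * the RX buffer length.
 */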
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6
#define HCLGEVF_TQPS_ALLOC_OFFSET	0
#define HCLGEVF_TQPS_RSS_SIZE_OFFSET	2
#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET	4

	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
	       sizeof(u16));

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET	0
#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET	2

	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
	       sizeof(u16));
	memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
	       sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	memcpy(send_msg.data, &queue_id, sizeof(queue_id));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
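/* A worked example of the queue register layout above (a sketch; assuming
 * HCLGEVF_TQP_MAX_SIZE_DEV_V2 is 128): queue 0 sits at io_base +
 * HCLGEVF_TQP_REG_OFFSET, queue 127 at that base plus
 * 127 * HCLGEVF_TQP_REG_SIZE, while queue 128 starts over at the extended
 * window io_base + HCLGEVF_TQP_REG_OFFSET + HCLGEVF_TQP_EXT_REG_OFFSET.
 */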
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after initializing the max rss_size and tqps, adjust the default
	 * number of tqps and the rss size to the actual number of vectors
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		hdev->hw.mac.link = link_state;
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
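/* The RSS hash key (HCLGEVF_RSS_KEY_SIZE, 40 bytes) does not fit in a
 * single command descriptor, so the helper below writes it in
 * HCLGEVF_RSS_HASH_KEY_NUM-byte chunks and encodes the chunk index in
 * hash_config. Assuming a 16-byte chunk size, the 40-byte key goes out
 * as 16 + 16 + 8 bytes over three descriptors.
 */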
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclgevf_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGEVF_RSS_KEY_SIZE;
	req = (struct hclgevf_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}
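/* The RSS indirection table is programmed in chunks as well: each command
 * carries HCLGEVF_RSS_CFG_TBL_SIZE entries plus the start index, so
 * rss_ind_tbl_size / HCLGEVF_RSS_CFG_TBL_SIZE commands are issued in
 * total (e.g. 512 entries at 16 per command would take 32 commands; the
 * sizes here are illustrative).
 */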
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int rss_cfg_tbl_num;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGEVF_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index =
			cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	unsigned int i;
	int status;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
			      tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
			      0x1);
		hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
/* for revision 0x20, vf shared the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8

	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	if (indir)
		for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow RSS key with the user-specified
			 * key
			 */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}
static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
				      struct ethtool_rxnfc *nfc,
				      struct hclgevf_rss_input_tuple_cmd *req)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init rss tuple cmd, ret = %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;

	return 0;
}

static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
					      int flow_type, u8 *tuple_sets)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		*tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
{
	u64 tuple_data = 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		tuple_data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		tuple_data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		tuple_data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		tuple_data |= RXH_IP_SRC;

	return tuple_data;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
						 &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclgevf_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
			hnae3_get_field(node->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}
static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}
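/* Each shadow-table MAC entry runs a small state machine: TO_ADD (must
 * still be programmed into hardware), TO_DEL (must be removed) and ACTIVE
 * (already programmed). hclgevf_update_mac_node() below folds a new
 * request into the node's current state, e.g. TO_ADD arriving on a TO_DEL
 * node simply restores ACTIVE without touching hardware.
 */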
static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}

	/* if this address was never added, there is no need to delete it */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %pM, state = %d, ret = %d\n",
				mac_node->mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}
static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of sending the mac config request
		 * to the PF. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it is removed next time; if it is TO_ADD, the
		 * TO_ADD request failed, so just remove the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means a
			 * new TO_ADD request was received during the time
			 * window of sending the mac addr config request to
			 * the PF, so just change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if adding or deleting some mac addresses failed, move them back
	 * to the mac_list and retry next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vf_to_pf_msg send_msg;

	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		return -EOPNOTSUPP;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_ENABLE_VLAN_FILTER);
	send_msg.data[0] = enable ? 1 : 0;

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET	0
#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET	1
#define HCLGEVF_VLAN_MBX_PROTO_OFFSET	3

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting or reset failed, firmware is unable
	 * to handle the mailbox. Just record the vlan id, and remove it
	 * after the reset finishes.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
	       sizeof(vlan_id));
	memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
	       sizeof(proto));
	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to be consistent with the
	 * mac table.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}
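/* vlan_del_fail_bmap holds the IDs of VLAN filters whose removal had to
 * be deferred, either because a reset was in progress or because the
 * mailbox delete failed. The sync task below drains that bitmap one ID
 * at a time and gives up after HCLGEVF_MAX_SYNC_COUNT rounds so that it
 * cannot spin forever.
 */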
static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60

	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			return;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			return;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u8 return_status = 0;
	int ret;
	u16 i;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(handle, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
			ret);
		return ret;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
				   sizeof(return_status));
	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
		return ret;

	for (i = 1; i < handle->kinfo.num_tqps; i++) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
		memcpy(send_msg.data, &i, sizeof(i));
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit longer to let the reset of the stack complete.
	 * This might be needed because the reset may have been asserted by
	 * the PF, which also means we may end up waiting a bit longer even
	 * for a VF reset.
	 */
	msleep(5000);

	return 0;
}
static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
			  reg_val);
}
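/* This "handshake" is a software-ready flag shared with the firmware
 * (IMP): setting HCLGEVF_NIC_SW_RST_RDY in the CSQ depth register tells
 * it that the VF has quiesced and the reset may proceed; the flag is
 * cleared again once the stack has been rebuilt.
 */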
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME	100

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		struct hclge_vf_to_pf_msg send_msg;
		int ret;

		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to assert VF reset, ret = %d\n", ret);
			return ret;
		}
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
		 hdev->reset_type);

	return 0;
}

static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fails */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	/* perform reset of the stack & ae device for a client */
	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_rst_done_cnt++;
	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	rtnl_unlock();
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		return ret;
	}

	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore the RoCE notify error if it fails fewer than
	 * HCLGEVF_RESET_MAX_FAIL_CNT - 1 times
	 */
	if (ret &&
	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.rst_done_cnt++;
	hdev->rst_stats.rst_fail_cnt = 0;
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	return 0;
}

static void hclgevf_reset(struct hclgevf_dev *hdev)
{
	if (hclgevf_reset_prepare(hdev))
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	if (hclgevf_reset_wait(hdev)) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"failed to fetch H/W reset completion status\n");
		goto err_reset;
	}

	if (hclgevf_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	hclgevf_reset_err_handle(hdev);
}
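/* Pending reset requests are kept as bits in a mask; the helper below
 * pops the highest-priority one and clears every request it supersedes,
 * so e.g. a full VF reset also absorbs any pending function-level reset.
 */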
static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
#define HCLGEVF_RESET_RETRY_WAIT_MS	500
#define HCLGEVF_RESET_RETRY_CNT		5

	struct hclgevf_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = rst_type;
	ret = hclgevf_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before reset done */
	hclgevf_enable_vector(&hdev->misc_vector, false);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		hdev->rst_stats.flr_rst_cnt++;
}

static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	hclgevf_enable_vector(&hdev->misc_vector, true);

	ret = hclgevf_reset_rebuild(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
			 ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
			      &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay)
{
	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
}
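/* Reset handling, mailbox processing and the periodic service below are
 * all funneled through a single delayed work item, hdev->service_task,
 * on the driver-private hclgevf_wq workqueue; the STATE_*_SCHED bits set
 * above decide which sub-task actually runs on each invocation.
 */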
static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3

        if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
                return;

        down(&hdev->reset_sem);
        set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);

        if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
                               &hdev->reset_state)) {
                /* PF has intimated that it is about to reset the hardware.
                 * We now have to poll & check if hardware has actually
                 * completed the reset sequence. On hardware reset completion,
                 * VF needs to reset the client and ae device.
                 */
                hdev->reset_attempts = 0;

                hdev->last_reset_time = jiffies;
                while ((hdev->reset_type =
                        hclgevf_get_reset_level(hdev, &hdev->reset_pending))
                       != HNAE3_NONE_RESET)
                        hclgevf_reset(hdev);
        } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
                                      &hdev->reset_state)) {
                /* we could be here when either of below happens:
                 * 1. reset was initiated due to watchdog timeout caused by
                 *    a. IMP was earlier reset and our TX got choked down and
                 *       which resulted in watchdog reacting and inducing VF
                 *       reset. This also means our cmdq would be unreliable.
                 *    b. problem in TX due to other lower layer(example link
                 *       layer not functioning properly etc.)
                 * 2. VF reset might have been initiated due to some config
                 *    change.
                 *
                 * NOTE: There's no clear way to detect the above cases other
                 * than to react to the response of PF for this reset request.
                 * PF will ack the 1b and 2. cases but we will not get any
                 * intimation about 1a from PF as cmdq would be in unreliable
                 * state i.e. mailbox communication between PF and VF would
                 * be broken.
                 *
                 * if we are never getting into pending state it means either:
                 * 1. PF is not receiving our request which could be due to IMP
                 *    reset
                 * 2. PF is not responding at all
                 * We cannot do much for 2. but to check first we can try reset
                 * our PCIe + stack and see if it alleviates the problem.
                 */
                if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
                        /* prepare for full reset of stack + pcie interface */
                        set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

                        /* "defer" schedule the reset task again */
                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                } else {
                        hdev->reset_attempts++;

                        set_bit(hdev->reset_level, &hdev->reset_pending);
                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                }
                hclgevf_reset_task_schedule(hdev);
        }

        hdev->reset_type = HNAE3_NONE_RESET;
        clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
        up(&hdev->reset_sem);
}
static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
{
        if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
                return;

        if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
                return;

        hclgevf_mbx_async_handler(hdev);

        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
        struct hclge_vf_to_pf_msg send_msg;
        int ret;

        if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
                return;

        hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
        ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "VF sends keep alive cmd failed(=%d)\n", ret);
}
static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
{
        unsigned long delta = round_jiffies_relative(HZ);
        struct hnae3_handle *handle = &hdev->nic;

        if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
                return;

        if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
                delta = jiffies - hdev->last_serv_processed;

                if (delta < round_jiffies_relative(HZ)) {
                        delta = round_jiffies_relative(HZ) - delta;
                        goto out;
                }
        }

        hdev->serv_processed_cnt++;
        if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
                hclgevf_keep_alive(hdev);

        if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
                hdev->last_serv_processed = jiffies;
                goto out;
        }

        if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
                hclgevf_tqps_update_stats(handle);

        /* VF does not need to request link status when this bit is set,
         * because PF will push its link status to VFs when link status
         * changed.
         */
        if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
                hclgevf_request_link_info(hdev);

        hclgevf_update_link_mode(hdev);

        hclgevf_sync_vlan_filter(hdev);

        hclgevf_sync_mac_table(hdev);

        hclgevf_sync_promisc_mode(hdev);

        hdev->last_serv_processed = jiffies;

out:
        hclgevf_task_schedule(hdev, delta);
}
static void hclgevf_service_task(struct work_struct *work)
{
        struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
                                                service_task.work);

        hclgevf_reset_service_task(hdev);
        hclgevf_mailbox_service_task(hdev);
        hclgevf_periodic_service_task(hdev);

        /* Handle reset and mbx again in case periodical task delays the
         * handling by calling hclgevf_task_schedule() in
         * hclgevf_periodic_service_task()
         */
        hclgevf_reset_service_task(hdev);
        hclgevf_mailbox_service_task(hdev);
}
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
        hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}
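/* Decode the vector 0 interrupt source from the CMDQ state register. Reset
 * events take priority over mailbox (CMDQ RX) events; *clearval is set to
 * the value the caller must write back to acknowledge the interrupt.
 */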
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
                                                      u32 *clearval)
{
        u32 val, cmdq_stat_reg, rst_ing_reg;

        /* fetch the events from their corresponding regs */
        cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
                                         HCLGEVF_VECTOR0_CMDQ_STATE_REG);
        if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
                rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
                dev_info(&hdev->pdev->dev,
                         "receive reset interrupt 0x%x!\n", rst_ing_reg);
                set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
                set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
                *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
                hdev->rst_stats.vf_rst_cnt++;
                /* set up VF hardware reset status; the PF will clear this
                 * status once its initialization is done.
                 */
                val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
                hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
                                  val | HCLGEVF_VF_RST_ING_BIT);
                return HCLGEVF_VECTOR0_EVENT_RST;
        }

        /* check for vector0 mailbox(=CMDQ RX) event source */
        if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
                /* for revision 0x21, clearing interrupt is writing bit 0
                 * to the clear register, writing bit 1 means to keep the
                 * status.
                 * for revision 0x20, the clear register is a read & write
                 * register, so we should just write 0 to the bit we are
                 * handling, and keep other bits as cmdq_stat_reg.
                 */
                if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
                        *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
                else
                        *clearval = cmdq_stat_reg &
                                    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);

                return HCLGEVF_VECTOR0_EVENT_MBX;
        }

        /* print other vector0 event source */
        dev_info(&hdev->pdev->dev,
                 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
                 cmdq_stat_reg);

        return HCLGEVF_VECTOR0_EVENT_OTHER;
}
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
        enum hclgevf_evt_cause event_cause;
        struct hclgevf_dev *hdev = data;
        u32 clearval;

        hclgevf_enable_vector(&hdev->misc_vector, false);
        event_cause = hclgevf_check_evt_cause(hdev, &clearval);

        switch (event_cause) {
        case HCLGEVF_VECTOR0_EVENT_RST:
                hclgevf_reset_task_schedule(hdev);
                break;
        case HCLGEVF_VECTOR0_EVENT_MBX:
                hclgevf_mbx_handler(hdev);
                break;
        default:
                break;
        }

        /* re-enable the vector only once the event has been acknowledged */
        if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
                hclgevf_clear_event_cause(hdev, clearval);
                hclgevf_enable_vector(&hdev->misc_vector, true);
        }

        return IRQ_HANDLED;
}
static int hclgevf_configure(struct hclgevf_dev *hdev)
{
        int ret;

        hdev->gro_en = true;

        ret = hclgevf_get_basic_info(hdev);
        if (ret)
                return ret;

        /* get current port based vlan state from PF */
        ret = hclgevf_get_port_base_vlan_filter_state(hdev);
        if (ret)
                return ret;

        /* get queue configuration from PF */
        ret = hclgevf_get_queue_info(hdev);
        if (ret)
                return ret;

        /* get queue depth info from PF */
        ret = hclgevf_get_queue_depth(hdev);
        if (ret)
                return ret;

        return hclgevf_get_pf_media_type(hdev);
}
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
        struct pci_dev *pdev = ae_dev->pdev;
        struct hclgevf_dev *hdev;

        hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return -ENOMEM;

        hdev->pdev = pdev;
        hdev->ae_dev = ae_dev;
        ae_dev->priv = hdev;

        return 0;
}
static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *roce = &hdev->roce;
        struct hnae3_handle *nic = &hdev->nic;

        roce->rinfo.num_vectors = hdev->num_roce_msix;

        if (hdev->num_msi_left < roce->rinfo.num_vectors ||
            hdev->num_msi_left == 0)
                return -EINVAL;

        roce->rinfo.base_vector = hdev->roce_base_vector;

        roce->rinfo.netdev = nic->kinfo.netdev;
        roce->rinfo.roce_io_base = hdev->hw.io_base;
        roce->rinfo.roce_mem_base = hdev->hw.mem_base;

        roce->pdev = nic->pdev;
        roce->ae_algo = nic->ae_algo;
        roce->numa_node_mask = nic->numa_node_mask;

        return 0;
}
static int hclgevf_config_gro(struct hclgevf_dev *hdev)
{
        struct hclgevf_cfg_gro_status_cmd *req;
        struct hclgevf_desc desc;
        int ret;

        if (!hnae3_dev_gro_supported(hdev))
                return 0;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
                                     false);
        req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

        req->gro_en = hdev->gro_en ? 1 : 0;

        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "VF GRO hardware config cmd failed, ret = %d.\n", ret);

        return ret;
}
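/* RSS defaults: the VF starts from the Toeplitz hash. On revision 0x21 and
 * later hardware the simple hash is selected instead, the static
 * hclgevf_hash_key above is installed, and the input tuples are initialized;
 * the indirection table is then spread evenly across the queues
 * (i % rss_size).
 */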
static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
        u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        struct hclgevf_rss_tuple_cfg *tuple_sets;
        u32 i;

        rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
        rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
        tuple_sets = &rss_cfg->rss_tuple_sets;
        if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
                u8 *rss_ind_tbl;

                rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;

                rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
                                           sizeof(*rss_ind_tbl), GFP_KERNEL);
                if (!rss_ind_tbl)
                        return -ENOMEM;

                rss_cfg->rss_indirection_tbl = rss_ind_tbl;
                memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
                       HCLGEVF_RSS_KEY_SIZE);

                tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
                tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                tuple_sets->ipv6_sctp_en =
                        hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
                        HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
                        HCLGEVF_RSS_INPUT_TUPLE_SCTP;
                tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
        }

        /* Initialize RSS indirect table */
        for (i = 0; i < rss_ind_tbl_size; i++)
                rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;

        return 0;
}
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int ret;

        if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
                ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
                                               rss_cfg->rss_hash_key);
                if (ret)
                        return ret;

                ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
                if (ret)
                        return ret;
        }

        ret = hclgevf_set_rss_indir_table(hdev);
        if (ret)
                return ret;

        return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
}
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        int ret;

        ret = hclgevf_en_hw_strip_rxvtag(nic, true);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed to enable rx vlan offload, ret = %d\n", ret);
                return ret;
        }

        return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
                                       false);
}
static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
{
#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000

        unsigned long last = hdev->serv_processed_cnt;
        int i = 0;

        while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
               i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
               last == hdev->serv_processed_cnt)
                usleep_range(1, 1);
}
static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        if (enable) {
                hclgevf_task_schedule(hdev, 0);
        } else {
                set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

                /* flush memory to make sure DOWN is seen by service task */
                smp_mb__before_atomic();
                hclgevf_flush_link_update(hdev);
        }
}
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
        clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);

        hclgevf_reset_tqp_stats(handle);

        hclgevf_request_link_info(hdev);

        hclgevf_update_link_mode(hdev);

        return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

        if (hdev->reset_type != HNAE3_VF_RESET)
                hclgevf_reset_tqp(handle);

        hclgevf_reset_tqp_stats(handle);
        hclgevf_update_link_status(hdev, 0);
}
static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
#define HCLGEVF_STATE_ALIVE	1
#define HCLGEVF_STATE_NOT_ALIVE	0

        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclge_vf_to_pf_msg send_msg;

        hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
        send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
                                   HCLGEVF_STATE_NOT_ALIVE;
        return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
        return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int ret;

        ret = hclgevf_set_alive(handle, false);
        if (ret)
                dev_warn(&hdev->pdev->dev,
                         "%s failed %d\n", __func__, ret);
}
static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
        clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

        INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);

        mutex_init(&hdev->mbx_resp.mbx_mutex);
        sema_init(&hdev->reset_sem, 1);

        spin_lock_init(&hdev->mac_table.mac_list_lock);
        INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
        INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);

        /* bring the device down */
        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
        set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);

        if (hdev->service_task.work.func)
                cancel_delayed_work_sync(&hdev->service_task);

        mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
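/* MSI/MSI-X setup: when RoCE is supported the VF must get at least
 * roce_base_msix_offset + 1 vectors (NIC vectors are laid out before RoCE
 * vectors), otherwise HNAE3_MIN_VECTOR_NUM is enough. vector_status tracks
 * which vport owns each vector and vector_irq caches the Linux IRQ numbers.
 */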
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        int vectors;
        int i;

        if (hnae3_dev_roce_supported(hdev))
                vectors = pci_alloc_irq_vectors(pdev,
                                                hdev->roce_base_msix_offset + 1,
                                                hdev->num_msi,
                                                PCI_IRQ_MSIX);
        else
                vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
                                                hdev->num_msi,
                                                PCI_IRQ_MSI | PCI_IRQ_MSIX);

        if (vectors < 0) {
                dev_err(&pdev->dev,
                        "failed(%d) to allocate MSI/MSI-X vectors\n",
                        vectors);
                return vectors;
        }
        if (vectors < hdev->num_msi)
                dev_warn(&hdev->pdev->dev,
                         "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
                         hdev->num_msi, vectors);

        hdev->num_msi = vectors;
        hdev->num_msi_left = vectors;

        hdev->base_msi_vector = pdev->irq;
        hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

        hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
                                           sizeof(u16), GFP_KERNEL);
        if (!hdev->vector_status) {
                pci_free_irq_vectors(pdev);
                return -ENOMEM;
        }

        for (i = 0; i < hdev->num_msi; i++)
                hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

        hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
                                        sizeof(int), GFP_KERNEL);
        if (!hdev->vector_irq) {
                devm_kfree(&pdev->dev, hdev->vector_status);
                pci_free_irq_vectors(pdev);
                return -ENOMEM;
        }

        return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;

        devm_kfree(&pdev->dev, hdev->vector_status);
        devm_kfree(&pdev->dev, hdev->vector_irq);
        pci_free_irq_vectors(pdev);
}
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
        int ret;

        hclgevf_get_misc_vector(hdev);

        snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
                 HCLGEVF_NAME, pci_name(hdev->pdev));
        ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
                          0, hdev->misc_vector.name, hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
                        hdev->misc_vector.vector_irq);
                return ret;
        }

        hclgevf_clear_event_cause(hdev, 0);

        /* enable misc. vector(vector 0) */
        hclgevf_enable_vector(&hdev->misc_vector, true);

        return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
        /* disable misc vector(vector 0) */
        hclgevf_enable_vector(&hdev->misc_vector, false);
        synchronize_irq(hdev->misc_vector.vector_irq);
        free_irq(hdev->misc_vector.vector_irq, hdev);
        hclgevf_free_vector(hdev, 0);
}
static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
        struct device *dev = &hdev->pdev->dev;

        dev_info(dev, "VF info begin:\n");

        dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
        dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
        dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
        dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
        dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
        dev_info(dev, "PF media type of this VF: %u\n",
                 hdev->hw.mac.media_type);

        dev_info(dev, "VF info end.\n");
}
static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
                                            struct hnae3_client *client)
{
        struct hclgevf_dev *hdev = ae_dev->priv;
        int rst_cnt = hdev->rst_stats.rst_cnt;
        int ret;

        ret = client->ops->init_instance(&hdev->nic);
        if (ret)
                return ret;

        set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
        if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
            rst_cnt != hdev->rst_stats.rst_cnt) {
                clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

                client->ops->uninit_instance(&hdev->nic, 0);
                return -EBUSY;
        }

        hnae3_set_client_init_flag(client, ae_dev, 1);

        if (netif_msg_drv(&hdev->nic))
                hclgevf_info_show(hdev);

        return 0;
}
static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
                                             struct hnae3_client *client)
{
        struct hclgevf_dev *hdev = ae_dev->priv;
        int ret;

        if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
            !hdev->nic_client)
                return 0;

        ret = hclgevf_init_roce_base_info(hdev);
        if (ret)
                return ret;

        ret = client->ops->init_instance(&hdev->roce);
        if (ret)
                return ret;

        set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
        hnae3_set_client_init_flag(client, ae_dev, 1);

        return 0;
}
static int hclgevf_init_client_instance(struct hnae3_client *client,
                                        struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;
        int ret;

        switch (client->type) {
        case HNAE3_CLIENT_KNIC:
                hdev->nic_client = client;
                hdev->nic.client = client;

                ret = hclgevf_init_nic_client_instance(ae_dev, client);
                if (ret)
                        goto clear_nic;

                ret = hclgevf_init_roce_client_instance(ae_dev,
                                                        hdev->roce_client);
                if (ret)
                        goto clear_roce;

                break;
        case HNAE3_CLIENT_ROCE:
                if (hnae3_dev_roce_supported(hdev)) {
                        hdev->roce_client = client;
                        hdev->roce.client = client;
                }

                ret = hclgevf_init_roce_client_instance(ae_dev, client);
                if (ret)
                        goto clear_roce;

                break;
        default:
                return -EINVAL;
        }

        return 0;

clear_nic:
        hdev->nic_client = NULL;
        hdev->nic.client = NULL;
        return ret;
clear_roce:
        hdev->roce_client = NULL;
        hdev->roce.client = NULL;
        return ret;
}
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
                                           struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;

        /* un-init roce, if it exists */
        if (hdev->roce_client) {
                clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
                hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
                hdev->roce_client = NULL;
                hdev->roce.client = NULL;
        }

        /* un-init nic/unic, if this was not called by roce client */
        if (client->ops->uninit_instance && hdev->nic_client &&
            client->type != HNAE3_CLIENT_ROCE) {
                clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);

                client->ops->uninit_instance(&hdev->nic, 0);
                hdev->nic_client = NULL;
                hdev->nic.client = NULL;
        }
}
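/* BAR 4 exposes optional device memory; it is mapped write-combined and is
 * only present on some devices, so a missing BAR is not treated as an error.
 */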
static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MEM_BAR		4

        struct pci_dev *pdev = hdev->pdev;
        struct hclgevf_hw *hw = &hdev->hw;

        /* for devices that do not have device memory, return directly */
        if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
                return 0;

        hw->mem_base = devm_ioremap_wc(&pdev->dev,
                                       pci_resource_start(pdev,
                                                          HCLGEVF_MEM_BAR),
                                       pci_resource_len(pdev, HCLGEVF_MEM_BAR));
        if (!hw->mem_base) {
                dev_err(&pdev->dev, "failed to map device memory\n");
                return -EFAULT;
        }

        return 0;
}
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        struct hclgevf_hw *hw;
        int ret;

        ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable PCI device\n");
                return ret;
        }

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
                goto err_disable_device;
        }

        ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
        if (ret) {
                dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
                goto err_disable_device;
        }

        pci_set_master(pdev);

        hw = &hdev->hw;
        hw->io_base = pci_iomap(pdev, 2, 0);
        if (!hw->io_base) {
                dev_err(&pdev->dev, "can't map configuration register space\n");
                ret = -ENOMEM;
                goto err_clr_master;
        }

        ret = hclgevf_dev_mem_map(hdev);
        if (ret)
                goto err_unmap_io_base;

        return 0;

err_unmap_io_base:
        pci_iounmap(pdev, hdev->hw.io_base);
err_clr_master:
        pci_clear_master(pdev);
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);

        return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;

        if (hdev->hw.mem_base)
                devm_iounmap(&pdev->dev, hdev->hw.mem_base);

        pci_iounmap(pdev, hdev->hw.io_base);
        pci_clear_master(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
        struct hclgevf_query_res_cmd *req;
        struct hclgevf_desc desc;
        int ret;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query vf resource failed, ret = %d.\n", ret);
                return ret;
        }

        req = (struct hclgevf_query_res_cmd *)desc.data;

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->roce_base_msix_offset =
                hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
                                HCLGEVF_MSIX_OFT_ROCEE_M,
                                HCLGEVF_MSIX_OFT_ROCEE_S);
                hdev->num_roce_msix =
                hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

                /* the number of NIC MSI-X vectors always equals the RoCE's */
                hdev->num_nic_msix = hdev->num_roce_msix;

                /* VF should have NIC vectors and RoCE vectors, and NIC
                 * vectors are queued before RoCE vectors. The offset is fixed
                 * to 64.
                 */
                hdev->num_msi = hdev->num_roce_msix +
                                hdev->roce_base_msix_offset;
        } else {
                hdev->num_msi =
                hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

                hdev->num_nic_msix = hdev->num_msi;
        }

        if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
                dev_err(&hdev->pdev->dev,
                        "Just %u msi resources, not enough for vf(min:2).\n",
                        hdev->num_nic_msix);
                return -EINVAL;
        }

        return 0;
}
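/* Device specifications: hardware older than version V3 cannot report its
 * specs through the firmware command, so safe defaults are used instead;
 * anything the firmware leaves at zero is also patched up with the same
 * defaults in hclgevf_check_dev_specs().
 */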
static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_NON_TSO_BD_NUM	8U

        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

        ae_dev->dev_specs.max_non_tso_bd_num =
                                        HCLGEVF_MAX_NON_TSO_BD_NUM;
        ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
        ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
        ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
        ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}

static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
                                    struct hclgevf_desc *desc)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
        struct hclgevf_dev_specs_0_cmd *req0;
        struct hclgevf_dev_specs_1_cmd *req1;

        req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
        req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;

        ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
        ae_dev->dev_specs.rss_ind_tbl_size =
                                        le16_to_cpu(req0->rss_ind_tbl_size);
        ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
        ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
        ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
        ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}

static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
{
        struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

        if (!dev_specs->max_non_tso_bd_num)
                dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
        if (!dev_specs->rss_ind_tbl_size)
                dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
        if (!dev_specs->rss_key_size)
                dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
        if (!dev_specs->max_int_gl)
                dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
        if (!dev_specs->max_frm_size)
                dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
{
        struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
        int ret;
        int i;

        /* set default specifications as devices lower than version V3 do not
         * support querying specifications from firmware.
         */
        if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
                hclgevf_set_default_dev_specs(hdev);
                return 0;
        }

        for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
                hclgevf_cmd_setup_basic_desc(&desc[i],
                                             HCLGEVF_OPC_QUERY_DEV_SPECS, true);
                desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
        }
        hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
                                     true);

        ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
        if (ret)
                return ret;

        hclgevf_parse_dev_specs(hdev, desc);
        hclgevf_check_dev_specs(hdev);

        return 0;
}
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        int ret = 0;

        if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
            test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
                hclgevf_misc_irq_uninit(hdev);
                hclgevf_uninit_msi(hdev);
                clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
        }

        if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
                pci_set_master(pdev);
                ret = hclgevf_init_msi(hdev);
                if (ret) {
                        dev_err(&pdev->dev,
                                "failed(%d) to init MSI/MSI-X\n", ret);
                        return ret;
                }

                ret = hclgevf_misc_irq_init(hdev);
                if (ret) {
                        hclgevf_uninit_msi(hdev);
                        dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
                                ret);
                        return ret;
                }

                set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
        }

        return ret;
}
static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
{
        struct hclge_vf_to_pf_msg send_msg;

        hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
                               HCLGE_MBX_VPORT_LIST_CLEAR);
        return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
{
        if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
                hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
{
        if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
                hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
}
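/* hclgevf_reset_hdev() below is the lightweight re-init path used while
 * recovering from a reset: it reuses the existing TQP/handle setup and only
 * re-initializes PCI, the command queue, RSS, GRO and VLAN state, whereas
 * hclgevf_init_hdev() is the full probe-time initialization.
 */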
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        int ret;

        ret = hclgevf_pci_reset(hdev);
        if (ret) {
                dev_err(&pdev->dev, "pci reset failed %d\n", ret);
                return ret;
        }

        ret = hclgevf_cmd_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "cmd failed %d\n", ret);
                return ret;
        }

        ret = hclgevf_rss_init_hw(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize RSS\n", ret);
                return ret;
        }

        ret = hclgevf_config_gro(hdev);
        if (ret)
                return ret;

        ret = hclgevf_init_vlan_config(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize VLAN config\n", ret);
                return ret;
        }

        set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);

        hclgevf_init_rxd_adv_layout(hdev);

        dev_info(&hdev->pdev->dev, "Reset done\n");

        return 0;
}
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        int ret;

        ret = hclgevf_pci_init(hdev);
        if (ret)
                return ret;

        ret = hclgevf_cmd_queue_init(hdev);
        if (ret)
                goto err_cmd_queue_init;

        ret = hclgevf_cmd_init(hdev);
        if (ret)
                goto err_cmd_init;

        /* Get vf resource */
        ret = hclgevf_query_vf_resource(hdev);
        if (ret)
                goto err_cmd_init;

        ret = hclgevf_query_dev_specs(hdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "failed to query dev specifications, ret = %d\n", ret);
                goto err_cmd_init;
        }

        ret = hclgevf_init_msi(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
                goto err_cmd_init;
        }

        hclgevf_state_init(hdev);
        hdev->reset_level = HNAE3_VF_FUNC_RESET;
        hdev->reset_type = HNAE3_NONE_RESET;

        ret = hclgevf_misc_irq_init(hdev);
        if (ret)
                goto err_misc_irq_init;

        set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

        ret = hclgevf_configure(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
                goto err_config;
        }

        ret = hclgevf_alloc_tqps(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
                goto err_config;
        }

        ret = hclgevf_set_handle_info(hdev);
        if (ret)
                goto err_config;

        ret = hclgevf_config_gro(hdev);
        if (ret)
                goto err_config;

        /* Initialize RSS for this VF */
        ret = hclgevf_rss_init_cfg(hdev);
        if (ret) {
                dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
                goto err_config;
        }

        ret = hclgevf_rss_init_hw(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize RSS\n", ret);
                goto err_config;
        }

        /* ensure vf tbl list as empty before init */
        ret = hclgevf_clear_vport_list(hdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "failed to clear tbl list configuration, ret = %d.\n",
                        ret);
                goto err_config;
        }

        ret = hclgevf_init_vlan_config(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to initialize VLAN config\n", ret);
                goto err_config;
        }

        hclgevf_init_rxd_adv_layout(hdev);

        hdev->last_reset_time = jiffies;
        dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
                 HCLGEVF_DRIVER_NAME);

        hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));

        return 0;

err_config:
        hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
        hclgevf_state_uninit(hdev);
        hclgevf_uninit_msi(hdev);
err_cmd_init:
        hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
        hclgevf_pci_uninit(hdev);
        clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
        return ret;
}
3451 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3453 struct hclge_vf_to_pf_msg send_msg;
3455 hclgevf_state_uninit(hdev);
3456 hclgevf_uninit_rxd_adv_layout(hdev);
3458 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3459 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3461 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3462 hclgevf_misc_irq_uninit(hdev);
3463 hclgevf_uninit_msi(hdev);
3466 hclgevf_cmd_uninit(hdev);
3467 hclgevf_pci_uninit(hdev);
3468 hclgevf_uninit_mac_list(hdev);
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct pci_dev *pdev = ae_dev->pdev;
        int ret;

        ret = hclgevf_alloc_hdev(ae_dev);
        if (ret) {
                dev_err(&pdev->dev, "hclge device allocation failed\n");
                return ret;
        }

        ret = hclgevf_init_hdev(ae_dev->priv);
        if (ret) {
                dev_err(&pdev->dev, "hclge device initialization failed\n");
                return ret;
        }

        return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;

        hclgevf_uninit_hdev(hdev);
        ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        struct hnae3_knic_private_info *kinfo = &nic->kinfo;

        return min_t(u32, hdev->rss_size_max,
                     hdev->num_tqps / kinfo->tc_info.num_tc);
}
/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 */
static void hclgevf_get_channels(struct hnae3_handle *handle,
                                 struct ethtool_channels *ch)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        ch->max_combined = hclgevf_get_max_channels(hdev);
        ch->other_count = 0;
        ch->max_other = 0;
        ch->combined_count = handle->kinfo.rss_size;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
                                          u16 *alloc_tqps, u16 *max_rss_size)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        *alloc_tqps = hdev->num_tqps;
        *max_rss_size = hdev->rss_size_max;
}
static void hclgevf_update_rss_size(struct hnae3_handle *handle,
                                    u16 new_tqps_num)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u16 max_rss_size;

        kinfo->req_rss_size = new_tqps_num;

        max_rss_size = min_t(u16, hdev->rss_size_max,
                             hdev->num_tqps / kinfo->tc_info.num_tc);

        /* Use the user's configuration when it is not larger than
         * max_rss_size, otherwise, use the maximum specification value.
         */
        if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
            kinfo->req_rss_size <= max_rss_size)
                kinfo->rss_size = kinfo->req_rss_size;
        else if (kinfo->rss_size > max_rss_size ||
                 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
                kinfo->rss_size = max_rss_size;

        kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
}
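/* Changing the channel count: recompute rss_size under the device limits,
 * reprogram the TC mode, and, unless the user has pinned the RSS indirection
 * table via ethtool (rxfh_configured), rebuild the table evenly over the new
 * queue range.
 */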
static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
                                bool rxfh_configured)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u16 cur_rss_size = kinfo->rss_size;
        u16 cur_tqps = kinfo->num_tqps;
        u32 *rss_indir;
        unsigned int i;
        int ret;

        hclgevf_update_rss_size(handle, new_tqps_num);

        ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
        if (ret)
                return ret;

        /* RSS indirection table has been configured by user */
        if (rxfh_configured)
                goto out;

        /* Reinitializes the rss indirect table according to the new RSS size */
        rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
                            sizeof(u32), GFP_KERNEL);
        if (!rss_indir)
                return -ENOMEM;

        for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
                rss_indir[i] = i % kinfo->rss_size;

        hdev->rss_cfg.rss_size = kinfo->rss_size;

        ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
        if (ret)
                dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
                        ret);

        kfree(rss_indir);

out:
        if (!ret)
                dev_info(&hdev->pdev->dev,
                         "Channels changed, rss_size from %u to %u, tqps from %u to %u",
                         cur_rss_size, kinfo->rss_size,
                         cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

        return ret;
}
static int hclgevf_get_status(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
                                            u8 *auto_neg, u32 *speed,
                                            u8 *duplex)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        if (speed)
                *speed = hdev->hw.mac.speed;
        if (duplex)
                *duplex = hdev->hw.mac.duplex;
        if (auto_neg)
                *auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
                                 u8 duplex)
{
        hdev->hw.mac.speed = speed;
        hdev->hw.mac.duplex = duplex;
}
static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        bool gro_en_old = hdev->gro_en;
        int ret;

        hdev->gro_en = enable;
        ret = hclgevf_config_gro(hdev);
        if (ret)
                hdev->gro_en = gro_en_old;

        return ret;
}

static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
                                   u8 *module_type)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        if (media_type)
                *media_type = hdev->hw.mac.media_type;

        if (module_type)
                *module_type = hdev->hw.mac.module_type;
}
static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->rst_stats.hw_rst_done_cnt;
}

static void hclgevf_get_link_mode(struct hnae3_handle *handle,
                                  unsigned long *supported,
                                  unsigned long *advertising)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        *supported = hdev->hw.mac.supported;
        *advertising = hdev->hw.mac.advertising;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
        int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
        common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
        ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
        tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

        return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
                tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}
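/* The register dump is laid out as fixed-width lines of REG_NUM_PER_LINE
 * words: first the command-queue and common registers, then one block per
 * TQP ring and one per TQP interrupt vector, each section padded with
 * SEPARATOR_VALUE words so userspace can split the dump back into lines.
 */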
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
                             void *data)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int i, j, reg_um, separator_num;
        u32 *reg = data;

        *version = hdev->fw_version;

        /* fetching per-VF registers values from VF PCIe register space */
        reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
        separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
        for (i = 0; i < reg_um; i++)
                *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
        for (i = 0; i < separator_num; i++)
                *reg++ = SEPARATOR_VALUE;

        reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
        separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
        for (i = 0; i < reg_um; i++)
                *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
        for (i = 0; i < separator_num; i++)
                *reg++ = SEPARATOR_VALUE;

        reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
        separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
        for (j = 0; j < hdev->num_tqps; j++) {
                for (i = 0; i < reg_um; i++)
                        *reg++ = hclgevf_read_dev(&hdev->hw,
                                                  ring_reg_addr_list[i] +
                                                  0x200 * j);
                for (i = 0; i < separator_num; i++)
                        *reg++ = SEPARATOR_VALUE;
        }

        reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
        separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
        for (j = 0; j < hdev->num_msi_used - 1; j++) {
                for (i = 0; i < reg_um; i++)
                        *reg++ = hclgevf_read_dev(&hdev->hw,
                                                  tqp_intr_reg_addr_list[i] +
                                                  4 * j);
                for (i = 0; i < separator_num; i++)
                        *reg++ = SEPARATOR_VALUE;
        }
}
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
                                        u8 *port_base_vlan_info, u8 data_size)
{
        struct hnae3_handle *nic = &hdev->nic;
        struct hclge_vf_to_pf_msg send_msg;
        int ret;

        rtnl_lock();

        if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
            test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
                dev_warn(&hdev->pdev->dev,
                         "is resetting when updating port based vlan info\n");
                rtnl_unlock();
                return;
        }

        ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
        if (ret) {
                rtnl_unlock();
                return;
        }

        /* send msg to PF and wait update port based vlan info */
        hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
                               HCLGE_MBX_PORT_BASE_VLAN_CFG);
        memcpy(send_msg.data, port_base_vlan_info, data_size);
        ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
        if (!ret) {
                if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
                        nic->port_base_vlan_state = state;
                else
                        nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
        }

        hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
        rtnl_unlock();
}
static const struct hnae3_ae_ops hclgevf_ops = {
        .init_ae_dev = hclgevf_init_ae_dev,
        .uninit_ae_dev = hclgevf_uninit_ae_dev,
        .reset_prepare = hclgevf_reset_prepare_general,
        .reset_done = hclgevf_reset_done,
        .init_client_instance = hclgevf_init_client_instance,
        .uninit_client_instance = hclgevf_uninit_client_instance,
        .start = hclgevf_ae_start,
        .stop = hclgevf_ae_stop,
        .client_start = hclgevf_client_start,
        .client_stop = hclgevf_client_stop,
        .map_ring_to_vector = hclgevf_map_ring_to_vector,
        .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
        .get_vector = hclgevf_get_vector,
        .put_vector = hclgevf_put_vector,
        .reset_queue = hclgevf_reset_tqp,
        .get_mac_addr = hclgevf_get_mac_addr,
        .set_mac_addr = hclgevf_set_mac_addr,
        .add_uc_addr = hclgevf_add_uc_addr,
        .rm_uc_addr = hclgevf_rm_uc_addr,
        .add_mc_addr = hclgevf_add_mc_addr,
        .rm_mc_addr = hclgevf_rm_mc_addr,
        .get_stats = hclgevf_get_stats,
        .update_stats = hclgevf_update_stats,
        .get_strings = hclgevf_get_strings,
        .get_sset_count = hclgevf_get_sset_count,
        .get_rss_key_size = hclgevf_get_rss_key_size,
        .get_rss = hclgevf_get_rss,
        .set_rss = hclgevf_set_rss,
        .get_rss_tuple = hclgevf_get_rss_tuple,
        .set_rss_tuple = hclgevf_set_rss_tuple,
        .get_tc_size = hclgevf_get_tc_size,
        .get_fw_version = hclgevf_get_fw_version,
        .set_vlan_filter = hclgevf_set_vlan_filter,
        .enable_vlan_filter = hclgevf_enable_vlan_filter,
        .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
        .reset_event = hclgevf_reset_event,
        .set_default_reset_request = hclgevf_set_def_reset_request,
        .set_channels = hclgevf_set_channels,
        .get_channels = hclgevf_get_channels,
        .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
        .get_regs_len = hclgevf_get_regs_len,
        .get_regs = hclgevf_get_regs,
        .get_status = hclgevf_get_status,
        .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
        .get_media_type = hclgevf_get_media_type,
        .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
        .ae_dev_resetting = hclgevf_ae_dev_resetting,
        .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
        .set_gro_en = hclgevf_gro_en,
        .set_mtu = hclgevf_set_mtu,
        .get_global_queue_id = hclgevf_get_qid_global,
        .set_timer_task = hclgevf_set_timer_task,
        .get_link_mode = hclgevf_get_link_mode,
        .set_promisc_mode = hclgevf_set_promisc_mode,
        .request_update_promisc_mode = hclgevf_request_update_promisc_mode,
        .get_cmdq_stat = hclgevf_get_cmdq_stat,
};

static struct hnae3_ae_algo ae_algovf = {
        .ops = &hclgevf_ops,
        .pdev_id_table = ae_algovf_pci_tbl,
};
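/* Module entry points: the workqueue must exist before the algo is
 * registered, since registration can immediately probe matching devices and
 * schedule service tasks onto hclgevf_wq; tear-down runs in reverse order.
 */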
static int hclgevf_init(void)
{
        pr_info("%s is initializing\n", HCLGEVF_NAME);

        hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
        if (!hclgevf_wq) {
                pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
                return -ENOMEM;
        }

        hnae3_register_ae_algo(&ae_algovf);

        return 0;
}

static void hclgevf_exit(void)
{
        hnae3_unregister_ae_algo(&ae_algovf);
        destroy_workqueue(hclgevf_wq);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);