1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
22 #include "hclge_err.h"
25 #define HCLGE_NAME "hclge"
26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
30 static int hclge_init_vlan_config(struct hclge_dev *hdev);
31 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
32 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
33 u16 *allocated_size, bool is_alloc);
35 static struct hnae3_ae_algo ae_algo;
37 static const struct pci_device_id ae_algo_pci_tbl[] = {
38 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
39 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
40 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
41 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
45 /* required last entry */
49 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
51 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
53 "Serdes serial Loopback test",
54 "Serdes parallel Loopback test",
58 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
59 {"mac_tx_mac_pause_num",
60 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
61 {"mac_rx_mac_pause_num",
62 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
63 {"mac_tx_pfc_pri0_pkt_num",
64 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
65 {"mac_tx_pfc_pri1_pkt_num",
66 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
67 {"mac_tx_pfc_pri2_pkt_num",
68 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
69 {"mac_tx_pfc_pri3_pkt_num",
70 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
71 {"mac_tx_pfc_pri4_pkt_num",
72 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
73 {"mac_tx_pfc_pri5_pkt_num",
74 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
75 {"mac_tx_pfc_pri6_pkt_num",
76 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
77 {"mac_tx_pfc_pri7_pkt_num",
78 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
79 {"mac_rx_pfc_pri0_pkt_num",
80 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
81 {"mac_rx_pfc_pri1_pkt_num",
82 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
83 {"mac_rx_pfc_pri2_pkt_num",
84 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
85 {"mac_rx_pfc_pri3_pkt_num",
86 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
87 {"mac_rx_pfc_pri4_pkt_num",
88 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
89 {"mac_rx_pfc_pri5_pkt_num",
90 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
91 {"mac_rx_pfc_pri6_pkt_num",
92 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
93 {"mac_rx_pfc_pri7_pkt_num",
94 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
95 {"mac_tx_total_pkt_num",
96 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
97 {"mac_tx_total_oct_num",
98 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
99 {"mac_tx_good_pkt_num",
100 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
101 {"mac_tx_bad_pkt_num",
102 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
103 {"mac_tx_good_oct_num",
104 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
105 {"mac_tx_bad_oct_num",
106 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
107 {"mac_tx_uni_pkt_num",
108 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
109 {"mac_tx_multi_pkt_num",
110 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
111 {"mac_tx_broad_pkt_num",
112 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
113 {"mac_tx_undersize_pkt_num",
114 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
115 {"mac_tx_oversize_pkt_num",
116 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
117 {"mac_tx_64_oct_pkt_num",
118 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
119 {"mac_tx_65_127_oct_pkt_num",
120 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
121 {"mac_tx_128_255_oct_pkt_num",
122 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
123 {"mac_tx_256_511_oct_pkt_num",
124 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
125 {"mac_tx_512_1023_oct_pkt_num",
126 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
127 {"mac_tx_1024_1518_oct_pkt_num",
128 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
129 {"mac_tx_1519_2047_oct_pkt_num",
130 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
131 {"mac_tx_2048_4095_oct_pkt_num",
132 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
133 {"mac_tx_4096_8191_oct_pkt_num",
134 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
135 {"mac_tx_8192_9216_oct_pkt_num",
136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
137 {"mac_tx_9217_12287_oct_pkt_num",
138 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
139 {"mac_tx_12288_16383_oct_pkt_num",
140 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
141 {"mac_tx_1519_max_good_pkt_num",
142 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
143 {"mac_tx_1519_max_bad_pkt_num",
144 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
145 {"mac_rx_total_pkt_num",
146 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
147 {"mac_rx_total_oct_num",
148 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
149 {"mac_rx_good_pkt_num",
150 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
151 {"mac_rx_bad_pkt_num",
152 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
153 {"mac_rx_good_oct_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
155 {"mac_rx_bad_oct_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
157 {"mac_rx_uni_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
159 {"mac_rx_multi_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
161 {"mac_rx_broad_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
163 {"mac_rx_undersize_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
165 {"mac_rx_oversize_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
167 {"mac_rx_64_oct_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
169 {"mac_rx_65_127_oct_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
171 {"mac_rx_128_255_oct_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
173 {"mac_rx_256_511_oct_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
175 {"mac_rx_512_1023_oct_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
177 {"mac_rx_1024_1518_oct_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
179 {"mac_rx_1519_2047_oct_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
181 {"mac_rx_2048_4095_oct_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
183 {"mac_rx_4096_8191_oct_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
185 {"mac_rx_8192_9216_oct_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
187 {"mac_rx_9217_12287_oct_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
189 {"mac_rx_12288_16383_oct_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
191 {"mac_rx_1519_max_good_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
193 {"mac_rx_1519_max_bad_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
196 {"mac_tx_fragment_pkt_num",
197 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
198 {"mac_tx_undermin_pkt_num",
199 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
200 {"mac_tx_jabber_pkt_num",
201 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
202 {"mac_tx_err_all_pkt_num",
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
204 {"mac_tx_from_app_good_pkt_num",
205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
206 {"mac_tx_from_app_bad_pkt_num",
207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
208 {"mac_rx_fragment_pkt_num",
209 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
210 {"mac_rx_undermin_pkt_num",
211 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
212 {"mac_rx_jabber_pkt_num",
213 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
214 {"mac_rx_fcs_err_pkt_num",
215 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
216 {"mac_rx_send_app_good_pkt_num",
217 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
218 {"mac_rx_send_app_bad_pkt_num",
219 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
222 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
224 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
225 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
226 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
227 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
228 .i_port_bitmap = 0x1,
232 static int hclge_mac_update_stats(struct hclge_dev *hdev)
234 #define HCLGE_MAC_CMD_NUM 21
235 #define HCLGE_RTN_DATA_NUM 4
237 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
238 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
243 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
244 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
246 dev_err(&hdev->pdev->dev,
247 "Get MAC pkt stats fail, status = %d.\n", ret);
252 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
253 if (unlikely(i == 0)) {
254 desc_data = (__le64 *)(&desc[i].data[0]);
255 n = HCLGE_RTN_DATA_NUM - 2;
257 desc_data = (__le64 *)(&desc[i]);
258 n = HCLGE_RTN_DATA_NUM;
260 for (k = 0; k < n; k++) {
261 *data++ += le64_to_cpu(*desc_data);
269 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
271 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
272 struct hclge_vport *vport = hclge_get_vport(handle);
273 struct hclge_dev *hdev = vport->back;
274 struct hnae3_queue *queue;
275 struct hclge_desc desc[1];
276 struct hclge_tqp *tqp;
279 for (i = 0; i < kinfo->num_tqps; i++) {
280 queue = handle->kinfo.tqp[i];
281 tqp = container_of(queue, struct hclge_tqp, q);
282 /* command : HCLGE_OPC_QUERY_IGU_STAT */
283 hclge_cmd_setup_basic_desc(&desc[0],
284 HCLGE_OPC_QUERY_RX_STATUS,
287 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
288 ret = hclge_cmd_send(&hdev->hw, desc, 1);
290 dev_err(&hdev->pdev->dev,
291 "Query tqp stat fail, status = %d,queue = %d\n",
295 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
296 le32_to_cpu(desc[0].data[1]);
299 for (i = 0; i < kinfo->num_tqps; i++) {
300 queue = handle->kinfo.tqp[i];
301 tqp = container_of(queue, struct hclge_tqp, q);
302 /* command : HCLGE_OPC_QUERY_IGU_STAT */
303 hclge_cmd_setup_basic_desc(&desc[0],
304 HCLGE_OPC_QUERY_TX_STATUS,
307 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
308 ret = hclge_cmd_send(&hdev->hw, desc, 1);
310 dev_err(&hdev->pdev->dev,
311 "Query tqp stat fail, status = %d,queue = %d\n",
315 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
316 le32_to_cpu(desc[0].data[1]);
322 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
324 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
325 struct hclge_tqp *tqp;
329 for (i = 0; i < kinfo->num_tqps; i++) {
330 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
331 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
334 for (i = 0; i < kinfo->num_tqps; i++) {
335 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
336 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
342 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
344 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
346 return kinfo->num_tqps * (2);
349 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
351 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
355 for (i = 0; i < kinfo->num_tqps; i++) {
356 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
357 struct hclge_tqp, q);
358 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
360 buff = buff + ETH_GSTRING_LEN;
363 for (i = 0; i < kinfo->num_tqps; i++) {
364 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
365 struct hclge_tqp, q);
366 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
368 buff = buff + ETH_GSTRING_LEN;
374 static u64 *hclge_comm_get_stats(void *comm_stats,
375 const struct hclge_comm_stats_str strs[],
381 for (i = 0; i < size; i++)
382 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
387 static u8 *hclge_comm_get_strings(u32 stringset,
388 const struct hclge_comm_stats_str strs[],
391 char *buff = (char *)data;
394 if (stringset != ETH_SS_STATS)
397 for (i = 0; i < size; i++) {
398 snprintf(buff, ETH_GSTRING_LEN,
400 buff = buff + ETH_GSTRING_LEN;
406 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
407 struct net_device_stats *net_stats)
409 net_stats->tx_dropped = 0;
410 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
411 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
412 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
414 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
415 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
417 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
418 net_stats->rx_length_errors =
419 hw_stats->mac_stats.mac_rx_undersize_pkt_num;
420 net_stats->rx_length_errors +=
421 hw_stats->mac_stats.mac_rx_oversize_pkt_num;
422 net_stats->rx_over_errors =
423 hw_stats->mac_stats.mac_rx_oversize_pkt_num;
426 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
428 struct hnae3_handle *handle;
431 handle = &hdev->vport[0].nic;
432 if (handle->client) {
433 status = hclge_tqps_update_stats(handle);
435 dev_err(&hdev->pdev->dev,
436 "Update TQPS stats fail, status = %d.\n",
441 status = hclge_mac_update_stats(hdev);
443 dev_err(&hdev->pdev->dev,
444 "Update MAC stats fail, status = %d.\n", status);
446 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
449 static void hclge_update_stats(struct hnae3_handle *handle,
450 struct net_device_stats *net_stats)
452 struct hclge_vport *vport = hclge_get_vport(handle);
453 struct hclge_dev *hdev = vport->back;
454 struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
457 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
460 status = hclge_mac_update_stats(hdev);
462 dev_err(&hdev->pdev->dev,
463 "Update MAC stats fail, status = %d.\n",
466 status = hclge_tqps_update_stats(handle);
468 dev_err(&hdev->pdev->dev,
469 "Update TQPS stats fail, status = %d.\n",
472 hclge_update_netstat(hw_stats, net_stats);
474 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
477 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
479 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
480 HNAE3_SUPPORT_PHY_LOOPBACK |\
481 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
482 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
484 struct hclge_vport *vport = hclge_get_vport(handle);
485 struct hclge_dev *hdev = vport->back;
488 /* Loopback test support rules:
489 * mac: only GE mode support
490 * serdes: all mac mode will support include GE/XGE/LGE/CGE
491 * phy: only support when phy device exist on board
493 if (stringset == ETH_SS_TEST) {
494 /* clear loopback bit flags at first */
495 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
496 if (hdev->pdev->revision >= 0x21 ||
497 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
498 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
499 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
501 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
505 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
506 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
507 } else if (stringset == ETH_SS_STATS) {
508 count = ARRAY_SIZE(g_mac_stats_string) +
509 hclge_tqps_get_sset_count(handle, stringset);
515 static void hclge_get_strings(struct hnae3_handle *handle,
519 u8 *p = (char *)data;
522 if (stringset == ETH_SS_STATS) {
523 size = ARRAY_SIZE(g_mac_stats_string);
524 p = hclge_comm_get_strings(stringset,
528 p = hclge_tqps_get_strings(handle, p);
529 } else if (stringset == ETH_SS_TEST) {
530 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
532 hns3_nic_test_strs[HNAE3_LOOP_APP],
534 p += ETH_GSTRING_LEN;
536 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
538 hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
540 p += ETH_GSTRING_LEN;
542 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
544 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
546 p += ETH_GSTRING_LEN;
548 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
550 hns3_nic_test_strs[HNAE3_LOOP_PHY],
552 p += ETH_GSTRING_LEN;
557 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
559 struct hclge_vport *vport = hclge_get_vport(handle);
560 struct hclge_dev *hdev = vport->back;
563 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
565 ARRAY_SIZE(g_mac_stats_string),
567 p = hclge_tqps_get_stats(handle, p);
570 static int hclge_parse_func_status(struct hclge_dev *hdev,
571 struct hclge_func_status_cmd *status)
573 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
576 /* Set the pf to main pf */
577 if (status->pf_state & HCLGE_PF_STATE_MAIN)
578 hdev->flag |= HCLGE_FLAG_MAIN;
580 hdev->flag &= ~HCLGE_FLAG_MAIN;
585 static int hclge_query_function_status(struct hclge_dev *hdev)
587 struct hclge_func_status_cmd *req;
588 struct hclge_desc desc;
592 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
593 req = (struct hclge_func_status_cmd *)desc.data;
596 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
598 dev_err(&hdev->pdev->dev,
599 "query function status failed %d.\n",
605 /* Check pf reset is done */
608 usleep_range(1000, 2000);
609 } while (timeout++ < 5);
611 ret = hclge_parse_func_status(hdev, req);
616 static int hclge_query_pf_resource(struct hclge_dev *hdev)
618 struct hclge_pf_res_cmd *req;
619 struct hclge_desc desc;
622 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
623 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
625 dev_err(&hdev->pdev->dev,
626 "query pf resource failed %d.\n", ret);
630 req = (struct hclge_pf_res_cmd *)desc.data;
631 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
632 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
634 if (hnae3_dev_roce_supported(hdev)) {
635 hdev->roce_base_msix_offset =
636 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
637 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
639 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
640 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
642 /* PF should have NIC vectors and Roce vectors,
643 * NIC vectors are queued before Roce vectors.
645 hdev->num_msi = hdev->num_roce_msi +
646 hdev->roce_base_msix_offset;
649 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
650 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
656 static int hclge_parse_speed(int speed_cmd, int *speed)
660 *speed = HCLGE_MAC_SPEED_10M;
663 *speed = HCLGE_MAC_SPEED_100M;
666 *speed = HCLGE_MAC_SPEED_1G;
669 *speed = HCLGE_MAC_SPEED_10G;
672 *speed = HCLGE_MAC_SPEED_25G;
675 *speed = HCLGE_MAC_SPEED_40G;
678 *speed = HCLGE_MAC_SPEED_50G;
681 *speed = HCLGE_MAC_SPEED_100G;
690 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
693 unsigned long *supported = hdev->hw.mac.supported;
695 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
696 set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
699 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
700 set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
703 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
704 set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
707 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
708 set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
711 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
712 set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
715 set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
716 set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
719 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
721 u8 media_type = hdev->hw.mac.media_type;
723 if (media_type != HNAE3_MEDIA_TYPE_FIBER)
726 hclge_parse_fiber_link_mode(hdev, speed_ability);
729 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
731 struct hclge_cfg_param_cmd *req;
732 u64 mac_addr_tmp_high;
736 req = (struct hclge_cfg_param_cmd *)desc[0].data;
738 /* get the configuration */
739 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
742 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
743 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
744 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
745 HCLGE_CFG_TQP_DESC_N_M,
746 HCLGE_CFG_TQP_DESC_N_S);
748 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
749 HCLGE_CFG_PHY_ADDR_M,
750 HCLGE_CFG_PHY_ADDR_S);
751 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
752 HCLGE_CFG_MEDIA_TP_M,
753 HCLGE_CFG_MEDIA_TP_S);
754 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
755 HCLGE_CFG_RX_BUF_LEN_M,
756 HCLGE_CFG_RX_BUF_LEN_S);
757 /* get mac_address */
758 mac_addr_tmp = __le32_to_cpu(req->param[2]);
759 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
760 HCLGE_CFG_MAC_ADDR_H_M,
761 HCLGE_CFG_MAC_ADDR_H_S);
763 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
765 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
766 HCLGE_CFG_DEFAULT_SPEED_M,
767 HCLGE_CFG_DEFAULT_SPEED_S);
768 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
769 HCLGE_CFG_RSS_SIZE_M,
770 HCLGE_CFG_RSS_SIZE_S);
772 for (i = 0; i < ETH_ALEN; i++)
773 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
775 req = (struct hclge_cfg_param_cmd *)desc[1].data;
776 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
778 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
779 HCLGE_CFG_SPEED_ABILITY_M,
780 HCLGE_CFG_SPEED_ABILITY_S);
781 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
782 HCLGE_CFG_UMV_TBL_SPACE_M,
783 HCLGE_CFG_UMV_TBL_SPACE_S);
785 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
788 /* hclge_get_cfg: query the static parameter from flash
789 * @hdev: pointer to struct hclge_dev
790 * @hcfg: the config structure to be getted
792 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
794 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
795 struct hclge_cfg_param_cmd *req;
798 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
801 req = (struct hclge_cfg_param_cmd *)desc[i].data;
802 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
804 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
805 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
806 /* Len should be united by 4 bytes when send to hardware */
807 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
808 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
809 req->offset = cpu_to_le32(offset);
812 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
814 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
818 hclge_parse_cfg(hcfg, desc);
823 static int hclge_get_cap(struct hclge_dev *hdev)
827 ret = hclge_query_function_status(hdev);
829 dev_err(&hdev->pdev->dev,
830 "query function status error %d.\n", ret);
834 /* get pf resource */
835 ret = hclge_query_pf_resource(hdev);
837 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
842 static int hclge_configure(struct hclge_dev *hdev)
844 struct hclge_cfg cfg;
847 ret = hclge_get_cfg(hdev, &cfg);
849 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
853 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
854 hdev->base_tqp_pid = 0;
855 hdev->rss_size_max = cfg.rss_size_max;
856 hdev->rx_buf_len = cfg.rx_buf_len;
857 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
858 hdev->hw.mac.media_type = cfg.media_type;
859 hdev->hw.mac.phy_addr = cfg.phy_addr;
860 hdev->num_desc = cfg.tqp_desc_num;
861 hdev->tm_info.num_pg = 1;
862 hdev->tc_max = cfg.tc_num;
863 hdev->tm_info.hw_pfc_map = 0;
864 hdev->wanted_umv_size = cfg.umv_space;
866 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
868 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
872 hclge_parse_link_mode(hdev, cfg.speed_ability);
874 if ((hdev->tc_max > HNAE3_MAX_TC) ||
875 (hdev->tc_max < 1)) {
876 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
881 /* Dev does not support DCB */
882 if (!hnae3_dev_dcb_supported(hdev)) {
886 hdev->pfc_max = hdev->tc_max;
889 hdev->tm_info.num_tc = hdev->tc_max;
891 /* Currently not support uncontiuous tc */
892 for (i = 0; i < hdev->tm_info.num_tc; i++)
893 hnae3_set_bit(hdev->hw_tc_map, i, 1);
895 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
900 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
903 struct hclge_cfg_tso_status_cmd *req;
904 struct hclge_desc desc;
907 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
909 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
912 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
913 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
914 req->tso_mss_min = cpu_to_le16(tso_mss);
917 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
918 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
919 req->tso_mss_max = cpu_to_le16(tso_mss);
921 return hclge_cmd_send(&hdev->hw, &desc, 1);
924 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
926 struct hclge_cfg_gro_status_cmd *req;
927 struct hclge_desc desc;
930 if (!hnae3_dev_gro_supported(hdev))
933 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
934 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
936 req->gro_en = cpu_to_le16(en ? 1 : 0);
938 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
940 dev_err(&hdev->pdev->dev,
941 "GRO hardware config cmd failed, ret = %d\n", ret);
946 static int hclge_alloc_tqps(struct hclge_dev *hdev)
948 struct hclge_tqp *tqp;
951 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
952 sizeof(struct hclge_tqp), GFP_KERNEL);
958 for (i = 0; i < hdev->num_tqps; i++) {
959 tqp->dev = &hdev->pdev->dev;
962 tqp->q.ae_algo = &ae_algo;
963 tqp->q.buf_size = hdev->rx_buf_len;
964 tqp->q.desc_num = hdev->num_desc;
965 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
966 i * HCLGE_TQP_REG_SIZE;
974 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
975 u16 tqp_pid, u16 tqp_vid, bool is_pf)
977 struct hclge_tqp_map_cmd *req;
978 struct hclge_desc desc;
981 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
983 req = (struct hclge_tqp_map_cmd *)desc.data;
984 req->tqp_id = cpu_to_le16(tqp_pid);
985 req->tqp_vf = func_id;
986 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
987 1 << HCLGE_TQP_MAP_EN_B;
988 req->tqp_vid = cpu_to_le16(tqp_vid);
990 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
992 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
997 static int hclge_assign_tqp(struct hclge_vport *vport)
999 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1000 struct hclge_dev *hdev = vport->back;
1003 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1004 alloced < kinfo->num_tqps; i++) {
1005 if (!hdev->htqp[i].alloced) {
1006 hdev->htqp[i].q.handle = &vport->nic;
1007 hdev->htqp[i].q.tqp_index = alloced;
1008 hdev->htqp[i].q.desc_num = kinfo->num_desc;
1009 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1010 hdev->htqp[i].alloced = true;
1014 vport->alloc_tqps = kinfo->num_tqps;
1019 static int hclge_knic_setup(struct hclge_vport *vport,
1020 u16 num_tqps, u16 num_desc)
1022 struct hnae3_handle *nic = &vport->nic;
1023 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1024 struct hclge_dev *hdev = vport->back;
1027 kinfo->num_desc = num_desc;
1028 kinfo->rx_buf_len = hdev->rx_buf_len;
1029 kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1031 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1032 kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1034 for (i = 0; i < HNAE3_MAX_TC; i++) {
1035 if (hdev->hw_tc_map & BIT(i)) {
1036 kinfo->tc_info[i].enable = true;
1037 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1038 kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1039 kinfo->tc_info[i].tc = i;
1041 /* Set to default queue if TC is disable */
1042 kinfo->tc_info[i].enable = false;
1043 kinfo->tc_info[i].tqp_offset = 0;
1044 kinfo->tc_info[i].tqp_count = 1;
1045 kinfo->tc_info[i].tc = 0;
1049 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1050 sizeof(struct hnae3_queue *), GFP_KERNEL);
1054 ret = hclge_assign_tqp(vport);
1056 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1061 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1062 struct hclge_vport *vport)
1064 struct hnae3_handle *nic = &vport->nic;
1065 struct hnae3_knic_private_info *kinfo;
1068 kinfo = &nic->kinfo;
1069 for (i = 0; i < kinfo->num_tqps; i++) {
1070 struct hclge_tqp *q =
1071 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1075 is_pf = !(vport->vport_id);
1076 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1085 static int hclge_map_tqp(struct hclge_dev *hdev)
1087 struct hclge_vport *vport = hdev->vport;
1090 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1091 for (i = 0; i < num_vport; i++) {
1094 ret = hclge_map_tqp_to_vport(hdev, vport);
1104 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1106 /* this would be initialized later */
1109 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1111 struct hnae3_handle *nic = &vport->nic;
1112 struct hclge_dev *hdev = vport->back;
1115 nic->pdev = hdev->pdev;
1116 nic->ae_algo = &ae_algo;
1117 nic->numa_node_mask = hdev->numa_node_mask;
1119 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1120 ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1122 dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1127 hclge_unic_setup(vport, num_tqps);
1133 static int hclge_alloc_vport(struct hclge_dev *hdev)
1135 struct pci_dev *pdev = hdev->pdev;
1136 struct hclge_vport *vport;
1142 /* We need to alloc a vport for main NIC of PF */
1143 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1145 if (hdev->num_tqps < num_vport) {
1146 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1147 hdev->num_tqps, num_vport);
1151 /* Alloc the same number of TQPs for every vport */
1152 tqp_per_vport = hdev->num_tqps / num_vport;
1153 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1155 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1160 hdev->vport = vport;
1161 hdev->num_alloc_vport = num_vport;
1163 if (IS_ENABLED(CONFIG_PCI_IOV))
1164 hdev->num_alloc_vfs = hdev->num_req_vfs;
1166 for (i = 0; i < num_vport; i++) {
1168 vport->vport_id = i;
1169 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1172 ret = hclge_vport_setup(vport, tqp_main_vport);
1174 ret = hclge_vport_setup(vport, tqp_per_vport);
1177 "vport setup failed for vport %d, %d\n",
1188 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1189 struct hclge_pkt_buf_alloc *buf_alloc)
1191 /* TX buffer size is unit by 128 byte */
1192 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1193 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1194 struct hclge_tx_buff_alloc_cmd *req;
1195 struct hclge_desc desc;
1199 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1201 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1202 for (i = 0; i < HCLGE_TC_NUM; i++) {
1203 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1205 req->tx_pkt_buff[i] =
1206 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1207 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1210 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1212 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1218 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1219 struct hclge_pkt_buf_alloc *buf_alloc)
1221 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1224 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1229 static int hclge_get_tc_num(struct hclge_dev *hdev)
1233 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1234 if (hdev->hw_tc_map & BIT(i))
1239 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1243 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1244 if (hdev->hw_tc_map & BIT(i) &&
1245 hdev->tm_info.hw_pfc_map & BIT(i))
1250 /* Get the number of pfc enabled TCs, which have private buffer */
1251 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1252 struct hclge_pkt_buf_alloc *buf_alloc)
1254 struct hclge_priv_buf *priv;
1257 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1258 priv = &buf_alloc->priv_buf[i];
1259 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1267 /* Get the number of pfc disabled TCs, which have private buffer */
1268 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1269 struct hclge_pkt_buf_alloc *buf_alloc)
1271 struct hclge_priv_buf *priv;
1274 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1275 priv = &buf_alloc->priv_buf[i];
1276 if (hdev->hw_tc_map & BIT(i) &&
1277 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1285 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1287 struct hclge_priv_buf *priv;
1291 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1292 priv = &buf_alloc->priv_buf[i];
1294 rx_priv += priv->buf_size;
1299 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1301 u32 i, total_tx_size = 0;
1303 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1304 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1306 return total_tx_size;
1309 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1310 struct hclge_pkt_buf_alloc *buf_alloc,
1313 u32 shared_buf_min, shared_buf_tc, shared_std;
1314 int tc_num, pfc_enable_num;
1319 tc_num = hclge_get_tc_num(hdev);
1320 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1322 if (hnae3_dev_dcb_supported(hdev))
1323 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1325 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1327 shared_buf_tc = pfc_enable_num * hdev->mps +
1328 (tc_num - pfc_enable_num) * hdev->mps / 2 +
1330 shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1332 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1333 if (rx_all <= rx_priv + shared_std)
1336 shared_buf = rx_all - rx_priv;
1337 buf_alloc->s_buf.buf_size = shared_buf;
1338 buf_alloc->s_buf.self.high = shared_buf;
1339 buf_alloc->s_buf.self.low = 2 * hdev->mps;
1341 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1342 if ((hdev->hw_tc_map & BIT(i)) &&
1343 (hdev->tm_info.hw_pfc_map & BIT(i))) {
1344 buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1345 buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1347 buf_alloc->s_buf.tc_thrd[i].low = 0;
1348 buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1355 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1356 struct hclge_pkt_buf_alloc *buf_alloc)
1360 total_size = hdev->pkt_buf_size;
1362 /* alloc tx buffer for all enabled tc */
1363 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1364 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1366 if (total_size < HCLGE_DEFAULT_TX_BUF)
1369 if (hdev->hw_tc_map & BIT(i))
1370 priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1372 priv->tx_buf_size = 0;
1374 total_size -= priv->tx_buf_size;
1380 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1381 * @hdev: pointer to struct hclge_dev
1382 * @buf_alloc: pointer to buffer calculation data
1383 * @return: 0: calculate sucessful, negative: fail
1385 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1386 struct hclge_pkt_buf_alloc *buf_alloc)
1388 #define HCLGE_BUF_SIZE_UNIT 128
1389 u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1390 int no_pfc_priv_num, pfc_priv_num;
1391 struct hclge_priv_buf *priv;
1394 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1395 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1397 /* When DCB is not supported, rx private
1398 * buffer is not allocated.
1400 if (!hnae3_dev_dcb_supported(hdev)) {
1401 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1407 /* step 1, try to alloc private buffer for all enabled tc */
1408 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1409 priv = &buf_alloc->priv_buf[i];
1410 if (hdev->hw_tc_map & BIT(i)) {
1412 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1413 priv->wl.low = aligned_mps;
1414 priv->wl.high = priv->wl.low + aligned_mps;
1415 priv->buf_size = priv->wl.high +
1419 priv->wl.high = 2 * aligned_mps;
1420 priv->buf_size = priv->wl.high;
1430 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1433 /* step 2, try to decrease the buffer size of
1434 * no pfc TC's private buffer
1436 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1437 priv = &buf_alloc->priv_buf[i];
1444 if (!(hdev->hw_tc_map & BIT(i)))
1449 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1451 priv->wl.high = priv->wl.low + aligned_mps;
1452 priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1455 priv->wl.high = aligned_mps;
1456 priv->buf_size = priv->wl.high;
1460 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1463 /* step 3, try to reduce the number of pfc disabled TCs,
1464 * which have private buffer
1466 /* get the total no pfc enable TC number, which have private buffer */
1467 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1469 /* let the last to be cleared first */
1470 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1471 priv = &buf_alloc->priv_buf[i];
1473 if (hdev->hw_tc_map & BIT(i) &&
1474 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1475 /* Clear the no pfc TC private buffer */
1483 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1484 no_pfc_priv_num == 0)
1488 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1491 /* step 4, try to reduce the number of pfc enabled TCs
1492 * which have private buffer.
1494 pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1496 /* let the last to be cleared first */
1497 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1498 priv = &buf_alloc->priv_buf[i];
1500 if (hdev->hw_tc_map & BIT(i) &&
1501 hdev->tm_info.hw_pfc_map & BIT(i)) {
1502 /* Reduce the number of pfc TC with private buffer */
1510 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1514 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1520 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1521 struct hclge_pkt_buf_alloc *buf_alloc)
1523 struct hclge_rx_priv_buff_cmd *req;
1524 struct hclge_desc desc;
1528 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1529 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1531 /* Alloc private buffer TCs */
1532 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1533 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1536 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1538 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1542 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1543 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1545 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1547 dev_err(&hdev->pdev->dev,
1548 "rx private buffer alloc cmd failed %d\n", ret);
1553 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1554 struct hclge_pkt_buf_alloc *buf_alloc)
1556 struct hclge_rx_priv_wl_buf *req;
1557 struct hclge_priv_buf *priv;
1558 struct hclge_desc desc[2];
1562 for (i = 0; i < 2; i++) {
1563 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1565 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1567 /* The first descriptor set the NEXT bit to 1 */
1569 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1571 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1573 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1574 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1576 priv = &buf_alloc->priv_buf[idx];
1577 req->tc_wl[j].high =
1578 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1579 req->tc_wl[j].high |=
1580 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1582 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1583 req->tc_wl[j].low |=
1584 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1588 /* Send 2 descriptor at one time */
1589 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1591 dev_err(&hdev->pdev->dev,
1592 "rx private waterline config cmd failed %d\n",
1597 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1598 struct hclge_pkt_buf_alloc *buf_alloc)
1600 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1601 struct hclge_rx_com_thrd *req;
1602 struct hclge_desc desc[2];
1603 struct hclge_tc_thrd *tc;
1607 for (i = 0; i < 2; i++) {
1608 hclge_cmd_setup_basic_desc(&desc[i],
1609 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1610 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1612 /* The first descriptor set the NEXT bit to 1 */
1614 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1616 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1618 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1619 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1621 req->com_thrd[j].high =
1622 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1623 req->com_thrd[j].high |=
1624 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1625 req->com_thrd[j].low =
1626 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1627 req->com_thrd[j].low |=
1628 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1632 /* Send 2 descriptors at one time */
1633 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1635 dev_err(&hdev->pdev->dev,
1636 "common threshold config cmd failed %d\n", ret);
1640 static int hclge_common_wl_config(struct hclge_dev *hdev,
1641 struct hclge_pkt_buf_alloc *buf_alloc)
1643 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1644 struct hclge_rx_com_wl *req;
1645 struct hclge_desc desc;
1648 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1650 req = (struct hclge_rx_com_wl *)desc.data;
1651 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1652 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1654 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1655 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1657 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1659 dev_err(&hdev->pdev->dev,
1660 "common waterline config cmd failed %d\n", ret);
1665 int hclge_buffer_alloc(struct hclge_dev *hdev)
1667 struct hclge_pkt_buf_alloc *pkt_buf;
1670 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1674 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1676 dev_err(&hdev->pdev->dev,
1677 "could not calc tx buffer size for all TCs %d\n", ret);
1681 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1683 dev_err(&hdev->pdev->dev,
1684 "could not alloc tx buffers %d\n", ret);
1688 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1690 dev_err(&hdev->pdev->dev,
1691 "could not calc rx priv buffer size for all TCs %d\n",
1696 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1698 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1703 if (hnae3_dev_dcb_supported(hdev)) {
1704 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1706 dev_err(&hdev->pdev->dev,
1707 "could not configure rx private waterline %d\n",
1712 ret = hclge_common_thrd_config(hdev, pkt_buf);
1714 dev_err(&hdev->pdev->dev,
1715 "could not configure common threshold %d\n",
1721 ret = hclge_common_wl_config(hdev, pkt_buf);
1723 dev_err(&hdev->pdev->dev,
1724 "could not configure common waterline %d\n", ret);
1731 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1733 struct hnae3_handle *roce = &vport->roce;
1734 struct hnae3_handle *nic = &vport->nic;
1736 roce->rinfo.num_vectors = vport->back->num_roce_msi;
1738 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1739 vport->back->num_msi_left == 0)
1742 roce->rinfo.base_vector = vport->back->roce_base_vector;
1744 roce->rinfo.netdev = nic->kinfo.netdev;
1745 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1747 roce->pdev = nic->pdev;
1748 roce->ae_algo = nic->ae_algo;
1749 roce->numa_node_mask = nic->numa_node_mask;
1754 static int hclge_init_msi(struct hclge_dev *hdev)
1756 struct pci_dev *pdev = hdev->pdev;
1760 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1761 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1764 "failed(%d) to allocate MSI/MSI-X vectors\n",
1768 if (vectors < hdev->num_msi)
1769 dev_warn(&hdev->pdev->dev,
1770 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1771 hdev->num_msi, vectors);
1773 hdev->num_msi = vectors;
1774 hdev->num_msi_left = vectors;
1775 hdev->base_msi_vector = pdev->irq;
1776 hdev->roce_base_vector = hdev->base_msi_vector +
1777 hdev->roce_base_msix_offset;
1779 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1780 sizeof(u16), GFP_KERNEL);
1781 if (!hdev->vector_status) {
1782 pci_free_irq_vectors(pdev);
1786 for (i = 0; i < hdev->num_msi; i++)
1787 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1789 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1790 sizeof(int), GFP_KERNEL);
1791 if (!hdev->vector_irq) {
1792 pci_free_irq_vectors(pdev);
1799 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1802 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1803 duplex = HCLGE_MAC_FULL;
1808 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1811 struct hclge_config_mac_speed_dup_cmd *req;
1812 struct hclge_desc desc;
1815 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1817 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1819 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1822 case HCLGE_MAC_SPEED_10M:
1823 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1824 HCLGE_CFG_SPEED_S, 6);
1826 case HCLGE_MAC_SPEED_100M:
1827 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1828 HCLGE_CFG_SPEED_S, 7);
1830 case HCLGE_MAC_SPEED_1G:
1831 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1832 HCLGE_CFG_SPEED_S, 0);
1834 case HCLGE_MAC_SPEED_10G:
1835 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1836 HCLGE_CFG_SPEED_S, 1);
1838 case HCLGE_MAC_SPEED_25G:
1839 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1840 HCLGE_CFG_SPEED_S, 2);
1842 case HCLGE_MAC_SPEED_40G:
1843 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1844 HCLGE_CFG_SPEED_S, 3);
1846 case HCLGE_MAC_SPEED_50G:
1847 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1848 HCLGE_CFG_SPEED_S, 4);
1850 case HCLGE_MAC_SPEED_100G:
1851 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1852 HCLGE_CFG_SPEED_S, 5);
1855 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1859 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1862 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1864 dev_err(&hdev->pdev->dev,
1865 "mac speed/duplex config cmd failed %d.\n", ret);
1872 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1876 duplex = hclge_check_speed_dup(duplex, speed);
1877 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
1880 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
1884 hdev->hw.mac.speed = speed;
1885 hdev->hw.mac.duplex = duplex;
1890 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1893 struct hclge_vport *vport = hclge_get_vport(handle);
1894 struct hclge_dev *hdev = vport->back;
1896 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
1899 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
1901 struct hclge_config_auto_neg_cmd *req;
1902 struct hclge_desc desc;
1906 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
1908 req = (struct hclge_config_auto_neg_cmd *)desc.data;
1909 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
1910 req->cfg_an_cmd_flag = cpu_to_le32(flag);
1912 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1914 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
1920 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
1922 struct hclge_vport *vport = hclge_get_vport(handle);
1923 struct hclge_dev *hdev = vport->back;
1925 return hclge_set_autoneg_en(hdev, enable);
1928 static int hclge_get_autoneg(struct hnae3_handle *handle)
1930 struct hclge_vport *vport = hclge_get_vport(handle);
1931 struct hclge_dev *hdev = vport->back;
1932 struct phy_device *phydev = hdev->hw.mac.phydev;
1935 return phydev->autoneg;
1937 return hdev->hw.mac.autoneg;
1940 static int hclge_mac_init(struct hclge_dev *hdev)
1942 struct hclge_mac *mac = &hdev->hw.mac;
1945 hdev->support_sfp_query = true;
1946 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
1947 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
1948 hdev->hw.mac.duplex);
1950 dev_err(&hdev->pdev->dev,
1951 "Config mac speed dup fail ret=%d\n", ret);
1957 ret = hclge_set_mac_mtu(hdev, hdev->mps);
1959 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
1963 ret = hclge_buffer_alloc(hdev);
1965 dev_err(&hdev->pdev->dev,
1966 "allocate buffer fail, ret=%d\n", ret);
1971 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
1973 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
1974 schedule_work(&hdev->mbx_service_task);
1977 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
1979 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
1980 schedule_work(&hdev->rst_service_task);
1983 static void hclge_task_schedule(struct hclge_dev *hdev)
1985 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
1986 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
1987 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
1988 (void)schedule_work(&hdev->service_task);
1991 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
1993 struct hclge_link_status_cmd *req;
1994 struct hclge_desc desc;
1998 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
1999 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2001 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2006 req = (struct hclge_link_status_cmd *)desc.data;
2007 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2009 return !!link_status;
2012 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2017 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2020 mac_state = hclge_get_mac_link_status(hdev);
2022 if (hdev->hw.mac.phydev) {
2023 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2024 link_stat = mac_state &
2025 hdev->hw.mac.phydev->link;
2030 link_stat = mac_state;
2036 static void hclge_update_link_status(struct hclge_dev *hdev)
2038 struct hnae3_client *client = hdev->nic_client;
2039 struct hnae3_handle *handle;
2045 state = hclge_get_mac_phy_link(hdev);
2046 if (state != hdev->hw.mac.link) {
2047 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2048 handle = &hdev->vport[i].nic;
2049 client->ops->link_status_change(handle, state);
2051 hdev->hw.mac.link = state;
2055 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2057 struct hclge_sfp_speed_cmd *resp = NULL;
2058 struct hclge_desc desc;
2061 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2062 resp = (struct hclge_sfp_speed_cmd *)desc.data;
2063 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2064 if (ret == -EOPNOTSUPP) {
2065 dev_warn(&hdev->pdev->dev,
2066 "IMP do not support get SFP speed %d\n", ret);
2069 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2073 *speed = resp->sfp_speed;
2078 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2080 struct hclge_mac mac = hdev->hw.mac;
2084 /* get the speed from SFP cmd when phy
2090 /* if IMP does not support get SFP/qSFP speed, return directly */
2091 if (!hdev->support_sfp_query)
2094 ret = hclge_get_sfp_speed(hdev, &speed);
2095 if (ret == -EOPNOTSUPP) {
2096 hdev->support_sfp_query = false;
2102 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2103 return 0; /* do nothing if no SFP */
2105 /* must config full duplex for SFP */
2106 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2109 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2111 struct hclge_vport *vport = hclge_get_vport(handle);
2112 struct hclge_dev *hdev = vport->back;
2114 return hclge_update_speed_duplex(hdev);
2117 static int hclge_get_status(struct hnae3_handle *handle)
2119 struct hclge_vport *vport = hclge_get_vport(handle);
2120 struct hclge_dev *hdev = vport->back;
2122 hclge_update_link_status(hdev);
2124 return hdev->hw.mac.link;
2127 static void hclge_service_timer(struct timer_list *t)
2129 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2131 mod_timer(&hdev->service_timer, jiffies + HZ);
2132 hdev->hw_stats.stats_timer++;
2133 hclge_task_schedule(hdev);
2136 static void hclge_service_complete(struct hclge_dev *hdev)
2138 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2140 /* Flush memory before next watchdog */
2141 smp_mb__before_atomic();
2142 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2145 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2150 /* fetch the events from their corresponding regs */
2151 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2152 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2154 /* Assumption: If by any chance reset and mailbox events are reported
2155 * together then we will only process reset event in this go and will
2156 * defer the processing of the mailbox events. Since, we would have not
2157 * cleared RX CMDQ event this time we would receive again another
2158 * interrupt from H/W just for the mailbox.
2161 /* check for vector0 reset event sources */
2162 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2163 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2164 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2165 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2166 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2167 return HCLGE_VECTOR0_EVENT_RST;
2170 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2171 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2172 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2173 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2174 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2175 return HCLGE_VECTOR0_EVENT_RST;
2178 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2179 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2180 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2181 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2182 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2183 return HCLGE_VECTOR0_EVENT_RST;
2186 /* check for vector0 mailbox(=CMDQ RX) event source */
2187 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2188 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2189 *clearval = cmdq_src_reg;
2190 return HCLGE_VECTOR0_EVENT_MBX;
2193 return HCLGE_VECTOR0_EVENT_OTHER;
2196 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2199 switch (event_type) {
2200 case HCLGE_VECTOR0_EVENT_RST:
2201 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2203 case HCLGE_VECTOR0_EVENT_MBX:
2204 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2211 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2213 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2214 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2215 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2216 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2217 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2220 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2222 writel(enable ? 1 : 0, vector->addr);
2225 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2227 struct hclge_dev *hdev = data;
2231 hclge_enable_vector(&hdev->misc_vector, false);
2232 event_cause = hclge_check_event_cause(hdev, &clearval);
2234 /* vector 0 interrupt is shared with reset and mailbox source events.*/
2235 switch (event_cause) {
2236 case HCLGE_VECTOR0_EVENT_RST:
2237 hclge_reset_task_schedule(hdev);
2239 case HCLGE_VECTOR0_EVENT_MBX:
2240 /* If we are here then,
2241 * 1. Either we are not handling any mbx task and we are not
2244 * 2. We could be handling a mbx task but nothing more is
2246 * In both cases, we should schedule mbx task as there are more
2247 * mbx messages reported by this interrupt.
2249 hclge_mbx_task_schedule(hdev);
2252 dev_warn(&hdev->pdev->dev,
2253 "received unknown or unhandled event of vector0\n");
2257 /* clear the source of interrupt if it is not cause by reset */
2258 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2259 hclge_clear_event_cause(hdev, event_cause, clearval);
2260 hclge_enable_vector(&hdev->misc_vector, true);
2266 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2268 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2269 dev_warn(&hdev->pdev->dev,
2270 "vector(vector_id %d) has been freed.\n", vector_id);
2274 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2275 hdev->num_msi_left += 1;
2276 hdev->num_msi_used -= 1;
2279 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2281 struct hclge_misc_vector *vector = &hdev->misc_vector;
2283 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2285 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2286 hdev->vector_status[0] = 0;
2288 hdev->num_msi_left -= 1;
2289 hdev->num_msi_used += 1;
2292 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2296 hclge_get_misc_vector(hdev);
2298 /* this would be explicitly freed in the end */
2299 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2300 0, "hclge_misc", hdev);
2302 hclge_free_vector(hdev, 0);
2303 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2304 hdev->misc_vector.vector_irq);
2310 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2312 free_irq(hdev->misc_vector.vector_irq, hdev);
2313 hclge_free_vector(hdev, 0);
2316 static int hclge_notify_client(struct hclge_dev *hdev,
2317 enum hnae3_reset_notify_type type)
2319 struct hnae3_client *client = hdev->nic_client;
2322 if (!client->ops->reset_notify)
2325 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2326 struct hnae3_handle *handle = &hdev->vport[i].nic;
2329 ret = client->ops->reset_notify(handle, type);
2331 dev_err(&hdev->pdev->dev,
2332 "notify nic client failed %d(%d)\n", type, ret);
2340 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2341 enum hnae3_reset_notify_type type)
2343 struct hnae3_client *client = hdev->roce_client;
2350 if (!client->ops->reset_notify)
2353 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2354 struct hnae3_handle *handle = &hdev->vport[i].roce;
2356 ret = client->ops->reset_notify(handle, type);
2358 dev_err(&hdev->pdev->dev,
2359 "notify roce client failed %d(%d)",
2368 static int hclge_reset_wait(struct hclge_dev *hdev)
2370 #define HCLGE_RESET_WATI_MS 100
2371 #define HCLGE_RESET_WAIT_CNT 200
2372 u32 val, reg, reg_bit;
2375 switch (hdev->reset_type) {
2376 case HNAE3_IMP_RESET:
2377 reg = HCLGE_GLOBAL_RESET_REG;
2378 reg_bit = HCLGE_IMP_RESET_BIT;
2380 case HNAE3_GLOBAL_RESET:
2381 reg = HCLGE_GLOBAL_RESET_REG;
2382 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2384 case HNAE3_CORE_RESET:
2385 reg = HCLGE_GLOBAL_RESET_REG;
2386 reg_bit = HCLGE_CORE_RESET_BIT;
2388 case HNAE3_FUNC_RESET:
2389 reg = HCLGE_FUN_RST_ING;
2390 reg_bit = HCLGE_FUN_RST_ING_B;
2392 case HNAE3_FLR_RESET:
2395 dev_err(&hdev->pdev->dev,
2396 "Wait for unsupported reset type: %d\n",
2401 if (hdev->reset_type == HNAE3_FLR_RESET) {
2402 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2403 cnt++ < HCLGE_RESET_WAIT_CNT)
2404 msleep(HCLGE_RESET_WATI_MS);
2406 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2407 dev_err(&hdev->pdev->dev,
2408 "flr wait timeout: %d\n", cnt);
2415 val = hclge_read_dev(&hdev->hw, reg);
2416 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2417 msleep(HCLGE_RESET_WATI_MS);
2418 val = hclge_read_dev(&hdev->hw, reg);
2422 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2423 dev_warn(&hdev->pdev->dev,
2424 "Wait for reset timeout: %d\n", hdev->reset_type);
2431 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2433 struct hclge_vf_rst_cmd *req;
2434 struct hclge_desc desc;
2436 req = (struct hclge_vf_rst_cmd *)desc.data;
2437 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2438 req->dest_vfid = func_id;
2443 return hclge_cmd_send(&hdev->hw, &desc, 1);
2446 int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2450 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2451 struct hclge_vport *vport = &hdev->vport[i];
2454 /* Send cmd to set/clear VF's FUNC_RST_ING */
2455 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2457 dev_err(&hdev->pdev->dev,
2458 "set vf(%d) rst failed %d!\n",
2459 vport->vport_id, ret);
2466 /* Inform VF to process the reset.
2467 * hclge_inform_reset_assert_to_vf may fail if VF
2468 * driver is not loaded.
2470 ret = hclge_inform_reset_assert_to_vf(vport);
2472 dev_warn(&hdev->pdev->dev,
2473 "inform reset to vf(%d) failed %d!\n",
2474 vport->vport_id, ret);
2480 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2482 struct hclge_desc desc;
2483 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2486 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2487 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2488 req->fun_reset_vfid = func_id;
2490 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2492 dev_err(&hdev->pdev->dev,
2493 "send function reset cmd fail, status =%d\n", ret);
2498 static void hclge_do_reset(struct hclge_dev *hdev)
2500 struct pci_dev *pdev = hdev->pdev;
2503 switch (hdev->reset_type) {
2504 case HNAE3_GLOBAL_RESET:
2505 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2506 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2507 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2508 dev_info(&pdev->dev, "Global Reset requested\n");
2510 case HNAE3_CORE_RESET:
2511 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2512 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2513 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2514 dev_info(&pdev->dev, "Core Reset requested\n");
2516 case HNAE3_FUNC_RESET:
2517 dev_info(&pdev->dev, "PF Reset requested\n");
2518 /* schedule again to check later */
2519 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2520 hclge_reset_task_schedule(hdev);
2522 case HNAE3_FLR_RESET:
2523 dev_info(&pdev->dev, "FLR requested\n");
2524 /* schedule again to check later */
2525 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2526 hclge_reset_task_schedule(hdev);
2529 dev_warn(&pdev->dev,
2530 "Unsupported reset type: %d\n", hdev->reset_type);
2535 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2536 unsigned long *addr)
2538 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2540 /* return the highest priority reset level amongst all */
2541 if (test_bit(HNAE3_IMP_RESET, addr)) {
2542 rst_level = HNAE3_IMP_RESET;
2543 clear_bit(HNAE3_IMP_RESET, addr);
2544 clear_bit(HNAE3_GLOBAL_RESET, addr);
2545 clear_bit(HNAE3_CORE_RESET, addr);
2546 clear_bit(HNAE3_FUNC_RESET, addr);
2547 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2548 rst_level = HNAE3_GLOBAL_RESET;
2549 clear_bit(HNAE3_GLOBAL_RESET, addr);
2550 clear_bit(HNAE3_CORE_RESET, addr);
2551 clear_bit(HNAE3_FUNC_RESET, addr);
2552 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2553 rst_level = HNAE3_CORE_RESET;
2554 clear_bit(HNAE3_CORE_RESET, addr);
2555 clear_bit(HNAE3_FUNC_RESET, addr);
2556 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2557 rst_level = HNAE3_FUNC_RESET;
2558 clear_bit(HNAE3_FUNC_RESET, addr);
2559 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2560 rst_level = HNAE3_FLR_RESET;
2561 clear_bit(HNAE3_FLR_RESET, addr);
2567 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2571 switch (hdev->reset_type) {
2572 case HNAE3_IMP_RESET:
2573 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2575 case HNAE3_GLOBAL_RESET:
2576 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2578 case HNAE3_CORE_RESET:
2579 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2588 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2589 hclge_enable_vector(&hdev->misc_vector, true);
2592 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2596 switch (hdev->reset_type) {
2597 case HNAE3_FUNC_RESET:
2599 case HNAE3_FLR_RESET:
2600 ret = hclge_set_all_vf_rst(hdev, true);
2609 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2614 switch (hdev->reset_type) {
2615 case HNAE3_FUNC_RESET:
2616 /* There is no mechanism for PF to know if VF has stopped IO
2617 * for now, just wait 100 ms for VF to stop IO
2620 ret = hclge_func_reset_cmd(hdev, 0);
2622 dev_err(&hdev->pdev->dev,
2623 "asserting function reset fail %d!\n", ret);
2627 /* After performaning pf reset, it is not necessary to do the
2628 * mailbox handling or send any command to firmware, because
2629 * any mailbox handling or command to firmware is only valid
2630 * after hclge_cmd_init is called.
2632 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2634 case HNAE3_FLR_RESET:
2635 /* There is no mechanism for PF to know if VF has stopped IO
2636 * for now, just wait 100 ms for VF to stop IO
2639 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2640 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2642 case HNAE3_IMP_RESET:
2643 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2644 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2645 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2651 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2656 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2658 #define MAX_RESET_FAIL_CNT 5
2659 #define RESET_UPGRADE_DELAY_SEC 10
2661 if (hdev->reset_pending) {
2662 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2663 hdev->reset_pending);
2665 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2666 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2667 BIT(HCLGE_IMP_RESET_BIT))) {
2668 dev_info(&hdev->pdev->dev,
2669 "reset failed because IMP Reset is pending\n");
2670 hclge_clear_reset_cause(hdev);
2672 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2673 hdev->reset_fail_cnt++;
2675 set_bit(hdev->reset_type, &hdev->reset_pending);
2676 dev_info(&hdev->pdev->dev,
2677 "re-schedule to wait for hw reset done\n");
2681 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2682 hclge_clear_reset_cause(hdev);
2683 mod_timer(&hdev->reset_timer,
2684 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2689 hclge_clear_reset_cause(hdev);
2690 dev_err(&hdev->pdev->dev, "Reset fail!\n");
2694 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2698 switch (hdev->reset_type) {
2699 case HNAE3_FUNC_RESET:
2701 case HNAE3_FLR_RESET:
2702 ret = hclge_set_all_vf_rst(hdev, false);
2711 static void hclge_reset(struct hclge_dev *hdev)
2713 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2714 bool is_timeout = false;
2717 /* Initialize ae_dev reset status as well, in case enet layer wants to
2718 * know if device is undergoing reset
2720 ae_dev->reset_type = hdev->reset_type;
2721 hdev->reset_count++;
2722 hdev->last_reset_time = jiffies;
2723 /* perform reset of the stack & ae device for a client */
2724 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2728 ret = hclge_reset_prepare_down(hdev);
2733 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2735 goto err_reset_lock;
2739 ret = hclge_reset_prepare_wait(hdev);
2743 if (hclge_reset_wait(hdev)) {
2748 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2753 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2755 goto err_reset_lock;
2757 ret = hclge_reset_ae_dev(hdev->ae_dev);
2759 goto err_reset_lock;
2761 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2763 goto err_reset_lock;
2765 hclge_clear_reset_cause(hdev);
2767 ret = hclge_reset_prepare_up(hdev);
2769 goto err_reset_lock;
2771 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2773 goto err_reset_lock;
2777 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2781 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2790 if (hclge_reset_err_handle(hdev, is_timeout))
2791 hclge_reset_task_schedule(hdev);
2794 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2796 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2797 struct hclge_dev *hdev = ae_dev->priv;
2799 /* We might end up getting called broadly because of 2 below cases:
2800 * 1. Recoverable error was conveyed through APEI and only way to bring
2801 * normalcy is to reset.
2802 * 2. A new reset request from the stack due to timeout
2804 * For the first case,error event might not have ae handle available.
2805 * check if this is a new reset request and we are not here just because
2806 * last reset attempt did not succeed and watchdog hit us again. We will
2807 * know this if last reset request did not occur very recently (watchdog
2808 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
2809 * In case of new request we reset the "reset level" to PF reset.
2810 * And if it is a repeat reset request of the most recent one then we
2811 * want to make sure we throttle the reset request. Therefore, we will
2812 * not allow it again before 3*HZ times.
2815 handle = &hdev->vport[0].nic;
2817 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
2819 else if (hdev->default_reset_request)
2821 hclge_get_reset_level(hdev,
2822 &hdev->default_reset_request);
2823 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
2824 hdev->reset_level = HNAE3_FUNC_RESET;
2826 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
2829 /* request reset & schedule reset task */
2830 set_bit(hdev->reset_level, &hdev->reset_request);
2831 hclge_reset_task_schedule(hdev);
2833 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
2834 hdev->reset_level++;
2837 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
2838 enum hnae3_reset_type rst_type)
2840 struct hclge_dev *hdev = ae_dev->priv;
2842 set_bit(rst_type, &hdev->default_reset_request);
2845 static void hclge_reset_timer(struct timer_list *t)
2847 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
2849 dev_info(&hdev->pdev->dev,
2850 "triggering global reset in reset timer\n");
2851 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
2852 hclge_reset_event(hdev->pdev, NULL);
2855 static void hclge_reset_subtask(struct hclge_dev *hdev)
2857 /* check if there is any ongoing reset in the hardware. This status can
2858 * be checked from reset_pending. If there is then, we need to wait for
2859 * hardware to complete reset.
2860 * a. If we are able to figure out in reasonable time that hardware
2861 * has fully resetted then, we can proceed with driver, client
2863 * b. else, we can come back later to check this status so re-sched
2866 hdev->last_reset_time = jiffies;
2867 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2868 if (hdev->reset_type != HNAE3_NONE_RESET)
2871 /* check if we got any *new* reset requests to be honored */
2872 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2873 if (hdev->reset_type != HNAE3_NONE_RESET)
2874 hclge_do_reset(hdev);
2876 hdev->reset_type = HNAE3_NONE_RESET;
2879 static void hclge_reset_service_task(struct work_struct *work)
2881 struct hclge_dev *hdev =
2882 container_of(work, struct hclge_dev, rst_service_task);
2884 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2887 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2889 hclge_reset_subtask(hdev);
2891 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2894 static void hclge_mailbox_service_task(struct work_struct *work)
2896 struct hclge_dev *hdev =
2897 container_of(work, struct hclge_dev, mbx_service_task);
2899 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2902 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2904 hclge_mbx_handler(hdev);
2906 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2909 static void hclge_update_vport_alive(struct hclge_dev *hdev)
2913 /* start from vport 1 for PF is always alive */
2914 for (i = 1; i < hdev->num_alloc_vport; i++) {
2915 struct hclge_vport *vport = &hdev->vport[i];
2917 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
2918 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
2920 /* If vf is not alive, set to default value */
2921 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2922 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
2926 static void hclge_service_task(struct work_struct *work)
2928 struct hclge_dev *hdev =
2929 container_of(work, struct hclge_dev, service_task);
2931 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2932 hclge_update_stats_for_all(hdev);
2933 hdev->hw_stats.stats_timer = 0;
2936 hclge_update_speed_duplex(hdev);
2937 hclge_update_link_status(hdev);
2938 hclge_update_vport_alive(hdev);
2939 hclge_service_complete(hdev);
2942 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2944 /* VF handle has no client */
2945 if (!handle->client)
2946 return container_of(handle, struct hclge_vport, nic);
2947 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2948 return container_of(handle, struct hclge_vport, roce);
2950 return container_of(handle, struct hclge_vport, nic);
2953 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2954 struct hnae3_vector_info *vector_info)
2956 struct hclge_vport *vport = hclge_get_vport(handle);
2957 struct hnae3_vector_info *vector = vector_info;
2958 struct hclge_dev *hdev = vport->back;
2962 vector_num = min(hdev->num_msi_left, vector_num);
2964 for (j = 0; j < vector_num; j++) {
2965 for (i = 1; i < hdev->num_msi; i++) {
2966 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2967 vector->vector = pci_irq_vector(hdev->pdev, i);
2968 vector->io_addr = hdev->hw.io_base +
2969 HCLGE_VECTOR_REG_BASE +
2970 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2972 HCLGE_VECTOR_VF_OFFSET;
2973 hdev->vector_status[i] = vport->vport_id;
2974 hdev->vector_irq[i] = vector->vector;
2983 hdev->num_msi_left -= alloc;
2984 hdev->num_msi_used += alloc;
2989 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2993 for (i = 0; i < hdev->num_msi; i++)
2994 if (vector == hdev->vector_irq[i])
3000 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3002 struct hclge_vport *vport = hclge_get_vport(handle);
3003 struct hclge_dev *hdev = vport->back;
3006 vector_id = hclge_get_vector_index(hdev, vector);
3007 if (vector_id < 0) {
3008 dev_err(&hdev->pdev->dev,
3009 "Get vector index fail. vector_id =%d\n", vector_id);
3013 hclge_free_vector(hdev, vector_id);
3018 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3020 return HCLGE_RSS_KEY_SIZE;
3023 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3025 return HCLGE_RSS_IND_TBL_SIZE;
3028 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3029 const u8 hfunc, const u8 *key)
3031 struct hclge_rss_config_cmd *req;
3032 struct hclge_desc desc;
3037 req = (struct hclge_rss_config_cmd *)desc.data;
3039 for (key_offset = 0; key_offset < 3; key_offset++) {
3040 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3043 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3044 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3046 if (key_offset == 2)
3048 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3050 key_size = HCLGE_RSS_HASH_KEY_NUM;
3052 memcpy(req->hash_key,
3053 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3055 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3057 dev_err(&hdev->pdev->dev,
3058 "Configure RSS config fail, status = %d\n",
3066 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3068 struct hclge_rss_indirection_table_cmd *req;
3069 struct hclge_desc desc;
3073 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3075 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3076 hclge_cmd_setup_basic_desc
3077 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3079 req->start_table_index =
3080 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3081 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3083 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3084 req->rss_result[j] =
3085 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3087 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3089 dev_err(&hdev->pdev->dev,
3090 "Configure rss indir table fail,status = %d\n",
3098 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3099 u16 *tc_size, u16 *tc_offset)
3101 struct hclge_rss_tc_mode_cmd *req;
3102 struct hclge_desc desc;
3106 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3107 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3109 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3112 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3113 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3114 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3115 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3116 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3118 req->rss_tc_mode[i] = cpu_to_le16(mode);
3121 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3123 dev_err(&hdev->pdev->dev,
3124 "Configure rss tc mode fail, status = %d\n", ret);
3129 static void hclge_get_rss_type(struct hclge_vport *vport)
3131 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3132 vport->rss_tuple_sets.ipv4_udp_en ||
3133 vport->rss_tuple_sets.ipv4_sctp_en ||
3134 vport->rss_tuple_sets.ipv6_tcp_en ||
3135 vport->rss_tuple_sets.ipv6_udp_en ||
3136 vport->rss_tuple_sets.ipv6_sctp_en)
3137 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3138 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3139 vport->rss_tuple_sets.ipv6_fragment_en)
3140 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3142 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3145 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3147 struct hclge_rss_input_tuple_cmd *req;
3148 struct hclge_desc desc;
3151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3153 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3155 /* Get the tuple cfg from pf */
3156 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3157 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3158 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3159 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3160 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3161 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3162 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3163 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3164 hclge_get_rss_type(&hdev->vport[0]);
3165 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3167 dev_err(&hdev->pdev->dev,
3168 "Configure rss input fail, status = %d\n", ret);
3172 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3175 struct hclge_vport *vport = hclge_get_vport(handle);
3178 /* Get hash algorithm */
3180 switch (vport->rss_algo) {
3181 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3182 *hfunc = ETH_RSS_HASH_TOP;
3184 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3185 *hfunc = ETH_RSS_HASH_XOR;
3188 *hfunc = ETH_RSS_HASH_UNKNOWN;
3193 /* Get the RSS Key required by the user */
3195 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3197 /* Get indirect table */
3199 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3200 indir[i] = vport->rss_indirection_tbl[i];
3205 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3206 const u8 *key, const u8 hfunc)
3208 struct hclge_vport *vport = hclge_get_vport(handle);
3209 struct hclge_dev *hdev = vport->back;
3213 /* Set the RSS Hash Key if specififed by the user */
3216 case ETH_RSS_HASH_TOP:
3217 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3219 case ETH_RSS_HASH_XOR:
3220 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3222 case ETH_RSS_HASH_NO_CHANGE:
3223 hash_algo = vport->rss_algo;
3229 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3233 /* Update the shadow RSS key with user specified qids */
3234 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3235 vport->rss_algo = hash_algo;
3238 /* Update the shadow RSS table with user specified qids */
3239 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3240 vport->rss_indirection_tbl[i] = indir[i];
3242 /* Update the hardware */
3243 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3246 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3248 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3250 if (nfc->data & RXH_L4_B_2_3)
3251 hash_sets |= HCLGE_D_PORT_BIT;
3253 hash_sets &= ~HCLGE_D_PORT_BIT;
3255 if (nfc->data & RXH_IP_SRC)
3256 hash_sets |= HCLGE_S_IP_BIT;
3258 hash_sets &= ~HCLGE_S_IP_BIT;
3260 if (nfc->data & RXH_IP_DST)
3261 hash_sets |= HCLGE_D_IP_BIT;
3263 hash_sets &= ~HCLGE_D_IP_BIT;
3265 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3266 hash_sets |= HCLGE_V_TAG_BIT;
3271 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3272 struct ethtool_rxnfc *nfc)
3274 struct hclge_vport *vport = hclge_get_vport(handle);
3275 struct hclge_dev *hdev = vport->back;
3276 struct hclge_rss_input_tuple_cmd *req;
3277 struct hclge_desc desc;
3281 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3282 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3285 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3286 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3288 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3289 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3290 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3291 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3292 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3293 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3294 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3295 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3297 tuple_sets = hclge_get_rss_hash_bits(nfc);
3298 switch (nfc->flow_type) {
3300 req->ipv4_tcp_en = tuple_sets;
3303 req->ipv6_tcp_en = tuple_sets;
3306 req->ipv4_udp_en = tuple_sets;
3309 req->ipv6_udp_en = tuple_sets;
3312 req->ipv4_sctp_en = tuple_sets;
3315 if ((nfc->data & RXH_L4_B_0_1) ||
3316 (nfc->data & RXH_L4_B_2_3))
3319 req->ipv6_sctp_en = tuple_sets;
3322 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3325 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3331 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3333 dev_err(&hdev->pdev->dev,
3334 "Set rss tuple fail, status = %d\n", ret);
3338 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3339 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3340 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3341 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3342 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3343 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3344 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3345 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3346 hclge_get_rss_type(vport);
3350 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3351 struct ethtool_rxnfc *nfc)
3353 struct hclge_vport *vport = hclge_get_vport(handle);
3358 switch (nfc->flow_type) {
3360 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3363 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3366 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3369 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3372 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3375 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3379 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3388 if (tuple_sets & HCLGE_D_PORT_BIT)
3389 nfc->data |= RXH_L4_B_2_3;
3390 if (tuple_sets & HCLGE_S_PORT_BIT)
3391 nfc->data |= RXH_L4_B_0_1;
3392 if (tuple_sets & HCLGE_D_IP_BIT)
3393 nfc->data |= RXH_IP_DST;
3394 if (tuple_sets & HCLGE_S_IP_BIT)
3395 nfc->data |= RXH_IP_SRC;
3400 static int hclge_get_tc_size(struct hnae3_handle *handle)
3402 struct hclge_vport *vport = hclge_get_vport(handle);
3403 struct hclge_dev *hdev = vport->back;
3405 return hdev->rss_size_max;
3408 int hclge_rss_init_hw(struct hclge_dev *hdev)
3410 struct hclge_vport *vport = hdev->vport;
3411 u8 *rss_indir = vport[0].rss_indirection_tbl;
3412 u16 rss_size = vport[0].alloc_rss_size;
3413 u8 *key = vport[0].rss_hash_key;
3414 u8 hfunc = vport[0].rss_algo;
3415 u16 tc_offset[HCLGE_MAX_TC_NUM];
3416 u16 tc_valid[HCLGE_MAX_TC_NUM];
3417 u16 tc_size[HCLGE_MAX_TC_NUM];
3421 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3425 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3429 ret = hclge_set_rss_input_tuple(hdev);
3433 /* Each TC have the same queue size, and tc_size set to hardware is
3434 * the log2 of roundup power of two of rss_size, the acutal queue
3435 * size is limited by indirection table.
3437 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3438 dev_err(&hdev->pdev->dev,
3439 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3444 roundup_size = roundup_pow_of_two(rss_size);
3445 roundup_size = ilog2(roundup_size);
3447 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3450 if (!(hdev->hw_tc_map & BIT(i)))
3454 tc_size[i] = roundup_size;
3455 tc_offset[i] = rss_size * i;
3458 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3461 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3463 struct hclge_vport *vport = hdev->vport;
3466 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3467 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3468 vport[j].rss_indirection_tbl[i] =
3469 i % vport[j].alloc_rss_size;
3473 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3475 struct hclge_vport *vport = hdev->vport;
3478 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3479 vport[i].rss_tuple_sets.ipv4_tcp_en =
3480 HCLGE_RSS_INPUT_TUPLE_OTHER;
3481 vport[i].rss_tuple_sets.ipv4_udp_en =
3482 HCLGE_RSS_INPUT_TUPLE_OTHER;
3483 vport[i].rss_tuple_sets.ipv4_sctp_en =
3484 HCLGE_RSS_INPUT_TUPLE_SCTP;
3485 vport[i].rss_tuple_sets.ipv4_fragment_en =
3486 HCLGE_RSS_INPUT_TUPLE_OTHER;
3487 vport[i].rss_tuple_sets.ipv6_tcp_en =
3488 HCLGE_RSS_INPUT_TUPLE_OTHER;
3489 vport[i].rss_tuple_sets.ipv6_udp_en =
3490 HCLGE_RSS_INPUT_TUPLE_OTHER;
3491 vport[i].rss_tuple_sets.ipv6_sctp_en =
3492 HCLGE_RSS_INPUT_TUPLE_SCTP;
3493 vport[i].rss_tuple_sets.ipv6_fragment_en =
3494 HCLGE_RSS_INPUT_TUPLE_OTHER;
3496 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3498 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3501 hclge_rss_indir_init_cfg(hdev);
3504 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3505 int vector_id, bool en,
3506 struct hnae3_ring_chain_node *ring_chain)
3508 struct hclge_dev *hdev = vport->back;
3509 struct hnae3_ring_chain_node *node;
3510 struct hclge_desc desc;
3511 struct hclge_ctrl_vector_chain_cmd *req
3512 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3513 enum hclge_cmd_status status;
3514 enum hclge_opcode_type op;
3515 u16 tqp_type_and_id;
3518 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3519 hclge_cmd_setup_basic_desc(&desc, op, false);
3520 req->int_vector_id = vector_id;
3523 for (node = ring_chain; node; node = node->next) {
3524 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3525 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3527 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3528 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3529 HCLGE_TQP_ID_S, node->tqp_index);
3530 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3532 hnae3_get_field(node->int_gl_idx,
3533 HNAE3_RING_GL_IDX_M,
3534 HNAE3_RING_GL_IDX_S));
3535 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3536 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3537 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3538 req->vfid = vport->vport_id;
3540 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3542 dev_err(&hdev->pdev->dev,
3543 "Map TQP fail, status is %d.\n",
3549 hclge_cmd_setup_basic_desc(&desc,
3552 req->int_vector_id = vector_id;
3557 req->int_cause_num = i;
3558 req->vfid = vport->vport_id;
3559 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3561 dev_err(&hdev->pdev->dev,
3562 "Map TQP fail, status is %d.\n", status);
3570 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3572 struct hnae3_ring_chain_node *ring_chain)
3574 struct hclge_vport *vport = hclge_get_vport(handle);
3575 struct hclge_dev *hdev = vport->back;
3578 vector_id = hclge_get_vector_index(hdev, vector);
3579 if (vector_id < 0) {
3580 dev_err(&hdev->pdev->dev,
3581 "Get vector index fail. vector_id =%d\n", vector_id);
3585 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3588 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3590 struct hnae3_ring_chain_node *ring_chain)
3592 struct hclge_vport *vport = hclge_get_vport(handle);
3593 struct hclge_dev *hdev = vport->back;
3596 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3599 vector_id = hclge_get_vector_index(hdev, vector);
3600 if (vector_id < 0) {
3601 dev_err(&handle->pdev->dev,
3602 "Get vector index fail. ret =%d\n", vector_id);
3606 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3608 dev_err(&handle->pdev->dev,
3609 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3616 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3617 struct hclge_promisc_param *param)
3619 struct hclge_promisc_cfg_cmd *req;
3620 struct hclge_desc desc;
3623 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3625 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3626 req->vf_id = param->vf_id;
3628 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3629 * pdev revision(0x20), new revision support them. The
3630 * value of this two fields will not return error when driver
3631 * send command to fireware in revision(0x20).
3633 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3634 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3638 dev_err(&hdev->pdev->dev,
3639 "Set promisc mode fail, status is %d.\n", ret);
3644 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3645 bool en_mc, bool en_bc, int vport_id)
3650 memset(param, 0, sizeof(struct hclge_promisc_param));
3652 param->enable = HCLGE_PROMISC_EN_UC;
3654 param->enable |= HCLGE_PROMISC_EN_MC;
3656 param->enable |= HCLGE_PROMISC_EN_BC;
3657 param->vf_id = vport_id;
3660 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3663 struct hclge_vport *vport = hclge_get_vport(handle);
3664 struct hclge_dev *hdev = vport->back;
3665 struct hclge_promisc_param param;
3667 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true,
3669 return hclge_cmd_set_promisc_mode(hdev, ¶m);
3672 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3674 struct hclge_get_fd_mode_cmd *req;
3675 struct hclge_desc desc;
3678 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3680 req = (struct hclge_get_fd_mode_cmd *)desc.data;
3682 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3684 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3688 *fd_mode = req->mode;
3693 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3694 u32 *stage1_entry_num,
3695 u32 *stage2_entry_num,
3696 u16 *stage1_counter_num,
3697 u16 *stage2_counter_num)
3699 struct hclge_get_fd_allocation_cmd *req;
3700 struct hclge_desc desc;
3703 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3705 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3707 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3709 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3714 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3715 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3716 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3717 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3722 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3724 struct hclge_set_fd_key_config_cmd *req;
3725 struct hclge_fd_key_cfg *stage;
3726 struct hclge_desc desc;
3729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3731 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3732 stage = &hdev->fd_cfg.key_cfg[stage_num];
3733 req->stage = stage_num;
3734 req->key_select = stage->key_sel;
3735 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3736 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3737 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3738 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3739 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3740 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3742 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3744 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3749 static int hclge_init_fd_config(struct hclge_dev *hdev)
3751 #define LOW_2_WORDS 0x03
3752 struct hclge_fd_key_cfg *key_cfg;
3755 if (!hnae3_dev_fd_supported(hdev))
3758 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3762 switch (hdev->fd_cfg.fd_mode) {
3763 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3764 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3766 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3767 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3770 dev_err(&hdev->pdev->dev,
3771 "Unsupported flow director mode %d\n",
3772 hdev->fd_cfg.fd_mode);
3776 hdev->fd_cfg.fd_en = true;
3777 hdev->fd_cfg.proto_support =
3778 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3779 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3780 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3781 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
3782 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3783 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3784 key_cfg->outer_sipv6_word_en = 0;
3785 key_cfg->outer_dipv6_word_en = 0;
3787 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3788 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3789 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3790 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3792 /* If use max 400bit key, we can support tuples for ether type */
3793 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3794 hdev->fd_cfg.proto_support |= ETHER_FLOW;
3795 key_cfg->tuple_active |=
3796 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3799 /* roce_type is used to filter roce frames
3800 * dst_vport is used to specify the rule
3802 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3804 ret = hclge_get_fd_allocation(hdev,
3805 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3806 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3807 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3808 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3812 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3815 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3816 int loc, u8 *key, bool is_add)
3818 struct hclge_fd_tcam_config_1_cmd *req1;
3819 struct hclge_fd_tcam_config_2_cmd *req2;
3820 struct hclge_fd_tcam_config_3_cmd *req3;
3821 struct hclge_desc desc[3];
3824 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
3825 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3826 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
3827 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3828 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
3830 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
3831 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
3832 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
3834 req1->stage = stage;
3835 req1->xy_sel = sel_x ? 1 : 0;
3836 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
3837 req1->index = cpu_to_le32(loc);
3838 req1->entry_vld = sel_x ? is_add : 0;
3841 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
3842 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
3843 sizeof(req2->tcam_data));
3844 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
3845 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
3848 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3850 dev_err(&hdev->pdev->dev,
3851 "config tcam key fail, ret=%d\n",
3857 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
3858 struct hclge_fd_ad_data *action)
3860 struct hclge_fd_ad_config_cmd *req;
3861 struct hclge_desc desc;
3865 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
3867 req = (struct hclge_fd_ad_config_cmd *)desc.data;
3868 req->index = cpu_to_le32(loc);
3871 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
3872 action->write_rule_id_to_bd);
3873 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
3876 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
3877 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
3878 action->forward_to_direct_queue);
3879 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
3881 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
3882 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
3883 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
3884 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
3885 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
3886 action->counter_id);
3888 req->ad_data = cpu_to_le64(ad_data);
3889 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3891 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
3896 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
3897 struct hclge_fd_rule *rule)
3899 u16 tmp_x_s, tmp_y_s;
3900 u32 tmp_x_l, tmp_y_l;
3903 if (rule->unused_tuple & tuple_bit)
3906 switch (tuple_bit) {
3909 case BIT(INNER_DST_MAC):
3910 for (i = 0; i < 6; i++) {
3911 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
3912 rule->tuples_mask.dst_mac[i]);
3913 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
3914 rule->tuples_mask.dst_mac[i]);
3918 case BIT(INNER_SRC_MAC):
3919 for (i = 0; i < 6; i++) {
3920 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
3921 rule->tuples.src_mac[i]);
3922 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
3923 rule->tuples.src_mac[i]);
3927 case BIT(INNER_VLAN_TAG_FST):
3928 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
3929 rule->tuples_mask.vlan_tag1);
3930 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
3931 rule->tuples_mask.vlan_tag1);
3932 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3933 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3936 case BIT(INNER_ETH_TYPE):
3937 calc_x(tmp_x_s, rule->tuples.ether_proto,
3938 rule->tuples_mask.ether_proto);
3939 calc_y(tmp_y_s, rule->tuples.ether_proto,
3940 rule->tuples_mask.ether_proto);
3941 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3942 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3945 case BIT(INNER_IP_TOS):
3946 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3947 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3950 case BIT(INNER_IP_PROTO):
3951 calc_x(*key_x, rule->tuples.ip_proto,
3952 rule->tuples_mask.ip_proto);
3953 calc_y(*key_y, rule->tuples.ip_proto,
3954 rule->tuples_mask.ip_proto);
3957 case BIT(INNER_SRC_IP):
3958 calc_x(tmp_x_l, rule->tuples.src_ip[3],
3959 rule->tuples_mask.src_ip[3]);
3960 calc_y(tmp_y_l, rule->tuples.src_ip[3],
3961 rule->tuples_mask.src_ip[3]);
3962 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3963 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3966 case BIT(INNER_DST_IP):
3967 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
3968 rule->tuples_mask.dst_ip[3]);
3969 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
3970 rule->tuples_mask.dst_ip[3]);
3971 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3972 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3975 case BIT(INNER_SRC_PORT):
3976 calc_x(tmp_x_s, rule->tuples.src_port,
3977 rule->tuples_mask.src_port);
3978 calc_y(tmp_y_s, rule->tuples.src_port,
3979 rule->tuples_mask.src_port);
3980 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3981 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3984 case BIT(INNER_DST_PORT):
3985 calc_x(tmp_x_s, rule->tuples.dst_port,
3986 rule->tuples_mask.dst_port);
3987 calc_y(tmp_y_s, rule->tuples.dst_port,
3988 rule->tuples_mask.dst_port);
3989 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3990 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3998 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
3999 u8 vf_id, u8 network_port_id)
4001 u32 port_number = 0;
4003 if (port_type == HOST_PORT) {
4004 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4006 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4008 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4010 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4011 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4012 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4018 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4019 __le32 *key_x, __le32 *key_y,
4020 struct hclge_fd_rule *rule)
4022 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4023 u8 cur_pos = 0, tuple_size, shift_bits;
4026 for (i = 0; i < MAX_META_DATA; i++) {
4027 tuple_size = meta_data_key_info[i].key_length;
4028 tuple_bit = key_cfg->meta_data_active & BIT(i);
4030 switch (tuple_bit) {
4031 case BIT(ROCE_TYPE):
4032 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4033 cur_pos += tuple_size;
4035 case BIT(DST_VPORT):
4036 port_number = hclge_get_port_number(HOST_PORT, 0,
4038 hnae3_set_field(meta_data,
4039 GENMASK(cur_pos + tuple_size, cur_pos),
4040 cur_pos, port_number);
4041 cur_pos += tuple_size;
4048 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4049 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4050 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4052 *key_x = cpu_to_le32(tmp_x << shift_bits);
4053 *key_y = cpu_to_le32(tmp_y << shift_bits);
4056 /* A complete key is combined with meta data key and tuple key.
4057 * Meta data key is stored at the MSB region, and tuple key is stored at
4058 * the LSB region, unused bits will be filled 0.
4060 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4061 struct hclge_fd_rule *rule)
4063 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4064 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4065 u8 *cur_key_x, *cur_key_y;
4066 int i, ret, tuple_size;
4067 u8 meta_data_region;
4069 memset(key_x, 0, sizeof(key_x));
4070 memset(key_y, 0, sizeof(key_y));
4074 for (i = 0 ; i < MAX_TUPLE; i++) {
4078 tuple_size = tuple_key_info[i].key_length / 8;
4079 check_tuple = key_cfg->tuple_active & BIT(i);
4081 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4084 cur_key_x += tuple_size;
4085 cur_key_y += tuple_size;
4089 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4090 MAX_META_DATA_LENGTH / 8;
4092 hclge_fd_convert_meta_data(key_cfg,
4093 (__le32 *)(key_x + meta_data_region),
4094 (__le32 *)(key_y + meta_data_region),
4097 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4100 dev_err(&hdev->pdev->dev,
4101 "fd key_y config fail, loc=%d, ret=%d\n",
4102 rule->queue_id, ret);
4106 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4109 dev_err(&hdev->pdev->dev,
4110 "fd key_x config fail, loc=%d, ret=%d\n",
4111 rule->queue_id, ret);
4115 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4116 struct hclge_fd_rule *rule)
4118 struct hclge_fd_ad_data ad_data;
4120 ad_data.ad_id = rule->location;
4122 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4123 ad_data.drop_packet = true;
4124 ad_data.forward_to_direct_queue = false;
4125 ad_data.queue_id = 0;
4127 ad_data.drop_packet = false;
4128 ad_data.forward_to_direct_queue = true;
4129 ad_data.queue_id = rule->queue_id;
4132 ad_data.use_counter = false;
4133 ad_data.counter_id = 0;
4135 ad_data.use_next_stage = false;
4136 ad_data.next_input_key = 0;
4138 ad_data.write_rule_id_to_bd = true;
4139 ad_data.rule_id = rule->location;
4141 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4144 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4145 struct ethtool_rx_flow_spec *fs, u32 *unused)
4147 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4148 struct ethtool_usrip4_spec *usr_ip4_spec;
4149 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4150 struct ethtool_usrip6_spec *usr_ip6_spec;
4151 struct ethhdr *ether_spec;
4153 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4156 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4159 if ((fs->flow_type & FLOW_EXT) &&
4160 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4161 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4165 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4169 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4170 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4172 if (!tcp_ip4_spec->ip4src)
4173 *unused |= BIT(INNER_SRC_IP);
4175 if (!tcp_ip4_spec->ip4dst)
4176 *unused |= BIT(INNER_DST_IP);
4178 if (!tcp_ip4_spec->psrc)
4179 *unused |= BIT(INNER_SRC_PORT);
4181 if (!tcp_ip4_spec->pdst)
4182 *unused |= BIT(INNER_DST_PORT);
4184 if (!tcp_ip4_spec->tos)
4185 *unused |= BIT(INNER_IP_TOS);
4189 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4190 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4191 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4193 if (!usr_ip4_spec->ip4src)
4194 *unused |= BIT(INNER_SRC_IP);
4196 if (!usr_ip4_spec->ip4dst)
4197 *unused |= BIT(INNER_DST_IP);
4199 if (!usr_ip4_spec->tos)
4200 *unused |= BIT(INNER_IP_TOS);
4202 if (!usr_ip4_spec->proto)
4203 *unused |= BIT(INNER_IP_PROTO);
4205 if (usr_ip4_spec->l4_4_bytes)
4208 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4215 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4216 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4219 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4220 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4221 *unused |= BIT(INNER_SRC_IP);
4223 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4224 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4225 *unused |= BIT(INNER_DST_IP);
4227 if (!tcp_ip6_spec->psrc)
4228 *unused |= BIT(INNER_SRC_PORT);
4230 if (!tcp_ip6_spec->pdst)
4231 *unused |= BIT(INNER_DST_PORT);
4233 if (tcp_ip6_spec->tclass)
4237 case IPV6_USER_FLOW:
4238 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4239 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4240 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4241 BIT(INNER_DST_PORT);
4243 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4244 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4245 *unused |= BIT(INNER_SRC_IP);
4247 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4248 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4249 *unused |= BIT(INNER_DST_IP);
4251 if (!usr_ip6_spec->l4_proto)
4252 *unused |= BIT(INNER_IP_PROTO);
4254 if (usr_ip6_spec->tclass)
4257 if (usr_ip6_spec->l4_4_bytes)
4262 ether_spec = &fs->h_u.ether_spec;
4263 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4264 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4265 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4267 if (is_zero_ether_addr(ether_spec->h_source))
4268 *unused |= BIT(INNER_SRC_MAC);
4270 if (is_zero_ether_addr(ether_spec->h_dest))
4271 *unused |= BIT(INNER_DST_MAC);
4273 if (!ether_spec->h_proto)
4274 *unused |= BIT(INNER_ETH_TYPE);
4281 if ((fs->flow_type & FLOW_EXT)) {
4282 if (fs->h_ext.vlan_etype)
4284 if (!fs->h_ext.vlan_tci)
4285 *unused |= BIT(INNER_VLAN_TAG_FST);
4287 if (fs->m_ext.vlan_tci) {
4288 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4292 *unused |= BIT(INNER_VLAN_TAG_FST);
4295 if (fs->flow_type & FLOW_MAC_EXT) {
4296 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4299 if (is_zero_ether_addr(fs->h_ext.h_dest))
4300 *unused |= BIT(INNER_DST_MAC);
4302 *unused &= ~(BIT(INNER_DST_MAC));
4308 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4310 struct hclge_fd_rule *rule = NULL;
4311 struct hlist_node *node2;
4313 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4314 if (rule->location >= location)
4318 return rule && rule->location == location;
4321 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4322 struct hclge_fd_rule *new_rule,
4326 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4327 struct hlist_node *node2;
4329 if (is_add && !new_rule)
4332 hlist_for_each_entry_safe(rule, node2,
4333 &hdev->fd_rule_list, rule_node) {
4334 if (rule->location >= location)
4339 if (rule && rule->location == location) {
4340 hlist_del(&rule->rule_node);
4342 hdev->hclge_fd_rule_num--;
4347 } else if (!is_add) {
4348 dev_err(&hdev->pdev->dev,
4349 "delete fail, rule %d is inexistent\n",
4354 INIT_HLIST_NODE(&new_rule->rule_node);
4357 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4359 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4361 hdev->hclge_fd_rule_num++;
4366 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4367 struct ethtool_rx_flow_spec *fs,
4368 struct hclge_fd_rule *rule)
4370 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4372 switch (flow_type) {
4376 rule->tuples.src_ip[3] =
4377 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4378 rule->tuples_mask.src_ip[3] =
4379 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4381 rule->tuples.dst_ip[3] =
4382 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4383 rule->tuples_mask.dst_ip[3] =
4384 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4386 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4387 rule->tuples_mask.src_port =
4388 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4390 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4391 rule->tuples_mask.dst_port =
4392 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4394 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4395 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4397 rule->tuples.ether_proto = ETH_P_IP;
4398 rule->tuples_mask.ether_proto = 0xFFFF;
4402 rule->tuples.src_ip[3] =
4403 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4404 rule->tuples_mask.src_ip[3] =
4405 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4407 rule->tuples.dst_ip[3] =
4408 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4409 rule->tuples_mask.dst_ip[3] =
4410 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4412 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4413 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4415 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4416 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4418 rule->tuples.ether_proto = ETH_P_IP;
4419 rule->tuples_mask.ether_proto = 0xFFFF;
4425 be32_to_cpu_array(rule->tuples.src_ip,
4426 fs->h_u.tcp_ip6_spec.ip6src, 4);
4427 be32_to_cpu_array(rule->tuples_mask.src_ip,
4428 fs->m_u.tcp_ip6_spec.ip6src, 4);
4430 be32_to_cpu_array(rule->tuples.dst_ip,
4431 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4432 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4433 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4435 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4436 rule->tuples_mask.src_port =
4437 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4439 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4440 rule->tuples_mask.dst_port =
4441 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4443 rule->tuples.ether_proto = ETH_P_IPV6;
4444 rule->tuples_mask.ether_proto = 0xFFFF;
4447 case IPV6_USER_FLOW:
4448 be32_to_cpu_array(rule->tuples.src_ip,
4449 fs->h_u.usr_ip6_spec.ip6src, 4);
4450 be32_to_cpu_array(rule->tuples_mask.src_ip,
4451 fs->m_u.usr_ip6_spec.ip6src, 4);
4453 be32_to_cpu_array(rule->tuples.dst_ip,
4454 fs->h_u.usr_ip6_spec.ip6dst, 4);
4455 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4456 fs->m_u.usr_ip6_spec.ip6dst, 4);
4458 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4459 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4461 rule->tuples.ether_proto = ETH_P_IPV6;
4462 rule->tuples_mask.ether_proto = 0xFFFF;
4466 ether_addr_copy(rule->tuples.src_mac,
4467 fs->h_u.ether_spec.h_source);
4468 ether_addr_copy(rule->tuples_mask.src_mac,
4469 fs->m_u.ether_spec.h_source);
4471 ether_addr_copy(rule->tuples.dst_mac,
4472 fs->h_u.ether_spec.h_dest);
4473 ether_addr_copy(rule->tuples_mask.dst_mac,
4474 fs->m_u.ether_spec.h_dest);
4476 rule->tuples.ether_proto =
4477 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4478 rule->tuples_mask.ether_proto =
4479 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4486 switch (flow_type) {
4489 rule->tuples.ip_proto = IPPROTO_SCTP;
4490 rule->tuples_mask.ip_proto = 0xFF;
4494 rule->tuples.ip_proto = IPPROTO_TCP;
4495 rule->tuples_mask.ip_proto = 0xFF;
4499 rule->tuples.ip_proto = IPPROTO_UDP;
4500 rule->tuples_mask.ip_proto = 0xFF;
4506 if ((fs->flow_type & FLOW_EXT)) {
4507 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4508 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4511 if (fs->flow_type & FLOW_MAC_EXT) {
4512 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4513 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4519 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4520 struct ethtool_rxnfc *cmd)
4522 struct hclge_vport *vport = hclge_get_vport(handle);
4523 struct hclge_dev *hdev = vport->back;
4524 u16 dst_vport_id = 0, q_index = 0;
4525 struct ethtool_rx_flow_spec *fs;
4526 struct hclge_fd_rule *rule;
4531 if (!hnae3_dev_fd_supported(hdev))
4534 if (!hdev->fd_cfg.fd_en) {
4535 dev_warn(&hdev->pdev->dev,
4536 "Please enable flow director first\n");
4540 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4542 ret = hclge_fd_check_spec(hdev, fs, &unused);
4544 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4548 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4549 action = HCLGE_FD_ACTION_DROP_PACKET;
4551 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4552 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4555 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4556 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4559 dev_err(&hdev->pdev->dev,
4560 "Error: queue id (%d) > max tqp num (%d)\n",
4565 if (vf > hdev->num_req_vfs) {
4566 dev_err(&hdev->pdev->dev,
4567 "Error: vf id (%d) > max vf num (%d)\n",
4568 vf, hdev->num_req_vfs);
4572 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4576 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4580 ret = hclge_fd_get_tuple(hdev, fs, rule);
4584 rule->flow_type = fs->flow_type;
4586 rule->location = fs->location;
4587 rule->unused_tuple = unused;
4588 rule->vf_id = dst_vport_id;
4589 rule->queue_id = q_index;
4590 rule->action = action;
4592 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4596 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4600 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4611 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4612 struct ethtool_rxnfc *cmd)
4614 struct hclge_vport *vport = hclge_get_vport(handle);
4615 struct hclge_dev *hdev = vport->back;
4616 struct ethtool_rx_flow_spec *fs;
4619 if (!hnae3_dev_fd_supported(hdev))
4622 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4624 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4627 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4628 dev_err(&hdev->pdev->dev,
4629 "Delete fail, rule %d is inexistent\n",
4634 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4635 fs->location, NULL, false);
4639 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4643 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4646 struct hclge_vport *vport = hclge_get_vport(handle);
4647 struct hclge_dev *hdev = vport->back;
4648 struct hclge_fd_rule *rule;
4649 struct hlist_node *node;
4651 if (!hnae3_dev_fd_supported(hdev))
4655 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4657 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4658 rule->location, NULL, false);
4659 hlist_del(&rule->rule_node);
4661 hdev->hclge_fd_rule_num--;
4664 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4666 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4667 rule->location, NULL, false);
4671 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4673 struct hclge_vport *vport = hclge_get_vport(handle);
4674 struct hclge_dev *hdev = vport->back;
4675 struct hclge_fd_rule *rule;
4676 struct hlist_node *node;
4679 /* Return ok here, because reset error handling will check this
4680 * return value. If error is returned here, the reset process will
4683 if (!hnae3_dev_fd_supported(hdev))
4686 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4687 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4689 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4692 dev_warn(&hdev->pdev->dev,
4693 "Restore rule %d failed, remove it\n",
4695 hlist_del(&rule->rule_node);
4697 hdev->hclge_fd_rule_num--;
4703 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4704 struct ethtool_rxnfc *cmd)
4706 struct hclge_vport *vport = hclge_get_vport(handle);
4707 struct hclge_dev *hdev = vport->back;
4709 if (!hnae3_dev_fd_supported(hdev))
4712 cmd->rule_cnt = hdev->hclge_fd_rule_num;
4713 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4718 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4719 struct ethtool_rxnfc *cmd)
4721 struct hclge_vport *vport = hclge_get_vport(handle);
4722 struct hclge_fd_rule *rule = NULL;
4723 struct hclge_dev *hdev = vport->back;
4724 struct ethtool_rx_flow_spec *fs;
4725 struct hlist_node *node2;
4727 if (!hnae3_dev_fd_supported(hdev))
4730 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4732 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4733 if (rule->location >= fs->location)
4737 if (!rule || fs->location != rule->location)
4740 fs->flow_type = rule->flow_type;
4741 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4745 fs->h_u.tcp_ip4_spec.ip4src =
4746 cpu_to_be32(rule->tuples.src_ip[3]);
4747 fs->m_u.tcp_ip4_spec.ip4src =
4748 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4749 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4751 fs->h_u.tcp_ip4_spec.ip4dst =
4752 cpu_to_be32(rule->tuples.dst_ip[3]);
4753 fs->m_u.tcp_ip4_spec.ip4dst =
4754 rule->unused_tuple & BIT(INNER_DST_IP) ?
4755 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4757 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4758 fs->m_u.tcp_ip4_spec.psrc =
4759 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4760 0 : cpu_to_be16(rule->tuples_mask.src_port);
4762 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4763 fs->m_u.tcp_ip4_spec.pdst =
4764 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4765 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4767 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4768 fs->m_u.tcp_ip4_spec.tos =
4769 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4770 0 : rule->tuples_mask.ip_tos;
4774 fs->h_u.usr_ip4_spec.ip4src =
4775 cpu_to_be32(rule->tuples.src_ip[3]);
4776 fs->m_u.tcp_ip4_spec.ip4src =
4777 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4778 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4780 fs->h_u.usr_ip4_spec.ip4dst =
4781 cpu_to_be32(rule->tuples.dst_ip[3]);
4782 fs->m_u.usr_ip4_spec.ip4dst =
4783 rule->unused_tuple & BIT(INNER_DST_IP) ?
4784 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4786 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4787 fs->m_u.usr_ip4_spec.tos =
4788 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4789 0 : rule->tuples_mask.ip_tos;
4791 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4792 fs->m_u.usr_ip4_spec.proto =
4793 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4794 0 : rule->tuples_mask.ip_proto;
4796 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4802 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4803 rule->tuples.src_ip, 4);
4804 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4805 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
4807 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
4808 rule->tuples_mask.src_ip, 4);
4810 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
4811 rule->tuples.dst_ip, 4);
4812 if (rule->unused_tuple & BIT(INNER_DST_IP))
4813 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4815 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
4816 rule->tuples_mask.dst_ip, 4);
4818 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4819 fs->m_u.tcp_ip6_spec.psrc =
4820 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4821 0 : cpu_to_be16(rule->tuples_mask.src_port);
4823 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4824 fs->m_u.tcp_ip6_spec.pdst =
4825 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4826 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4829 case IPV6_USER_FLOW:
4830 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
4831 rule->tuples.src_ip, 4);
4832 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4833 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
4835 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
4836 rule->tuples_mask.src_ip, 4);
4838 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
4839 rule->tuples.dst_ip, 4);
4840 if (rule->unused_tuple & BIT(INNER_DST_IP))
4841 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4843 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
4844 rule->tuples_mask.dst_ip, 4);
4846 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
4847 fs->m_u.usr_ip6_spec.l4_proto =
4848 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4849 0 : rule->tuples_mask.ip_proto;
4853 ether_addr_copy(fs->h_u.ether_spec.h_source,
4854 rule->tuples.src_mac);
4855 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
4856 eth_zero_addr(fs->m_u.ether_spec.h_source);
4858 ether_addr_copy(fs->m_u.ether_spec.h_source,
4859 rule->tuples_mask.src_mac);
4861 ether_addr_copy(fs->h_u.ether_spec.h_dest,
4862 rule->tuples.dst_mac);
4863 if (rule->unused_tuple & BIT(INNER_DST_MAC))
4864 eth_zero_addr(fs->m_u.ether_spec.h_dest);
4866 ether_addr_copy(fs->m_u.ether_spec.h_dest,
4867 rule->tuples_mask.dst_mac);
4869 fs->h_u.ether_spec.h_proto =
4870 cpu_to_be16(rule->tuples.ether_proto);
4871 fs->m_u.ether_spec.h_proto =
4872 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
4873 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
4880 if (fs->flow_type & FLOW_EXT) {
4881 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
4882 fs->m_ext.vlan_tci =
4883 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
4884 cpu_to_be16(VLAN_VID_MASK) :
4885 cpu_to_be16(rule->tuples_mask.vlan_tag1);
4888 if (fs->flow_type & FLOW_MAC_EXT) {
4889 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
4890 if (rule->unused_tuple & BIT(INNER_DST_MAC))
4891 eth_zero_addr(fs->m_u.ether_spec.h_dest);
4893 ether_addr_copy(fs->m_u.ether_spec.h_dest,
4894 rule->tuples_mask.dst_mac);
4897 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4898 fs->ring_cookie = RX_CLS_FLOW_DISC;
4902 fs->ring_cookie = rule->queue_id;
4903 vf_id = rule->vf_id;
4904 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
4905 fs->ring_cookie |= vf_id;
4911 static int hclge_get_all_rules(struct hnae3_handle *handle,
4912 struct ethtool_rxnfc *cmd, u32 *rule_locs)
4914 struct hclge_vport *vport = hclge_get_vport(handle);
4915 struct hclge_dev *hdev = vport->back;
4916 struct hclge_fd_rule *rule;
4917 struct hlist_node *node2;
4920 if (!hnae3_dev_fd_supported(hdev))
4923 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4925 hlist_for_each_entry_safe(rule, node2,
4926 &hdev->fd_rule_list, rule_node) {
4927 if (cnt == cmd->rule_cnt)
4930 rule_locs[cnt] = rule->location;
4934 cmd->rule_cnt = cnt;
4939 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
4941 struct hclge_vport *vport = hclge_get_vport(handle);
4942 struct hclge_dev *hdev = vport->back;
4944 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
4945 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
4948 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
4950 struct hclge_vport *vport = hclge_get_vport(handle);
4951 struct hclge_dev *hdev = vport->back;
4953 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4956 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
4958 struct hclge_vport *vport = hclge_get_vport(handle);
4959 struct hclge_dev *hdev = vport->back;
4961 return hdev->reset_count;
4964 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
4966 struct hclge_vport *vport = hclge_get_vport(handle);
4967 struct hclge_dev *hdev = vport->back;
4969 hdev->fd_cfg.fd_en = enable;
4971 hclge_del_all_fd_entries(handle, false);
4973 hclge_restore_fd_entries(handle);
4976 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
4978 struct hclge_desc desc;
4979 struct hclge_config_mac_mode_cmd *req =
4980 (struct hclge_config_mac_mode_cmd *)desc.data;
4984 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
4985 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
4986 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
4987 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
4988 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
4989 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
4990 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
4991 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
4992 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
4993 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
4994 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
4995 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
4996 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
4997 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
4998 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
4999 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5001 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5003 dev_err(&hdev->pdev->dev,
5004 "mac enable fail, ret =%d.\n", ret);
5007 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5009 struct hclge_config_mac_mode_cmd *req;
5010 struct hclge_desc desc;
5014 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5015 /* 1 Read out the MAC mode config at first */
5016 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5017 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5019 dev_err(&hdev->pdev->dev,
5020 "mac loopback get fail, ret =%d.\n", ret);
5024 /* 2 Then setup the loopback flag */
5025 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5026 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5027 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5028 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5030 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5032 /* 3 Config mac work mode with loopback flag
5033 * and its original configure parameters
5035 hclge_cmd_reuse_desc(&desc, false);
5036 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5038 dev_err(&hdev->pdev->dev,
5039 "mac loopback set fail, ret =%d.\n", ret);
5043 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5044 enum hnae3_loop loop_mode)
5046 #define HCLGE_SERDES_RETRY_MS 10
5047 #define HCLGE_SERDES_RETRY_NUM 100
5048 struct hclge_serdes_lb_cmd *req;
5049 struct hclge_desc desc;
5053 req = (struct hclge_serdes_lb_cmd *)desc.data;
5054 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5056 switch (loop_mode) {
5057 case HNAE3_LOOP_SERIAL_SERDES:
5058 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5060 case HNAE3_LOOP_PARALLEL_SERDES:
5061 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5064 dev_err(&hdev->pdev->dev,
5065 "unsupported serdes loopback mode %d\n", loop_mode);
5070 req->enable = loop_mode_b;
5071 req->mask = loop_mode_b;
5073 req->mask = loop_mode_b;
5076 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5078 dev_err(&hdev->pdev->dev,
5079 "serdes loopback set fail, ret = %d\n", ret);
5084 msleep(HCLGE_SERDES_RETRY_MS);
5085 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5087 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5089 dev_err(&hdev->pdev->dev,
5090 "serdes loopback get, ret = %d\n", ret);
5093 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5094 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5096 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5097 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5099 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5100 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5104 hclge_cfg_mac_mode(hdev, en);
5108 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5109 int stream_id, bool enable)
5111 struct hclge_desc desc;
5112 struct hclge_cfg_com_tqp_queue_cmd *req =
5113 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5116 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5117 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5118 req->stream_id = cpu_to_le16(stream_id);
5119 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5121 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5123 dev_err(&hdev->pdev->dev,
5124 "Tqp enable fail, status =%d.\n", ret);
5128 static int hclge_set_loopback(struct hnae3_handle *handle,
5129 enum hnae3_loop loop_mode, bool en)
5131 struct hclge_vport *vport = hclge_get_vport(handle);
5132 struct hclge_dev *hdev = vport->back;
5135 switch (loop_mode) {
5136 case HNAE3_LOOP_APP:
5137 ret = hclge_set_app_loopback(hdev, en);
5139 case HNAE3_LOOP_SERIAL_SERDES:
5140 case HNAE3_LOOP_PARALLEL_SERDES:
5141 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5145 dev_err(&hdev->pdev->dev,
5146 "loop_mode %d is not supported\n", loop_mode);
5150 for (i = 0; i < vport->alloc_tqps; i++) {
5151 ret = hclge_tqp_enable(hdev, i, 0, en);
5159 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5161 struct hclge_vport *vport = hclge_get_vport(handle);
5162 struct hnae3_queue *queue;
5163 struct hclge_tqp *tqp;
5166 for (i = 0; i < vport->alloc_tqps; i++) {
5167 queue = handle->kinfo.tqp[i];
5168 tqp = container_of(queue, struct hclge_tqp, q);
5169 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5173 static int hclge_ae_start(struct hnae3_handle *handle)
5175 struct hclge_vport *vport = hclge_get_vport(handle);
5176 struct hclge_dev *hdev = vport->back;
5179 hclge_cfg_mac_mode(hdev, true);
5180 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5181 mod_timer(&hdev->service_timer, jiffies + HZ);
5182 hdev->hw.mac.link = 0;
5184 /* reset tqp stats */
5185 hclge_reset_tqp_stats(handle);
5187 hclge_mac_start_phy(hdev);
5192 static void hclge_ae_stop(struct hnae3_handle *handle)
5194 struct hclge_vport *vport = hclge_get_vport(handle);
5195 struct hclge_dev *hdev = vport->back;
5197 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5199 del_timer_sync(&hdev->service_timer);
5200 cancel_work_sync(&hdev->service_task);
5201 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5203 /* If it is not PF reset, the firmware will disable the MAC,
5204 * so it only need to stop phy here.
5206 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5207 hdev->reset_type != HNAE3_FUNC_RESET) {
5208 hclge_mac_stop_phy(hdev);
5213 hclge_cfg_mac_mode(hdev, false);
5215 hclge_mac_stop_phy(hdev);
5217 /* reset tqp stats */
5218 hclge_reset_tqp_stats(handle);
5219 del_timer_sync(&hdev->service_timer);
5220 cancel_work_sync(&hdev->service_task);
5221 hclge_update_link_status(hdev);
5224 int hclge_vport_start(struct hclge_vport *vport)
5226 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5227 vport->last_active_jiffies = jiffies;
5231 void hclge_vport_stop(struct hclge_vport *vport)
5233 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5236 static int hclge_client_start(struct hnae3_handle *handle)
5238 struct hclge_vport *vport = hclge_get_vport(handle);
5240 return hclge_vport_start(vport);
5243 static void hclge_client_stop(struct hnae3_handle *handle)
5245 struct hclge_vport *vport = hclge_get_vport(handle);
5247 hclge_vport_stop(vport);
5250 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5251 u16 cmdq_resp, u8 resp_code,
5252 enum hclge_mac_vlan_tbl_opcode op)
5254 struct hclge_dev *hdev = vport->back;
5255 int return_status = -EIO;
5258 dev_err(&hdev->pdev->dev,
5259 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5264 if (op == HCLGE_MAC_VLAN_ADD) {
5265 if ((!resp_code) || (resp_code == 1)) {
5267 } else if (resp_code == 2) {
5268 return_status = -ENOSPC;
5269 dev_err(&hdev->pdev->dev,
5270 "add mac addr failed for uc_overflow.\n");
5271 } else if (resp_code == 3) {
5272 return_status = -ENOSPC;
5273 dev_err(&hdev->pdev->dev,
5274 "add mac addr failed for mc_overflow.\n");
5276 dev_err(&hdev->pdev->dev,
5277 "add mac addr failed for undefined, code=%d.\n",
5280 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5283 } else if (resp_code == 1) {
5284 return_status = -ENOENT;
5285 dev_dbg(&hdev->pdev->dev,
5286 "remove mac addr failed for miss.\n");
5288 dev_err(&hdev->pdev->dev,
5289 "remove mac addr failed for undefined, code=%d.\n",
5292 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5295 } else if (resp_code == 1) {
5296 return_status = -ENOENT;
5297 dev_dbg(&hdev->pdev->dev,
5298 "lookup mac addr failed for miss.\n");
5300 dev_err(&hdev->pdev->dev,
5301 "lookup mac addr failed for undefined, code=%d.\n",
5305 return_status = -EINVAL;
5306 dev_err(&hdev->pdev->dev,
5307 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5311 return return_status;
5314 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5319 if (vfid > 255 || vfid < 0)
5322 if (vfid >= 0 && vfid <= 191) {
5323 word_num = vfid / 32;
5324 bit_num = vfid % 32;
5326 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5328 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5330 word_num = (vfid - 192) / 32;
5331 bit_num = vfid % 32;
5333 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5335 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5341 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5343 #define HCLGE_DESC_NUMBER 3
5344 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5347 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5348 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5349 if (desc[i].data[j])
5355 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5358 const unsigned char *mac_addr = addr;
5359 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5360 (mac_addr[0]) | (mac_addr[1] << 8);
5361 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5363 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5364 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5367 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5368 struct hclge_mac_vlan_tbl_entry_cmd *req)
5370 struct hclge_dev *hdev = vport->back;
5371 struct hclge_desc desc;
5376 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5378 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5380 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5382 dev_err(&hdev->pdev->dev,
5383 "del mac addr failed for cmd_send, ret =%d.\n",
5387 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5388 retval = le16_to_cpu(desc.retval);
5390 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5391 HCLGE_MAC_VLAN_REMOVE);
5394 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5395 struct hclge_mac_vlan_tbl_entry_cmd *req,
5396 struct hclge_desc *desc,
5399 struct hclge_dev *hdev = vport->back;
5404 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5406 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5407 memcpy(desc[0].data,
5409 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5410 hclge_cmd_setup_basic_desc(&desc[1],
5411 HCLGE_OPC_MAC_VLAN_ADD,
5413 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5414 hclge_cmd_setup_basic_desc(&desc[2],
5415 HCLGE_OPC_MAC_VLAN_ADD,
5417 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5419 memcpy(desc[0].data,
5421 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5422 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5425 dev_err(&hdev->pdev->dev,
5426 "lookup mac addr failed for cmd_send, ret =%d.\n",
5430 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5431 retval = le16_to_cpu(desc[0].retval);
5433 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5434 HCLGE_MAC_VLAN_LKUP);
5437 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5438 struct hclge_mac_vlan_tbl_entry_cmd *req,
5439 struct hclge_desc *mc_desc)
5441 struct hclge_dev *hdev = vport->back;
5448 struct hclge_desc desc;
5450 hclge_cmd_setup_basic_desc(&desc,
5451 HCLGE_OPC_MAC_VLAN_ADD,
5453 memcpy(desc.data, req,
5454 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5455 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5456 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5457 retval = le16_to_cpu(desc.retval);
5459 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5461 HCLGE_MAC_VLAN_ADD);
5463 hclge_cmd_reuse_desc(&mc_desc[0], false);
5464 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5465 hclge_cmd_reuse_desc(&mc_desc[1], false);
5466 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5467 hclge_cmd_reuse_desc(&mc_desc[2], false);
5468 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5469 memcpy(mc_desc[0].data, req,
5470 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5471 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5472 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5473 retval = le16_to_cpu(mc_desc[0].retval);
5475 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5477 HCLGE_MAC_VLAN_ADD);
5481 dev_err(&hdev->pdev->dev,
5482 "add mac addr failed for cmd_send, ret =%d.\n",
5490 static int hclge_init_umv_space(struct hclge_dev *hdev)
5492 u16 allocated_size = 0;
5495 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5500 if (allocated_size < hdev->wanted_umv_size)
5501 dev_warn(&hdev->pdev->dev,
5502 "Alloc umv space failed, want %d, get %d\n",
5503 hdev->wanted_umv_size, allocated_size);
5505 mutex_init(&hdev->umv_mutex);
5506 hdev->max_umv_size = allocated_size;
5507 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5508 hdev->share_umv_size = hdev->priv_umv_size +
5509 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5514 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5518 if (hdev->max_umv_size > 0) {
5519 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5523 hdev->max_umv_size = 0;
5525 mutex_destroy(&hdev->umv_mutex);
5530 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5531 u16 *allocated_size, bool is_alloc)
5533 struct hclge_umv_spc_alc_cmd *req;
5534 struct hclge_desc desc;
5537 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5538 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5539 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5540 req->space_size = cpu_to_le32(space_size);
5542 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5544 dev_err(&hdev->pdev->dev,
5545 "%s umv space failed for cmd_send, ret =%d\n",
5546 is_alloc ? "allocate" : "free", ret);
5550 if (is_alloc && allocated_size)
5551 *allocated_size = le32_to_cpu(desc.data[1]);
5556 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5558 struct hclge_vport *vport;
5561 for (i = 0; i < hdev->num_alloc_vport; i++) {
5562 vport = &hdev->vport[i];
5563 vport->used_umv_num = 0;
5566 mutex_lock(&hdev->umv_mutex);
5567 hdev->share_umv_size = hdev->priv_umv_size +
5568 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5569 mutex_unlock(&hdev->umv_mutex);
5572 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5574 struct hclge_dev *hdev = vport->back;
5577 mutex_lock(&hdev->umv_mutex);
5578 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5579 hdev->share_umv_size == 0);
5580 mutex_unlock(&hdev->umv_mutex);
5585 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5587 struct hclge_dev *hdev = vport->back;
5589 mutex_lock(&hdev->umv_mutex);
5591 if (vport->used_umv_num > hdev->priv_umv_size)
5592 hdev->share_umv_size++;
5593 vport->used_umv_num--;
5595 if (vport->used_umv_num >= hdev->priv_umv_size)
5596 hdev->share_umv_size--;
5597 vport->used_umv_num++;
5599 mutex_unlock(&hdev->umv_mutex);
5602 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5603 const unsigned char *addr)
5605 struct hclge_vport *vport = hclge_get_vport(handle);
5607 return hclge_add_uc_addr_common(vport, addr);
5610 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5611 const unsigned char *addr)
5613 struct hclge_dev *hdev = vport->back;
5614 struct hclge_mac_vlan_tbl_entry_cmd req;
5615 struct hclge_desc desc;
5616 u16 egress_port = 0;
5619 /* mac addr check */
5620 if (is_zero_ether_addr(addr) ||
5621 is_broadcast_ether_addr(addr) ||
5622 is_multicast_ether_addr(addr)) {
5623 dev_err(&hdev->pdev->dev,
5624 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5626 is_zero_ether_addr(addr),
5627 is_broadcast_ether_addr(addr),
5628 is_multicast_ether_addr(addr));
5632 memset(&req, 0, sizeof(req));
5633 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5635 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5636 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5638 req.egress_port = cpu_to_le16(egress_port);
5640 hclge_prepare_mac_addr(&req, addr);
5642 /* Lookup the mac address in the mac_vlan table, and add
5643 * it if the entry is inexistent. Repeated unicast entry
5644 * is not allowed in the mac vlan table.
5646 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5647 if (ret == -ENOENT) {
5648 if (!hclge_is_umv_space_full(vport)) {
5649 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5651 hclge_update_umv_space(vport, false);
5655 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5656 hdev->priv_umv_size);
5661 /* check if we just hit the duplicate */
5665 dev_err(&hdev->pdev->dev,
5666 "PF failed to add unicast entry(%pM) in the MAC table\n",
5672 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5673 const unsigned char *addr)
5675 struct hclge_vport *vport = hclge_get_vport(handle);
5677 return hclge_rm_uc_addr_common(vport, addr);
5680 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5681 const unsigned char *addr)
5683 struct hclge_dev *hdev = vport->back;
5684 struct hclge_mac_vlan_tbl_entry_cmd req;
5687 /* mac addr check */
5688 if (is_zero_ether_addr(addr) ||
5689 is_broadcast_ether_addr(addr) ||
5690 is_multicast_ether_addr(addr)) {
5691 dev_dbg(&hdev->pdev->dev,
5692 "Remove mac err! invalid mac:%pM.\n",
5697 memset(&req, 0, sizeof(req));
5698 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5699 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5700 hclge_prepare_mac_addr(&req, addr);
5701 ret = hclge_remove_mac_vlan_tbl(vport, &req);
5703 hclge_update_umv_space(vport, true);
5708 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5709 const unsigned char *addr)
5711 struct hclge_vport *vport = hclge_get_vport(handle);
5713 return hclge_add_mc_addr_common(vport, addr);
5716 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5717 const unsigned char *addr)
5719 struct hclge_dev *hdev = vport->back;
5720 struct hclge_mac_vlan_tbl_entry_cmd req;
5721 struct hclge_desc desc[3];
5724 /* mac addr check */
5725 if (!is_multicast_ether_addr(addr)) {
5726 dev_err(&hdev->pdev->dev,
5727 "Add mc mac err! invalid mac:%pM.\n",
5731 memset(&req, 0, sizeof(req));
5732 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5733 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5734 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5735 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5736 hclge_prepare_mac_addr(&req, addr);
5737 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5739 /* This mac addr exist, update VFID for it */
5740 hclge_update_desc_vfid(desc, vport->vport_id, false);
5741 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5743 /* This mac addr do not exist, add new entry for it */
5744 memset(desc[0].data, 0, sizeof(desc[0].data));
5745 memset(desc[1].data, 0, sizeof(desc[0].data));
5746 memset(desc[2].data, 0, sizeof(desc[0].data));
5747 hclge_update_desc_vfid(desc, vport->vport_id, false);
5748 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5751 if (status == -ENOSPC)
5752 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5757 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5758 const unsigned char *addr)
5760 struct hclge_vport *vport = hclge_get_vport(handle);
5762 return hclge_rm_mc_addr_common(vport, addr);
5765 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5766 const unsigned char *addr)
5768 struct hclge_dev *hdev = vport->back;
5769 struct hclge_mac_vlan_tbl_entry_cmd req;
5770 enum hclge_cmd_status status;
5771 struct hclge_desc desc[3];
5773 /* mac addr check */
5774 if (!is_multicast_ether_addr(addr)) {
5775 dev_dbg(&hdev->pdev->dev,
5776 "Remove mc mac err! invalid mac:%pM.\n",
5781 memset(&req, 0, sizeof(req));
5782 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5783 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5784 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5785 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5786 hclge_prepare_mac_addr(&req, addr);
5787 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5789 /* This mac addr exist, remove this handle's VFID for it */
5790 hclge_update_desc_vfid(desc, vport->vport_id, true);
5792 if (hclge_is_all_function_id_zero(desc))
5793 /* All the vfid is zero, so need to delete this entry */
5794 status = hclge_remove_mac_vlan_tbl(vport, &req);
5796 /* Not all the vfid is zero, update the vfid */
5797 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5800 /* Maybe this mac address is in mta table, but it cannot be
5801 * deleted here because an entry of mta represents an address
5802 * range rather than a specific address. the delete action to
5803 * all entries will take effect in update_mta_status called by
5804 * hns3_nic_set_rx_mode.
5812 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
5813 u16 cmdq_resp, u8 resp_code)
5815 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
5816 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
5817 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
5818 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
5823 dev_err(&hdev->pdev->dev,
5824 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
5829 switch (resp_code) {
5830 case HCLGE_ETHERTYPE_SUCCESS_ADD:
5831 case HCLGE_ETHERTYPE_ALREADY_ADD:
5834 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
5835 dev_err(&hdev->pdev->dev,
5836 "add mac ethertype failed for manager table overflow.\n");
5837 return_status = -EIO;
5839 case HCLGE_ETHERTYPE_KEY_CONFLICT:
5840 dev_err(&hdev->pdev->dev,
5841 "add mac ethertype failed for key conflict.\n");
5842 return_status = -EIO;
5845 dev_err(&hdev->pdev->dev,
5846 "add mac ethertype failed for undefined, code=%d.\n",
5848 return_status = -EIO;
5851 return return_status;
5854 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
5855 const struct hclge_mac_mgr_tbl_entry_cmd *req)
5857 struct hclge_desc desc;
5862 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
5863 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
5865 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5867 dev_err(&hdev->pdev->dev,
5868 "add mac ethertype failed for cmd_send, ret =%d.\n",
5873 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5874 retval = le16_to_cpu(desc.retval);
5876 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
5879 static int init_mgr_tbl(struct hclge_dev *hdev)
5884 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
5885 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
5887 dev_err(&hdev->pdev->dev,
5888 "add mac ethertype failed, ret =%d.\n",
5897 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
5899 struct hclge_vport *vport = hclge_get_vport(handle);
5900 struct hclge_dev *hdev = vport->back;
5902 ether_addr_copy(p, hdev->hw.mac.mac_addr);
5905 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
5908 const unsigned char *new_addr = (const unsigned char *)p;
5909 struct hclge_vport *vport = hclge_get_vport(handle);
5910 struct hclge_dev *hdev = vport->back;
5913 /* mac addr check */
5914 if (is_zero_ether_addr(new_addr) ||
5915 is_broadcast_ether_addr(new_addr) ||
5916 is_multicast_ether_addr(new_addr)) {
5917 dev_err(&hdev->pdev->dev,
5918 "Change uc mac err! invalid mac:%p.\n",
5923 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
5924 dev_warn(&hdev->pdev->dev,
5925 "remove old uc mac address fail.\n");
5927 ret = hclge_add_uc_addr(handle, new_addr);
5929 dev_err(&hdev->pdev->dev,
5930 "add uc mac address fail, ret =%d.\n",
5934 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
5935 dev_err(&hdev->pdev->dev,
5936 "restore uc mac address fail.\n");
5941 ret = hclge_pause_addr_cfg(hdev, new_addr);
5943 dev_err(&hdev->pdev->dev,
5944 "configure mac pause address fail, ret =%d.\n",
5949 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
5954 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
5957 struct hclge_vport *vport = hclge_get_vport(handle);
5958 struct hclge_dev *hdev = vport->back;
5960 if (!hdev->hw.mac.phydev)
5963 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
5966 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
5967 u8 fe_type, bool filter_en)
5969 struct hclge_vlan_filter_ctrl_cmd *req;
5970 struct hclge_desc desc;
5973 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
5975 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
5976 req->vlan_type = vlan_type;
5977 req->vlan_fe = filter_en ? fe_type : 0;
5979 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5981 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
5987 #define HCLGE_FILTER_TYPE_VF 0
5988 #define HCLGE_FILTER_TYPE_PORT 1
5989 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
5990 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
5991 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
5992 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
5993 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
5994 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
5995 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
5996 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
5997 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
5999 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6001 struct hclge_vport *vport = hclge_get_vport(handle);
6002 struct hclge_dev *hdev = vport->back;
6004 if (hdev->pdev->revision >= 0x21) {
6005 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6006 HCLGE_FILTER_FE_EGRESS, enable);
6007 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6008 HCLGE_FILTER_FE_INGRESS, enable);
6010 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6011 HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6014 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6016 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6019 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6020 bool is_kill, u16 vlan, u8 qos,
6023 #define HCLGE_MAX_VF_BYTES 16
6024 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6025 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6026 struct hclge_desc desc[2];
6031 hclge_cmd_setup_basic_desc(&desc[0],
6032 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6033 hclge_cmd_setup_basic_desc(&desc[1],
6034 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6036 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6038 vf_byte_off = vfid / 8;
6039 vf_byte_val = 1 << (vfid % 8);
6041 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6042 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6044 req0->vlan_id = cpu_to_le16(vlan);
6045 req0->vlan_cfg = is_kill;
6047 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6048 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6050 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6052 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6054 dev_err(&hdev->pdev->dev,
6055 "Send vf vlan command fail, ret =%d.\n",
6061 #define HCLGE_VF_VLAN_NO_ENTRY 2
6062 if (!req0->resp_code || req0->resp_code == 1)
6065 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6066 dev_warn(&hdev->pdev->dev,
6067 "vf vlan table is full, vf vlan filter is disabled\n");
6071 dev_err(&hdev->pdev->dev,
6072 "Add vf vlan filter fail, ret =%d.\n",
6075 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6076 if (!req0->resp_code)
6079 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6080 dev_warn(&hdev->pdev->dev,
6081 "vlan %d filter is not in vf vlan table\n",
6086 dev_err(&hdev->pdev->dev,
6087 "Kill vf vlan filter fail, ret =%d.\n",
6094 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6095 u16 vlan_id, bool is_kill)
6097 struct hclge_vlan_filter_pf_cfg_cmd *req;
6098 struct hclge_desc desc;
6099 u8 vlan_offset_byte_val;
6100 u8 vlan_offset_byte;
6104 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6106 vlan_offset_160 = vlan_id / 160;
6107 vlan_offset_byte = (vlan_id % 160) / 8;
6108 vlan_offset_byte_val = 1 << (vlan_id % 8);
6110 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6111 req->vlan_offset = vlan_offset_160;
6112 req->vlan_cfg = is_kill;
6113 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6115 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6117 dev_err(&hdev->pdev->dev,
6118 "port vlan command, send fail, ret =%d.\n", ret);
6122 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6123 u16 vport_id, u16 vlan_id, u8 qos,
6126 u16 vport_idx, vport_num = 0;
6129 if (is_kill && !vlan_id)
6132 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6135 dev_err(&hdev->pdev->dev,
6136 "Set %d vport vlan filter config fail, ret =%d.\n",
6141 /* vlan 0 may be added twice when 8021q module is enabled */
6142 if (!is_kill && !vlan_id &&
6143 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6146 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6147 dev_err(&hdev->pdev->dev,
6148 "Add port vlan failed, vport %d is already in vlan %d\n",
6154 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6155 dev_err(&hdev->pdev->dev,
6156 "Delete port vlan failed, vport %d is not in vlan %d\n",
6161 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6164 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6165 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6171 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6172 u16 vlan_id, bool is_kill)
6174 struct hclge_vport *vport = hclge_get_vport(handle);
6175 struct hclge_dev *hdev = vport->back;
6177 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6181 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6182 u16 vlan, u8 qos, __be16 proto)
6184 struct hclge_vport *vport = hclge_get_vport(handle);
6185 struct hclge_dev *hdev = vport->back;
6187 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6189 if (proto != htons(ETH_P_8021Q))
6190 return -EPROTONOSUPPORT;
6192 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6195 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6197 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6198 struct hclge_vport_vtag_tx_cfg_cmd *req;
6199 struct hclge_dev *hdev = vport->back;
6200 struct hclge_desc desc;
6203 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6205 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6206 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6207 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6208 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6209 vcfg->accept_tag1 ? 1 : 0);
6210 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6211 vcfg->accept_untag1 ? 1 : 0);
6212 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6213 vcfg->accept_tag2 ? 1 : 0);
6214 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6215 vcfg->accept_untag2 ? 1 : 0);
6216 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6217 vcfg->insert_tag1_en ? 1 : 0);
6218 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6219 vcfg->insert_tag2_en ? 1 : 0);
6220 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6222 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6223 req->vf_bitmap[req->vf_offset] =
6224 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6226 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6228 dev_err(&hdev->pdev->dev,
6229 "Send port txvlan cfg command fail, ret =%d\n",
6235 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6237 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6238 struct hclge_vport_vtag_rx_cfg_cmd *req;
6239 struct hclge_dev *hdev = vport->back;
6240 struct hclge_desc desc;
6243 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6245 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6246 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6247 vcfg->strip_tag1_en ? 1 : 0);
6248 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6249 vcfg->strip_tag2_en ? 1 : 0);
6250 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6251 vcfg->vlan1_vlan_prionly ? 1 : 0);
6252 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6253 vcfg->vlan2_vlan_prionly ? 1 : 0);
6255 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6256 req->vf_bitmap[req->vf_offset] =
6257 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6259 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6261 dev_err(&hdev->pdev->dev,
6262 "Send port rxvlan cfg command fail, ret =%d\n",
6268 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6270 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6271 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6272 struct hclge_desc desc;
6275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6276 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6277 rx_req->ot_fst_vlan_type =
6278 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6279 rx_req->ot_sec_vlan_type =
6280 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6281 rx_req->in_fst_vlan_type =
6282 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6283 rx_req->in_sec_vlan_type =
6284 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6286 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6288 dev_err(&hdev->pdev->dev,
6289 "Send rxvlan protocol type command fail, ret =%d\n",
6294 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6296 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6297 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6298 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6300 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6302 dev_err(&hdev->pdev->dev,
6303 "Send txvlan protocol type command fail, ret =%d\n",
6309 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6311 #define HCLGE_DEF_VLAN_TYPE 0x8100
6313 struct hnae3_handle *handle = &hdev->vport[0].nic;
6314 struct hclge_vport *vport;
6318 if (hdev->pdev->revision >= 0x21) {
6319 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6320 HCLGE_FILTER_FE_EGRESS, true);
6324 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6325 HCLGE_FILTER_FE_INGRESS, true);
6329 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6330 HCLGE_FILTER_FE_EGRESS_V1_B,
6336 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6338 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6339 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6340 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6341 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6342 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6343 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6345 ret = hclge_set_vlan_protocol_type(hdev);
6349 for (i = 0; i < hdev->num_alloc_vport; i++) {
6350 vport = &hdev->vport[i];
6351 vport->txvlan_cfg.accept_tag1 = true;
6352 vport->txvlan_cfg.accept_untag1 = true;
6354 /* accept_tag2 and accept_untag2 are not supported on
6355 * pdev revision(0x20), new revision support them. The
6356 * value of this two fields will not return error when driver
6357 * send command to fireware in revision(0x20).
6358 * This two fields can not configured by user.
6360 vport->txvlan_cfg.accept_tag2 = true;
6361 vport->txvlan_cfg.accept_untag2 = true;
6363 vport->txvlan_cfg.insert_tag1_en = false;
6364 vport->txvlan_cfg.insert_tag2_en = false;
6365 vport->txvlan_cfg.default_tag1 = 0;
6366 vport->txvlan_cfg.default_tag2 = 0;
6368 ret = hclge_set_vlan_tx_offload_cfg(vport);
6372 vport->rxvlan_cfg.strip_tag1_en = false;
6373 vport->rxvlan_cfg.strip_tag2_en = true;
6374 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6375 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6377 ret = hclge_set_vlan_rx_offload_cfg(vport);
6382 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6385 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6387 struct hclge_vport *vport = hclge_get_vport(handle);
6389 vport->rxvlan_cfg.strip_tag1_en = false;
6390 vport->rxvlan_cfg.strip_tag2_en = enable;
6391 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6392 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6394 return hclge_set_vlan_rx_offload_cfg(vport);
6397 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6399 struct hclge_config_max_frm_size_cmd *req;
6400 struct hclge_desc desc;
6402 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6404 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6405 req->max_frm_size = cpu_to_le16(new_mps);
6406 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6408 return hclge_cmd_send(&hdev->hw, &desc, 1);
6411 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6413 struct hclge_vport *vport = hclge_get_vport(handle);
6415 return hclge_set_vport_mtu(vport, new_mtu);
6418 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6420 struct hclge_dev *hdev = vport->back;
6421 int i, max_frm_size, ret = 0;
6423 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6424 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6425 max_frm_size > HCLGE_MAC_MAX_FRAME)
6428 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6429 mutex_lock(&hdev->vport_lock);
6430 /* VF's mps must fit within hdev->mps */
6431 if (vport->vport_id && max_frm_size > hdev->mps) {
6432 mutex_unlock(&hdev->vport_lock);
6434 } else if (vport->vport_id) {
6435 vport->mps = max_frm_size;
6436 mutex_unlock(&hdev->vport_lock);
6440 /* PF's mps must be greater then VF's mps */
6441 for (i = 1; i < hdev->num_alloc_vport; i++)
6442 if (max_frm_size < hdev->vport[i].mps) {
6443 mutex_unlock(&hdev->vport_lock);
6447 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6449 ret = hclge_set_mac_mtu(hdev, max_frm_size);
6451 dev_err(&hdev->pdev->dev,
6452 "Change mtu fail, ret =%d\n", ret);
6456 hdev->mps = max_frm_size;
6457 vport->mps = max_frm_size;
6459 ret = hclge_buffer_alloc(hdev);
6461 dev_err(&hdev->pdev->dev,
6462 "Allocate buffer fail, ret =%d\n", ret);
6465 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6466 mutex_unlock(&hdev->vport_lock);
6470 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6473 struct hclge_reset_tqp_queue_cmd *req;
6474 struct hclge_desc desc;
6477 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6479 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6480 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6481 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6483 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6485 dev_err(&hdev->pdev->dev,
6486 "Send tqp reset cmd error, status =%d\n", ret);
6493 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6495 struct hclge_reset_tqp_queue_cmd *req;
6496 struct hclge_desc desc;
6499 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6501 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6502 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6504 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6506 dev_err(&hdev->pdev->dev,
6507 "Get reset status error, status =%d\n", ret);
6511 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6514 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
6517 struct hnae3_queue *queue;
6518 struct hclge_tqp *tqp;
6520 queue = handle->kinfo.tqp[queue_id];
6521 tqp = container_of(queue, struct hclge_tqp, q);
6526 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6528 struct hclge_vport *vport = hclge_get_vport(handle);
6529 struct hclge_dev *hdev = vport->back;
6530 int reset_try_times = 0;
6535 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6537 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6539 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6543 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6545 dev_err(&hdev->pdev->dev,
6546 "Send reset tqp cmd fail, ret = %d\n", ret);
6550 reset_try_times = 0;
6551 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6552 /* Wait for tqp hw reset */
6554 reset_status = hclge_get_reset_status(hdev, queue_gid);
6559 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6560 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6564 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6566 dev_err(&hdev->pdev->dev,
6567 "Deassert the soft reset fail, ret = %d\n", ret);
6572 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6574 struct hclge_dev *hdev = vport->back;
6575 int reset_try_times = 0;
6580 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6582 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6584 dev_warn(&hdev->pdev->dev,
6585 "Send reset tqp cmd fail, ret = %d\n", ret);
6589 reset_try_times = 0;
6590 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6591 /* Wait for tqp hw reset */
6593 reset_status = hclge_get_reset_status(hdev, queue_gid);
6598 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6599 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6603 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6605 dev_warn(&hdev->pdev->dev,
6606 "Deassert the soft reset fail, ret = %d\n", ret);
6609 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6611 struct hclge_vport *vport = hclge_get_vport(handle);
6612 struct hclge_dev *hdev = vport->back;
6614 return hdev->fw_version;
6617 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6619 struct phy_device *phydev = hdev->hw.mac.phydev;
6624 phy_set_asym_pause(phydev, rx_en, tx_en);
6627 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6632 hdev->fc_mode_last_time = HCLGE_FC_FULL;
6633 else if (rx_en && !tx_en)
6634 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6635 else if (!rx_en && tx_en)
6636 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6638 hdev->fc_mode_last_time = HCLGE_FC_NONE;
6640 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6643 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6645 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6650 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
6655 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6657 struct phy_device *phydev = hdev->hw.mac.phydev;
6658 u16 remote_advertising = 0;
6659 u16 local_advertising = 0;
6660 u32 rx_pause, tx_pause;
6663 if (!phydev->link || !phydev->autoneg)
6666 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
6669 remote_advertising = LPA_PAUSE_CAP;
6671 if (phydev->asym_pause)
6672 remote_advertising |= LPA_PAUSE_ASYM;
6674 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6675 remote_advertising);
6676 tx_pause = flowctl & FLOW_CTRL_TX;
6677 rx_pause = flowctl & FLOW_CTRL_RX;
6679 if (phydev->duplex == HCLGE_MAC_HALF) {
6684 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6687 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6688 u32 *rx_en, u32 *tx_en)
6690 struct hclge_vport *vport = hclge_get_vport(handle);
6691 struct hclge_dev *hdev = vport->back;
6693 *auto_neg = hclge_get_autoneg(handle);
6695 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6701 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6704 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6707 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6716 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6717 u32 rx_en, u32 tx_en)
6719 struct hclge_vport *vport = hclge_get_vport(handle);
6720 struct hclge_dev *hdev = vport->back;
6721 struct phy_device *phydev = hdev->hw.mac.phydev;
6724 fc_autoneg = hclge_get_autoneg(handle);
6725 if (auto_neg != fc_autoneg) {
6726 dev_info(&hdev->pdev->dev,
6727 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6731 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6732 dev_info(&hdev->pdev->dev,
6733 "Priority flow control enabled. Cannot set link flow control.\n");
6737 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6740 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6742 /* Only support flow control negotiation for netdev with
6743 * phy attached for now.
6748 return phy_start_aneg(phydev);
6751 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6752 u8 *auto_neg, u32 *speed, u8 *duplex)
6754 struct hclge_vport *vport = hclge_get_vport(handle);
6755 struct hclge_dev *hdev = vport->back;
6758 *speed = hdev->hw.mac.speed;
6760 *duplex = hdev->hw.mac.duplex;
6762 *auto_neg = hdev->hw.mac.autoneg;
6765 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
6767 struct hclge_vport *vport = hclge_get_vport(handle);
6768 struct hclge_dev *hdev = vport->back;
6771 *media_type = hdev->hw.mac.media_type;
6774 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
6775 u8 *tp_mdix_ctrl, u8 *tp_mdix)
6777 struct hclge_vport *vport = hclge_get_vport(handle);
6778 struct hclge_dev *hdev = vport->back;
6779 struct phy_device *phydev = hdev->hw.mac.phydev;
6780 int mdix_ctrl, mdix, retval, is_resolved;
6783 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6784 *tp_mdix = ETH_TP_MDI_INVALID;
6788 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
6790 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
6791 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
6792 HCLGE_PHY_MDIX_CTRL_S);
6794 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
6795 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
6796 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
6798 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
6800 switch (mdix_ctrl) {
6802 *tp_mdix_ctrl = ETH_TP_MDI;
6805 *tp_mdix_ctrl = ETH_TP_MDI_X;
6808 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
6811 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6816 *tp_mdix = ETH_TP_MDI_INVALID;
6818 *tp_mdix = ETH_TP_MDI_X;
6820 *tp_mdix = ETH_TP_MDI;
6823 static int hclge_init_instance_hw(struct hclge_dev *hdev)
6825 return hclge_mac_connect_phy(hdev);
6828 static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
6830 hclge_mac_disconnect_phy(hdev);
6833 static int hclge_init_client_instance(struct hnae3_client *client,
6834 struct hnae3_ae_dev *ae_dev)
6836 struct hclge_dev *hdev = ae_dev->priv;
6837 struct hclge_vport *vport;
6840 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6841 vport = &hdev->vport[i];
6843 switch (client->type) {
6844 case HNAE3_CLIENT_KNIC:
6846 hdev->nic_client = client;
6847 vport->nic.client = client;
6848 ret = client->ops->init_instance(&vport->nic);
6852 ret = hclge_init_instance_hw(hdev);
6854 client->ops->uninit_instance(&vport->nic,
6859 hnae3_set_client_init_flag(client, ae_dev, 1);
6861 if (hdev->roce_client &&
6862 hnae3_dev_roce_supported(hdev)) {
6863 struct hnae3_client *rc = hdev->roce_client;
6865 ret = hclge_init_roce_base_info(vport);
6869 ret = rc->ops->init_instance(&vport->roce);
6873 hnae3_set_client_init_flag(hdev->roce_client,
6878 case HNAE3_CLIENT_UNIC:
6879 hdev->nic_client = client;
6880 vport->nic.client = client;
6882 ret = client->ops->init_instance(&vport->nic);
6886 hnae3_set_client_init_flag(client, ae_dev, 1);
6889 case HNAE3_CLIENT_ROCE:
6890 if (hnae3_dev_roce_supported(hdev)) {
6891 hdev->roce_client = client;
6892 vport->roce.client = client;
6895 if (hdev->roce_client && hdev->nic_client) {
6896 ret = hclge_init_roce_base_info(vport);
6900 ret = client->ops->init_instance(&vport->roce);
6904 hnae3_set_client_init_flag(client, ae_dev, 1);
6916 hdev->nic_client = NULL;
6917 vport->nic.client = NULL;
6920 hdev->roce_client = NULL;
6921 vport->roce.client = NULL;
6925 static void hclge_uninit_client_instance(struct hnae3_client *client,
6926 struct hnae3_ae_dev *ae_dev)
6928 struct hclge_dev *hdev = ae_dev->priv;
6929 struct hclge_vport *vport;
6932 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6933 vport = &hdev->vport[i];
6934 if (hdev->roce_client) {
6935 hdev->roce_client->ops->uninit_instance(&vport->roce,
6937 hdev->roce_client = NULL;
6938 vport->roce.client = NULL;
6940 if (client->type == HNAE3_CLIENT_ROCE)
6942 if (hdev->nic_client && client->ops->uninit_instance) {
6943 hclge_uninit_instance_hw(hdev);
6944 client->ops->uninit_instance(&vport->nic, 0);
6945 hdev->nic_client = NULL;
6946 vport->nic.client = NULL;
6951 static int hclge_pci_init(struct hclge_dev *hdev)
6953 struct pci_dev *pdev = hdev->pdev;
6954 struct hclge_hw *hw;
6957 ret = pci_enable_device(pdev);
6959 dev_err(&pdev->dev, "failed to enable PCI device\n");
6963 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6965 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6968 "can't set consistent PCI DMA");
6969 goto err_disable_device;
6971 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
6974 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
6976 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
6977 goto err_disable_device;
6980 pci_set_master(pdev);
6982 hw->io_base = pcim_iomap(pdev, 2, 0);
6984 dev_err(&pdev->dev, "Can't map configuration register space\n");
6986 goto err_clr_master;
6989 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
6993 pci_clear_master(pdev);
6994 pci_release_regions(pdev);
6996 pci_disable_device(pdev);
7001 static void hclge_pci_uninit(struct hclge_dev *hdev)
7003 struct pci_dev *pdev = hdev->pdev;
7005 pcim_iounmap(pdev, hdev->hw.io_base);
7006 pci_free_irq_vectors(pdev);
7007 pci_clear_master(pdev);
7008 pci_release_mem_regions(pdev);
7009 pci_disable_device(pdev);
7012 static void hclge_state_init(struct hclge_dev *hdev)
7014 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7015 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7016 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7017 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7018 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7019 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7022 static void hclge_state_uninit(struct hclge_dev *hdev)
7024 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7026 if (hdev->service_timer.function)
7027 del_timer_sync(&hdev->service_timer);
7028 if (hdev->reset_timer.function)
7029 del_timer_sync(&hdev->reset_timer);
7030 if (hdev->service_task.func)
7031 cancel_work_sync(&hdev->service_task);
7032 if (hdev->rst_service_task.func)
7033 cancel_work_sync(&hdev->rst_service_task);
7034 if (hdev->mbx_service_task.func)
7035 cancel_work_sync(&hdev->mbx_service_task);
7038 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7040 #define HCLGE_FLR_WAIT_MS 100
7041 #define HCLGE_FLR_WAIT_CNT 50
7042 struct hclge_dev *hdev = ae_dev->priv;
7045 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7046 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7047 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7048 hclge_reset_event(hdev->pdev, NULL);
7050 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7051 cnt++ < HCLGE_FLR_WAIT_CNT)
7052 msleep(HCLGE_FLR_WAIT_MS);
7054 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7055 dev_err(&hdev->pdev->dev,
7056 "flr wait down timeout: %d\n", cnt);
7059 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7061 struct hclge_dev *hdev = ae_dev->priv;
7063 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7066 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7068 struct pci_dev *pdev = ae_dev->pdev;
7069 struct hclge_dev *hdev;
7072 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7079 hdev->ae_dev = ae_dev;
7080 hdev->reset_type = HNAE3_NONE_RESET;
7081 hdev->reset_level = HNAE3_FUNC_RESET;
7082 ae_dev->priv = hdev;
7083 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7085 mutex_init(&hdev->vport_lock);
7087 ret = hclge_pci_init(hdev);
7089 dev_err(&pdev->dev, "PCI init failed\n");
7093 /* Firmware command queue initialize */
7094 ret = hclge_cmd_queue_init(hdev);
7096 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7097 goto err_pci_uninit;
7100 /* Firmware command initialize */
7101 ret = hclge_cmd_init(hdev);
7103 goto err_cmd_uninit;
7105 ret = hclge_get_cap(hdev);
7107 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7109 goto err_cmd_uninit;
7112 ret = hclge_configure(hdev);
7114 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7115 goto err_cmd_uninit;
7118 ret = hclge_init_msi(hdev);
7120 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7121 goto err_cmd_uninit;
7124 ret = hclge_misc_irq_init(hdev);
7127 "Misc IRQ(vector0) init error, ret = %d.\n",
7129 goto err_msi_uninit;
7132 ret = hclge_alloc_tqps(hdev);
7134 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7135 goto err_msi_irq_uninit;
7138 ret = hclge_alloc_vport(hdev);
7140 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7141 goto err_msi_irq_uninit;
7144 ret = hclge_map_tqp(hdev);
7146 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7147 goto err_msi_irq_uninit;
7150 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7151 ret = hclge_mac_mdio_config(hdev);
7153 dev_err(&hdev->pdev->dev,
7154 "mdio config fail ret=%d\n", ret);
7155 goto err_msi_irq_uninit;
7159 ret = hclge_init_umv_space(hdev);
7161 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7162 goto err_msi_irq_uninit;
7165 ret = hclge_mac_init(hdev);
7167 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7168 goto err_mdiobus_unreg;
7171 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7173 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7174 goto err_mdiobus_unreg;
7177 ret = hclge_config_gro(hdev, true);
7179 goto err_mdiobus_unreg;
7181 ret = hclge_init_vlan_config(hdev);
7183 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7184 goto err_mdiobus_unreg;
7187 ret = hclge_tm_schd_init(hdev);
7189 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7190 goto err_mdiobus_unreg;
7193 hclge_rss_init_cfg(hdev);
7194 ret = hclge_rss_init_hw(hdev);
7196 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7197 goto err_mdiobus_unreg;
7200 ret = init_mgr_tbl(hdev);
7202 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7203 goto err_mdiobus_unreg;
7206 ret = hclge_init_fd_config(hdev);
7209 "fd table init fail, ret=%d\n", ret);
7210 goto err_mdiobus_unreg;
7213 ret = hclge_hw_error_set_state(hdev, true);
7216 "hw error interrupts enable failed, ret =%d\n", ret);
7217 goto err_mdiobus_unreg;
7220 hclge_dcb_ops_set(hdev);
7222 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7223 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7224 INIT_WORK(&hdev->service_task, hclge_service_task);
7225 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7226 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7228 hclge_clear_all_event_cause(hdev);
7230 /* Enable MISC vector(vector0) */
7231 hclge_enable_vector(&hdev->misc_vector, true);
7233 hclge_state_init(hdev);
7234 hdev->last_reset_time = jiffies;
7236 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7240 if (hdev->hw.mac.phydev)
7241 mdiobus_unregister(hdev->hw.mac.mdio_bus);
7243 hclge_misc_irq_uninit(hdev);
7245 pci_free_irq_vectors(pdev);
7247 hclge_destroy_cmd_queue(&hdev->hw);
7249 pcim_iounmap(pdev, hdev->hw.io_base);
7250 pci_clear_master(pdev);
7251 pci_release_regions(pdev);
7252 pci_disable_device(pdev);
7257 static void hclge_stats_clear(struct hclge_dev *hdev)
7259 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7262 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7264 struct hclge_vport *vport = hdev->vport;
7267 for (i = 0; i < hdev->num_alloc_vport; i++) {
7268 hclge_vport_start(vport);
7273 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7275 struct hclge_dev *hdev = ae_dev->priv;
7276 struct pci_dev *pdev = ae_dev->pdev;
7279 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7281 hclge_stats_clear(hdev);
7282 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7284 ret = hclge_cmd_init(hdev);
7286 dev_err(&pdev->dev, "Cmd queue init failed\n");
7290 ret = hclge_get_cap(hdev);
7292 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7297 ret = hclge_configure(hdev);
7299 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7303 ret = hclge_map_tqp(hdev);
7305 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7309 hclge_reset_umv_space(hdev);
7311 ret = hclge_mac_init(hdev);
7313 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7317 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7319 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7323 ret = hclge_config_gro(hdev, true);
7327 ret = hclge_init_vlan_config(hdev);
7329 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7333 ret = hclge_tm_init_hw(hdev);
7335 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7339 ret = hclge_rss_init_hw(hdev);
7341 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7345 ret = hclge_init_fd_config(hdev);
7348 "fd table init fail, ret=%d\n", ret);
7352 /* Re-enable the TM hw error interrupts because
7353 * they get disabled on core/global reset.
7355 if (hclge_enable_tm_hw_error(hdev, true))
7356 dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
7358 hclge_reset_vport_state(hdev);
7360 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7366 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7368 struct hclge_dev *hdev = ae_dev->priv;
7369 struct hclge_mac *mac = &hdev->hw.mac;
7371 hclge_state_uninit(hdev);
7374 mdiobus_unregister(mac->mdio_bus);
7376 hclge_uninit_umv_space(hdev);
7378 /* Disable MISC vector(vector0) */
7379 hclge_enable_vector(&hdev->misc_vector, false);
7380 synchronize_irq(hdev->misc_vector.vector_irq);
7382 hclge_hw_error_set_state(hdev, false);
7383 hclge_destroy_cmd_queue(&hdev->hw);
7384 hclge_misc_irq_uninit(hdev);
7385 hclge_pci_uninit(hdev);
7386 mutex_destroy(&hdev->vport_lock);
7387 ae_dev->priv = NULL;
7390 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7392 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7393 struct hclge_vport *vport = hclge_get_vport(handle);
7394 struct hclge_dev *hdev = vport->back;
7396 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
7399 static void hclge_get_channels(struct hnae3_handle *handle,
7400 struct ethtool_channels *ch)
7402 struct hclge_vport *vport = hclge_get_vport(handle);
7404 ch->max_combined = hclge_get_max_channels(handle);
7405 ch->other_count = 1;
7407 ch->combined_count = vport->alloc_tqps;
7410 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7411 u16 *alloc_tqps, u16 *max_rss_size)
7413 struct hclge_vport *vport = hclge_get_vport(handle);
7414 struct hclge_dev *hdev = vport->back;
7416 *alloc_tqps = vport->alloc_tqps;
7417 *max_rss_size = hdev->rss_size_max;
7420 static void hclge_release_tqp(struct hclge_vport *vport)
7422 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7423 struct hclge_dev *hdev = vport->back;
7426 for (i = 0; i < kinfo->num_tqps; i++) {
7427 struct hclge_tqp *tqp =
7428 container_of(kinfo->tqp[i], struct hclge_tqp, q);
7430 tqp->q.handle = NULL;
7431 tqp->q.tqp_index = 0;
7432 tqp->alloced = false;
7435 devm_kfree(&hdev->pdev->dev, kinfo->tqp);
7439 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
7441 struct hclge_vport *vport = hclge_get_vport(handle);
7442 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7443 struct hclge_dev *hdev = vport->back;
7444 int cur_rss_size = kinfo->rss_size;
7445 int cur_tqps = kinfo->num_tqps;
7446 u16 tc_offset[HCLGE_MAX_TC_NUM];
7447 u16 tc_valid[HCLGE_MAX_TC_NUM];
7448 u16 tc_size[HCLGE_MAX_TC_NUM];
7453 /* Free old tqps, and reallocate with new tqp number when nic setup */
7454 hclge_release_tqp(vport);
7456 ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
7458 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
7462 ret = hclge_map_tqp_to_vport(hdev, vport);
7464 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
7468 ret = hclge_tm_schd_init(hdev);
7470 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
7474 roundup_size = roundup_pow_of_two(kinfo->rss_size);
7475 roundup_size = ilog2(roundup_size);
7476 /* Set the RSS TC mode according to the new RSS size */
7477 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7480 if (!(hdev->hw_tc_map & BIT(i)))
7484 tc_size[i] = roundup_size;
7485 tc_offset[i] = kinfo->rss_size * i;
7487 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7491 /* Reinitializes the rss indirect table according to the new RSS size */
7492 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7496 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7497 rss_indir[i] = i % kinfo->rss_size;
7499 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7501 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7507 dev_info(&hdev->pdev->dev,
7508 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
7509 cur_rss_size, kinfo->rss_size,
7510 cur_tqps, kinfo->rss_size * kinfo->num_tc);
7515 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7516 u32 *regs_num_64_bit)
7518 struct hclge_desc desc;
7522 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7523 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7525 dev_err(&hdev->pdev->dev,
7526 "Query register number cmd failed, ret = %d.\n", ret);
7530 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
7531 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
7533 total_num = *regs_num_32_bit + *regs_num_64_bit;
7540 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7543 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7545 struct hclge_desc *desc;
7546 u32 *reg_val = data;
7555 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
7556 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7560 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
7561 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7563 dev_err(&hdev->pdev->dev,
7564 "Query 32 bit register cmd failed, ret = %d.\n", ret);
7569 for (i = 0; i < cmd_num; i++) {
7571 desc_data = (__le32 *)(&desc[i].data[0]);
7572 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
7574 desc_data = (__le32 *)(&desc[i]);
7575 n = HCLGE_32_BIT_REG_RTN_DATANUM;
7577 for (k = 0; k < n; k++) {
7578 *reg_val++ = le32_to_cpu(*desc_data++);
7590 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7593 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
7595 struct hclge_desc *desc;
7596 u64 *reg_val = data;
7605 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
7606 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7610 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
7611 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7613 dev_err(&hdev->pdev->dev,
7614 "Query 64 bit register cmd failed, ret = %d.\n", ret);
7619 for (i = 0; i < cmd_num; i++) {
7621 desc_data = (__le64 *)(&desc[i].data[0]);
7622 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
7624 desc_data = (__le64 *)(&desc[i]);
7625 n = HCLGE_64_BIT_REG_RTN_DATANUM;
7627 for (k = 0; k < n; k++) {
7628 *reg_val++ = le64_to_cpu(*desc_data++);
7640 static int hclge_get_regs_len(struct hnae3_handle *handle)
7642 struct hclge_vport *vport = hclge_get_vport(handle);
7643 struct hclge_dev *hdev = vport->back;
7644 u32 regs_num_32_bit, regs_num_64_bit;
7647 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
7649 dev_err(&hdev->pdev->dev,
7650 "Get register number failed, ret = %d.\n", ret);
7654 return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
7657 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
7660 struct hclge_vport *vport = hclge_get_vport(handle);
7661 struct hclge_dev *hdev = vport->back;
7662 u32 regs_num_32_bit, regs_num_64_bit;
7665 *version = hdev->fw_version;
7667 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
7669 dev_err(&hdev->pdev->dev,
7670 "Get register number failed, ret = %d.\n", ret);
7674 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
7676 dev_err(&hdev->pdev->dev,
7677 "Get 32 bit register failed, ret = %d.\n", ret);
7681 data = (u32 *)data + regs_num_32_bit;
7682 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
7685 dev_err(&hdev->pdev->dev,
7686 "Get 64 bit register failed, ret = %d.\n", ret);
7689 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
7691 struct hclge_set_led_state_cmd *req;
7692 struct hclge_desc desc;
7695 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
7697 req = (struct hclge_set_led_state_cmd *)desc.data;
7698 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
7699 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
7701 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7703 dev_err(&hdev->pdev->dev,
7704 "Send set led state cmd error, ret =%d\n", ret);
7709 enum hclge_led_status {
7712 HCLGE_LED_NO_CHANGE = 0xFF,
7715 static int hclge_set_led_id(struct hnae3_handle *handle,
7716 enum ethtool_phys_id_state status)
7718 struct hclge_vport *vport = hclge_get_vport(handle);
7719 struct hclge_dev *hdev = vport->back;
7722 case ETHTOOL_ID_ACTIVE:
7723 return hclge_set_led_status(hdev, HCLGE_LED_ON);
7724 case ETHTOOL_ID_INACTIVE:
7725 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
7731 static void hclge_get_link_mode(struct hnae3_handle *handle,
7732 unsigned long *supported,
7733 unsigned long *advertising)
7735 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
7736 struct hclge_vport *vport = hclge_get_vport(handle);
7737 struct hclge_dev *hdev = vport->back;
7738 unsigned int idx = 0;
7740 for (; idx < size; idx++) {
7741 supported[idx] = hdev->hw.mac.supported[idx];
7742 advertising[idx] = hdev->hw.mac.advertising[idx];
7746 static int hclge_gro_en(struct hnae3_handle *handle, int enable)
7748 struct hclge_vport *vport = hclge_get_vport(handle);
7749 struct hclge_dev *hdev = vport->back;
7751 return hclge_config_gro(hdev, enable);
7754 static const struct hnae3_ae_ops hclge_ops = {
7755 .init_ae_dev = hclge_init_ae_dev,
7756 .uninit_ae_dev = hclge_uninit_ae_dev,
7757 .flr_prepare = hclge_flr_prepare,
7758 .flr_done = hclge_flr_done,
7759 .init_client_instance = hclge_init_client_instance,
7760 .uninit_client_instance = hclge_uninit_client_instance,
7761 .map_ring_to_vector = hclge_map_ring_to_vector,
7762 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
7763 .get_vector = hclge_get_vector,
7764 .put_vector = hclge_put_vector,
7765 .set_promisc_mode = hclge_set_promisc_mode,
7766 .set_loopback = hclge_set_loopback,
7767 .start = hclge_ae_start,
7768 .stop = hclge_ae_stop,
7769 .client_start = hclge_client_start,
7770 .client_stop = hclge_client_stop,
7771 .get_status = hclge_get_status,
7772 .get_ksettings_an_result = hclge_get_ksettings_an_result,
7773 .update_speed_duplex_h = hclge_update_speed_duplex_h,
7774 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
7775 .get_media_type = hclge_get_media_type,
7776 .get_rss_key_size = hclge_get_rss_key_size,
7777 .get_rss_indir_size = hclge_get_rss_indir_size,
7778 .get_rss = hclge_get_rss,
7779 .set_rss = hclge_set_rss,
7780 .set_rss_tuple = hclge_set_rss_tuple,
7781 .get_rss_tuple = hclge_get_rss_tuple,
7782 .get_tc_size = hclge_get_tc_size,
7783 .get_mac_addr = hclge_get_mac_addr,
7784 .set_mac_addr = hclge_set_mac_addr,
7785 .do_ioctl = hclge_do_ioctl,
7786 .add_uc_addr = hclge_add_uc_addr,
7787 .rm_uc_addr = hclge_rm_uc_addr,
7788 .add_mc_addr = hclge_add_mc_addr,
7789 .rm_mc_addr = hclge_rm_mc_addr,
7790 .set_autoneg = hclge_set_autoneg,
7791 .get_autoneg = hclge_get_autoneg,
7792 .get_pauseparam = hclge_get_pauseparam,
7793 .set_pauseparam = hclge_set_pauseparam,
7794 .set_mtu = hclge_set_mtu,
7795 .reset_queue = hclge_reset_tqp,
7796 .get_stats = hclge_get_stats,
7797 .update_stats = hclge_update_stats,
7798 .get_strings = hclge_get_strings,
7799 .get_sset_count = hclge_get_sset_count,
7800 .get_fw_version = hclge_get_fw_version,
7801 .get_mdix_mode = hclge_get_mdix_mode,
7802 .enable_vlan_filter = hclge_enable_vlan_filter,
7803 .set_vlan_filter = hclge_set_vlan_filter,
7804 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
7805 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
7806 .reset_event = hclge_reset_event,
7807 .set_default_reset_request = hclge_set_def_reset_request,
7808 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
7809 .set_channels = hclge_set_channels,
7810 .get_channels = hclge_get_channels,
7811 .get_regs_len = hclge_get_regs_len,
7812 .get_regs = hclge_get_regs,
7813 .set_led_id = hclge_set_led_id,
7814 .get_link_mode = hclge_get_link_mode,
7815 .add_fd_entry = hclge_add_fd_entry,
7816 .del_fd_entry = hclge_del_fd_entry,
7817 .del_all_fd_entries = hclge_del_all_fd_entries,
7818 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
7819 .get_fd_rule_info = hclge_get_fd_rule_info,
7820 .get_fd_all_rules = hclge_get_all_rules,
7821 .restore_fd_rules = hclge_restore_fd_entries,
7822 .enable_fd = hclge_enable_fd,
7823 .dbg_run_cmd = hclge_dbg_run_cmd,
7824 .process_hw_error = hclge_process_ras_hw_error,
7825 .get_hw_reset_stat = hclge_get_hw_reset_stat,
7826 .ae_dev_resetting = hclge_ae_dev_resetting,
7827 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
7828 .set_gro_en = hclge_gro_en,
7831 static struct hnae3_ae_algo ae_algo = {
7833 .pdev_id_table = ae_algo_pci_tbl,
7836 static int hclge_init(void)
7838 pr_info("%s is initializing\n", HCLGE_NAME);
7840 hnae3_register_ae_algo(&ae_algo);
7845 static void hclge_exit(void)
7847 hnae3_unregister_ae_algo(&ae_algo);
7849 module_init(hclge_init);
7850 module_exit(hclge_exit);
7852 MODULE_LICENSE("GPL");
7853 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
7854 MODULE_DESCRIPTION("HCLGE Driver");
7855 MODULE_VERSION(HCLGE_MOD_VERSION);