// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
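
/* The register address lists below are presumably consumed by the driver's
 * register dump path; they enumerate command-queue registers, common
 * misc/reset registers, per-ring registers and per-TQP interrupt registers
 * in the order they are reported.
 */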

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
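
/* Firmware exposes two ways to read MAC statistics: the legacy
 * HCLGE_OPC_STATS_MAC command below with a fixed descriptor count, and
 * HCLGE_OPC_STATS_MAC_ALL whose descriptor count is queried first via
 * hclge_mac_query_reg_num(). hclge_mac_update_stats() selects between
 * them depending on whether the firmware supports the query.
 */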

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
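
/* Read the complete MAC statistics block with one HCLGE_OPC_STATS_MAC_ALL
 * command; desc_num descriptors were previously sized from the register
 * count reported by firmware.
 */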

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
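
/* Query how many MAC statistics registers the firmware exposes and derive
 * the descriptor count needed to read them all. The arithmetic below is
 * equivalent to desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4): the first
 * descriptor carries three 64-bit counters beside the command head and
 * every further descriptor carries four.
 */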

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
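
/* Accumulate the RX then TX packet counters of every queue pair owned by
 * this handle, issuing one query command per queue.
 */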

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each queue pair contributes one TX and one RX counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
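
/* Read the PF's hardware resources from firmware: number of task queue
 * pairs, packet/TX/DV buffer sizes, and the MSI-X budget, including the
 * offset at which RoCE vectors start when RDMA is supported.
 */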

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length is expressed in units of 4 bytes when
		 * sent to hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev))
		hdev->fd_en = true;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Discontiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
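
/* Check whether rx_all (the RX buffer left after TX allocation) can hold
 * the per-TC private buffers plus the required shared buffer; if so, size
 * the shared buffer and derive its global and per-TC high/low waterlines.
 */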

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
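
/* The RX buffer calculation below tries progressively cheaper layouts:
 * maximal private buffers for every TC, reduced private buffers, then
 * dropping the private buffers of non-PFC TCs and finally of PFC TCs,
 * until the remaining shared buffer fits.
 */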
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
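
/* Allocate MSI/MSI-X vectors for the PF. NIC vectors occupy the range
 * below roce_base_msix_offset; RoCE vectors, when present, start at that
 * offset (see hclge_query_pf_resource()).
 */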

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
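
/* Half duplex is only meaningful at 10M/100M; force full duplex at any
 * higher speed.
 */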
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}
2238 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2240 struct hclge_vport *vport = hclge_get_vport(handle);
2241 struct hclge_dev *hdev = vport->back;
2243 if (!hdev->hw.mac.support_autoneg) {
2245 dev_err(&hdev->pdev->dev,
2246 "autoneg is not supported by current port\n");
2253 return hclge_set_autoneg_en(hdev, enable);
2256 static int hclge_get_autoneg(struct hnae3_handle *handle)
2258 struct hclge_vport *vport = hclge_get_vport(handle);
2259 struct hclge_dev *hdev = vport->back;
2260 struct phy_device *phydev = hdev->hw.mac.phydev;
2263 return phydev->autoneg;
2265 return hdev->hw.mac.autoneg;
2268 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2270 struct hclge_vport *vport = hclge_get_vport(handle);
2271 struct hclge_dev *hdev = vport->back;
2274 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2276 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2279 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2282 static int hclge_mac_init(struct hclge_dev *hdev)
2284 struct hclge_mac *mac = &hdev->hw.mac;
2287 hdev->support_sfp_query = true;
2288 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2289 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2290 hdev->hw.mac.duplex);
2292 dev_err(&hdev->pdev->dev,
2293 "Config mac speed dup fail ret=%d\n", ret);
2299 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2301 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2305 ret = hclge_buffer_alloc(hdev);
2307 dev_err(&hdev->pdev->dev,
2308 "allocate buffer fail, ret=%d\n", ret);
2313 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2315 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2316 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2317 schedule_work(&hdev->mbx_service_task);
2320 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2322 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2323 schedule_work(&hdev->rst_service_task);
2326 static void hclge_task_schedule(struct hclge_dev *hdev)
2328 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2329 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2330 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2331 (void)schedule_work(&hdev->service_task);
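/* The three schedule helpers above use test_and_set_bit() as an
 * "already scheduled" latch, so each service task is queued at most
 * once; the matching work handler clears the bit again (see
 * hclge_mailbox_service_task(), hclge_reset_service_task() and
 * hclge_service_complete() below).
 */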
2334 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2336 struct hclge_link_status_cmd *req;
2337 struct hclge_desc desc;
2341 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2342 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2344 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2349 req = (struct hclge_link_status_cmd *)desc.data;
2350 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2352 return !!link_status;
2355 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2360 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2363 mac_state = hclge_get_mac_link_status(hdev);
2365 if (hdev->hw.mac.phydev) {
2366 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2367 link_stat = mac_state &
2368 hdev->hw.mac.phydev->link;
2373 link_stat = mac_state;
2379 static void hclge_update_link_status(struct hclge_dev *hdev)
2381 struct hnae3_client *rclient = hdev->roce_client;
2382 struct hnae3_client *client = hdev->nic_client;
2383 struct hnae3_handle *rhandle;
2384 struct hnae3_handle *handle;
2390 state = hclge_get_mac_phy_link(hdev);
2391 if (state != hdev->hw.mac.link) {
2392 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2393 handle = &hdev->vport[i].nic;
2394 client->ops->link_status_change(handle, state);
2395 hclge_config_mac_tnl_int(hdev, state);
2396 rhandle = &hdev->vport[i].roce;
2397 if (rclient && rclient->ops->link_status_change)
2398 rclient->ops->link_status_change(rhandle,
2401 hdev->hw.mac.link = state;
2405 static void hclge_update_port_capability(struct hclge_mac *mac)
2407 /* firmware cannot identify the backplane type; the media type
2408 * read from the configuration can help deal with it
2410 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2411 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2412 mac->module_type = HNAE3_MODULE_TYPE_KR;
2413 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2414 mac->module_type = HNAE3_MODULE_TYPE_TP;
2416 if (mac->support_autoneg) {
2417 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2418 linkmode_copy(mac->advertising, mac->supported);
2420 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2422 linkmode_zero(mac->advertising);
2426 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2428 struct hclge_sfp_info_cmd *resp = NULL;
2429 struct hclge_desc desc;
2432 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2433 resp = (struct hclge_sfp_info_cmd *)desc.data;
2434 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2435 if (ret == -EOPNOTSUPP) {
2436 dev_warn(&hdev->pdev->dev,
2437 "IMP do not support get SFP speed %d\n", ret);
2440 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2444 *speed = le32_to_cpu(resp->speed);
2449 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2451 struct hclge_sfp_info_cmd *resp;
2452 struct hclge_desc desc;
2455 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2456 resp = (struct hclge_sfp_info_cmd *)desc.data;
2458 resp->query_type = QUERY_ACTIVE_SPEED;
2460 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2461 if (ret == -EOPNOTSUPP) {
2462 dev_warn(&hdev->pdev->dev,
2463 "IMP does not support get SFP info %d\n", ret);
2466 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2470 mac->speed = le32_to_cpu(resp->speed);
2471 /* if resp->speed_ability is 0, it means it is an old version
2472 * of firmware; do not update these params
2474 if (resp->speed_ability) {
2475 mac->module_type = le32_to_cpu(resp->module_type);
2476 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2477 mac->autoneg = resp->autoneg;
2478 mac->support_autoneg = resp->autoneg_ability;
2480 mac->speed_type = QUERY_SFP_SPEED;
2486 static int hclge_update_port_info(struct hclge_dev *hdev)
2488 struct hclge_mac *mac = &hdev->hw.mac;
2489 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2492 /* get the port info from SFP cmd if not copper port */
2493 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2496 /* if IMP does not support getting SFP/qSFP info, return directly */
2497 if (!hdev->support_sfp_query)
2500 if (hdev->pdev->revision >= 0x21)
2501 ret = hclge_get_sfp_info(hdev, mac);
2503 ret = hclge_get_sfp_speed(hdev, &speed);
2505 if (ret == -EOPNOTSUPP) {
2506 hdev->support_sfp_query = false;
2512 if (hdev->pdev->revision >= 0x21) {
2513 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2514 hclge_update_port_capability(mac);
2517 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2520 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2521 return 0; /* do nothing if no SFP */
2523 /* must configure full duplex for SFP */
2524 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
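/* Note the revision gate above: firmware on revision 0x21 and later
 * reports full SFP info (speed, module type, autoneg ability) via
 * hclge_get_sfp_info(), while older firmware only exposes the raw SFP
 * speed, which is then paired with forced full duplex.
 */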
2528 static int hclge_get_status(struct hnae3_handle *handle)
2530 struct hclge_vport *vport = hclge_get_vport(handle);
2531 struct hclge_dev *hdev = vport->back;
2533 hclge_update_link_status(hdev);
2535 return hdev->hw.mac.link;
2538 static void hclge_service_timer(struct timer_list *t)
2540 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2542 mod_timer(&hdev->service_timer, jiffies + HZ);
2543 hdev->hw_stats.stats_timer++;
2544 hclge_task_schedule(hdev);
2547 static void hclge_service_complete(struct hclge_dev *hdev)
2549 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2551 /* Flush memory before next watchdog */
2552 smp_mb__before_atomic();
2553 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
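/* The barrier above orders this pass's updates before the flag clear,
 * so once hclge_task_schedule() can queue new work it observes a
 * completed previous pass.
 */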
2556 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2558 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2560 /* fetch the events from their corresponding regs */
2561 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2562 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2563 msix_src_reg = hclge_read_dev(&hdev->hw,
2564 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2566 /* Assumption: if by any chance reset and mailbox events are reported
2567 * together then we will only process the reset event in this go and
2568 * will defer the processing of the mailbox events. Since we would not
2569 * have cleared the RX CMDQ event this time, we will receive another
2570 * interrupt from H/W just for the mailbox.
2573 /* check for vector0 reset event sources */
2574 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2575 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2576 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2577 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2578 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2579 hdev->rst_stats.imp_rst_cnt++;
2580 return HCLGE_VECTOR0_EVENT_RST;
2583 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2584 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2585 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2586 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2587 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2588 hdev->rst_stats.global_rst_cnt++;
2589 return HCLGE_VECTOR0_EVENT_RST;
2592 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2593 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2594 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2595 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2596 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2597 hdev->rst_stats.core_rst_cnt++;
2598 return HCLGE_VECTOR0_EVENT_RST;
2601 /* check for vector0 msix event source */
2602 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2603 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2605 return HCLGE_VECTOR0_EVENT_ERR;
2608 /* check for vector0 mailbox(=CMDQ RX) event source */
2609 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2610 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2611 *clearval = cmdq_src_reg;
2612 return HCLGE_VECTOR0_EVENT_MBX;
2615 /* print other vector0 event source */
2616 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2617 cmdq_src_reg, msix_src_reg);
2618 return HCLGE_VECTOR0_EVENT_OTHER;
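/* Note the priority implemented above: reset sources are decoded first,
 * then MSI-X error events, then the mailbox, so a pending reset always
 * wins over queued mailbox work within a single interrupt.
 */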
2621 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2624 switch (event_type) {
2625 case HCLGE_VECTOR0_EVENT_RST:
2626 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2628 case HCLGE_VECTOR0_EVENT_MBX:
2629 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2636 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2638 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2639 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2640 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2641 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2642 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2645 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2647 writel(enable ? 1 : 0, vector->addr);
2650 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2652 struct hclge_dev *hdev = data;
2656 hclge_enable_vector(&hdev->misc_vector, false);
2657 event_cause = hclge_check_event_cause(hdev, &clearval);
2659 /* vector 0 interrupt is shared with reset and mailbox source events. */
2660 switch (event_cause) {
2661 case HCLGE_VECTOR0_EVENT_ERR:
2662 /* we do not know what type of reset is required now. This could
2663 * only be decided after we fetch the type of errors which
2664 * caused this event. Therefore, we will do below for now:
2665 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2666 * have a deferred type of reset to be used.
2667 * 2. Schedule the reset service task.
2668 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2669 * will fetch the correct type of reset. This would be done
2670 * by first decoding the types of errors.
2672 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2674 case HCLGE_VECTOR0_EVENT_RST:
2675 hclge_reset_task_schedule(hdev);
2677 case HCLGE_VECTOR0_EVENT_MBX:
2678 /* If we are here then,
2679 * 1. Either we are not handling any mbx task and we are not
2682 * 2. We could be handling a mbx task but nothing more is
2684 * In both cases, we should schedule mbx task as there are more
2685 * mbx messages reported by this interrupt.
2687 hclge_mbx_task_schedule(hdev);
2690 dev_warn(&hdev->pdev->dev,
2691 "received unknown or unhandled event of vector0\n");
2695 /* clear the source of interrupt if it is not caused by reset */
2696 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2697 hclge_clear_event_cause(hdev, event_cause, clearval);
2698 hclge_enable_vector(&hdev->misc_vector, true);
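/* Only the mailbox path re-arms the vector here. For reset and error
 * events it stays masked and is re-enabled later, by
 * hclge_clear_reset_cause() and hclge_get_reset_level() respectively,
 * once the cause has actually been handled.
 */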
2704 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2706 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2707 dev_warn(&hdev->pdev->dev,
2708 "vector(vector_id %d) has been freed.\n", vector_id);
2712 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2713 hdev->num_msi_left += 1;
2714 hdev->num_msi_used -= 1;
2717 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2719 struct hclge_misc_vector *vector = &hdev->misc_vector;
2721 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2723 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2724 hdev->vector_status[0] = 0;
2726 hdev->num_msi_left -= 1;
2727 hdev->num_msi_used += 1;
2730 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2734 hclge_get_misc_vector(hdev);
2736 /* this would be explicitly freed in the end */
2737 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2738 0, "hclge_misc", hdev);
2740 hclge_free_vector(hdev, 0);
2741 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2742 hdev->misc_vector.vector_irq);
2748 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2750 free_irq(hdev->misc_vector.vector_irq, hdev);
2751 hclge_free_vector(hdev, 0);
2754 int hclge_notify_client(struct hclge_dev *hdev,
2755 enum hnae3_reset_notify_type type)
2757 struct hnae3_client *client = hdev->nic_client;
2760 if (!client->ops->reset_notify)
2763 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2764 struct hnae3_handle *handle = &hdev->vport[i].nic;
2767 ret = client->ops->reset_notify(handle, type);
2769 dev_err(&hdev->pdev->dev,
2770 "notify nic client failed %d(%d)\n", type, ret);
2778 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2779 enum hnae3_reset_notify_type type)
2781 struct hnae3_client *client = hdev->roce_client;
2788 if (!client->ops->reset_notify)
2791 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2792 struct hnae3_handle *handle = &hdev->vport[i].roce;
2794 ret = client->ops->reset_notify(handle, type);
2796 dev_err(&hdev->pdev->dev,
2797 "notify roce client failed %d(%d)",
2806 static int hclge_reset_wait(struct hclge_dev *hdev)
2808 #define HCLGE_RESET_WAIT_MS 100
2809 #define HCLGE_RESET_WAIT_CNT 200
2810 u32 val, reg, reg_bit;
2813 switch (hdev->reset_type) {
2814 case HNAE3_IMP_RESET:
2815 reg = HCLGE_GLOBAL_RESET_REG;
2816 reg_bit = HCLGE_IMP_RESET_BIT;
2818 case HNAE3_GLOBAL_RESET:
2819 reg = HCLGE_GLOBAL_RESET_REG;
2820 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2822 case HNAE3_CORE_RESET:
2823 reg = HCLGE_GLOBAL_RESET_REG;
2824 reg_bit = HCLGE_CORE_RESET_BIT;
2826 case HNAE3_FUNC_RESET:
2827 reg = HCLGE_FUN_RST_ING;
2828 reg_bit = HCLGE_FUN_RST_ING_B;
2830 case HNAE3_FLR_RESET:
2833 dev_err(&hdev->pdev->dev,
2834 "Wait for unsupported reset type: %d\n",
2839 if (hdev->reset_type == HNAE3_FLR_RESET) {
2840 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2841 cnt++ < HCLGE_RESET_WAIT_CNT)
2842 msleep(HCLGE_RESET_WAIT_MS);
2844 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2845 dev_err(&hdev->pdev->dev,
2846 "flr wait timeout: %d\n", cnt);
2853 val = hclge_read_dev(&hdev->hw, reg);
2854 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2855 msleep(HCLGE_RESET_WAIT_MS);
2856 val = hclge_read_dev(&hdev->hw, reg);
2860 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2861 dev_warn(&hdev->pdev->dev,
2862 "Wait for reset timeout: %d\n", hdev->reset_type);
2869 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2871 struct hclge_vf_rst_cmd *req;
2872 struct hclge_desc desc;
2874 req = (struct hclge_vf_rst_cmd *)desc.data;
2875 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2876 req->dest_vfid = func_id;
2881 return hclge_cmd_send(&hdev->hw, &desc, 1);
2884 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2888 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2889 struct hclge_vport *vport = &hdev->vport[i];
2892 /* Send cmd to set/clear VF's FUNC_RST_ING */
2893 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2895 dev_err(&hdev->pdev->dev,
2896 "set vf(%d) rst failed %d!\n",
2897 vport->vport_id, ret);
2901 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2904 /* Inform VF to process the reset.
2905 * hclge_inform_reset_assert_to_vf may fail if VF
2906 * driver is not loaded.
2908 ret = hclge_inform_reset_assert_to_vf(vport);
2910 dev_warn(&hdev->pdev->dev,
2911 "inform reset to vf(%d) failed %d!\n",
2912 vport->vport_id, ret);
2918 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2920 struct hclge_desc desc;
2921 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2924 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2925 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2926 req->fun_reset_vfid = func_id;
2928 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2930 dev_err(&hdev->pdev->dev,
2931 "send function reset cmd fail, status =%d\n", ret);
2936 static void hclge_do_reset(struct hclge_dev *hdev)
2938 struct hnae3_handle *handle = &hdev->vport[0].nic;
2939 struct pci_dev *pdev = hdev->pdev;
2942 if (hclge_get_hw_reset_stat(handle)) {
2943 dev_info(&pdev->dev, "Hardware reset not finish\n");
2944 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2945 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2946 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2950 switch (hdev->reset_type) {
2951 case HNAE3_GLOBAL_RESET:
2952 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2953 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2954 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2955 dev_info(&pdev->dev, "Global Reset requested\n");
2957 case HNAE3_CORE_RESET:
2958 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2959 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2960 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2961 dev_info(&pdev->dev, "Core Reset requested\n");
2963 case HNAE3_FUNC_RESET:
2964 dev_info(&pdev->dev, "PF Reset requested\n");
2965 /* schedule again to check later */
2966 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2967 hclge_reset_task_schedule(hdev);
2969 case HNAE3_FLR_RESET:
2970 dev_info(&pdev->dev, "FLR requested\n");
2971 /* schedule again to check later */
2972 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2973 hclge_reset_task_schedule(hdev);
2976 dev_warn(&pdev->dev,
2977 "Unsupported reset type: %d\n", hdev->reset_type);
2982 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2983 unsigned long *addr)
2985 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2987 /* first, resolve any unknown reset type to the known type(s) */
2988 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2989 /* we will intentionally ignore any errors from this function
2990 * as we will end up in *some* reset request in any case
2992 hclge_handle_hw_msix_error(hdev, addr);
2993 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2994 /* We deferred the clearing of the error event which caused the
2995 * interrupt, since it was not possible to do that in interrupt
2996 * context (and this is the reason we introduced the new UNKNOWN
2997 * reset type). Now that the errors have been handled and cleared
2998 * in hardware, we can safely re-enable interrupts. This is an
2999 * exception to the norm.
3001 hclge_enable_vector(&hdev->misc_vector, true);
3004 /* return the highest priority reset level amongst all */
3005 if (test_bit(HNAE3_IMP_RESET, addr)) {
3006 rst_level = HNAE3_IMP_RESET;
3007 clear_bit(HNAE3_IMP_RESET, addr);
3008 clear_bit(HNAE3_GLOBAL_RESET, addr);
3009 clear_bit(HNAE3_CORE_RESET, addr);
3010 clear_bit(HNAE3_FUNC_RESET, addr);
3011 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3012 rst_level = HNAE3_GLOBAL_RESET;
3013 clear_bit(HNAE3_GLOBAL_RESET, addr);
3014 clear_bit(HNAE3_CORE_RESET, addr);
3015 clear_bit(HNAE3_FUNC_RESET, addr);
3016 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3017 rst_level = HNAE3_CORE_RESET;
3018 clear_bit(HNAE3_CORE_RESET, addr);
3019 clear_bit(HNAE3_FUNC_RESET, addr);
3020 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3021 rst_level = HNAE3_FUNC_RESET;
3022 clear_bit(HNAE3_FUNC_RESET, addr);
3023 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3024 rst_level = HNAE3_FLR_RESET;
3025 clear_bit(HNAE3_FLR_RESET, addr);
3028 if (hdev->reset_type != HNAE3_NONE_RESET &&
3029 rst_level < hdev->reset_type)
3030 return HNAE3_NONE_RESET;
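/* A request below the level currently being serviced comes back as
 * HNAE3_NONE_RESET: the ongoing higher-level reset already covers the
 * lower one, so honoring it would only repeat work.
 */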
3035 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3039 switch (hdev->reset_type) {
3040 case HNAE3_IMP_RESET:
3041 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3043 case HNAE3_GLOBAL_RESET:
3044 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3046 case HNAE3_CORE_RESET:
3047 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3056 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3057 hclge_enable_vector(&hdev->misc_vector, true);
3060 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3064 switch (hdev->reset_type) {
3065 case HNAE3_FUNC_RESET:
3067 case HNAE3_FLR_RESET:
3068 ret = hclge_set_all_vf_rst(hdev, true);
3077 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3082 switch (hdev->reset_type) {
3083 case HNAE3_FUNC_RESET:
3084 /* There is no mechanism for the PF to know if the VF has stopped IO;
3085 * for now, just wait 100 ms for the VF to stop IO
3088 ret = hclge_func_reset_cmd(hdev, 0);
3090 dev_err(&hdev->pdev->dev,
3091 "asserting function reset fail %d!\n", ret);
3095 /* After performing PF reset, it is not necessary to do the
3096 * mailbox handling or send any command to firmware, because
3097 * any mailbox handling or command to firmware is only valid
3098 * after hclge_cmd_init is called.
3100 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3101 hdev->rst_stats.pf_rst_cnt++;
3103 case HNAE3_FLR_RESET:
3104 /* There is no mechanism for the PF to know if the VF has stopped IO;
3105 * for now, just wait 100 ms for the VF to stop IO
3108 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3109 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3110 hdev->rst_stats.flr_rst_cnt++;
3112 case HNAE3_IMP_RESET:
3113 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3114 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3115 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3121 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3126 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3128 #define MAX_RESET_FAIL_CNT 5
3129 #define RESET_UPGRADE_DELAY_SEC 10
3131 if (hdev->reset_pending) {
3132 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3133 hdev->reset_pending);
3135 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3136 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3137 BIT(HCLGE_IMP_RESET_BIT))) {
3138 dev_info(&hdev->pdev->dev,
3139 "reset failed because IMP Reset is pending\n");
3140 hclge_clear_reset_cause(hdev);
3142 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3143 hdev->reset_fail_cnt++;
3145 set_bit(hdev->reset_type, &hdev->reset_pending);
3146 dev_info(&hdev->pdev->dev,
3147 "re-schedule to wait for hw reset done\n");
3151 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3152 hclge_clear_reset_cause(hdev);
3153 mod_timer(&hdev->reset_timer,
3154 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3159 hclge_clear_reset_cause(hdev);
3160 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3164 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3168 switch (hdev->reset_type) {
3169 case HNAE3_FUNC_RESET:
3171 case HNAE3_FLR_RESET:
3172 ret = hclge_set_all_vf_rst(hdev, false);
3181 static void hclge_reset(struct hclge_dev *hdev)
3183 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3184 bool is_timeout = false;
3187 /* Initialize ae_dev reset status as well, in case enet layer wants to
3188 * know if device is undergoing reset
3190 ae_dev->reset_type = hdev->reset_type;
3191 hdev->rst_stats.reset_cnt++;
3192 /* perform reset of the stack & ae device for a client */
3193 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3197 ret = hclge_reset_prepare_down(hdev);
3202 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3204 goto err_reset_lock;
3208 ret = hclge_reset_prepare_wait(hdev);
3212 if (hclge_reset_wait(hdev)) {
3217 hdev->rst_stats.hw_reset_done_cnt++;
3219 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3224 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3226 goto err_reset_lock;
3228 ret = hclge_reset_ae_dev(hdev->ae_dev);
3230 goto err_reset_lock;
3232 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3234 goto err_reset_lock;
3236 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3238 goto err_reset_lock;
3240 hclge_clear_reset_cause(hdev);
3242 ret = hclge_reset_prepare_up(hdev);
3244 goto err_reset_lock;
3246 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3248 goto err_reset_lock;
3252 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3256 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3260 hdev->last_reset_time = jiffies;
3261 hdev->reset_fail_cnt = 0;
3262 hdev->rst_stats.reset_done_cnt++;
3263 ae_dev->reset_type = HNAE3_NONE_RESET;
3264 del_timer(&hdev->reset_timer);
3271 if (hclge_reset_err_handle(hdev, is_timeout))
3272 hclge_reset_task_schedule(hdev);
3275 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3277 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3278 struct hclge_dev *hdev = ae_dev->priv;
3280 /* We might end up getting called broadly because of 2 below cases:
3281 * 1. Recoverable error was conveyed through APEI and only way to bring
3282 * normalcy is to reset.
3283 * 2. A new reset request from the stack due to timeout
3285 * For the first case, the error event might not have an ae handle available.
3286 * Check if this is a new reset request and we are not here just because the
3287 * last reset attempt did not succeed and the watchdog hit us again. We will
3288 * know this if the last reset request did not occur very recently (watchdog
3289 * timer = 5*HZ; let us check after a sufficiently large time, say 4*5*HZ).
3290 * In case of a new request we reset the "reset level" to PF reset.
3291 * And if it is a repeat reset request of the most recent one then we
3292 * want to make sure we throttle the reset request. Therefore, we will
3293 * not allow it again before 3*HZ has elapsed.
3296 handle = &hdev->vport[0].nic;
3298 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3300 else if (hdev->default_reset_request)
3302 hclge_get_reset_level(hdev,
3303 &hdev->default_reset_request);
3304 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3305 hdev->reset_level = HNAE3_FUNC_RESET;
3307 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3310 /* request reset & schedule reset task */
3311 set_bit(hdev->reset_level, &hdev->reset_request);
3312 hclge_reset_task_schedule(hdev);
3314 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3315 hdev->reset_level++;
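/* The level is bumped after every request, so a quickly repeated event
 * escalates one step per attempt (PF -> ... -> global), capped at
 * HNAE3_GLOBAL_RESET; after a long enough quiet period the level drops
 * back to HNAE3_FUNC_RESET above.
 */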
3318 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3319 enum hnae3_reset_type rst_type)
3321 struct hclge_dev *hdev = ae_dev->priv;
3323 set_bit(rst_type, &hdev->default_reset_request);
3326 static void hclge_reset_timer(struct timer_list *t)
3328 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3330 dev_info(&hdev->pdev->dev,
3331 "triggering global reset in reset timer\n");
3332 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3333 hclge_reset_event(hdev->pdev, NULL);
3336 static void hclge_reset_subtask(struct hclge_dev *hdev)
3338 /* check if there is any ongoing reset in the hardware. This status can
3339 * be checked from reset_pending. If there is then, we need to wait for
3340 * hardware to complete reset.
3341 * a. If we are able to figure out in reasonable time that hardware
3342 * has been fully reset then, we can proceed with the driver and client
3344 * b. else, we can come back later to check this status so re-sched
3347 hdev->last_reset_time = jiffies;
3348 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3349 if (hdev->reset_type != HNAE3_NONE_RESET)
3352 /* check if we got any *new* reset requests to be honored */
3353 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3354 if (hdev->reset_type != HNAE3_NONE_RESET)
3355 hclge_do_reset(hdev);
3357 hdev->reset_type = HNAE3_NONE_RESET;
3360 static void hclge_reset_service_task(struct work_struct *work)
3362 struct hclge_dev *hdev =
3363 container_of(work, struct hclge_dev, rst_service_task);
3365 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3368 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3370 hclge_reset_subtask(hdev);
3372 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3375 static void hclge_mailbox_service_task(struct work_struct *work)
3377 struct hclge_dev *hdev =
3378 container_of(work, struct hclge_dev, mbx_service_task);
3380 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3383 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3385 hclge_mbx_handler(hdev);
3387 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3390 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3394 /* start from vport 1, since the PF is always alive */
3395 for (i = 1; i < hdev->num_alloc_vport; i++) {
3396 struct hclge_vport *vport = &hdev->vport[i];
3398 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3399 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3401 /* If vf is not alive, set to default value */
3402 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3403 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3407 static void hclge_service_task(struct work_struct *work)
3409 struct hclge_dev *hdev =
3410 container_of(work, struct hclge_dev, service_task);
3412 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3413 hclge_update_stats_for_all(hdev);
3414 hdev->hw_stats.stats_timer = 0;
3417 hclge_update_port_info(hdev);
3418 hclge_update_link_status(hdev);
3419 hclge_update_vport_alive(hdev);
3420 hclge_service_complete(hdev);
3423 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3425 /* VF handle has no client */
3426 if (!handle->client)
3427 return container_of(handle, struct hclge_vport, nic);
3428 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3429 return container_of(handle, struct hclge_vport, roce);
3431 return container_of(handle, struct hclge_vport, nic);
3434 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3435 struct hnae3_vector_info *vector_info)
3437 struct hclge_vport *vport = hclge_get_vport(handle);
3438 struct hnae3_vector_info *vector = vector_info;
3439 struct hclge_dev *hdev = vport->back;
3443 vector_num = min(hdev->num_msi_left, vector_num);
3445 for (j = 0; j < vector_num; j++) {
3446 for (i = 1; i < hdev->num_msi; i++) {
3447 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3448 vector->vector = pci_irq_vector(hdev->pdev, i);
3449 vector->io_addr = hdev->hw.io_base +
3450 HCLGE_VECTOR_REG_BASE +
3451 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3453 HCLGE_VECTOR_VF_OFFSET;
3454 hdev->vector_status[i] = vport->vport_id;
3455 hdev->vector_irq[i] = vector->vector;
3464 hdev->num_msi_left -= alloc;
3465 hdev->num_msi_used += alloc;
3470 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3474 for (i = 0; i < hdev->num_msi; i++)
3475 if (vector == hdev->vector_irq[i])
3481 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3483 struct hclge_vport *vport = hclge_get_vport(handle);
3484 struct hclge_dev *hdev = vport->back;
3487 vector_id = hclge_get_vector_index(hdev, vector);
3488 if (vector_id < 0) {
3489 dev_err(&hdev->pdev->dev,
3490 "Get vector index fail. vector_id =%d\n", vector_id);
3494 hclge_free_vector(hdev, vector_id);
3499 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3501 return HCLGE_RSS_KEY_SIZE;
3504 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3506 return HCLGE_RSS_IND_TBL_SIZE;
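/* The RSS hash key is larger than one command descriptor can carry, so
 * hclge_set_rss_algo_key() below programs it in three chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, the last chunk taking whatever remains
 * of HCLGE_RSS_KEY_SIZE.
 */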
3509 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3510 const u8 hfunc, const u8 *key)
3512 struct hclge_rss_config_cmd *req;
3513 struct hclge_desc desc;
3518 req = (struct hclge_rss_config_cmd *)desc.data;
3520 for (key_offset = 0; key_offset < 3; key_offset++) {
3521 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3524 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3525 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3527 if (key_offset == 2)
3529 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3531 key_size = HCLGE_RSS_HASH_KEY_NUM;
3533 memcpy(req->hash_key,
3534 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3536 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3538 dev_err(&hdev->pdev->dev,
3539 "Configure RSS config fail, status = %d\n",
3547 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3549 struct hclge_rss_indirection_table_cmd *req;
3550 struct hclge_desc desc;
3554 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3556 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3557 hclge_cmd_setup_basic_desc
3558 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3560 req->start_table_index =
3561 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3562 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3564 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3565 req->rss_result[j] =
3566 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3568 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3570 dev_err(&hdev->pdev->dev,
3571 "Configure rss indir table fail,status = %d\n",
3579 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3580 u16 *tc_size, u16 *tc_offset)
3582 struct hclge_rss_tc_mode_cmd *req;
3583 struct hclge_desc desc;
3587 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3588 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3590 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3593 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3594 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3595 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3596 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3597 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3599 req->rss_tc_mode[i] = cpu_to_le16(mode);
3602 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3604 dev_err(&hdev->pdev->dev,
3605 "Configure rss tc mode fail, status = %d\n", ret);
3610 static void hclge_get_rss_type(struct hclge_vport *vport)
3612 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3613 vport->rss_tuple_sets.ipv4_udp_en ||
3614 vport->rss_tuple_sets.ipv4_sctp_en ||
3615 vport->rss_tuple_sets.ipv6_tcp_en ||
3616 vport->rss_tuple_sets.ipv6_udp_en ||
3617 vport->rss_tuple_sets.ipv6_sctp_en)
3618 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3619 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3620 vport->rss_tuple_sets.ipv6_fragment_en)
3621 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3623 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3626 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3628 struct hclge_rss_input_tuple_cmd *req;
3629 struct hclge_desc desc;
3632 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3634 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3636 /* Get the tuple cfg from pf */
3637 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3638 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3639 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3640 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3641 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3642 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3643 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3644 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3645 hclge_get_rss_type(&hdev->vport[0]);
3646 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3648 dev_err(&hdev->pdev->dev,
3649 "Configure rss input fail, status = %d\n", ret);
3653 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3656 struct hclge_vport *vport = hclge_get_vport(handle);
3659 /* Get hash algorithm */
3661 switch (vport->rss_algo) {
3662 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3663 *hfunc = ETH_RSS_HASH_TOP;
3665 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3666 *hfunc = ETH_RSS_HASH_XOR;
3669 *hfunc = ETH_RSS_HASH_UNKNOWN;
3674 /* Get the RSS Key required by the user */
3676 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3678 /* Get indirect table */
3680 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3681 indir[i] = vport->rss_indirection_tbl[i];
3686 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3687 const u8 *key, const u8 hfunc)
3689 struct hclge_vport *vport = hclge_get_vport(handle);
3690 struct hclge_dev *hdev = vport->back;
3694 /* Set the RSS Hash Key if specified by the user */
3697 case ETH_RSS_HASH_TOP:
3698 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3700 case ETH_RSS_HASH_XOR:
3701 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3703 case ETH_RSS_HASH_NO_CHANGE:
3704 hash_algo = vport->rss_algo;
3710 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3714 /* Update the shadow RSS key with the user-specified key */
3715 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3716 vport->rss_algo = hash_algo;
3719 /* Update the shadow RSS table with user specified qids */
3720 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3721 vport->rss_indirection_tbl[i] = indir[i];
3723 /* Update the hardware */
3724 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3727 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3729 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3731 if (nfc->data & RXH_L4_B_2_3)
3732 hash_sets |= HCLGE_D_PORT_BIT;
3734 hash_sets &= ~HCLGE_D_PORT_BIT;
3736 if (nfc->data & RXH_IP_SRC)
3737 hash_sets |= HCLGE_S_IP_BIT;
3739 hash_sets &= ~HCLGE_S_IP_BIT;
3741 if (nfc->data & RXH_IP_DST)
3742 hash_sets |= HCLGE_D_IP_BIT;
3744 hash_sets &= ~HCLGE_D_IP_BIT;
3746 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3747 hash_sets |= HCLGE_V_TAG_BIT;
3752 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3753 struct ethtool_rxnfc *nfc)
3755 struct hclge_vport *vport = hclge_get_vport(handle);
3756 struct hclge_dev *hdev = vport->back;
3757 struct hclge_rss_input_tuple_cmd *req;
3758 struct hclge_desc desc;
3762 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3763 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3766 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3767 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3769 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3770 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3771 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3772 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3773 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3774 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3775 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3776 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3778 tuple_sets = hclge_get_rss_hash_bits(nfc);
3779 switch (nfc->flow_type) {
3781 req->ipv4_tcp_en = tuple_sets;
3784 req->ipv6_tcp_en = tuple_sets;
3787 req->ipv4_udp_en = tuple_sets;
3790 req->ipv6_udp_en = tuple_sets;
3793 req->ipv4_sctp_en = tuple_sets;
3796 if ((nfc->data & RXH_L4_B_0_1) ||
3797 (nfc->data & RXH_L4_B_2_3))
3800 req->ipv6_sctp_en = tuple_sets;
3803 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3806 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3812 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3814 dev_err(&hdev->pdev->dev,
3815 "Set rss tuple fail, status = %d\n", ret);
3819 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3820 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3821 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3822 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3823 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3824 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3825 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3826 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3827 hclge_get_rss_type(vport);
3831 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3832 struct ethtool_rxnfc *nfc)
3834 struct hclge_vport *vport = hclge_get_vport(handle);
3839 switch (nfc->flow_type) {
3841 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3844 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3847 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3850 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3853 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3856 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3860 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3869 if (tuple_sets & HCLGE_D_PORT_BIT)
3870 nfc->data |= RXH_L4_B_2_3;
3871 if (tuple_sets & HCLGE_S_PORT_BIT)
3872 nfc->data |= RXH_L4_B_0_1;
3873 if (tuple_sets & HCLGE_D_IP_BIT)
3874 nfc->data |= RXH_IP_DST;
3875 if (tuple_sets & HCLGE_S_IP_BIT)
3876 nfc->data |= RXH_IP_SRC;
3881 static int hclge_get_tc_size(struct hnae3_handle *handle)
3883 struct hclge_vport *vport = hclge_get_vport(handle);
3884 struct hclge_dev *hdev = vport->back;
3886 return hdev->rss_size_max;
3889 int hclge_rss_init_hw(struct hclge_dev *hdev)
3891 struct hclge_vport *vport = hdev->vport;
3892 u8 *rss_indir = vport[0].rss_indirection_tbl;
3893 u16 rss_size = vport[0].alloc_rss_size;
3894 u8 *key = vport[0].rss_hash_key;
3895 u8 hfunc = vport[0].rss_algo;
3896 u16 tc_offset[HCLGE_MAX_TC_NUM];
3897 u16 tc_valid[HCLGE_MAX_TC_NUM];
3898 u16 tc_size[HCLGE_MAX_TC_NUM];
3902 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3906 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3910 ret = hclge_set_rss_input_tuple(hdev);
3914 /* Each TC has the same queue size, and the tc_size set to hardware is
3915 * the log2 of the roundup power of two of rss_size; the actual queue
3916 * size is limited by the indirection table.
3918 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3919 dev_err(&hdev->pdev->dev,
3920 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3925 roundup_size = roundup_pow_of_two(rss_size);
3926 roundup_size = ilog2(roundup_size);
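/* Worked example: rss_size = 24 gives roundup_pow_of_two(24) = 32 and
 * tc_size = ilog2(32) = 5, so hardware reserves 32 queue slots per TC
 * while the indirection table (filled as i % rss_size elsewhere) never
 * references a queue beyond the 24 real ones.
 */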
3928 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3931 if (!(hdev->hw_tc_map & BIT(i)))
3935 tc_size[i] = roundup_size;
3936 tc_offset[i] = rss_size * i;
3939 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3942 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3944 struct hclge_vport *vport = hdev->vport;
3947 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3948 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3949 vport[j].rss_indirection_tbl[i] =
3950 i % vport[j].alloc_rss_size;
3954 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3956 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3957 struct hclge_vport *vport = hdev->vport;
3959 if (hdev->pdev->revision >= 0x21)
3960 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3962 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3963 vport[i].rss_tuple_sets.ipv4_tcp_en =
3964 HCLGE_RSS_INPUT_TUPLE_OTHER;
3965 vport[i].rss_tuple_sets.ipv4_udp_en =
3966 HCLGE_RSS_INPUT_TUPLE_OTHER;
3967 vport[i].rss_tuple_sets.ipv4_sctp_en =
3968 HCLGE_RSS_INPUT_TUPLE_SCTP;
3969 vport[i].rss_tuple_sets.ipv4_fragment_en =
3970 HCLGE_RSS_INPUT_TUPLE_OTHER;
3971 vport[i].rss_tuple_sets.ipv6_tcp_en =
3972 HCLGE_RSS_INPUT_TUPLE_OTHER;
3973 vport[i].rss_tuple_sets.ipv6_udp_en =
3974 HCLGE_RSS_INPUT_TUPLE_OTHER;
3975 vport[i].rss_tuple_sets.ipv6_sctp_en =
3976 HCLGE_RSS_INPUT_TUPLE_SCTP;
3977 vport[i].rss_tuple_sets.ipv6_fragment_en =
3978 HCLGE_RSS_INPUT_TUPLE_OTHER;
3980 vport[i].rss_algo = rss_algo;
3982 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3983 HCLGE_RSS_KEY_SIZE);
3986 hclge_rss_indir_init_cfg(hdev);
3989 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3990 int vector_id, bool en,
3991 struct hnae3_ring_chain_node *ring_chain)
3993 struct hclge_dev *hdev = vport->back;
3994 struct hnae3_ring_chain_node *node;
3995 struct hclge_desc desc;
3996 struct hclge_ctrl_vector_chain_cmd *req
3997 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3998 enum hclge_cmd_status status;
3999 enum hclge_opcode_type op;
4000 u16 tqp_type_and_id;
4003 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4004 hclge_cmd_setup_basic_desc(&desc, op, false);
4005 req->int_vector_id = vector_id;
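/* Walk the ring chain and pack up to HCLGE_VECTOR_ELEMENTS_PER_CMD
 * ring-to-vector mappings into one descriptor; a full descriptor is
 * sent immediately and a fresh one is started for the remainder, which
 * is flushed after the loop.
 */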
4008 for (node = ring_chain; node; node = node->next) {
4009 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4010 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4012 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4013 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4014 HCLGE_TQP_ID_S, node->tqp_index);
4015 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4017 hnae3_get_field(node->int_gl_idx,
4018 HNAE3_RING_GL_IDX_M,
4019 HNAE3_RING_GL_IDX_S));
4020 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4021 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4022 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4023 req->vfid = vport->vport_id;
4025 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4027 dev_err(&hdev->pdev->dev,
4028 "Map TQP fail, status is %d.\n",
4034 hclge_cmd_setup_basic_desc(&desc,
4037 req->int_vector_id = vector_id;
4042 req->int_cause_num = i;
4043 req->vfid = vport->vport_id;
4044 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4046 dev_err(&hdev->pdev->dev,
4047 "Map TQP fail, status is %d.\n", status);
4055 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4057 struct hnae3_ring_chain_node *ring_chain)
4059 struct hclge_vport *vport = hclge_get_vport(handle);
4060 struct hclge_dev *hdev = vport->back;
4063 vector_id = hclge_get_vector_index(hdev, vector);
4064 if (vector_id < 0) {
4065 dev_err(&hdev->pdev->dev,
4066 "Get vector index fail. vector_id =%d\n", vector_id);
4070 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4073 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4075 struct hnae3_ring_chain_node *ring_chain)
4077 struct hclge_vport *vport = hclge_get_vport(handle);
4078 struct hclge_dev *hdev = vport->back;
4081 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4084 vector_id = hclge_get_vector_index(hdev, vector);
4085 if (vector_id < 0) {
4086 dev_err(&handle->pdev->dev,
4087 "Get vector index fail. ret =%d\n", vector_id);
4091 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4093 dev_err(&handle->pdev->dev,
4094 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4101 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4102 struct hclge_promisc_param *param)
4104 struct hclge_promisc_cfg_cmd *req;
4105 struct hclge_desc desc;
4108 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4110 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4111 req->vf_id = param->vf_id;
4113 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4114 * pdev revision(0x20); newer revisions support them. Setting these
4115 * two fields will not return an error when the driver sends the
4116 * command to the firmware in revision(0x20).
4118 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4119 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4121 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4123 dev_err(&hdev->pdev->dev,
4124 "Set promisc mode fail, status is %d.\n", ret);
4129 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4130 bool en_mc, bool en_bc, int vport_id)
4135 memset(param, 0, sizeof(struct hclge_promisc_param));
4137 param->enable = HCLGE_PROMISC_EN_UC;
4139 param->enable |= HCLGE_PROMISC_EN_MC;
4141 param->enable |= HCLGE_PROMISC_EN_BC;
4142 param->vf_id = vport_id;
4145 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4148 struct hclge_vport *vport = hclge_get_vport(handle);
4149 struct hclge_dev *hdev = vport->back;
4150 struct hclge_promisc_param param;
4151 bool en_bc_pmc = true;
4153 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4154 * is always bypassed. So broadcast promisc should be disabled until
4155 * the user enables promisc mode
4157 if (handle->pdev->revision == 0x20)
4158 en_bc_pmc = !!(handle->netdev_flags & HNAE3_BPE);
4160 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4162 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4165 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4167 struct hclge_get_fd_mode_cmd *req;
4168 struct hclge_desc desc;
4171 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4173 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4175 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4177 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4181 *fd_mode = req->mode;
4186 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4187 u32 *stage1_entry_num,
4188 u32 *stage2_entry_num,
4189 u16 *stage1_counter_num,
4190 u16 *stage2_counter_num)
4192 struct hclge_get_fd_allocation_cmd *req;
4193 struct hclge_desc desc;
4196 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4198 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4200 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4202 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4207 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4208 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4209 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4210 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4215 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4217 struct hclge_set_fd_key_config_cmd *req;
4218 struct hclge_fd_key_cfg *stage;
4219 struct hclge_desc desc;
4222 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4224 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4225 stage = &hdev->fd_cfg.key_cfg[stage_num];
4226 req->stage = stage_num;
4227 req->key_select = stage->key_sel;
4228 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4229 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4230 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4231 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4232 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4233 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
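/* Note the complement: the hardware mask registers are treated as
 * "ignore this bit" masks, so the active tuple and meta data bitmaps
 * are inverted before being written.
 */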
4235 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4237 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4242 static int hclge_init_fd_config(struct hclge_dev *hdev)
4244 #define LOW_2_WORDS 0x03
4245 struct hclge_fd_key_cfg *key_cfg;
4248 if (!hnae3_dev_fd_supported(hdev))
4251 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4255 switch (hdev->fd_cfg.fd_mode) {
4256 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4257 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4259 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4260 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4263 dev_err(&hdev->pdev->dev,
4264 "Unsupported flow director mode %d\n",
4265 hdev->fd_cfg.fd_mode);
4269 hdev->fd_cfg.proto_support =
4270 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4271 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4272 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4273 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4274 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4275 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4276 key_cfg->outer_sipv6_word_en = 0;
4277 key_cfg->outer_dipv6_word_en = 0;
4279 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4280 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4281 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4282 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4284 /* If using the max 400-bit key, we can support tuples for ether type */
4285 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4286 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4287 key_cfg->tuple_active |=
4288 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4291 /* roce_type is used to filter roce frames
4292 * dst_vport is used to specify the rule
4294 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4296 ret = hclge_get_fd_allocation(hdev,
4297 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4298 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4299 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4300 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4304 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4307 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4308 int loc, u8 *key, bool is_add)
4310 struct hclge_fd_tcam_config_1_cmd *req1;
4311 struct hclge_fd_tcam_config_2_cmd *req2;
4312 struct hclge_fd_tcam_config_3_cmd *req3;
4313 struct hclge_desc desc[3];
4316 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4317 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4318 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4319 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4320 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4322 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4323 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4324 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4326 req1->stage = stage;
4327 req1->xy_sel = sel_x ? 1 : 0;
4328 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4329 req1->index = cpu_to_le32(loc);
4330 req1->entry_vld = sel_x ? is_add : 0;
4333 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4334 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4335 sizeof(req2->tcam_data));
4336 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4337 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
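/* A single TCAM entry spans all three chained descriptors (linked via
 * HCLGE_CMD_FLAG_NEXT), so the key bytes are split across the
 * req1/req2/req3 tcam_data buffers in order and written with one
 * three-descriptor send.
 */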
4340 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4342 dev_err(&hdev->pdev->dev,
4343 "config tcam key fail, ret=%d\n",
4349 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4350 struct hclge_fd_ad_data *action)
4352 struct hclge_fd_ad_config_cmd *req;
4353 struct hclge_desc desc;
4357 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4359 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4360 req->index = cpu_to_le32(loc);
4363 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4364 action->write_rule_id_to_bd);
4365 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4368 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4369 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4370 action->forward_to_direct_queue);
4371 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4373 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4374 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4375 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4376 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4377 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4378 action->counter_id);
4380 req->ad_data = cpu_to_le64(ad_data);
4381 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4383 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4388 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4389 struct hclge_fd_rule *rule)
4391 u16 tmp_x_s, tmp_y_s;
4392 u32 tmp_x_l, tmp_y_l;
4395 if (rule->unused_tuple & tuple_bit)
4398 switch (tuple_bit) {
4401 case BIT(INNER_DST_MAC):
4402 for (i = 0; i < 6; i++) {
4403 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4404 rule->tuples_mask.dst_mac[i]);
4405 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4406 rule->tuples_mask.dst_mac[i]);
4410 case BIT(INNER_SRC_MAC):
4411 for (i = 0; i < 6; i++) {
4412 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4413 rule->tuples_mask.src_mac[i]);
4414 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4415 rule->tuples_mask.src_mac[i]);
4419 case BIT(INNER_VLAN_TAG_FST):
4420 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4421 rule->tuples_mask.vlan_tag1);
4422 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4423 rule->tuples_mask.vlan_tag1);
4424 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4425 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4428 case BIT(INNER_ETH_TYPE):
4429 calc_x(tmp_x_s, rule->tuples.ether_proto,
4430 rule->tuples_mask.ether_proto);
4431 calc_y(tmp_y_s, rule->tuples.ether_proto,
4432 rule->tuples_mask.ether_proto);
4433 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4434 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4437 case BIT(INNER_IP_TOS):
4438 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4439 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4442 case BIT(INNER_IP_PROTO):
4443 calc_x(*key_x, rule->tuples.ip_proto,
4444 rule->tuples_mask.ip_proto);
4445 calc_y(*key_y, rule->tuples.ip_proto,
4446 rule->tuples_mask.ip_proto);
4449 case BIT(INNER_SRC_IP):
4450 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4451 rule->tuples_mask.src_ip[3]);
4452 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4453 rule->tuples_mask.src_ip[3]);
4454 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4455 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4458 case BIT(INNER_DST_IP):
4459 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4460 rule->tuples_mask.dst_ip[3]);
4461 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4462 rule->tuples_mask.dst_ip[3]);
4463 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4464 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4467 case BIT(INNER_SRC_PORT):
4468 calc_x(tmp_x_s, rule->tuples.src_port,
4469 rule->tuples_mask.src_port);
4470 calc_y(tmp_y_s, rule->tuples.src_port,
4471 rule->tuples_mask.src_port);
4472 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4473 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4476 case BIT(INNER_DST_PORT):
4477 calc_x(tmp_x_s, rule->tuples.dst_port,
4478 rule->tuples_mask.dst_port);
4479 calc_y(tmp_y_s, rule->tuples.dst_port,
4480 rule->tuples_mask.dst_port);
4481 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4482 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
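/* Build the port-number field of the meta data key: a host port is
 * identified by its PF and VF ids, a network port by its physical port
 * id, and HCLGE_PORT_TYPE_B records which encoding is used.
 */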
4490 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4491 u8 vf_id, u8 network_port_id)
4493 u32 port_number = 0;
	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}
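/* Pack the meta data fields that are active in @key_cfg (packet type,
 * destination vport, ...) into one u32, starting from bit 0, then convert
 * it to x/y form and left-align it, since the meta data key sits at the
 * most significant end of the key.
 */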
4510 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4511 __le32 *key_x, __le32 *key_y,
4512 struct hclge_fd_rule *rule)
4514 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;
4518 for (i = 0; i < MAX_META_DATA; i++) {
4519 tuple_size = meta_data_key_info[i].key_length;
4520 tuple_bit = key_cfg->meta_data_active & BIT(i);
4522 switch (tuple_bit) {
4523 case BIT(ROCE_TYPE):
4524 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4525 cur_pos += tuple_size;
4527 case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size - 1,
						cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
4533 cur_pos += tuple_size;
4540 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4541 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4542 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4544 *key_x = cpu_to_le32(tmp_x << shift_bits);
4545 *key_y = cpu_to_le32(tmp_y << shift_bits);
/* A complete key consists of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region, the tuple key in the
 * LSB region, and unused bits are filled with zero.
 */
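/* Worked example (the numbers are illustrative, not a fixed hardware
 * profile): with a 400-bit (50-byte) max key and a 32-bit meta data
 * region, meta_data_region = 400 / 8 - 32 / 8 = 46, so the tuple key
 * occupies key[0..45] and the meta data word is written at key[46..49].
 */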
4552 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4553 struct hclge_fd_rule *rule)
4555 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4556 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4557 u8 *cur_key_x, *cur_key_y;
4558 int i, ret, tuple_size;
4559 u8 meta_data_region;
	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;
	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid, check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);
		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}
4581 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4582 MAX_META_DATA_LENGTH / 8;
4584 hclge_fd_convert_meta_data(key_cfg,
4585 (__le32 *)(key_x + meta_data_region),
4586 (__le32 *)(key_y + meta_data_region),
	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}
	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);
	return ret;
}
4607 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4608 struct hclge_fd_rule *rule)
4610 struct hclge_fd_ad_data ad_data;
4612 ad_data.ad_id = rule->location;
4614 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4615 ad_data.drop_packet = true;
4616 ad_data.forward_to_direct_queue = false;
4617 ad_data.queue_id = 0;
4619 ad_data.drop_packet = false;
4620 ad_data.forward_to_direct_queue = true;
4621 ad_data.queue_id = rule->queue_id;
4624 ad_data.use_counter = false;
4625 ad_data.counter_id = 0;
4627 ad_data.use_next_stage = false;
4628 ad_data.next_input_key = 0;
4630 ad_data.write_rule_id_to_bd = true;
4631 ad_data.rule_id = rule->location;
4633 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
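/* Validate an ethtool flow spec against what the flow director supports
 * at this stage, and fill @unused with the tuple bits the rule leaves
 * unspecified so they can be masked out of the TCAM key later.
 */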
4636 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4637 struct ethtool_rx_flow_spec *fs, u32 *unused)
4639 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4640 struct ethtool_usrip4_spec *usr_ip4_spec;
4641 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4642 struct ethtool_usrip6_spec *usr_ip6_spec;
4643 struct ethhdr *ether_spec;
4645 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4648 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4651 if ((fs->flow_type & FLOW_EXT) &&
4652 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4653 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4657 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4661 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4662 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4664 if (!tcp_ip4_spec->ip4src)
4665 *unused |= BIT(INNER_SRC_IP);
4667 if (!tcp_ip4_spec->ip4dst)
4668 *unused |= BIT(INNER_DST_IP);
4670 if (!tcp_ip4_spec->psrc)
4671 *unused |= BIT(INNER_SRC_PORT);
4673 if (!tcp_ip4_spec->pdst)
4674 *unused |= BIT(INNER_DST_PORT);
4676 if (!tcp_ip4_spec->tos)
4677 *unused |= BIT(INNER_IP_TOS);
4681 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4682 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4683 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4685 if (!usr_ip4_spec->ip4src)
4686 *unused |= BIT(INNER_SRC_IP);
4688 if (!usr_ip4_spec->ip4dst)
4689 *unused |= BIT(INNER_DST_IP);
4691 if (!usr_ip4_spec->tos)
4692 *unused |= BIT(INNER_IP_TOS);
4694 if (!usr_ip4_spec->proto)
4695 *unused |= BIT(INNER_IP_PROTO);
4697 if (usr_ip4_spec->l4_4_bytes)
4700 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4707 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4708 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4711 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4712 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4713 *unused |= BIT(INNER_SRC_IP);
4715 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4716 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4717 *unused |= BIT(INNER_DST_IP);
4719 if (!tcp_ip6_spec->psrc)
4720 *unused |= BIT(INNER_SRC_PORT);
4722 if (!tcp_ip6_spec->pdst)
4723 *unused |= BIT(INNER_DST_PORT);
4725 if (tcp_ip6_spec->tclass)
4729 case IPV6_USER_FLOW:
4730 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4731 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4732 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4733 BIT(INNER_DST_PORT);
4735 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4736 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4737 *unused |= BIT(INNER_SRC_IP);
4739 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4740 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4741 *unused |= BIT(INNER_DST_IP);
4743 if (!usr_ip6_spec->l4_proto)
4744 *unused |= BIT(INNER_IP_PROTO);
4746 if (usr_ip6_spec->tclass)
4749 if (usr_ip6_spec->l4_4_bytes)
4754 ether_spec = &fs->h_u.ether_spec;
4755 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4756 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4757 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4759 if (is_zero_ether_addr(ether_spec->h_source))
4760 *unused |= BIT(INNER_SRC_MAC);
4762 if (is_zero_ether_addr(ether_spec->h_dest))
4763 *unused |= BIT(INNER_DST_MAC);
4765 if (!ether_spec->h_proto)
4766 *unused |= BIT(INNER_ETH_TYPE);
4773 if ((fs->flow_type & FLOW_EXT)) {
4774 if (fs->h_ext.vlan_etype)
4776 if (!fs->h_ext.vlan_tci)
4777 *unused |= BIT(INNER_VLAN_TAG_FST);
4779 if (fs->m_ext.vlan_tci) {
4780 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4784 *unused |= BIT(INNER_VLAN_TAG_FST);
4787 if (fs->flow_type & FLOW_MAC_EXT) {
4788 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4791 if (is_zero_ether_addr(fs->h_ext.h_dest))
4792 *unused |= BIT(INNER_DST_MAC);
4794 *unused &= ~(BIT(INNER_DST_MAC));
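/* The rule list is kept sorted by location, so lookups can stop at the
 * first entry whose location is not below the one requested.
 */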
4800 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4802 struct hclge_fd_rule *rule = NULL;
4803 struct hlist_node *node2;
4805 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4806 if (rule->location >= location)
4810 return rule && rule->location == location;
4813 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4814 struct hclge_fd_rule *new_rule,
4818 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4819 struct hlist_node *node2;
4821 if (is_add && !new_rule)
4824 hlist_for_each_entry_safe(rule, node2,
4825 &hdev->fd_rule_list, rule_node) {
4826 if (rule->location >= location)
4831 if (rule && rule->location == location) {
4832 hlist_del(&rule->rule_node);
4834 hdev->hclge_fd_rule_num--;
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d does not exist\n",
			location);
		return -ENOENT;
	}
4846 INIT_HLIST_NODE(&new_rule->rule_node);
4849 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4851 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4853 hdev->hclge_fd_rule_num++;
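/* Translate the ethtool flow spec into the driver's internal tuple
 * representation (host byte order), deriving implied fields such as
 * ether_proto and ip_proto from the flow type itself.
 */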
4858 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4859 struct ethtool_rx_flow_spec *fs,
4860 struct hclge_fd_rule *rule)
4862 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4864 switch (flow_type) {
4868 rule->tuples.src_ip[3] =
4869 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4870 rule->tuples_mask.src_ip[3] =
4871 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4873 rule->tuples.dst_ip[3] =
4874 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4875 rule->tuples_mask.dst_ip[3] =
4876 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4878 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4879 rule->tuples_mask.src_port =
4880 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4882 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4883 rule->tuples_mask.dst_port =
4884 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4886 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4887 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4889 rule->tuples.ether_proto = ETH_P_IP;
4890 rule->tuples_mask.ether_proto = 0xFFFF;
4894 rule->tuples.src_ip[3] =
4895 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4896 rule->tuples_mask.src_ip[3] =
4897 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4899 rule->tuples.dst_ip[3] =
4900 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4901 rule->tuples_mask.dst_ip[3] =
4902 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4904 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4905 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4907 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4908 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4910 rule->tuples.ether_proto = ETH_P_IP;
4911 rule->tuples_mask.ether_proto = 0xFFFF;
4917 be32_to_cpu_array(rule->tuples.src_ip,
4918 fs->h_u.tcp_ip6_spec.ip6src, 4);
4919 be32_to_cpu_array(rule->tuples_mask.src_ip,
4920 fs->m_u.tcp_ip6_spec.ip6src, 4);
4922 be32_to_cpu_array(rule->tuples.dst_ip,
4923 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4924 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4925 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4927 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4928 rule->tuples_mask.src_port =
4929 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4931 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4932 rule->tuples_mask.dst_port =
4933 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4935 rule->tuples.ether_proto = ETH_P_IPV6;
4936 rule->tuples_mask.ether_proto = 0xFFFF;
4939 case IPV6_USER_FLOW:
4940 be32_to_cpu_array(rule->tuples.src_ip,
4941 fs->h_u.usr_ip6_spec.ip6src, 4);
4942 be32_to_cpu_array(rule->tuples_mask.src_ip,
4943 fs->m_u.usr_ip6_spec.ip6src, 4);
4945 be32_to_cpu_array(rule->tuples.dst_ip,
4946 fs->h_u.usr_ip6_spec.ip6dst, 4);
4947 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4948 fs->m_u.usr_ip6_spec.ip6dst, 4);
4950 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4951 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4953 rule->tuples.ether_proto = ETH_P_IPV6;
4954 rule->tuples_mask.ether_proto = 0xFFFF;
4958 ether_addr_copy(rule->tuples.src_mac,
4959 fs->h_u.ether_spec.h_source);
4960 ether_addr_copy(rule->tuples_mask.src_mac,
4961 fs->m_u.ether_spec.h_source);
4963 ether_addr_copy(rule->tuples.dst_mac,
4964 fs->h_u.ether_spec.h_dest);
4965 ether_addr_copy(rule->tuples_mask.dst_mac,
4966 fs->m_u.ether_spec.h_dest);
4968 rule->tuples.ether_proto =
4969 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4970 rule->tuples_mask.ether_proto =
4971 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4978 switch (flow_type) {
4981 rule->tuples.ip_proto = IPPROTO_SCTP;
4982 rule->tuples_mask.ip_proto = 0xFF;
4986 rule->tuples.ip_proto = IPPROTO_TCP;
4987 rule->tuples_mask.ip_proto = 0xFF;
4991 rule->tuples.ip_proto = IPPROTO_UDP;
4992 rule->tuples_mask.ip_proto = 0xFF;
4998 if ((fs->flow_type & FLOW_EXT)) {
4999 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5000 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5003 if (fs->flow_type & FLOW_MAC_EXT) {
5004 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5005 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
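/* Entry point for ethtool ntuple rule insertion. A typical invocation
 * (illustrative only):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 4789 action 3 loc 1
 *
 * The spec is validated, the destination vport/queue resolved, and then
 * the action data and TCAM key are programmed before the rule is added
 * to the sorted rule list.
 */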
5011 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5012 struct ethtool_rxnfc *cmd)
5014 struct hclge_vport *vport = hclge_get_vport(handle);
5015 struct hclge_dev *hdev = vport->back;
5016 u16 dst_vport_id = 0, q_index = 0;
5017 struct ethtool_rx_flow_spec *fs;
5018 struct hclge_fd_rule *rule;
5023 if (!hnae3_dev_fd_supported(hdev))
5027 dev_warn(&hdev->pdev->dev,
5028 "Please enable flow director first\n");
5032 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5034 ret = hclge_fd_check_spec(hdev, fs, &unused);
5036 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5040 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5041 action = HCLGE_FD_ACTION_DROP_PACKET;
5043 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5044 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5047 if (vf > hdev->num_req_vfs) {
5048 dev_err(&hdev->pdev->dev,
5049 "Error: vf id (%d) > max vf num (%d)\n",
5050 vf, hdev->num_req_vfs);
5054 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5055 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5058 dev_err(&hdev->pdev->dev,
5059 "Error: queue id (%d) > max tqp num (%d)\n",
5064 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5068 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5072 ret = hclge_fd_get_tuple(hdev, fs, rule);
5076 rule->flow_type = fs->flow_type;
5078 rule->location = fs->location;
5079 rule->unused_tuple = unused;
5080 rule->vf_id = dst_vport_id;
5081 rule->queue_id = q_index;
5082 rule->action = action;
5084 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5088 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5092 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
5103 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5104 struct ethtool_rxnfc *cmd)
5106 struct hclge_vport *vport = hclge_get_vport(handle);
5107 struct hclge_dev *hdev = vport->back;
5108 struct ethtool_rx_flow_spec *fs;
5111 if (!hnae3_dev_fd_supported(hdev))
5114 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5116 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d does not exist\n",
			fs->location);
		return -ENOENT;
	}
5126 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5127 fs->location, NULL, false);
5131 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
5135 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5138 struct hclge_vport *vport = hclge_get_vport(handle);
5139 struct hclge_dev *hdev = vport->back;
5140 struct hclge_fd_rule *rule;
5141 struct hlist_node *node;
5143 if (!hnae3_dev_fd_supported(hdev))
5147 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5149 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5150 rule->location, NULL, false);
5151 hlist_del(&rule->rule_node);
5153 hdev->hclge_fd_rule_num--;
5156 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5158 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5159 rule->location, NULL, false);
5163 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5165 struct hclge_vport *vport = hclge_get_vport(handle);
5166 struct hclge_dev *hdev = vport->back;
5167 struct hclge_fd_rule *rule;
5168 struct hlist_node *node;
	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
5175 if (!hnae3_dev_fd_supported(hdev))
5178 /* if fd is disabled, should not restore it when reset */
5182 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5183 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5185 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5188 dev_warn(&hdev->pdev->dev,
5189 "Restore rule %d failed, remove it\n",
5191 hlist_del(&rule->rule_node);
5193 hdev->hclge_fd_rule_num--;
5199 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5200 struct ethtool_rxnfc *cmd)
5202 struct hclge_vport *vport = hclge_get_vport(handle);
5203 struct hclge_dev *hdev = vport->back;
5205 if (!hnae3_dev_fd_supported(hdev))
5208 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5209 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5214 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5215 struct ethtool_rxnfc *cmd)
5217 struct hclge_vport *vport = hclge_get_vport(handle);
5218 struct hclge_fd_rule *rule = NULL;
5219 struct hclge_dev *hdev = vport->back;
5220 struct ethtool_rx_flow_spec *fs;
5221 struct hlist_node *node2;
5223 if (!hnae3_dev_fd_supported(hdev))
5226 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5228 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5229 if (rule->location >= fs->location)
5233 if (!rule || fs->location != rule->location)
5236 fs->flow_type = rule->flow_type;
5237 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5241 fs->h_u.tcp_ip4_spec.ip4src =
5242 cpu_to_be32(rule->tuples.src_ip[3]);
5243 fs->m_u.tcp_ip4_spec.ip4src =
5244 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5245 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5247 fs->h_u.tcp_ip4_spec.ip4dst =
5248 cpu_to_be32(rule->tuples.dst_ip[3]);
5249 fs->m_u.tcp_ip4_spec.ip4dst =
5250 rule->unused_tuple & BIT(INNER_DST_IP) ?
5251 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5253 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5254 fs->m_u.tcp_ip4_spec.psrc =
5255 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5256 0 : cpu_to_be16(rule->tuples_mask.src_port);
5258 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5259 fs->m_u.tcp_ip4_spec.pdst =
5260 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5261 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5263 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5264 fs->m_u.tcp_ip4_spec.tos =
5265 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5266 0 : rule->tuples_mask.ip_tos;
5270 fs->h_u.usr_ip4_spec.ip4src =
5271 cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.usr_ip4_spec.ip4src =
5273 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5274 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5276 fs->h_u.usr_ip4_spec.ip4dst =
5277 cpu_to_be32(rule->tuples.dst_ip[3]);
5278 fs->m_u.usr_ip4_spec.ip4dst =
5279 rule->unused_tuple & BIT(INNER_DST_IP) ?
5280 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5282 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5283 fs->m_u.usr_ip4_spec.tos =
5284 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5285 0 : rule->tuples_mask.ip_tos;
5287 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5288 fs->m_u.usr_ip4_spec.proto =
5289 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5290 0 : rule->tuples_mask.ip_proto;
5292 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5298 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5299 rule->tuples.src_ip, 4);
5300 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5301 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5303 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5304 rule->tuples_mask.src_ip, 4);
5306 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5307 rule->tuples.dst_ip, 4);
5308 if (rule->unused_tuple & BIT(INNER_DST_IP))
5309 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5311 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5312 rule->tuples_mask.dst_ip, 4);
5314 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5315 fs->m_u.tcp_ip6_spec.psrc =
5316 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5317 0 : cpu_to_be16(rule->tuples_mask.src_port);
5319 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5320 fs->m_u.tcp_ip6_spec.pdst =
5321 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5322 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5325 case IPV6_USER_FLOW:
5326 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5327 rule->tuples.src_ip, 4);
5328 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5329 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5331 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5332 rule->tuples_mask.src_ip, 4);
5334 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5335 rule->tuples.dst_ip, 4);
5336 if (rule->unused_tuple & BIT(INNER_DST_IP))
5337 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5339 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5340 rule->tuples_mask.dst_ip, 4);
5342 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5343 fs->m_u.usr_ip6_spec.l4_proto =
5344 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5345 0 : rule->tuples_mask.ip_proto;
5349 ether_addr_copy(fs->h_u.ether_spec.h_source,
5350 rule->tuples.src_mac);
5351 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5352 eth_zero_addr(fs->m_u.ether_spec.h_source);
5354 ether_addr_copy(fs->m_u.ether_spec.h_source,
5355 rule->tuples_mask.src_mac);
5357 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5358 rule->tuples.dst_mac);
5359 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5360 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5362 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5363 rule->tuples_mask.dst_mac);
5365 fs->h_u.ether_spec.h_proto =
5366 cpu_to_be16(rule->tuples.ether_proto);
5367 fs->m_u.ether_spec.h_proto =
5368 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5369 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5376 if (fs->flow_type & FLOW_EXT) {
5377 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5378 fs->m_ext.vlan_tci =
5379 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5380 cpu_to_be16(VLAN_VID_MASK) :
5381 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5384 if (fs->flow_type & FLOW_MAC_EXT) {
5385 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5386 if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
5393 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5394 fs->ring_cookie = RX_CLS_FLOW_DISC;
5398 fs->ring_cookie = rule->queue_id;
5399 vf_id = rule->vf_id;
5400 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5401 fs->ring_cookie |= vf_id;
5407 static int hclge_get_all_rules(struct hnae3_handle *handle,
5408 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5410 struct hclge_vport *vport = hclge_get_vport(handle);
5411 struct hclge_dev *hdev = vport->back;
5412 struct hclge_fd_rule *rule;
5413 struct hlist_node *node2;
5416 if (!hnae3_dev_fd_supported(hdev))
5419 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5421 hlist_for_each_entry_safe(rule, node2,
5422 &hdev->fd_rule_list, rule_node) {
5423 if (cnt == cmd->rule_cnt)
5426 rule_locs[cnt] = rule->location;
5430 cmd->rule_cnt = cnt;
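/* Report whether a hardware reset is still in progress by reading the
 * global and function reset status registers directly.
 */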
5435 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5437 struct hclge_vport *vport = hclge_get_vport(handle);
5438 struct hclge_dev *hdev = vport->back;
5440 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5441 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5444 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5446 struct hclge_vport *vport = hclge_get_vport(handle);
5447 struct hclge_dev *hdev = vport->back;
5449 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5452 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5454 struct hclge_vport *vport = hclge_get_vport(handle);
5455 struct hclge_dev *hdev = vport->back;
5457 return hdev->rst_stats.hw_reset_done_cnt;
5460 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5462 struct hclge_vport *vport = hclge_get_vport(handle);
5463 struct hclge_dev *hdev = vport->back;
	hdev->fd_en = enable;
	if (!enable)
		hclge_del_all_fd_entries(handle, false);
	else
		hclge_restore_fd_entries(handle);
}
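/* Enable or disable MAC TX/RX together with padding, FCS insertion and
 * stripping, and oversize truncation; the 1588 and loopback bits are
 * always left cleared here.
 */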
5472 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5474 struct hclge_desc desc;
5475 struct hclge_config_mac_mode_cmd *req =
5476 (struct hclge_config_mac_mode_cmd *)desc.data;
5480 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5481 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5482 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5483 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5484 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5485 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5486 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5487 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5488 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5489 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5490 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5491 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5492 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5493 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5494 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5495 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5497 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5499 dev_err(&hdev->pdev->dev,
5500 "mac enable fail, ret =%d.\n", ret);
5503 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5505 struct hclge_config_mac_mode_cmd *req;
5506 struct hclge_desc desc;
5510 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5511 /* 1 Read out the MAC mode config at first */
5512 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5513 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5515 dev_err(&hdev->pdev->dev,
5516 "mac loopback get fail, ret =%d.\n", ret);
5520 /* 2 Then setup the loopback flag */
5521 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5522 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5523 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5524 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5526 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
	/* 3 Config mac work mode with loopback flag
	 * and its original configuration parameters
	 */
5531 hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
5539 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5540 enum hnae3_loop loop_mode)
5542 #define HCLGE_SERDES_RETRY_MS 10
5543 #define HCLGE_SERDES_RETRY_NUM 100
5545 #define HCLGE_MAC_LINK_STATUS_MS 10
5546 #define HCLGE_MAC_LINK_STATUS_NUM 100
5547 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5548 #define HCLGE_MAC_LINK_STATUS_UP 1
5550 struct hclge_serdes_lb_cmd *req;
5551 struct hclge_desc desc;
	int mac_link_ret = 0;
	int ret, i = 0;
	u8 loop_mode_b;
5556 req = (struct hclge_serdes_lb_cmd *)desc.data;
5557 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5559 switch (loop_mode) {
5560 case HNAE3_LOOP_SERIAL_SERDES:
5561 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5563 case HNAE3_LOOP_PARALLEL_SERDES:
5564 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5567 dev_err(&hdev->pdev->dev,
5568 "unsupported serdes loopback mode %d\n", loop_mode);
5573 req->enable = loop_mode_b;
5574 req->mask = loop_mode_b;
5575 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5577 req->mask = loop_mode_b;
5578 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5581 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5583 dev_err(&hdev->pdev->dev,
5584 "serdes loopback set fail, ret = %d\n", ret);
5589 msleep(HCLGE_SERDES_RETRY_MS);
5590 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5592 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5594 dev_err(&hdev->pdev->dev,
5595 "serdes loopback get, ret = %d\n", ret);
5598 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5599 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5601 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5602 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5604 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5605 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5609 hclge_cfg_mac_mode(hdev, en);
		/* serdes internal loopback, independent of the network cable. */
5614 msleep(HCLGE_MAC_LINK_STATUS_MS);
5615 ret = hclge_get_mac_link_status(hdev);
5616 if (ret == mac_link_ret)
5618 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5620 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5625 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5626 int stream_id, bool enable)
5628 struct hclge_desc desc;
5629 struct hclge_cfg_com_tqp_queue_cmd *req =
5630 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5633 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5634 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5635 req->stream_id = cpu_to_le16(stream_id);
5636 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5638 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5640 dev_err(&hdev->pdev->dev,
5641 "Tqp enable fail, status =%d.\n", ret);
5645 static int hclge_set_loopback(struct hnae3_handle *handle,
5646 enum hnae3_loop loop_mode, bool en)
5648 struct hclge_vport *vport = hclge_get_vport(handle);
5649 struct hnae3_knic_private_info *kinfo;
5650 struct hclge_dev *hdev = vport->back;
5653 switch (loop_mode) {
5654 case HNAE3_LOOP_APP:
5655 ret = hclge_set_app_loopback(hdev, en);
5657 case HNAE3_LOOP_SERIAL_SERDES:
5658 case HNAE3_LOOP_PARALLEL_SERDES:
5659 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5663 dev_err(&hdev->pdev->dev,
5664 "loop_mode %d is not supported\n", loop_mode);
5671 kinfo = &vport->nic.kinfo;
5672 for (i = 0; i < kinfo->num_tqps; i++) {
5673 ret = hclge_tqp_enable(hdev, i, 0, en);
5681 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5683 struct hclge_vport *vport = hclge_get_vport(handle);
5684 struct hnae3_knic_private_info *kinfo;
5685 struct hnae3_queue *queue;
5686 struct hclge_tqp *tqp;
5689 kinfo = &vport->nic.kinfo;
5690 for (i = 0; i < kinfo->num_tqps; i++) {
5691 queue = handle->kinfo.tqp[i];
5692 tqp = container_of(queue, struct hclge_tqp, q);
5693 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5697 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5699 struct hclge_vport *vport = hclge_get_vport(handle);
5700 struct hclge_dev *hdev = vport->back;
5703 mod_timer(&hdev->service_timer, jiffies + HZ);
5705 del_timer_sync(&hdev->service_timer);
5706 cancel_work_sync(&hdev->service_task);
5707 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5711 static int hclge_ae_start(struct hnae3_handle *handle)
5713 struct hclge_vport *vport = hclge_get_vport(handle);
5714 struct hclge_dev *hdev = vport->back;
5717 hclge_cfg_mac_mode(hdev, true);
5718 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5719 hdev->hw.mac.link = 0;
5721 /* reset tqp stats */
5722 hclge_reset_tqp_stats(handle);
5724 hclge_mac_start_phy(hdev);
5729 static void hclge_ae_stop(struct hnae3_handle *handle)
5731 struct hclge_vport *vport = hclge_get_vport(handle);
5732 struct hclge_dev *hdev = vport->back;
5735 set_bit(HCLGE_STATE_DOWN, &hdev->state);
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so it only needs to stop the PHY here.
	 */
5740 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5741 hdev->reset_type != HNAE3_FUNC_RESET) {
5742 hclge_mac_stop_phy(hdev);
5746 for (i = 0; i < handle->kinfo.num_tqps; i++)
5747 hclge_reset_tqp(handle, i);
5750 hclge_cfg_mac_mode(hdev, false);
5752 hclge_mac_stop_phy(hdev);
5754 /* reset tqp stats */
5755 hclge_reset_tqp_stats(handle);
5756 hclge_update_link_status(hdev);
5759 int hclge_vport_start(struct hclge_vport *vport)
5761 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5762 vport->last_active_jiffies = jiffies;
5766 void hclge_vport_stop(struct hclge_vport *vport)
5768 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5771 static int hclge_client_start(struct hnae3_handle *handle)
5773 struct hclge_vport *vport = hclge_get_vport(handle);
5775 return hclge_vport_start(vport);
5778 static void hclge_client_stop(struct hnae3_handle *handle)
5780 struct hclge_vport *vport = hclge_get_vport(handle);
5782 hclge_vport_stop(vport);
5785 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5786 u16 cmdq_resp, u8 resp_code,
5787 enum hclge_mac_vlan_tbl_opcode op)
5789 struct hclge_dev *hdev = vport->back;
5790 int return_status = -EIO;
5793 dev_err(&hdev->pdev->dev,
5794 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5799 if (op == HCLGE_MAC_VLAN_ADD) {
5800 if ((!resp_code) || (resp_code == 1)) {
5802 } else if (resp_code == 2) {
5803 return_status = -ENOSPC;
5804 dev_err(&hdev->pdev->dev,
5805 "add mac addr failed for uc_overflow.\n");
5806 } else if (resp_code == 3) {
5807 return_status = -ENOSPC;
5808 dev_err(&hdev->pdev->dev,
5809 "add mac addr failed for mc_overflow.\n");
5811 dev_err(&hdev->pdev->dev,
5812 "add mac addr failed for undefined, code=%d.\n",
5815 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5818 } else if (resp_code == 1) {
5819 return_status = -ENOENT;
5820 dev_dbg(&hdev->pdev->dev,
5821 "remove mac addr failed for miss.\n");
5823 dev_err(&hdev->pdev->dev,
5824 "remove mac addr failed for undefined, code=%d.\n",
5827 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5830 } else if (resp_code == 1) {
5831 return_status = -ENOENT;
5832 dev_dbg(&hdev->pdev->dev,
5833 "lookup mac addr failed for miss.\n");
5835 dev_err(&hdev->pdev->dev,
5836 "lookup mac addr failed for undefined, code=%d.\n",
5840 return_status = -EINVAL;
5841 dev_err(&hdev->pdev->dev,
5842 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5846 return return_status;
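/* Set or clear one function's bit in the VF bitmap of a mac_vlan table
 * entry: vfids 0-191 live in desc[1] and vfids 192-255 in desc[2], packed
 * 32 ids per data word.
 */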
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num, bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;
	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}
	return 0;
}
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;
	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;
	return true;
}
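/* Pack a MAC address into the mac_vlan table entry layout: bytes 0-3 go
 * into the high word (byte 0 in the least significant bits) and bytes 4-5
 * into the low half-word.
 */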
5890 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5891 const u8 *addr, bool is_mc)
5893 const unsigned char *mac_addr = addr;
5894 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5895 (mac_addr[0]) | (mac_addr[1] << 8);
5896 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5898 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5900 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5901 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5904 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5905 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5908 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5909 struct hclge_mac_vlan_tbl_entry_cmd *req)
5911 struct hclge_dev *hdev = vport->back;
5912 struct hclge_desc desc;
5917 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5919 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5921 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5923 dev_err(&hdev->pdev->dev,
5924 "del mac addr failed for cmd_send, ret =%d.\n",
5928 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5929 retval = le16_to_cpu(desc.retval);
5931 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5932 HCLGE_MAC_VLAN_REMOVE);
5935 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5936 struct hclge_mac_vlan_tbl_entry_cmd *req,
5937 struct hclge_desc *desc,
5940 struct hclge_dev *hdev = vport->back;
5945 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5947 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5948 memcpy(desc[0].data,
5950 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5951 hclge_cmd_setup_basic_desc(&desc[1],
5952 HCLGE_OPC_MAC_VLAN_ADD,
5954 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5955 hclge_cmd_setup_basic_desc(&desc[2],
5956 HCLGE_OPC_MAC_VLAN_ADD,
5958 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5960 memcpy(desc[0].data,
5962 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5963 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5966 dev_err(&hdev->pdev->dev,
5967 "lookup mac addr failed for cmd_send, ret =%d.\n",
5971 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5972 retval = le16_to_cpu(desc[0].retval);
5974 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5975 HCLGE_MAC_VLAN_LKUP);
5978 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5979 struct hclge_mac_vlan_tbl_entry_cmd *req,
5980 struct hclge_desc *mc_desc)
5982 struct hclge_dev *hdev = vport->back;
5989 struct hclge_desc desc;
5991 hclge_cmd_setup_basic_desc(&desc,
5992 HCLGE_OPC_MAC_VLAN_ADD,
5994 memcpy(desc.data, req,
5995 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5996 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5997 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5998 retval = le16_to_cpu(desc.retval);
6000 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6002 HCLGE_MAC_VLAN_ADD);
6004 hclge_cmd_reuse_desc(&mc_desc[0], false);
6005 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6006 hclge_cmd_reuse_desc(&mc_desc[1], false);
6007 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6008 hclge_cmd_reuse_desc(&mc_desc[2], false);
6009 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6010 memcpy(mc_desc[0].data, req,
6011 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6012 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6013 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6014 retval = le16_to_cpu(mc_desc[0].retval);
6016 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6018 HCLGE_MAC_VLAN_ADD);
6022 dev_err(&hdev->pdev->dev,
6023 "add mac addr failed for cmd_send, ret =%d.\n",
6031 static int hclge_init_umv_space(struct hclge_dev *hdev)
6033 u16 allocated_size = 0;
6036 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6041 if (allocated_size < hdev->wanted_umv_size)
6042 dev_warn(&hdev->pdev->dev,
6043 "Alloc umv space failed, want %d, get %d\n",
6044 hdev->wanted_umv_size, allocated_size);
6046 mutex_init(&hdev->umv_mutex);
6047 hdev->max_umv_size = allocated_size;
6048 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6049 hdev->share_umv_size = hdev->priv_umv_size +
6050 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6055 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6059 if (hdev->max_umv_size > 0) {
6060 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6064 hdev->max_umv_size = 0;
6066 mutex_destroy(&hdev->umv_mutex);
6071 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6072 u16 *allocated_size, bool is_alloc)
6074 struct hclge_umv_spc_alc_cmd *req;
6075 struct hclge_desc desc;
6078 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6079 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6080 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6081 req->space_size = cpu_to_le32(space_size);
6083 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6085 dev_err(&hdev->pdev->dev,
6086 "%s umv space failed for cmd_send, ret =%d\n",
6087 is_alloc ? "allocate" : "free", ret);
6091 if (is_alloc && allocated_size)
6092 *allocated_size = le32_to_cpu(desc.data[1]);
6097 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6099 struct hclge_vport *vport;
6102 for (i = 0; i < hdev->num_alloc_vport; i++) {
6103 vport = &hdev->vport[i];
6104 vport->used_umv_num = 0;
6107 mutex_lock(&hdev->umv_mutex);
6108 hdev->share_umv_size = hdev->priv_umv_size +
6109 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6110 mutex_unlock(&hdev->umv_mutex);
6113 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6115 struct hclge_dev *hdev = vport->back;
6118 mutex_lock(&hdev->umv_mutex);
6119 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6120 hdev->share_umv_size == 0);
6121 mutex_unlock(&hdev->umv_mutex);
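/* Account a unicast MAC entry against the vport's private UMV quota
 * first; once that is exhausted, entries are drawn from (or, on free,
 * returned to) the shared pool.
 */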
6126 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6128 struct hclge_dev *hdev = vport->back;
6130 mutex_lock(&hdev->umv_mutex);
6132 if (vport->used_umv_num > hdev->priv_umv_size)
6133 hdev->share_umv_size++;
6135 if (vport->used_umv_num > 0)
6136 vport->used_umv_num--;
6138 if (vport->used_umv_num >= hdev->priv_umv_size &&
6139 hdev->share_umv_size > 0)
6140 hdev->share_umv_size--;
6141 vport->used_umv_num++;
6143 mutex_unlock(&hdev->umv_mutex);
6146 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6147 const unsigned char *addr)
6149 struct hclge_vport *vport = hclge_get_vport(handle);
6151 return hclge_add_uc_addr_common(vport, addr);
6154 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6155 const unsigned char *addr)
6157 struct hclge_dev *hdev = vport->back;
6158 struct hclge_mac_vlan_tbl_entry_cmd req;
6159 struct hclge_desc desc;
6160 u16 egress_port = 0;
6163 /* mac addr check */
6164 if (is_zero_ether_addr(addr) ||
6165 is_broadcast_ether_addr(addr) ||
6166 is_multicast_ether_addr(addr)) {
6167 dev_err(&hdev->pdev->dev,
6168 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6170 is_zero_ether_addr(addr),
6171 is_broadcast_ether_addr(addr),
6172 is_multicast_ether_addr(addr));
6176 memset(&req, 0, sizeof(req));
6178 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6179 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6181 req.egress_port = cpu_to_le16(egress_port);
6183 hclge_prepare_mac_addr(&req, addr, false);
	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Repeated unicast entries
	 * are not allowed in the mac_vlan table.
	 */
6189 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6190 if (ret == -ENOENT) {
6191 if (!hclge_is_umv_space_full(vport)) {
6192 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6194 hclge_update_umv_space(vport, false);
6198 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6199 hdev->priv_umv_size);
6204 /* check if we just hit the duplicate */
6206 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6207 vport->vport_id, addr);
6211 dev_err(&hdev->pdev->dev,
6212 "PF failed to add unicast entry(%pM) in the MAC table\n",
6218 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6219 const unsigned char *addr)
6221 struct hclge_vport *vport = hclge_get_vport(handle);
6223 return hclge_rm_uc_addr_common(vport, addr);
6226 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6227 const unsigned char *addr)
6229 struct hclge_dev *hdev = vport->back;
6230 struct hclge_mac_vlan_tbl_entry_cmd req;
6233 /* mac addr check */
6234 if (is_zero_ether_addr(addr) ||
6235 is_broadcast_ether_addr(addr) ||
6236 is_multicast_ether_addr(addr)) {
6237 dev_dbg(&hdev->pdev->dev,
6238 "Remove mac err! invalid mac:%pM.\n",
6243 memset(&req, 0, sizeof(req));
6244 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6245 hclge_prepare_mac_addr(&req, addr, false);
6246 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6248 hclge_update_umv_space(vport, true);
6253 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6254 const unsigned char *addr)
6256 struct hclge_vport *vport = hclge_get_vport(handle);
6258 return hclge_add_mc_addr_common(vport, addr);
6261 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6262 const unsigned char *addr)
6264 struct hclge_dev *hdev = vport->back;
6265 struct hclge_mac_vlan_tbl_entry_cmd req;
6266 struct hclge_desc desc[3];
6269 /* mac addr check */
6270 if (!is_multicast_ether_addr(addr)) {
6271 dev_err(&hdev->pdev->dev,
6272 "Add mc mac err! invalid mac:%pM.\n",
6276 memset(&req, 0, sizeof(req));
6277 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6278 hclge_prepare_mac_addr(&req, addr, true);
6279 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
		/* This mac addr exists, update VFID for it */
6282 hclge_update_desc_vfid(desc, vport->vport_id, false);
6283 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
		/* This mac addr does not exist, add a new entry for it */
6286 memset(desc[0].data, 0, sizeof(desc[0].data));
6287 memset(desc[1].data, 0, sizeof(desc[0].data));
6288 memset(desc[2].data, 0, sizeof(desc[0].data));
6289 hclge_update_desc_vfid(desc, vport->vport_id, false);
6290 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6293 if (status == -ENOSPC)
6294 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6299 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6300 const unsigned char *addr)
6302 struct hclge_vport *vport = hclge_get_vport(handle);
6304 return hclge_rm_mc_addr_common(vport, addr);
6307 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6308 const unsigned char *addr)
6310 struct hclge_dev *hdev = vport->back;
6311 struct hclge_mac_vlan_tbl_entry_cmd req;
6312 enum hclge_cmd_status status;
6313 struct hclge_desc desc[3];
6315 /* mac addr check */
6316 if (!is_multicast_ether_addr(addr)) {
6317 dev_dbg(&hdev->pdev->dev,
6318 "Remove mc mac err! invalid mac:%pM.\n",
6323 memset(&req, 0, sizeof(req));
6324 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6325 hclge_prepare_mac_addr(&req, addr, true);
6326 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
		/* This mac addr exists, remove this handle's VFID for it */
6329 hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (hclge_is_all_function_id_zero(desc))
			/* All vfids are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all vfids are zero, just update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
		/* Maybe this mac address is in the mta table, but it cannot
		 * be deleted here because an mta entry represents an address
		 * range rather than a specific address. The delete action for
		 * all entries will take effect in update_mta_status, called
		 * by hns3_nic_set_rx_mode.
		 */
6351 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6352 enum HCLGE_MAC_ADDR_TYPE mac_type)
6354 struct hclge_vport_mac_addr_cfg *mac_cfg;
6355 struct list_head *list;
6357 if (!vport->vport_id)
6360 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6364 mac_cfg->hd_tbl_status = true;
6365 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6367 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6368 &vport->uc_mac_list : &vport->mc_mac_list;
6370 list_add_tail(&mac_cfg->node, list);
6373 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6375 enum HCLGE_MAC_ADDR_TYPE mac_type)
6377 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6378 struct list_head *list;
6379 bool uc_flag, mc_flag;
6381 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6382 &vport->uc_mac_list : &vport->mc_mac_list;
6384 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6385 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6387 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6389 if (uc_flag && mac_cfg->hd_tbl_status)
6390 hclge_rm_uc_addr_common(vport, mac_addr);
6392 if (mc_flag && mac_cfg->hd_tbl_status)
6393 hclge_rm_mc_addr_common(vport, mac_addr);
6395 list_del(&mac_cfg->node);
6402 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6403 enum HCLGE_MAC_ADDR_TYPE mac_type)
6405 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6406 struct list_head *list;
6408 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6409 &vport->uc_mac_list : &vport->mc_mac_list;
6411 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6412 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6413 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6415 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6416 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6418 mac_cfg->hd_tbl_status = false;
6420 list_del(&mac_cfg->node);
6426 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6428 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6429 struct hclge_vport *vport;
6432 mutex_lock(&hdev->vport_cfg_mutex);
6433 for (i = 0; i < hdev->num_alloc_vport; i++) {
6434 vport = &hdev->vport[i];
6435 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6436 list_del(&mac->node);
6440 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6441 list_del(&mac->node);
6445 mutex_unlock(&hdev->vport_cfg_mutex);
6448 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6449 u16 cmdq_resp, u8 resp_code)
6451 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6452 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6453 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6454 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6459 dev_err(&hdev->pdev->dev,
6460 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6465 switch (resp_code) {
6466 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6467 case HCLGE_ETHERTYPE_ALREADY_ADD:
6470 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6471 dev_err(&hdev->pdev->dev,
6472 "add mac ethertype failed for manager table overflow.\n");
6473 return_status = -EIO;
6475 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6476 dev_err(&hdev->pdev->dev,
6477 "add mac ethertype failed for key conflict.\n");
6478 return_status = -EIO;
6481 dev_err(&hdev->pdev->dev,
6482 "add mac ethertype failed for undefined, code=%d.\n",
6484 return_status = -EIO;
6487 return return_status;
6490 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6491 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6493 struct hclge_desc desc;
6498 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6499 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6501 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6503 dev_err(&hdev->pdev->dev,
6504 "add mac ethertype failed for cmd_send, ret =%d.\n",
6509 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6510 retval = le16_to_cpu(desc.retval);
6512 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6515 static int init_mgr_tbl(struct hclge_dev *hdev)
6520 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6521 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6523 dev_err(&hdev->pdev->dev,
6524 "add mac ethertype failed, ret =%d.\n",
6533 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6535 struct hclge_vport *vport = hclge_get_vport(handle);
6536 struct hclge_dev *hdev = vport->back;
6538 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6541 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6544 const unsigned char *new_addr = (const unsigned char *)p;
6545 struct hclge_vport *vport = hclge_get_vport(handle);
6546 struct hclge_dev *hdev = vport->back;
6549 /* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			new_addr);
		return -EINVAL;
	}
6559 if ((!is_first || is_kdump_kernel()) &&
6560 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6561 dev_warn(&hdev->pdev->dev,
6562 "remove old uc mac address fail.\n");
6564 ret = hclge_add_uc_addr(handle, new_addr);
6566 dev_err(&hdev->pdev->dev,
6567 "add uc mac address fail, ret =%d.\n",
6571 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6572 dev_err(&hdev->pdev->dev,
6573 "restore uc mac address fail.\n");
6578 ret = hclge_pause_addr_cfg(hdev, new_addr);
6580 dev_err(&hdev->pdev->dev,
6581 "configure mac pause address fail, ret =%d.\n",
6586 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6591 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6594 struct hclge_vport *vport = hclge_get_vport(handle);
6595 struct hclge_dev *hdev = vport->back;
6597 if (!hdev->hw.mac.phydev)
6600 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6603 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6604 u8 fe_type, bool filter_en, u8 vf_id)
6606 struct hclge_vlan_filter_ctrl_cmd *req;
6607 struct hclge_desc desc;
6610 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6612 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6613 req->vlan_type = vlan_type;
6614 req->vlan_fe = filter_en ? fe_type : 0;
6617 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6619 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6625 #define HCLGE_FILTER_TYPE_VF 0
6626 #define HCLGE_FILTER_TYPE_PORT 1
6627 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6628 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6629 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6630 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6631 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6632 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6633 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6634 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6635 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6637 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6639 struct hclge_vport *vport = hclge_get_vport(handle);
6640 struct hclge_dev *hdev = vport->back;
6642 if (hdev->pdev->revision >= 0x21) {
6643 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6644 HCLGE_FILTER_FE_EGRESS, enable, 0);
6645 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6646 HCLGE_FILTER_FE_INGRESS, enable, 0);
6648 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6649 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6653 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6655 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
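/* Program the per-function VLAN filter table. The VF bitmap spans two
 * descriptors, and the firmware response distinguishes "table full"
 * (the VF VLAN filter is then silently disabled) from a real failure.
 */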
6658 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6659 bool is_kill, u16 vlan, u8 qos,
6662 #define HCLGE_MAX_VF_BYTES 16
6663 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6664 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6665 struct hclge_desc desc[2];
6670 hclge_cmd_setup_basic_desc(&desc[0],
6671 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6672 hclge_cmd_setup_basic_desc(&desc[1],
6673 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6675 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6677 vf_byte_off = vfid / 8;
6678 vf_byte_val = 1 << (vfid % 8);
6680 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6681 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6683 req0->vlan_id = cpu_to_le16(vlan);
6684 req0->vlan_cfg = is_kill;
6686 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6687 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6689 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6691 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6693 dev_err(&hdev->pdev->dev,
6694 "Send vf vlan command fail, ret =%d.\n",
6700 #define HCLGE_VF_VLAN_NO_ENTRY 2
6701 if (!req0->resp_code || req0->resp_code == 1)
6704 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6705 dev_warn(&hdev->pdev->dev,
6706 "vf vlan table is full, vf vlan filter is disabled\n");
6710 dev_err(&hdev->pdev->dev,
6711 "Add vf vlan filter fail, ret =%d.\n",
6714 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6715 if (!req0->resp_code)
6718 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6719 dev_warn(&hdev->pdev->dev,
6720 "vlan %d filter is not in vf vlan table\n",
6725 dev_err(&hdev->pdev->dev,
6726 "Kill vf vlan filter fail, ret =%d.\n",
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}

static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       qos, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on pdev
	 * revision 0x20; newer revisions support them, but these two
	 * fields cannot be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
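/* Each vport keeps a software list of its VLANs; hd_tbl_status records
 * whether an entry has actually been written to the HW filter table,
 * since writes are deferred while a port based VLAN is enabled.
 */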
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, 0, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						new_info->qos, false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       old_info->qos, true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}
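/* Apply a port based VLAN change: reprogram the TX/RX offload config,
 * update the HW filter entries, then record the new state and tag info.
 */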
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       vlan_info->qos, false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       old_vlan_info->qos, true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}

static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3 bits value, so can not be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							(u8)vfid, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When port based VLAN is enabled, we use the port based VLAN as the
	 * VLAN filter entry. In this case we don't update the VLAN filter
	 * table when the user adds a new VLAN or removes an existing one;
	 * we just update the vport VLAN list. The VLAN ids in the VLAN list
	 * won't be written to the VLAN filter table until port based VLAN
	 * is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, 0, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	}

	return ret;
}
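/* The MAC is programmed with a max frame size (MPS) rather than an MTU:
 * the requested MTU is padded with the Ethernet header, FCS and room for
 * two VLAN tags before being range checked against the MAC limits.
 */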
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
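/* TQP reset handshake: assert the reset request for the queue, poll the
 * ready bit until HW reports the reset done, then deassert the request.
 */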
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);

	return ret;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret = 0;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
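/* Resolve the pause mode after autoneg: combine the local and remote
 * pause advertisements (standard flow control resolution, done here by
 * mii_resolve_flowctrl_fdx()) and push the result to the MAC. Pause is
 * not used in half duplex.
 */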
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	return hclge_restart_autoneg(handle);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}
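/* Bind a client (KNIC/UNIC/RoCE) to every vport. The RoCE instance is
 * only initialized once both the NIC and RoCE clients are registered
 * and the device supports RoCE.
 */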
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (netif_msg_drv(&hdev->vport->nic))
				hclge_info_show(hdev);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}

static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
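/* Full PF initialization: PCI and command queue bring-up, capability and
 * configuration query, MSI and misc IRQ setup, TQP/vport allocation and
 * mapping, then MAC, VLAN, TM, RSS, flow director and HW error interrupt
 * initialization before the service tasks are armed.
 */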
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_config_mac_tnl_int(hdev, false);
	hclge_hw_error_set_state(hdev, false);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
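/* Register dump layout: the directly readable cmdq/common/ring/interrupt
 * registers are emitted in four-word lines padded with separator values,
 * followed by the 32-bit and 64-bit register sets queried from firmware.
 */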
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);