// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
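/* Each entry above pairs an ethtool string with the offset of the matching
 * counter inside struct hclge_mac_stats, so the whole set can be copied
 * generically via HCLGE_STATS_READ(); a sketch of the loop used later by
 * hclge_comm_get_stats():
 *
 *	for (i = 0; i < ARRAY_SIZE(g_mac_stats_string); i++)
 *		data[i] = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *					   g_mac_stats_string[i].offset);
 */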
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
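/* Descriptor accounting implied by the stats loops above: the first
 * descriptor keeps its command header and carries three u64 counters in
 * its 24-byte data area, while every later descriptor is consumed whole
 * (32 bytes, i.e. four u64 counters).  So the formula above is just
 * 1 + DIV_ROUND_UP(reg_num - 3, 4).  Worked example, assuming
 * reg_num = 21:
 *	1 + ((21 - 3) >> 2) + (((21 - 3) & 0x3) ? 1 : 0) = 1 + 4 + 1 = 6
 */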
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has a TX and an RX statistics entry */
	return kinfo->num_tqps * (2);
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return (u8 *)buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
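/* The MAC address is reassembled from two config words: param[2] holds the
 * low 32 bits and a field of param[3] holds the high 16 bits.  A sketch of
 * the composition done above (the double shift `(hi << 31) << 1` is
 * equivalent to `hi << 32` while avoiding a shift count that equals the
 * width of a 32-bit operand):
 *
 *	mac_addr_tmp  = le32_to_cpu(param[2]);		 bits  0..31
 *	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;	 bits 32..47
 *
 * Byte i of cfg->mac_addr is then (mac_addr_tmp >> (8 * i)) & 0xff.
 */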
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is expressed in units of 4 bytes when sent
		 * to hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
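/* Each HCLGE_OPC_GET_CFG_PARAM descriptor reads HCLGE_CFG_RD_LEN_BYTES of
 * the parameter area: the request word packs the byte offset in the
 * HCLGE_CFG_OFFSET field and the length (in 4-byte units) in the
 * HCLGE_CFG_RD_LEN field, so descriptor i covers bytes
 * [i * HCLGE_CFG_RD_LEN_BYTES, (i + 1) * HCLGE_CFG_RD_LEN_BYTES).
 */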
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int i, ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev))
		hdev->fd_en = true;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
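/* Note: both the min and the max MSS are written through the same
 * HCLGE_TSO_MSS_MIN_M/HCLGE_TSO_MSS_MIN_S mask and shift, apparently
 * because the two fields occupy the same bit positions inside their
 * respective 16-bit words (tso_mss_min and tso_mss_max), so the one mask
 * is simply reused.
 */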
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}
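/* rss_size ends up as the per-TC queue count, capped by rss_size_max.
 * Example: with 16 TQPs assigned to the vport and num_tc = 4, each TC
 * gets 16 / 4 = 4 queues (assuming rss_size_max >= 4).
 */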
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret)
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
	} else {
		hclge_unic_setup(vport, num_tqps);
		ret = 0;
	}

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
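/* Shared-buffer sizing example for a DCB-capable device, assuming
 * mps = 1500 (aligned_mps = 1536), tc_num = 4 and dv_buf_size = 8192:
 *	shared_buf_min = 2 * 1536 + 8192	= 11264
 *	shared_buf_tc  = 4 * 1536 + 1536	=  7680
 *	shared_std     = roundup(11264, 256)	= 11264
 * The check above then requires rx_all >= rx_priv + 11264 before the
 * shared buffer and per-TC thresholds are laid out.
 */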
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
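/* The rx allocation thus degrades in four steps until the budget fits:
 * full watermarks for every enabled TC, reduced watermarks, dropping the
 * private buffers of non-PFC TCs, and finally dropping the private
 * buffers of PFC TCs; only if all four attempts fail is -ENOMEM returned.
 */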
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* only 10M and 100M support half duplex */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}
2247 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2249 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2250 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2251 schedule_work(&hdev->mbx_service_task);
2254 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2256 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2257 schedule_work(&hdev->rst_service_task);
2260 static void hclge_task_schedule(struct hclge_dev *hdev)
2262 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2263 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2264 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2265 (void)schedule_work(&hdev->service_task);
2268 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2270 struct hclge_link_status_cmd *req;
2271 struct hclge_desc desc;
2275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2276 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2278 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2283 req = (struct hclge_link_status_cmd *)desc.data;
2284 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2286 return !!link_status;
2289 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2294 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2297 mac_state = hclge_get_mac_link_status(hdev);
2299 if (hdev->hw.mac.phydev) {
2300 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2301 link_stat = mac_state &
2302 hdev->hw.mac.phydev->link;
2307 link_stat = mac_state;
2313 static void hclge_update_link_status(struct hclge_dev *hdev)
2315 struct hnae3_client *rclient = hdev->roce_client;
2316 struct hnae3_client *client = hdev->nic_client;
2317 struct hnae3_handle *rhandle;
2318 struct hnae3_handle *handle;
2324 state = hclge_get_mac_phy_link(hdev);
2325 if (state != hdev->hw.mac.link) {
2326 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2327 handle = &hdev->vport[i].nic;
2328 client->ops->link_status_change(handle, state);
2329 hclge_config_mac_tnl_int(hdev, state);
2330 rhandle = &hdev->vport[i].roce;
2331 if (rclient && rclient->ops->link_status_change)
2332 rclient->ops->link_status_change(rhandle,
2335 hdev->hw.mac.link = state;
2339 static void hclge_update_port_capability(struct hclge_mac *mac)
/* firmware cannot identify the backplane type; the media type
 * read from the configuration can help to deal with it
 */
2344 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2345 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2346 mac->module_type = HNAE3_MODULE_TYPE_KR;
2347 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2348 mac->module_type = HNAE3_MODULE_TYPE_TP;
if (mac->support_autoneg) {
2351 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2352 linkmode_copy(mac->advertising, mac->supported);
2354 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2356 linkmode_zero(mac->advertising);
2360 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2362 struct hclge_sfp_info_cmd *resp = NULL;
2363 struct hclge_desc desc;
2366 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2367 resp = (struct hclge_sfp_info_cmd *)desc.data;
2368 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2369 if (ret == -EOPNOTSUPP) {
2370 dev_warn(&hdev->pdev->dev,
2371 "IMP do not support get SFP speed %d\n", ret);
2374 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2378 *speed = le32_to_cpu(resp->speed);
2383 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2385 struct hclge_sfp_info_cmd *resp;
2386 struct hclge_desc desc;
2389 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2390 resp = (struct hclge_sfp_info_cmd *)desc.data;
2392 resp->query_type = QUERY_ACTIVE_SPEED;
2394 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2395 if (ret == -EOPNOTSUPP) {
2396 dev_warn(&hdev->pdev->dev,
2397 "IMP does not support get SFP info %d\n", ret);
2400 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2404 mac->speed = le32_to_cpu(resp->speed);
/* if resp->speed_ability is 0, it means it's an old version of
 * firmware; do not update these params
 */
2408 if (resp->speed_ability) {
2409 mac->module_type = le32_to_cpu(resp->module_type);
2410 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2411 mac->autoneg = resp->autoneg;
2412 mac->support_autoneg = resp->autoneg_ability;
2414 mac->speed_type = QUERY_SFP_SPEED;
2420 static int hclge_update_port_info(struct hclge_dev *hdev)
2422 struct hclge_mac *mac = &hdev->hw.mac;
2423 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2426 /* get the port info from SFP cmd if not copper port */
2427 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
/* if IMP does not support getting SFP/qSFP info, return directly */
2431 if (!hdev->support_sfp_query)
2434 if (hdev->pdev->revision >= 0x21)
2435 ret = hclge_get_sfp_info(hdev, mac);
2437 ret = hclge_get_sfp_speed(hdev, &speed);
2439 if (ret == -EOPNOTSUPP) {
2440 hdev->support_sfp_query = false;
2446 if (hdev->pdev->revision >= 0x21) {
2447 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2448 hclge_update_port_capability(mac);
2451 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2454 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2455 return 0; /* do nothing if no SFP */
2457 /* must config full duplex for SFP */
2458 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2462 static int hclge_get_status(struct hnae3_handle *handle)
2464 struct hclge_vport *vport = hclge_get_vport(handle);
2465 struct hclge_dev *hdev = vport->back;
2467 hclge_update_link_status(hdev);
2469 return hdev->hw.mac.link;
2472 static void hclge_service_timer(struct timer_list *t)
2474 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2476 mod_timer(&hdev->service_timer, jiffies + HZ);
2477 hdev->hw_stats.stats_timer++;
2478 hclge_task_schedule(hdev);
2481 static void hclge_service_complete(struct hclge_dev *hdev)
2483 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2485 /* Flush memory before next watchdog */
2486 smp_mb__before_atomic();
2487 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2490 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2492 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2494 /* fetch the events from their corresponding regs */
2495 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2496 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2497 msix_src_reg = hclge_read_dev(&hdev->hw,
2498 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2500 /* Assumption: If by any chance reset and mailbox events are reported
2501 * together then we will only process reset event in this go and will
 * defer the processing of the mailbox events. Since we would not have
 * cleared the RX CMDQ event this time, we would receive another
 * interrupt from the H/W just for the mailbox.
 */
2507 /* check for vector0 reset event sources */
2508 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2509 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2510 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2511 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2512 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2513 hdev->rst_stats.imp_rst_cnt++;
2514 return HCLGE_VECTOR0_EVENT_RST;
2517 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2518 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2519 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2520 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2521 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2522 hdev->rst_stats.global_rst_cnt++;
2523 return HCLGE_VECTOR0_EVENT_RST;
2526 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2527 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2528 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2529 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2530 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2531 hdev->rst_stats.core_rst_cnt++;
2532 return HCLGE_VECTOR0_EVENT_RST;
2535 /* check for vector0 msix event source */
2536 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2537 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2539 return HCLGE_VECTOR0_EVENT_ERR;
2542 /* check for vector0 mailbox(=CMDQ RX) event source */
2543 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2544 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2545 *clearval = cmdq_src_reg;
2546 return HCLGE_VECTOR0_EVENT_MBX;
2549 /* print other vector0 event source */
2550 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2551 cmdq_src_reg, msix_src_reg);
2552 return HCLGE_VECTOR0_EVENT_OTHER;
2555 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2558 switch (event_type) {
2559 case HCLGE_VECTOR0_EVENT_RST:
2560 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2562 case HCLGE_VECTOR0_EVENT_MBX:
2563 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2570 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2572 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2573 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2574 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2575 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2576 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2579 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2581 writel(enable ? 1 : 0, vector->addr);
2584 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2586 struct hclge_dev *hdev = data;
2590 hclge_enable_vector(&hdev->misc_vector, false);
2591 event_cause = hclge_check_event_cause(hdev, &clearval);
/* vector 0 interrupt is shared with reset and mailbox source events. */
2594 switch (event_cause) {
2595 case HCLGE_VECTOR0_EVENT_ERR:
2596 /* we do not know what type of reset is required now. This could
2597 * only be decided after we fetch the type of errors which
 * caused this event. Therefore, we will do the following for now:
 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
 *    have deferred the choice of the reset type to be used.
 * 2. Schedule the reset service task.
2602 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2603 * will fetch the correct type of reset. This would be done
2604 * by first decoding the types of errors.
2606 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2608 case HCLGE_VECTOR0_EVENT_RST:
2609 hclge_reset_task_schedule(hdev);
2611 case HCLGE_VECTOR0_EVENT_MBX:
/* If we are here then,
 * 1. Either we are not handling any mbx task and we are not
 *    scheduled as well,
 *                        OR
 * 2. We could be handling a mbx task but nothing more is
 *    scheduled.
 * In both cases, we should schedule the mbx task as there are more
 * mbx messages reported by this interrupt.
 */
2621 hclge_mbx_task_schedule(hdev);
2624 dev_warn(&hdev->pdev->dev,
2625 "received unknown or unhandled event of vector0\n");
/* clear the source of the interrupt if it is not caused by reset */
2630 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2631 hclge_clear_event_cause(hdev, event_cause, clearval);
2632 hclge_enable_vector(&hdev->misc_vector, true);
2638 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2640 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2641 dev_warn(&hdev->pdev->dev,
2642 "vector(vector_id %d) has been freed.\n", vector_id);
2646 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2647 hdev->num_msi_left += 1;
2648 hdev->num_msi_used -= 1;
2651 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2653 struct hclge_misc_vector *vector = &hdev->misc_vector;
2655 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2657 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2658 hdev->vector_status[0] = 0;
2660 hdev->num_msi_left -= 1;
2661 hdev->num_msi_used += 1;
2664 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2668 hclge_get_misc_vector(hdev);
/* this would be explicitly freed in hclge_misc_irq_uninit() */
2671 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2672 0, "hclge_misc", hdev);
2674 hclge_free_vector(hdev, 0);
2675 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2676 hdev->misc_vector.vector_irq);
2682 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2684 free_irq(hdev->misc_vector.vector_irq, hdev);
2685 hclge_free_vector(hdev, 0);
2688 int hclge_notify_client(struct hclge_dev *hdev,
2689 enum hnae3_reset_notify_type type)
2691 struct hnae3_client *client = hdev->nic_client;
2694 if (!client->ops->reset_notify)
2697 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2698 struct hnae3_handle *handle = &hdev->vport[i].nic;
2701 ret = client->ops->reset_notify(handle, type);
2703 dev_err(&hdev->pdev->dev,
2704 "notify nic client failed %d(%d)\n", type, ret);
2712 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2713 enum hnae3_reset_notify_type type)
2715 struct hnae3_client *client = hdev->roce_client;
2722 if (!client->ops->reset_notify)
2725 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2726 struct hnae3_handle *handle = &hdev->vport[i].roce;
2728 ret = client->ops->reset_notify(handle, type);
2730 dev_err(&hdev->pdev->dev,
2731 "notify roce client failed %d(%d)",
2740 static int hclge_reset_wait(struct hclge_dev *hdev)
#define HCLGE_RESET_WAIT_MS 100
2743 #define HCLGE_RESET_WAIT_CNT 200
2744 u32 val, reg, reg_bit;
2747 switch (hdev->reset_type) {
2748 case HNAE3_IMP_RESET:
2749 reg = HCLGE_GLOBAL_RESET_REG;
2750 reg_bit = HCLGE_IMP_RESET_BIT;
2752 case HNAE3_GLOBAL_RESET:
2753 reg = HCLGE_GLOBAL_RESET_REG;
2754 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2756 case HNAE3_CORE_RESET:
2757 reg = HCLGE_GLOBAL_RESET_REG;
2758 reg_bit = HCLGE_CORE_RESET_BIT;
2760 case HNAE3_FUNC_RESET:
2761 reg = HCLGE_FUN_RST_ING;
2762 reg_bit = HCLGE_FUN_RST_ING_B;
2764 case HNAE3_FLR_RESET:
2767 dev_err(&hdev->pdev->dev,
2768 "Wait for unsupported reset type: %d\n",
2773 if (hdev->reset_type == HNAE3_FLR_RESET) {
2774 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2775 cnt++ < HCLGE_RESET_WAIT_CNT)
msleep(HCLGE_RESET_WAIT_MS);
2778 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2779 dev_err(&hdev->pdev->dev,
2780 "flr wait timeout: %d\n", cnt);
2787 val = hclge_read_dev(&hdev->hw, reg);
2788 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WAIT_MS);
2790 val = hclge_read_dev(&hdev->hw, reg);
2794 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2795 dev_warn(&hdev->pdev->dev,
2796 "Wait for reset timeout: %d\n", hdev->reset_type);
2803 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2805 struct hclge_vf_rst_cmd *req;
2806 struct hclge_desc desc;
2808 req = (struct hclge_vf_rst_cmd *)desc.data;
2809 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2810 req->dest_vfid = func_id;
2815 return hclge_cmd_send(&hdev->hw, &desc, 1);
2818 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2822 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2823 struct hclge_vport *vport = &hdev->vport[i];
2826 /* Send cmd to set/clear VF's FUNC_RST_ING */
2827 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2829 dev_err(&hdev->pdev->dev,
2830 "set vf(%d) rst failed %d!\n",
2831 vport->vport_id, ret);
2835 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2838 /* Inform VF to process the reset.
2839 * hclge_inform_reset_assert_to_vf may fail if VF
2840 * driver is not loaded.
2842 ret = hclge_inform_reset_assert_to_vf(vport);
2844 dev_warn(&hdev->pdev->dev,
2845 "inform reset to vf(%d) failed %d!\n",
2846 vport->vport_id, ret);
2852 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2854 struct hclge_desc desc;
2855 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2858 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2859 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2860 req->fun_reset_vfid = func_id;
2862 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2864 dev_err(&hdev->pdev->dev,
2865 "send function reset cmd fail, status =%d\n", ret);
2870 static void hclge_do_reset(struct hclge_dev *hdev)
2872 struct hnae3_handle *handle = &hdev->vport[0].nic;
2873 struct pci_dev *pdev = hdev->pdev;
2876 if (hclge_get_hw_reset_stat(handle)) {
dev_info(&pdev->dev, "Hardware reset not finished\n");
2878 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2879 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2880 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2884 switch (hdev->reset_type) {
2885 case HNAE3_GLOBAL_RESET:
2886 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2887 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2888 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2889 dev_info(&pdev->dev, "Global Reset requested\n");
2891 case HNAE3_CORE_RESET:
2892 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2893 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2894 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2895 dev_info(&pdev->dev, "Core Reset requested\n");
2897 case HNAE3_FUNC_RESET:
2898 dev_info(&pdev->dev, "PF Reset requested\n");
2899 /* schedule again to check later */
2900 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2901 hclge_reset_task_schedule(hdev);
2903 case HNAE3_FLR_RESET:
2904 dev_info(&pdev->dev, "FLR requested\n");
2905 /* schedule again to check later */
2906 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2907 hclge_reset_task_schedule(hdev);
2910 dev_warn(&pdev->dev,
2911 "Unsupported reset type: %d\n", hdev->reset_type);
2916 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2917 unsigned long *addr)
2919 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2921 /* first, resolve any unknown reset type to the known type(s) */
2922 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2923 /* we will intentionally ignore any errors from this function
2924 * as we will end up in *some* reset request in any case
2926 hclge_handle_hw_msix_error(hdev, addr);
2927 clear_bit(HNAE3_UNKNOWN_RESET, addr);
/* We deferred the clearing of the error event which caused the
 * interrupt since it was not possible to do that in the
 * interrupt context (and this is the reason we introduced the
 * new UNKNOWN reset type). Now that the errors have been
 * handled and cleared in hardware, we can safely enable
 * interrupts. This is an exception to the norm.
 */
2935 hclge_enable_vector(&hdev->misc_vector, true);
2938 /* return the highest priority reset level amongst all */
2939 if (test_bit(HNAE3_IMP_RESET, addr)) {
2940 rst_level = HNAE3_IMP_RESET;
2941 clear_bit(HNAE3_IMP_RESET, addr);
2942 clear_bit(HNAE3_GLOBAL_RESET, addr);
2943 clear_bit(HNAE3_CORE_RESET, addr);
2944 clear_bit(HNAE3_FUNC_RESET, addr);
2945 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2946 rst_level = HNAE3_GLOBAL_RESET;
2947 clear_bit(HNAE3_GLOBAL_RESET, addr);
2948 clear_bit(HNAE3_CORE_RESET, addr);
2949 clear_bit(HNAE3_FUNC_RESET, addr);
2950 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2951 rst_level = HNAE3_CORE_RESET;
2952 clear_bit(HNAE3_CORE_RESET, addr);
2953 clear_bit(HNAE3_FUNC_RESET, addr);
2954 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2955 rst_level = HNAE3_FUNC_RESET;
2956 clear_bit(HNAE3_FUNC_RESET, addr);
2957 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2958 rst_level = HNAE3_FLR_RESET;
2959 clear_bit(HNAE3_FLR_RESET, addr);
2962 if (hdev->reset_type != HNAE3_NONE_RESET &&
2963 rst_level < hdev->reset_type)
2964 return HNAE3_NONE_RESET;
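/* Editorial sketch, not part of the original driver: the cascade above
 * implements a strict priority order, IMP > GLOBAL > CORE > FUNC > FLR,
 * where picking one of IMP..FUNC also clears every lower level in that
 * group while FLR is tracked independently. An equivalent table-driven
 * form, assuming only the enum values used above:
 */
static enum hnae3_reset_type hclge_pick_reset_level(unsigned long *addr)
{
	static const enum hnae3_reset_type prio[] = {
		HNAE3_IMP_RESET,
		HNAE3_GLOBAL_RESET,
		HNAE3_CORE_RESET,
		HNAE3_FUNC_RESET,
	};
	int i, j;

	for (i = 0; i < ARRAY_SIZE(prio); i++) {
		if (!test_bit(prio[i], addr))
			continue;
		/* clear the chosen level and every lower one in the group */
		for (j = i; j < ARRAY_SIZE(prio); j++)
			clear_bit(prio[j], addr);
		return prio[i];
	}

	if (test_bit(HNAE3_FLR_RESET, addr)) {
		clear_bit(HNAE3_FLR_RESET, addr);
		return HNAE3_FLR_RESET;
	}

	return HNAE3_NONE_RESET;
}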
2969 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2973 switch (hdev->reset_type) {
2974 case HNAE3_IMP_RESET:
2975 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2977 case HNAE3_GLOBAL_RESET:
2978 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2980 case HNAE3_CORE_RESET:
2981 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2990 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2991 hclge_enable_vector(&hdev->misc_vector, true);
2994 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2998 switch (hdev->reset_type) {
2999 case HNAE3_FUNC_RESET:
3001 case HNAE3_FLR_RESET:
3002 ret = hclge_set_all_vf_rst(hdev, true);
3011 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3016 switch (hdev->reset_type) {
3017 case HNAE3_FUNC_RESET:
/* There is no mechanism for the PF to know if the VF has stopped IO
 * for now, so just wait 100 ms for the VF to stop IO
 */
msleep(100);
3022 ret = hclge_func_reset_cmd(hdev, 0);
3024 dev_err(&hdev->pdev->dev,
3025 "asserting function reset fail %d!\n", ret);
/* After performing PF reset, it is not necessary to do the
3030 * mailbox handling or send any command to firmware, because
3031 * any mailbox handling or command to firmware is only valid
3032 * after hclge_cmd_init is called.
3034 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3035 hdev->rst_stats.pf_rst_cnt++;
3037 case HNAE3_FLR_RESET:
/* There is no mechanism for the PF to know if the VF has stopped IO
 * for now, so just wait 100 ms for the VF to stop IO
 */
msleep(100);
3042 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3043 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3044 hdev->rst_stats.flr_rst_cnt++;
3046 case HNAE3_IMP_RESET:
3047 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3048 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3049 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3055 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3060 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3062 #define MAX_RESET_FAIL_CNT 5
3063 #define RESET_UPGRADE_DELAY_SEC 10
3065 if (hdev->reset_pending) {
3066 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3067 hdev->reset_pending);
3069 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3070 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3071 BIT(HCLGE_IMP_RESET_BIT))) {
3072 dev_info(&hdev->pdev->dev,
3073 "reset failed because IMP Reset is pending\n");
3074 hclge_clear_reset_cause(hdev);
3076 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3077 hdev->reset_fail_cnt++;
3079 set_bit(hdev->reset_type, &hdev->reset_pending);
3080 dev_info(&hdev->pdev->dev,
3081 "re-schedule to wait for hw reset done\n");
3085 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3086 hclge_clear_reset_cause(hdev);
3087 mod_timer(&hdev->reset_timer,
3088 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3093 hclge_clear_reset_cause(hdev);
3094 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3098 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3102 switch (hdev->reset_type) {
3103 case HNAE3_FUNC_RESET:
3105 case HNAE3_FLR_RESET:
3106 ret = hclge_set_all_vf_rst(hdev, false);
3115 static void hclge_reset(struct hclge_dev *hdev)
3117 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3118 bool is_timeout = false;
3121 /* Initialize ae_dev reset status as well, in case enet layer wants to
3122 * know if device is undergoing reset
3124 ae_dev->reset_type = hdev->reset_type;
3125 hdev->rst_stats.reset_cnt++;
3126 /* perform reset of the stack & ae device for a client */
3127 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3131 ret = hclge_reset_prepare_down(hdev);
3136 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3138 goto err_reset_lock;
3142 ret = hclge_reset_prepare_wait(hdev);
3146 if (hclge_reset_wait(hdev)) {
3151 hdev->rst_stats.hw_reset_done_cnt++;
3153 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3158 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3160 goto err_reset_lock;
3162 ret = hclge_reset_ae_dev(hdev->ae_dev);
3164 goto err_reset_lock;
3166 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3168 goto err_reset_lock;
3170 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3172 goto err_reset_lock;
3174 hclge_clear_reset_cause(hdev);
3176 ret = hclge_reset_prepare_up(hdev);
3178 goto err_reset_lock;
3180 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3182 goto err_reset_lock;
3186 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3190 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3194 hdev->last_reset_time = jiffies;
3195 hdev->reset_fail_cnt = 0;
3196 hdev->rst_stats.reset_done_cnt++;
3197 ae_dev->reset_type = HNAE3_NONE_RESET;
3198 del_timer(&hdev->reset_timer);
3205 if (hclge_reset_err_handle(hdev, is_timeout))
3206 hclge_reset_task_schedule(hdev);
3209 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3211 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3212 struct hclge_dev *hdev = ae_dev->priv;
/* We might end up getting called broadly because of the 2 cases below:
 * 1. A recoverable error was conveyed through APEI and the only way
 *    to bring back normalcy is to reset.
 * 2. A new reset request from the stack due to a timeout.
 *
 * For the first case, the error event might not have an ae handle
 * available. Check if this is a new reset request and we are not here
 * just because the last reset attempt did not succeed and the watchdog
 * hit us again. We will know this if the last reset request did not
 * occur very recently (watchdog timer = 5*HZ, let us check after a
 * sufficiently large time, say 4*5*HZ).
 * In case of a new request we reset the "reset level" to PF reset.
3225 * And if it is a repeat reset request of the most recent one then we
3226 * want to make sure we throttle the reset request. Therefore, we will
3227 * not allow it again before 3*HZ times.
3230 handle = &hdev->vport[0].nic;
3232 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3234 else if (hdev->default_reset_request)
3236 hclge_get_reset_level(hdev,
3237 &hdev->default_reset_request);
3238 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3239 hdev->reset_level = HNAE3_FUNC_RESET;
3241 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3244 /* request reset & schedule reset task */
3245 set_bit(hdev->reset_level, &hdev->reset_request);
3246 hclge_reset_task_schedule(hdev);
3248 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3249 hdev->reset_level++;
3252 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3253 enum hnae3_reset_type rst_type)
3255 struct hclge_dev *hdev = ae_dev->priv;
3257 set_bit(rst_type, &hdev->default_reset_request);
3260 static void hclge_reset_timer(struct timer_list *t)
3262 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3264 dev_info(&hdev->pdev->dev,
3265 "triggering global reset in reset timer\n");
3266 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3267 hclge_reset_event(hdev->pdev, NULL);
3270 static void hclge_reset_subtask(struct hclge_dev *hdev)
3272 /* check if there is any ongoing reset in the hardware. This status can
 * be checked from reset_pending. If there is one, we need to wait for
 * the hardware to complete the reset.
 * a. If we are able to figure out in reasonable time that the hardware
 *    has fully reset, then we can proceed with the driver and client
 *    init etc.
 * b. else, we can come back later to check this status so re-sched
 *    now.
 */
3281 hdev->last_reset_time = jiffies;
3282 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3283 if (hdev->reset_type != HNAE3_NONE_RESET)
3286 /* check if we got any *new* reset requests to be honored */
3287 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3288 if (hdev->reset_type != HNAE3_NONE_RESET)
3289 hclge_do_reset(hdev);
3291 hdev->reset_type = HNAE3_NONE_RESET;
3294 static void hclge_reset_service_task(struct work_struct *work)
3296 struct hclge_dev *hdev =
3297 container_of(work, struct hclge_dev, rst_service_task);
3299 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3302 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3304 hclge_reset_subtask(hdev);
3306 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3309 static void hclge_mailbox_service_task(struct work_struct *work)
3311 struct hclge_dev *hdev =
3312 container_of(work, struct hclge_dev, mbx_service_task);
3314 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3317 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3319 hclge_mbx_handler(hdev);
3321 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3324 static void hclge_update_vport_alive(struct hclge_dev *hdev)
/* start from vport 1, since the PF (vport 0) is always alive */
3329 for (i = 1; i < hdev->num_alloc_vport; i++) {
3330 struct hclge_vport *vport = &hdev->vport[i];
3332 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3333 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
/* If the vf is not alive, reset its mps to the default value */
3336 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3337 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3341 static void hclge_service_task(struct work_struct *work)
3343 struct hclge_dev *hdev =
3344 container_of(work, struct hclge_dev, service_task);
3346 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3347 hclge_update_stats_for_all(hdev);
3348 hdev->hw_stats.stats_timer = 0;
3351 hclge_update_port_info(hdev);
3352 hclge_update_link_status(hdev);
3353 hclge_update_vport_alive(hdev);
3354 hclge_service_complete(hdev);
3357 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3359 /* VF handle has no client */
3360 if (!handle->client)
3361 return container_of(handle, struct hclge_vport, nic);
3362 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3363 return container_of(handle, struct hclge_vport, roce);
3365 return container_of(handle, struct hclge_vport, nic);
3368 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3369 struct hnae3_vector_info *vector_info)
3371 struct hclge_vport *vport = hclge_get_vport(handle);
3372 struct hnae3_vector_info *vector = vector_info;
3373 struct hclge_dev *hdev = vport->back;
3377 vector_num = min(hdev->num_msi_left, vector_num);
3379 for (j = 0; j < vector_num; j++) {
3380 for (i = 1; i < hdev->num_msi; i++) {
3381 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3382 vector->vector = pci_irq_vector(hdev->pdev, i);
3383 vector->io_addr = hdev->hw.io_base +
3384 HCLGE_VECTOR_REG_BASE +
3385 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3387 HCLGE_VECTOR_VF_OFFSET;
3388 hdev->vector_status[i] = vport->vport_id;
3389 hdev->vector_irq[i] = vector->vector;
3398 hdev->num_msi_left -= alloc;
3399 hdev->num_msi_used += alloc;
3404 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3408 for (i = 0; i < hdev->num_msi; i++)
3409 if (vector == hdev->vector_irq[i])
3415 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3417 struct hclge_vport *vport = hclge_get_vport(handle);
3418 struct hclge_dev *hdev = vport->back;
3421 vector_id = hclge_get_vector_index(hdev, vector);
3422 if (vector_id < 0) {
3423 dev_err(&hdev->pdev->dev,
3424 "Get vector index fail. vector_id =%d\n", vector_id);
3428 hclge_free_vector(hdev, vector_id);
3433 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3435 return HCLGE_RSS_KEY_SIZE;
3438 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3440 return HCLGE_RSS_IND_TBL_SIZE;
3443 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3444 const u8 hfunc, const u8 *key)
3446 struct hclge_rss_config_cmd *req;
3447 struct hclge_desc desc;
3452 req = (struct hclge_rss_config_cmd *)desc.data;
3454 for (key_offset = 0; key_offset < 3; key_offset++) {
3455 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3458 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3459 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3461 if (key_offset == 2)
3463 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3465 key_size = HCLGE_RSS_HASH_KEY_NUM;
3467 memcpy(req->hash_key,
3468 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3470 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3472 dev_err(&hdev->pdev->dev,
3473 "Configure RSS config fail, status = %d\n",
3481 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3483 struct hclge_rss_indirection_table_cmd *req;
3484 struct hclge_desc desc;
3488 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3490 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3494 req->start_table_index =
3495 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3496 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3498 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3499 req->rss_result[j] =
3500 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3502 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3504 dev_err(&hdev->pdev->dev,
3505 "Configure rss indir table fail,status = %d\n",
3513 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3514 u16 *tc_size, u16 *tc_offset)
3516 struct hclge_rss_tc_mode_cmd *req;
3517 struct hclge_desc desc;
3521 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3522 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3524 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3527 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3528 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3529 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3530 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3531 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3533 req->rss_tc_mode[i] = cpu_to_le16(mode);
3536 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3538 dev_err(&hdev->pdev->dev,
3539 "Configure rss tc mode fail, status = %d\n", ret);
3544 static void hclge_get_rss_type(struct hclge_vport *vport)
3546 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3547 vport->rss_tuple_sets.ipv4_udp_en ||
3548 vport->rss_tuple_sets.ipv4_sctp_en ||
3549 vport->rss_tuple_sets.ipv6_tcp_en ||
3550 vport->rss_tuple_sets.ipv6_udp_en ||
3551 vport->rss_tuple_sets.ipv6_sctp_en)
3552 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3553 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3554 vport->rss_tuple_sets.ipv6_fragment_en)
3555 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3557 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3560 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3562 struct hclge_rss_input_tuple_cmd *req;
3563 struct hclge_desc desc;
3566 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3568 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3570 /* Get the tuple cfg from pf */
3571 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3572 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3573 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3574 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3575 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3576 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3577 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3578 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3579 hclge_get_rss_type(&hdev->vport[0]);
3580 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3582 dev_err(&hdev->pdev->dev,
3583 "Configure rss input fail, status = %d\n", ret);
3587 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3590 struct hclge_vport *vport = hclge_get_vport(handle);
3593 /* Get hash algorithm */
3595 switch (vport->rss_algo) {
3596 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3597 *hfunc = ETH_RSS_HASH_TOP;
3599 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3600 *hfunc = ETH_RSS_HASH_XOR;
3603 *hfunc = ETH_RSS_HASH_UNKNOWN;
3608 /* Get the RSS Key required by the user */
3610 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3612 /* Get indirect table */
3614 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3615 indir[i] = vport->rss_indirection_tbl[i];
3620 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3621 const u8 *key, const u8 hfunc)
3623 struct hclge_vport *vport = hclge_get_vport(handle);
3624 struct hclge_dev *hdev = vport->back;
/* Set the RSS Hash Key if specified by the user */
3631 case ETH_RSS_HASH_TOP:
3632 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3634 case ETH_RSS_HASH_XOR:
3635 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3637 case ETH_RSS_HASH_NO_CHANGE:
3638 hash_algo = vport->rss_algo;
3644 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
/* Update the shadow RSS key with the user specified key */
3649 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3650 vport->rss_algo = hash_algo;
3653 /* Update the shadow RSS table with user specified qids */
3654 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3655 vport->rss_indirection_tbl[i] = indir[i];
3657 /* Update the hardware */
3658 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3661 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3663 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3665 if (nfc->data & RXH_L4_B_2_3)
3666 hash_sets |= HCLGE_D_PORT_BIT;
3668 hash_sets &= ~HCLGE_D_PORT_BIT;
3670 if (nfc->data & RXH_IP_SRC)
3671 hash_sets |= HCLGE_S_IP_BIT;
3673 hash_sets &= ~HCLGE_S_IP_BIT;
3675 if (nfc->data & RXH_IP_DST)
3676 hash_sets |= HCLGE_D_IP_BIT;
3678 hash_sets &= ~HCLGE_D_IP_BIT;
3680 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3681 hash_sets |= HCLGE_V_TAG_BIT;
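/* Editorial example, not in the original driver: for a TCP over IPv4 flow
 * hashed on both IP addresses and both L4 port halves,
 *
 *	struct ethtool_rxnfc nfc = {
 *		.flow_type = TCP_V4_FLOW,
 *		.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
 *	};
 *
 * hclge_get_rss_hash_bits(&nfc) returns HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT | HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; HCLGE_V_TAG_BIT is
 * only added for the SCTP flow types.
 */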
3686 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3687 struct ethtool_rxnfc *nfc)
3689 struct hclge_vport *vport = hclge_get_vport(handle);
3690 struct hclge_dev *hdev = vport->back;
3691 struct hclge_rss_input_tuple_cmd *req;
3692 struct hclge_desc desc;
3696 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3697 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3700 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3701 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3703 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3704 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3705 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3706 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3707 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3708 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3709 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3710 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3712 tuple_sets = hclge_get_rss_hash_bits(nfc);
3713 switch (nfc->flow_type) {
3715 req->ipv4_tcp_en = tuple_sets;
3718 req->ipv6_tcp_en = tuple_sets;
3721 req->ipv4_udp_en = tuple_sets;
3724 req->ipv6_udp_en = tuple_sets;
3727 req->ipv4_sctp_en = tuple_sets;
3730 if ((nfc->data & RXH_L4_B_0_1) ||
3731 (nfc->data & RXH_L4_B_2_3))
3734 req->ipv6_sctp_en = tuple_sets;
3737 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3740 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3746 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3748 dev_err(&hdev->pdev->dev,
3749 "Set rss tuple fail, status = %d\n", ret);
3753 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3754 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3755 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3756 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3757 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3758 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3759 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3760 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3761 hclge_get_rss_type(vport);
3765 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3766 struct ethtool_rxnfc *nfc)
3768 struct hclge_vport *vport = hclge_get_vport(handle);
3773 switch (nfc->flow_type) {
3775 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3778 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3781 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3784 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3787 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3790 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3794 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3803 if (tuple_sets & HCLGE_D_PORT_BIT)
3804 nfc->data |= RXH_L4_B_2_3;
3805 if (tuple_sets & HCLGE_S_PORT_BIT)
3806 nfc->data |= RXH_L4_B_0_1;
3807 if (tuple_sets & HCLGE_D_IP_BIT)
3808 nfc->data |= RXH_IP_DST;
3809 if (tuple_sets & HCLGE_S_IP_BIT)
3810 nfc->data |= RXH_IP_SRC;
3815 static int hclge_get_tc_size(struct hnae3_handle *handle)
3817 struct hclge_vport *vport = hclge_get_vport(handle);
3818 struct hclge_dev *hdev = vport->back;
3820 return hdev->rss_size_max;
3823 int hclge_rss_init_hw(struct hclge_dev *hdev)
3825 struct hclge_vport *vport = hdev->vport;
3826 u8 *rss_indir = vport[0].rss_indirection_tbl;
3827 u16 rss_size = vport[0].alloc_rss_size;
3828 u8 *key = vport[0].rss_hash_key;
3829 u8 hfunc = vport[0].rss_algo;
3830 u16 tc_offset[HCLGE_MAX_TC_NUM];
3831 u16 tc_valid[HCLGE_MAX_TC_NUM];
3832 u16 tc_size[HCLGE_MAX_TC_NUM];
3836 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3840 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3844 ret = hclge_set_rss_input_tuple(hdev);
/* Each TC has the same queue size, and the tc_size set to hardware is
 * the log2 of the roundup power of two of rss_size; the actual queue
 * size is limited by the indirection table.
 */
3852 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3853 dev_err(&hdev->pdev->dev,
3854 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3859 roundup_size = roundup_pow_of_two(rss_size);
3860 roundup_size = ilog2(roundup_size);
3862 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3865 if (!(hdev->hw_tc_map & BIT(i)))
3869 tc_size[i] = roundup_size;
3870 tc_offset[i] = rss_size * i;
3873 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
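/* Editorial example, not in the original driver: for rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so tc_size[i] = 5 is
 * written for each valid TC and tc_offset[i] = 24 * i; the hardware sees
 * "2^5 queues per TC" while the indirection table keeps the real spread
 * within the 24 allocated RSS queues.
 */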
3876 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3878 struct hclge_vport *vport = hdev->vport;
3881 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3882 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3883 vport[j].rss_indirection_tbl[i] =
3884 i % vport[j].alloc_rss_size;
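/* Editorial example, not in the original driver: with alloc_rss_size = 4,
 * the default table above reads 0, 1, 2, 3, 0, 1, 2, 3, ... across all
 * HCLGE_RSS_IND_TBL_SIZE entries, i.e. a plain round-robin spread of the
 * allocated RSS queues.
 */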
3888 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3890 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3891 struct hclge_vport *vport = hdev->vport;
3893 if (hdev->pdev->revision >= 0x21)
3894 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3896 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3897 vport[i].rss_tuple_sets.ipv4_tcp_en =
3898 HCLGE_RSS_INPUT_TUPLE_OTHER;
3899 vport[i].rss_tuple_sets.ipv4_udp_en =
3900 HCLGE_RSS_INPUT_TUPLE_OTHER;
3901 vport[i].rss_tuple_sets.ipv4_sctp_en =
3902 HCLGE_RSS_INPUT_TUPLE_SCTP;
3903 vport[i].rss_tuple_sets.ipv4_fragment_en =
3904 HCLGE_RSS_INPUT_TUPLE_OTHER;
3905 vport[i].rss_tuple_sets.ipv6_tcp_en =
3906 HCLGE_RSS_INPUT_TUPLE_OTHER;
3907 vport[i].rss_tuple_sets.ipv6_udp_en =
3908 HCLGE_RSS_INPUT_TUPLE_OTHER;
3909 vport[i].rss_tuple_sets.ipv6_sctp_en =
3910 HCLGE_RSS_INPUT_TUPLE_SCTP;
3911 vport[i].rss_tuple_sets.ipv6_fragment_en =
3912 HCLGE_RSS_INPUT_TUPLE_OTHER;
3914 vport[i].rss_algo = rss_algo;
3916 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3917 HCLGE_RSS_KEY_SIZE);
3920 hclge_rss_indir_init_cfg(hdev);
3923 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3924 int vector_id, bool en,
3925 struct hnae3_ring_chain_node *ring_chain)
3927 struct hclge_dev *hdev = vport->back;
3928 struct hnae3_ring_chain_node *node;
3929 struct hclge_desc desc;
3930 struct hclge_ctrl_vector_chain_cmd *req
3931 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3932 enum hclge_cmd_status status;
3933 enum hclge_opcode_type op;
3934 u16 tqp_type_and_id;
3937 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3938 hclge_cmd_setup_basic_desc(&desc, op, false);
3939 req->int_vector_id = vector_id;
3942 for (node = ring_chain; node; node = node->next) {
3943 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3944 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3946 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3947 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3948 HCLGE_TQP_ID_S, node->tqp_index);
3949 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3951 hnae3_get_field(node->int_gl_idx,
3952 HNAE3_RING_GL_IDX_M,
3953 HNAE3_RING_GL_IDX_S));
3954 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3955 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3956 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3957 req->vfid = vport->vport_id;
3959 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3961 dev_err(&hdev->pdev->dev,
3962 "Map TQP fail, status is %d.\n",
3968 hclge_cmd_setup_basic_desc(&desc,
3971 req->int_vector_id = vector_id;
3976 req->int_cause_num = i;
3977 req->vfid = vport->vport_id;
3978 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3980 dev_err(&hdev->pdev->dev,
3981 "Map TQP fail, status is %d.\n", status);
3989 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3991 struct hnae3_ring_chain_node *ring_chain)
3993 struct hclge_vport *vport = hclge_get_vport(handle);
3994 struct hclge_dev *hdev = vport->back;
3997 vector_id = hclge_get_vector_index(hdev, vector);
3998 if (vector_id < 0) {
3999 dev_err(&hdev->pdev->dev,
4000 "Get vector index fail. vector_id =%d\n", vector_id);
4004 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4007 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4009 struct hnae3_ring_chain_node *ring_chain)
4011 struct hclge_vport *vport = hclge_get_vport(handle);
4012 struct hclge_dev *hdev = vport->back;
4015 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4018 vector_id = hclge_get_vector_index(hdev, vector);
4019 if (vector_id < 0) {
4020 dev_err(&handle->pdev->dev,
4021 "Get vector index fail. ret =%d\n", vector_id);
4025 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4027 dev_err(&handle->pdev->dev,
4028 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4035 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4036 struct hclge_promisc_param *param)
4038 struct hclge_promisc_cfg_cmd *req;
4039 struct hclge_desc desc;
4042 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4044 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4045 req->vf_id = param->vf_id;
/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
 * pdev revision(0x20); newer revisions support them. Setting these
 * two fields will not return an error when the driver sends the
 * command to the firmware on revision(0x20).
 */
4052 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4053 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4055 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4057 dev_err(&hdev->pdev->dev,
4058 "Set promisc mode fail, status is %d.\n", ret);
4063 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4064 bool en_mc, bool en_bc, int vport_id)
4069 memset(param, 0, sizeof(struct hclge_promisc_param));
4071 param->enable = HCLGE_PROMISC_EN_UC;
4073 param->enable |= HCLGE_PROMISC_EN_MC;
4075 param->enable |= HCLGE_PROMISC_EN_BC;
4076 param->vf_id = vport_id;
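/* Editorial example, not in the original driver: enabling unicast and
 * broadcast promisc, but not multicast, for vport 0 composes to
 *
 *	struct hclge_promisc_param param;
 *
 *	hclge_promisc_param_init(&param, true, false, true, 0);
 *	// param.enable == HCLGE_PROMISC_EN_UC | HCLGE_PROMISC_EN_BC
 *	// param.vf_id == 0
 */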
4079 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4082 struct hclge_vport *vport = hclge_get_vport(handle);
4083 struct hclge_dev *hdev = vport->back;
4084 struct hclge_promisc_param param;
4085 bool en_bc_pmc = true;
/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
 * is always bypassed. So broadcast promisc should be disabled until
 * the user enables promisc mode.
 */
4091 if (handle->pdev->revision == 0x20)
en_bc_pmc = !!(handle->netdev_flags & HNAE3_BPE);
4094 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4096 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4099 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4101 struct hclge_get_fd_mode_cmd *req;
4102 struct hclge_desc desc;
4105 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4107 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4109 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4111 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4115 *fd_mode = req->mode;
4120 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4121 u32 *stage1_entry_num,
4122 u32 *stage2_entry_num,
4123 u16 *stage1_counter_num,
4124 u16 *stage2_counter_num)
4126 struct hclge_get_fd_allocation_cmd *req;
4127 struct hclge_desc desc;
4130 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4132 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4134 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4136 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4141 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4142 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4143 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4144 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4149 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4151 struct hclge_set_fd_key_config_cmd *req;
4152 struct hclge_fd_key_cfg *stage;
4153 struct hclge_desc desc;
4156 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4158 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4159 stage = &hdev->fd_cfg.key_cfg[stage_num];
4160 req->stage = stage_num;
4161 req->key_select = stage->key_sel;
4162 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4163 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4164 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4165 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4166 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4167 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4169 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4171 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4176 static int hclge_init_fd_config(struct hclge_dev *hdev)
4178 #define LOW_2_WORDS 0x03
4179 struct hclge_fd_key_cfg *key_cfg;
4182 if (!hnae3_dev_fd_supported(hdev))
4185 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4189 switch (hdev->fd_cfg.fd_mode) {
4190 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4191 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4193 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4194 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4197 dev_err(&hdev->pdev->dev,
4198 "Unsupported flow director mode %d\n",
4199 hdev->fd_cfg.fd_mode);
4203 hdev->fd_cfg.proto_support =
4204 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4205 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4206 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4208 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4209 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4210 key_cfg->outer_sipv6_word_en = 0;
4211 key_cfg->outer_dipv6_word_en = 0;
4213 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4214 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4215 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4216 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If the max 400-bit key is used, we can also support tuples for ether type */
4219 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4220 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4221 key_cfg->tuple_active |=
4222 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4225 /* roce_type is used to filter roce frames
4226 * dst_vport is used to specify the rule
4228 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4230 ret = hclge_get_fd_allocation(hdev,
4231 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4232 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4233 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4234 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4238 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4241 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4242 int loc, u8 *key, bool is_add)
4244 struct hclge_fd_tcam_config_1_cmd *req1;
4245 struct hclge_fd_tcam_config_2_cmd *req2;
4246 struct hclge_fd_tcam_config_3_cmd *req3;
4247 struct hclge_desc desc[3];
4250 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4251 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4252 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4253 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4254 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4256 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4257 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4258 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4260 req1->stage = stage;
4261 req1->xy_sel = sel_x ? 1 : 0;
4262 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4263 req1->index = cpu_to_le32(loc);
4264 req1->entry_vld = sel_x ? is_add : 0;
4267 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4268 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4269 sizeof(req2->tcam_data));
4270 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4271 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4274 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4276 dev_err(&hdev->pdev->dev,
4277 "config tcam key fail, ret=%d\n",
4283 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4284 struct hclge_fd_ad_data *action)
4286 struct hclge_fd_ad_config_cmd *req;
4287 struct hclge_desc desc;
4291 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4293 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4294 req->index = cpu_to_le32(loc);
4297 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4298 action->write_rule_id_to_bd);
4299 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4302 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4303 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4304 action->forward_to_direct_queue);
4305 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4307 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4308 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4309 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4310 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4311 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4312 action->counter_id);
4314 req->ad_data = cpu_to_le64(ad_data);
4315 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4317 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4322 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4323 struct hclge_fd_rule *rule)
4325 u16 tmp_x_s, tmp_y_s;
4326 u32 tmp_x_l, tmp_y_l;
4329 if (rule->unused_tuple & tuple_bit)
4332 switch (tuple_bit) {
4335 case BIT(INNER_DST_MAC):
4336 for (i = 0; i < 6; i++) {
4337 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4338 rule->tuples_mask.dst_mac[i]);
4339 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4340 rule->tuples_mask.dst_mac[i]);
4344 case BIT(INNER_SRC_MAC):
4345 for (i = 0; i < 6; i++) {
calc_x(key_x[5 - i], rule->tuples.src_mac[i],
       rule->tuples_mask.src_mac[i]);
calc_y(key_y[5 - i], rule->tuples.src_mac[i],
       rule->tuples_mask.src_mac[i]);
4353 case BIT(INNER_VLAN_TAG_FST):
4354 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4355 rule->tuples_mask.vlan_tag1);
4356 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4357 rule->tuples_mask.vlan_tag1);
4358 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4359 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4362 case BIT(INNER_ETH_TYPE):
4363 calc_x(tmp_x_s, rule->tuples.ether_proto,
4364 rule->tuples_mask.ether_proto);
4365 calc_y(tmp_y_s, rule->tuples.ether_proto,
4366 rule->tuples_mask.ether_proto);
4367 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4368 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4371 case BIT(INNER_IP_TOS):
4372 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4373 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4376 case BIT(INNER_IP_PROTO):
4377 calc_x(*key_x, rule->tuples.ip_proto,
4378 rule->tuples_mask.ip_proto);
4379 calc_y(*key_y, rule->tuples.ip_proto,
4380 rule->tuples_mask.ip_proto);
4383 case BIT(INNER_SRC_IP):
4384 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4385 rule->tuples_mask.src_ip[3]);
4386 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4387 rule->tuples_mask.src_ip[3]);
4388 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4389 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4392 case BIT(INNER_DST_IP):
4393 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4394 rule->tuples_mask.dst_ip[3]);
4395 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4396 rule->tuples_mask.dst_ip[3]);
4397 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4398 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4401 case BIT(INNER_SRC_PORT):
4402 calc_x(tmp_x_s, rule->tuples.src_port,
4403 rule->tuples_mask.src_port);
4404 calc_y(tmp_y_s, rule->tuples.src_port,
4405 rule->tuples_mask.src_port);
4406 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4407 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4410 case BIT(INNER_DST_PORT):
4411 calc_x(tmp_x_s, rule->tuples.dst_port,
4412 rule->tuples_mask.dst_port);
4413 calc_y(tmp_y_s, rule->tuples.dst_port,
4414 rule->tuples_mask.dst_port);
4415 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4416 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
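/* Compose the port number field of the flow director meta data: for a
 * HOST_PORT the PF id, VF id and port type bit are packed per the
 * HCLGE_PF_ID/VF_ID/PORT_TYPE field definitions; for a NETWORK_PORT only
 * the network port id and the type bit are set.
 */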
4424 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4425 u8 vf_id, u8 network_port_id)
4427 u32 port_number = 0;
4429 if (port_type == HOST_PORT) {
4430 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4432 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4434 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4436 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4437 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4438 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
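/* Pack the active meta data tuples (e.g. ROCE_TYPE, DST_VPORT) into a
 * single u32 starting from bit 0, then left-shift the result so the meta
 * data is MSB-aligned before it is written out as key_x/key_y.
 */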
4444 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4445 __le32 *key_x, __le32 *key_y,
4446 struct hclge_fd_rule *rule)
4448 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4449 u8 cur_pos = 0, tuple_size, shift_bits;
4452 for (i = 0; i < MAX_META_DATA; i++) {
4453 tuple_size = meta_data_key_info[i].key_length;
4454 tuple_bit = key_cfg->meta_data_active & BIT(i);
4456 switch (tuple_bit) {
4457 case BIT(ROCE_TYPE):
4458 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4459 cur_pos += tuple_size;
4461 case BIT(DST_VPORT):
4462 port_number = hclge_get_port_number(HOST_PORT, 0,
4464 hnae3_set_field(meta_data,
4465 GENMASK(cur_pos + tuple_size - 1, cur_pos),
4466 cur_pos, port_number);
4467 cur_pos += tuple_size;
4474 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4475 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4476 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4478 *key_x = cpu_to_le32(tmp_x << shift_bits);
4479 *key_y = cpu_to_le32(tmp_y << shift_bits);
4482 /* A complete key is combined with meta data key and tuple key.
4483 * Meta data key is stored at the MSB region, and tuple key is stored at
4484 * the LSB region; unused bits are filled with 0.
4485 */
4486 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4487 struct hclge_fd_rule *rule)
4489 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4490 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4491 u8 *cur_key_x, *cur_key_y;
4492 int i, ret, tuple_size;
4493 u8 meta_data_region;
4495 memset(key_x, 0, sizeof(key_x));
4496 memset(key_y, 0, sizeof(key_y));
4500 for (i = 0; i < MAX_TUPLE; i++) {
4504 tuple_size = tuple_key_info[i].key_length / 8;
4505 check_tuple = key_cfg->tuple_active & BIT(i);
4507 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4510 cur_key_x += tuple_size;
4511 cur_key_y += tuple_size;
4515 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4516 MAX_META_DATA_LENGTH / 8;
4518 hclge_fd_convert_meta_data(key_cfg,
4519 (__le32 *)(key_x + meta_data_region),
4520 (__le32 *)(key_y + meta_data_region),
4523 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4526 dev_err(&hdev->pdev->dev,
4527 "fd key_y config fail, loc=%d, ret=%d\n",
4528 rule->location, ret);
4532 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4535 dev_err(&hdev->pdev->dev,
4536 "fd key_x config fail, loc=%d, ret=%d\n",
4537 rule->location, ret);
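/* Fill the action data for a rule: either drop the packet or forward it
 * to the selected queue. Counters and next-stage lookup are left
 * disabled; the rule location doubles as the ad table index and as the
 * rule id written back to the RX BD.
 */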
4541 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4542 struct hclge_fd_rule *rule)
4544 struct hclge_fd_ad_data ad_data;
4546 ad_data.ad_id = rule->location;
4548 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4549 ad_data.drop_packet = true;
4550 ad_data.forward_to_direct_queue = false;
4551 ad_data.queue_id = 0;
4553 ad_data.drop_packet = false;
4554 ad_data.forward_to_direct_queue = true;
4555 ad_data.queue_id = rule->queue_id;
4558 ad_data.use_counter = false;
4559 ad_data.counter_id = 0;
4561 ad_data.use_next_stage = false;
4562 ad_data.next_input_key = 0;
4564 ad_data.write_rule_id_to_bd = true;
4565 ad_data.rule_id = rule->location;
4567 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
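/* Validate an ethtool flow spec against the flow director capabilities
 * and record every tuple the user left unspecified in the *unused
 * bitmap, so key conversion can skip them later.
 */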
4570 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4571 struct ethtool_rx_flow_spec *fs, u32 *unused)
4573 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4574 struct ethtool_usrip4_spec *usr_ip4_spec;
4575 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4576 struct ethtool_usrip6_spec *usr_ip6_spec;
4577 struct ethhdr *ether_spec;
4579 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4582 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4585 if ((fs->flow_type & FLOW_EXT) &&
4586 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4587 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4591 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4595 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4596 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4598 if (!tcp_ip4_spec->ip4src)
4599 *unused |= BIT(INNER_SRC_IP);
4601 if (!tcp_ip4_spec->ip4dst)
4602 *unused |= BIT(INNER_DST_IP);
4604 if (!tcp_ip4_spec->psrc)
4605 *unused |= BIT(INNER_SRC_PORT);
4607 if (!tcp_ip4_spec->pdst)
4608 *unused |= BIT(INNER_DST_PORT);
4610 if (!tcp_ip4_spec->tos)
4611 *unused |= BIT(INNER_IP_TOS);
4615 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4616 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4617 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4619 if (!usr_ip4_spec->ip4src)
4620 *unused |= BIT(INNER_SRC_IP);
4622 if (!usr_ip4_spec->ip4dst)
4623 *unused |= BIT(INNER_DST_IP);
4625 if (!usr_ip4_spec->tos)
4626 *unused |= BIT(INNER_IP_TOS);
4628 if (!usr_ip4_spec->proto)
4629 *unused |= BIT(INNER_IP_PROTO);
4631 if (usr_ip4_spec->l4_4_bytes)
4634 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4641 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4642 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4645 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4646 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4647 *unused |= BIT(INNER_SRC_IP);
4649 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4650 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4651 *unused |= BIT(INNER_DST_IP);
4653 if (!tcp_ip6_spec->psrc)
4654 *unused |= BIT(INNER_SRC_PORT);
4656 if (!tcp_ip6_spec->pdst)
4657 *unused |= BIT(INNER_DST_PORT);
4659 if (tcp_ip6_spec->tclass)
4663 case IPV6_USER_FLOW:
4664 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4665 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4666 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4667 BIT(INNER_DST_PORT);
4669 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4670 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4671 *unused |= BIT(INNER_SRC_IP);
4673 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4674 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4675 *unused |= BIT(INNER_DST_IP);
4677 if (!usr_ip6_spec->l4_proto)
4678 *unused |= BIT(INNER_IP_PROTO);
4680 if (usr_ip6_spec->tclass)
4683 if (usr_ip6_spec->l4_4_bytes)
4688 ether_spec = &fs->h_u.ether_spec;
4689 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4690 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4691 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4693 if (is_zero_ether_addr(ether_spec->h_source))
4694 *unused |= BIT(INNER_SRC_MAC);
4696 if (is_zero_ether_addr(ether_spec->h_dest))
4697 *unused |= BIT(INNER_DST_MAC);
4699 if (!ether_spec->h_proto)
4700 *unused |= BIT(INNER_ETH_TYPE);
4707 if ((fs->flow_type & FLOW_EXT)) {
4708 if (fs->h_ext.vlan_etype)
4710 if (!fs->h_ext.vlan_tci)
4711 *unused |= BIT(INNER_VLAN_TAG_FST);
4713 if (fs->m_ext.vlan_tci) {
4714 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4718 *unused |= BIT(INNER_VLAN_TAG_FST);
4721 if (fs->flow_type & FLOW_MAC_EXT) {
4722 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4725 if (is_zero_ether_addr(fs->h_ext.h_dest))
4726 *unused |= BIT(INNER_DST_MAC);
4728 *unused &= ~(BIT(INNER_DST_MAC));
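/* The rule list is sorted by location, so existence is decided at the
 * first entry whose location is >= the requested one.
 */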
4734 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4736 struct hclge_fd_rule *rule = NULL;
4737 struct hlist_node *node2;
4739 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4740 if (rule->location >= location)
4744 return rule && rule->location == location;
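/* Insert or remove a rule in the location-sorted rule list. An existing
 * entry at the same location is deleted first, which is also how a plain
 * delete (new_rule == NULL with is_add false) works.
 */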
4747 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4748 struct hclge_fd_rule *new_rule,
4752 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4753 struct hlist_node *node2;
4755 if (is_add && !new_rule)
4758 hlist_for_each_entry_safe(rule, node2,
4759 &hdev->fd_rule_list, rule_node) {
4760 if (rule->location >= location)
4765 if (rule && rule->location == location) {
4766 hlist_del(&rule->rule_node);
4768 hdev->hclge_fd_rule_num--;
4773 } else if (!is_add) {
4774 dev_err(&hdev->pdev->dev,
4775 "delete fail, rule %d is inexistent\n",
4780 INIT_HLIST_NODE(&new_rule->rule_node);
4783 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4785 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4787 hdev->hclge_fd_rule_num++;
4792 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4793 struct ethtool_rx_flow_spec *fs,
4794 struct hclge_fd_rule *rule)
4796 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4798 switch (flow_type) {
4802 rule->tuples.src_ip[3] =
4803 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4804 rule->tuples_mask.src_ip[3] =
4805 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4807 rule->tuples.dst_ip[3] =
4808 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4809 rule->tuples_mask.dst_ip[3] =
4810 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4812 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4813 rule->tuples_mask.src_port =
4814 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4816 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4817 rule->tuples_mask.dst_port =
4818 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4820 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4821 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4823 rule->tuples.ether_proto = ETH_P_IP;
4824 rule->tuples_mask.ether_proto = 0xFFFF;
4828 rule->tuples.src_ip[3] =
4829 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4830 rule->tuples_mask.src_ip[3] =
4831 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4833 rule->tuples.dst_ip[3] =
4834 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4835 rule->tuples_mask.dst_ip[3] =
4836 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4838 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4839 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4841 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4842 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4844 rule->tuples.ether_proto = ETH_P_IP;
4845 rule->tuples_mask.ether_proto = 0xFFFF;
4851 be32_to_cpu_array(rule->tuples.src_ip,
4852 fs->h_u.tcp_ip6_spec.ip6src, 4);
4853 be32_to_cpu_array(rule->tuples_mask.src_ip,
4854 fs->m_u.tcp_ip6_spec.ip6src, 4);
4856 be32_to_cpu_array(rule->tuples.dst_ip,
4857 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4858 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4859 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4861 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4862 rule->tuples_mask.src_port =
4863 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4865 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4866 rule->tuples_mask.dst_port =
4867 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4869 rule->tuples.ether_proto = ETH_P_IPV6;
4870 rule->tuples_mask.ether_proto = 0xFFFF;
4873 case IPV6_USER_FLOW:
4874 be32_to_cpu_array(rule->tuples.src_ip,
4875 fs->h_u.usr_ip6_spec.ip6src, 4);
4876 be32_to_cpu_array(rule->tuples_mask.src_ip,
4877 fs->m_u.usr_ip6_spec.ip6src, 4);
4879 be32_to_cpu_array(rule->tuples.dst_ip,
4880 fs->h_u.usr_ip6_spec.ip6dst, 4);
4881 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4882 fs->m_u.usr_ip6_spec.ip6dst, 4);
4884 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4885 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4887 rule->tuples.ether_proto = ETH_P_IPV6;
4888 rule->tuples_mask.ether_proto = 0xFFFF;
4892 ether_addr_copy(rule->tuples.src_mac,
4893 fs->h_u.ether_spec.h_source);
4894 ether_addr_copy(rule->tuples_mask.src_mac,
4895 fs->m_u.ether_spec.h_source);
4897 ether_addr_copy(rule->tuples.dst_mac,
4898 fs->h_u.ether_spec.h_dest);
4899 ether_addr_copy(rule->tuples_mask.dst_mac,
4900 fs->m_u.ether_spec.h_dest);
4902 rule->tuples.ether_proto =
4903 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4904 rule->tuples_mask.ether_proto =
4905 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4912 switch (flow_type) {
4915 rule->tuples.ip_proto = IPPROTO_SCTP;
4916 rule->tuples_mask.ip_proto = 0xFF;
4920 rule->tuples.ip_proto = IPPROTO_TCP;
4921 rule->tuples_mask.ip_proto = 0xFF;
4925 rule->tuples.ip_proto = IPPROTO_UDP;
4926 rule->tuples_mask.ip_proto = 0xFF;
4932 if ((fs->flow_type & FLOW_EXT)) {
4933 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4934 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4937 if (fs->flow_type & FLOW_MAC_EXT) {
4938 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4939 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
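/* ethtool -N entry point: validate the spec, resolve the destination
 * vport and queue from the ring cookie (RX_CLS_FLOW_DISC means drop),
 * then program the action and key and track the rule in the list.
 */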
4945 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4946 struct ethtool_rxnfc *cmd)
4948 struct hclge_vport *vport = hclge_get_vport(handle);
4949 struct hclge_dev *hdev = vport->back;
4950 u16 dst_vport_id = 0, q_index = 0;
4951 struct ethtool_rx_flow_spec *fs;
4952 struct hclge_fd_rule *rule;
4957 if (!hnae3_dev_fd_supported(hdev))
4961 dev_warn(&hdev->pdev->dev,
4962 "Please enable flow director first\n");
4966 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4968 ret = hclge_fd_check_spec(hdev, fs, &unused);
4970 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4974 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4975 action = HCLGE_FD_ACTION_DROP_PACKET;
4977 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4978 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4981 if (vf > hdev->num_req_vfs) {
4982 dev_err(&hdev->pdev->dev,
4983 "Error: vf id (%d) > max vf num (%d)\n",
4984 vf, hdev->num_req_vfs);
4988 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4989 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4992 dev_err(&hdev->pdev->dev,
4993 "Error: queue id (%d) > max tqp num (%d)\n",
4998 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5002 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5006 ret = hclge_fd_get_tuple(hdev, fs, rule);
5010 rule->flow_type = fs->flow_type;
5012 rule->location = fs->location;
5013 rule->unused_tuple = unused;
5014 rule->vf_id = dst_vport_id;
5015 rule->queue_id = q_index;
5016 rule->action = action;
5018 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5022 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5026 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
5037 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5038 struct ethtool_rxnfc *cmd)
5040 struct hclge_vport *vport = hclge_get_vport(handle);
5041 struct hclge_dev *hdev = vport->back;
5042 struct ethtool_rx_flow_spec *fs;
5045 if (!hnae3_dev_fd_supported(hdev))
5048 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5050 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5053 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5054 dev_err(&hdev->pdev->dev,
5055 "Delete fail, rule %d is inexistent\n",
5060 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5061 fs->location, NULL, false);
5065 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
5069 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5072 struct hclge_vport *vport = hclge_get_vport(handle);
5073 struct hclge_dev *hdev = vport->back;
5074 struct hclge_fd_rule *rule;
5075 struct hlist_node *node;
5077 if (!hnae3_dev_fd_supported(hdev))
5081 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5083 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5084 rule->location, NULL, false);
5085 hlist_del(&rule->rule_node);
5087 hdev->hclge_fd_rule_num--;
5090 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5092 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5093 rule->location, NULL, false);
5097 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5099 struct hclge_vport *vport = hclge_get_vport(handle);
5100 struct hclge_dev *hdev = vport->back;
5101 struct hclge_fd_rule *rule;
5102 struct hlist_node *node;
5105 /* Return ok here, because reset error handling will check this
5106 * return value. If error is returned here, the reset process will
5107 * fail.
5108 */
5109 if (!hnae3_dev_fd_supported(hdev))
5112 /* if fd is disabled, should not restore it when reset */
5116 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5117 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5119 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5122 dev_warn(&hdev->pdev->dev,
5123 "Restore rule %d failed, remove it\n",
5125 hlist_del(&rule->rule_node);
5127 hdev->hclge_fd_rule_num--;
5133 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5134 struct ethtool_rxnfc *cmd)
5136 struct hclge_vport *vport = hclge_get_vport(handle);
5137 struct hclge_dev *hdev = vport->back;
5139 if (!hnae3_dev_fd_supported(hdev))
5142 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5143 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5148 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5149 struct ethtool_rxnfc *cmd)
5151 struct hclge_vport *vport = hclge_get_vport(handle);
5152 struct hclge_fd_rule *rule = NULL;
5153 struct hclge_dev *hdev = vport->back;
5154 struct ethtool_rx_flow_spec *fs;
5155 struct hlist_node *node2;
5157 if (!hnae3_dev_fd_supported(hdev))
5160 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5162 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5163 if (rule->location >= fs->location)
5167 if (!rule || fs->location != rule->location)
5170 fs->flow_type = rule->flow_type;
5171 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5175 fs->h_u.tcp_ip4_spec.ip4src =
5176 cpu_to_be32(rule->tuples.src_ip[3]);
5177 fs->m_u.tcp_ip4_spec.ip4src =
5178 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5179 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5181 fs->h_u.tcp_ip4_spec.ip4dst =
5182 cpu_to_be32(rule->tuples.dst_ip[3]);
5183 fs->m_u.tcp_ip4_spec.ip4dst =
5184 rule->unused_tuple & BIT(INNER_DST_IP) ?
5185 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5187 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5188 fs->m_u.tcp_ip4_spec.psrc =
5189 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5190 0 : cpu_to_be16(rule->tuples_mask.src_port);
5192 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5193 fs->m_u.tcp_ip4_spec.pdst =
5194 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5195 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5197 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5198 fs->m_u.tcp_ip4_spec.tos =
5199 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5200 0 : rule->tuples_mask.ip_tos;
5204 fs->h_u.usr_ip4_spec.ip4src =
5205 cpu_to_be32(rule->tuples.src_ip[3]);
5206 fs->m_u.usr_ip4_spec.ip4src =
5207 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5208 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5210 fs->h_u.usr_ip4_spec.ip4dst =
5211 cpu_to_be32(rule->tuples.dst_ip[3]);
5212 fs->m_u.usr_ip4_spec.ip4dst =
5213 rule->unused_tuple & BIT(INNER_DST_IP) ?
5214 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5216 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5217 fs->m_u.usr_ip4_spec.tos =
5218 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5219 0 : rule->tuples_mask.ip_tos;
5221 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5222 fs->m_u.usr_ip4_spec.proto =
5223 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5224 0 : rule->tuples_mask.ip_proto;
5226 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5232 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5233 rule->tuples.src_ip, 4);
5234 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5235 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5237 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5238 rule->tuples_mask.src_ip, 4);
5240 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5241 rule->tuples.dst_ip, 4);
5242 if (rule->unused_tuple & BIT(INNER_DST_IP))
5243 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5245 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5246 rule->tuples_mask.dst_ip, 4);
5248 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5249 fs->m_u.tcp_ip6_spec.psrc =
5250 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5251 0 : cpu_to_be16(rule->tuples_mask.src_port);
5253 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5254 fs->m_u.tcp_ip6_spec.pdst =
5255 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5256 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5259 case IPV6_USER_FLOW:
5260 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5261 rule->tuples.src_ip, 4);
5262 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5263 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5265 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5266 rule->tuples_mask.src_ip, 4);
5268 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5269 rule->tuples.dst_ip, 4);
5270 if (rule->unused_tuple & BIT(INNER_DST_IP))
5271 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5273 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5274 rule->tuples_mask.dst_ip, 4);
5276 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5277 fs->m_u.usr_ip6_spec.l4_proto =
5278 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5279 0 : rule->tuples_mask.ip_proto;
5283 ether_addr_copy(fs->h_u.ether_spec.h_source,
5284 rule->tuples.src_mac);
5285 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5286 eth_zero_addr(fs->m_u.ether_spec.h_source);
5288 ether_addr_copy(fs->m_u.ether_spec.h_source,
5289 rule->tuples_mask.src_mac);
5291 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5292 rule->tuples.dst_mac);
5293 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5294 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5296 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5297 rule->tuples_mask.dst_mac);
5299 fs->h_u.ether_spec.h_proto =
5300 cpu_to_be16(rule->tuples.ether_proto);
5301 fs->m_u.ether_spec.h_proto =
5302 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5303 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5310 if (fs->flow_type & FLOW_EXT) {
5311 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5312 fs->m_ext.vlan_tci =
5313 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5314 cpu_to_be16(VLAN_VID_MASK) :
5315 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5318 if (fs->flow_type & FLOW_MAC_EXT) {
5319 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5320 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5321 eth_zero_addr(fs->m_ext.h_dest);
5323 ether_addr_copy(fs->m_ext.h_dest,
5324 rule->tuples_mask.dst_mac);
5327 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5328 fs->ring_cookie = RX_CLS_FLOW_DISC;
5332 fs->ring_cookie = rule->queue_id;
5333 vf_id = rule->vf_id;
5334 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5335 fs->ring_cookie |= vf_id;
5341 static int hclge_get_all_rules(struct hnae3_handle *handle,
5342 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5344 struct hclge_vport *vport = hclge_get_vport(handle);
5345 struct hclge_dev *hdev = vport->back;
5346 struct hclge_fd_rule *rule;
5347 struct hlist_node *node2;
5350 if (!hnae3_dev_fd_supported(hdev))
5353 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5355 hlist_for_each_entry_safe(rule, node2,
5356 &hdev->fd_rule_list, rule_node) {
5357 if (cnt == cmd->rule_cnt)
5360 rule_locs[cnt] = rule->location;
5364 cmd->rule_cnt = cnt;
5369 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5371 struct hclge_vport *vport = hclge_get_vport(handle);
5372 struct hclge_dev *hdev = vport->back;
5374 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5375 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5378 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5380 struct hclge_vport *vport = hclge_get_vport(handle);
5381 struct hclge_dev *hdev = vport->back;
5383 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5386 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5388 struct hclge_vport *vport = hclge_get_vport(handle);
5389 struct hclge_dev *hdev = vport->back;
5391 return hdev->rst_stats.hw_reset_done_cnt;
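/* Toggling the flow director also flushes or restores the rule table:
 * disabling removes the hardware entries, enabling writes the cached
 * rules back.
 */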
5394 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5396 struct hclge_vport *vport = hclge_get_vport(handle);
5397 struct hclge_dev *hdev = vport->back;
5399 hdev->fd_en = enable;
5401 hclge_del_all_fd_entries(handle, false);
5403 hclge_restore_fd_entries(handle);
5406 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5408 struct hclge_desc desc;
5409 struct hclge_config_mac_mode_cmd *req =
5410 (struct hclge_config_mac_mode_cmd *)desc.data;
5414 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5415 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5416 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5417 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5418 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5419 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5420 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5421 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5422 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5423 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5424 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5425 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5426 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5427 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5428 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5429 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5431 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5433 dev_err(&hdev->pdev->dev,
5434 "mac enable fail, ret =%d.\n", ret);
5437 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5439 struct hclge_config_mac_mode_cmd *req;
5440 struct hclge_desc desc;
5444 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5445 /* 1 Read out the MAC mode config at first */
5446 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5447 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5449 dev_err(&hdev->pdev->dev,
5450 "mac loopback get fail, ret =%d.\n", ret);
5454 /* 2 Then setup the loopback flag */
5455 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5456 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5457 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5458 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5460 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5462 /* 3 Config mac work mode with loopback flag
5463 * and its original configuration parameters
5464 */
5465 hclge_cmd_reuse_desc(&desc, false);
5466 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5468 dev_err(&hdev->pdev->dev,
5469 "mac loopback set fail, ret =%d.\n", ret);
5473 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5474 enum hnae3_loop loop_mode)
5476 #define HCLGE_SERDES_RETRY_MS 10
5477 #define HCLGE_SERDES_RETRY_NUM 100
5479 #define HCLGE_MAC_LINK_STATUS_MS 10
5480 #define HCLGE_MAC_LINK_STATUS_NUM 100
5481 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5482 #define HCLGE_MAC_LINK_STATUS_UP 1
5484 struct hclge_serdes_lb_cmd *req;
5485 struct hclge_desc desc;
5486 int mac_link_ret = 0;
5490 req = (struct hclge_serdes_lb_cmd *)desc.data;
5491 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5493 switch (loop_mode) {
5494 case HNAE3_LOOP_SERIAL_SERDES:
5495 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5497 case HNAE3_LOOP_PARALLEL_SERDES:
5498 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5501 dev_err(&hdev->pdev->dev,
5502 "unsupported serdes loopback mode %d\n", loop_mode);
5507 req->enable = loop_mode_b;
5508 req->mask = loop_mode_b;
5509 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5511 req->mask = loop_mode_b;
5512 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5515 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5517 dev_err(&hdev->pdev->dev,
5518 "serdes loopback set fail, ret = %d\n", ret);
5523 msleep(HCLGE_SERDES_RETRY_MS);
5524 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5526 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5528 dev_err(&hdev->pdev->dev,
5529 "serdes loopback get, ret = %d\n", ret);
5532 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5533 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5535 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5536 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5538 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5539 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5543 hclge_cfg_mac_mode(hdev, en);
5547 /* serdes internal loopback, independent of the network cable. */
5548 msleep(HCLGE_MAC_LINK_STATUS_MS);
5549 ret = hclge_get_mac_link_status(hdev);
5550 if (ret == mac_link_ret)
5552 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5554 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5559 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5560 int stream_id, bool enable)
5562 struct hclge_desc desc;
5563 struct hclge_cfg_com_tqp_queue_cmd *req =
5564 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5568 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5569 req->stream_id = cpu_to_le16(stream_id);
5570 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5572 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5574 dev_err(&hdev->pdev->dev,
5575 "Tqp enable fail, status =%d.\n", ret);
5579 static int hclge_set_loopback(struct hnae3_handle *handle,
5580 enum hnae3_loop loop_mode, bool en)
5582 struct hclge_vport *vport = hclge_get_vport(handle);
5583 struct hnae3_knic_private_info *kinfo;
5584 struct hclge_dev *hdev = vport->back;
5587 switch (loop_mode) {
5588 case HNAE3_LOOP_APP:
5589 ret = hclge_set_app_loopback(hdev, en);
5591 case HNAE3_LOOP_SERIAL_SERDES:
5592 case HNAE3_LOOP_PARALLEL_SERDES:
5593 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5597 dev_err(&hdev->pdev->dev,
5598 "loop_mode %d is not supported\n", loop_mode);
5605 kinfo = &vport->nic.kinfo;
5606 for (i = 0; i < kinfo->num_tqps; i++) {
5607 ret = hclge_tqp_enable(hdev, i, 0, en);
5615 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5617 struct hclge_vport *vport = hclge_get_vport(handle);
5618 struct hnae3_knic_private_info *kinfo;
5619 struct hnae3_queue *queue;
5620 struct hclge_tqp *tqp;
5623 kinfo = &vport->nic.kinfo;
5624 for (i = 0; i < kinfo->num_tqps; i++) {
5625 queue = handle->kinfo.tqp[i];
5626 tqp = container_of(queue, struct hclge_tqp, q);
5627 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5631 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5633 struct hclge_vport *vport = hclge_get_vport(handle);
5634 struct hclge_dev *hdev = vport->back;
5637 mod_timer(&hdev->service_timer, jiffies + HZ);
5639 del_timer_sync(&hdev->service_timer);
5640 cancel_work_sync(&hdev->service_task);
5641 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5645 static int hclge_ae_start(struct hnae3_handle *handle)
5647 struct hclge_vport *vport = hclge_get_vport(handle);
5648 struct hclge_dev *hdev = vport->back;
5651 hclge_cfg_mac_mode(hdev, true);
5652 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5653 hdev->hw.mac.link = 0;
5655 /* reset tqp stats */
5656 hclge_reset_tqp_stats(handle);
5658 hclge_mac_start_phy(hdev);
5663 static void hclge_ae_stop(struct hnae3_handle *handle)
5665 struct hclge_vport *vport = hclge_get_vport(handle);
5666 struct hclge_dev *hdev = vport->back;
5669 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5671 /* If it is not PF reset, the firmware will disable the MAC,
5672 * so it only needs to stop the phy here.
5673 */
5674 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5675 hdev->reset_type != HNAE3_FUNC_RESET) {
5676 hclge_mac_stop_phy(hdev);
5680 for (i = 0; i < handle->kinfo.num_tqps; i++)
5681 hclge_reset_tqp(handle, i);
5684 hclge_cfg_mac_mode(hdev, false);
5686 hclge_mac_stop_phy(hdev);
5688 /* reset tqp stats */
5689 hclge_reset_tqp_stats(handle);
5690 hclge_update_link_status(hdev);
5693 int hclge_vport_start(struct hclge_vport *vport)
5695 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5696 vport->last_active_jiffies = jiffies;
5700 void hclge_vport_stop(struct hclge_vport *vport)
5702 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5705 static int hclge_client_start(struct hnae3_handle *handle)
5707 struct hclge_vport *vport = hclge_get_vport(handle);
5709 return hclge_vport_start(vport);
5712 static void hclge_client_stop(struct hnae3_handle *handle)
5714 struct hclge_vport *vport = hclge_get_vport(handle);
5716 hclge_vport_stop(vport);
5719 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5720 u16 cmdq_resp, u8 resp_code,
5721 enum hclge_mac_vlan_tbl_opcode op)
5723 struct hclge_dev *hdev = vport->back;
5724 int return_status = -EIO;
5727 dev_err(&hdev->pdev->dev,
5728 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5733 if (op == HCLGE_MAC_VLAN_ADD) {
5734 if ((!resp_code) || (resp_code == 1)) {
5736 } else if (resp_code == 2) {
5737 return_status = -ENOSPC;
5738 dev_err(&hdev->pdev->dev,
5739 "add mac addr failed for uc_overflow.\n");
5740 } else if (resp_code == 3) {
5741 return_status = -ENOSPC;
5742 dev_err(&hdev->pdev->dev,
5743 "add mac addr failed for mc_overflow.\n");
5745 dev_err(&hdev->pdev->dev,
5746 "add mac addr failed for undefined, code=%d.\n",
5749 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5752 } else if (resp_code == 1) {
5753 return_status = -ENOENT;
5754 dev_dbg(&hdev->pdev->dev,
5755 "remove mac addr failed for miss.\n");
5757 dev_err(&hdev->pdev->dev,
5758 "remove mac addr failed for undefined, code=%d.\n",
5761 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5764 } else if (resp_code == 1) {
5765 return_status = -ENOENT;
5766 dev_dbg(&hdev->pdev->dev,
5767 "lookup mac addr failed for miss.\n");
5769 dev_err(&hdev->pdev->dev,
5770 "lookup mac addr failed for undefined, code=%d.\n",
5774 return_status = -EINVAL;
5775 dev_err(&hdev->pdev->dev,
5776 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5780 return return_status;
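/* Set or clear the bitmap bit of @vfid in a 3-descriptor mac_vlan entry:
 * vfid 0-191 lives in desc[1] and vfid 192-255 in desc[2], one bit per
 * function, 32 function bits per data word.
 */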
5783 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5788 if (vfid > 255 || vfid < 0)
5791 if (vfid >= 0 && vfid <= 191) {
5792 word_num = vfid / 32;
5793 bit_num = vfid % 32;
5795 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5797 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5799 word_num = (vfid - 192) / 32;
5800 bit_num = vfid % 32;
5802 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5804 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5810 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5812 #define HCLGE_DESC_NUMBER 3
5813 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5816 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5817 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5818 if (desc[i].data[j])
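/* Pack a 6-byte MAC address into the hi32/lo16 fields of a mac_vlan
 * table entry, byte 0 in the least significant byte of the high word,
 * and set the entry flag bit plus the multicast bits when @is_mc.
 */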
5824 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5825 const u8 *addr, bool is_mc)
5827 const unsigned char *mac_addr = addr;
5828 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5829 (mac_addr[0]) | (mac_addr[1] << 8);
5830 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5832 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5834 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5835 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5838 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5839 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5842 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5843 struct hclge_mac_vlan_tbl_entry_cmd *req)
5845 struct hclge_dev *hdev = vport->back;
5846 struct hclge_desc desc;
5851 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5853 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5855 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5857 dev_err(&hdev->pdev->dev,
5858 "del mac addr failed for cmd_send, ret =%d.\n",
5862 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5863 retval = le16_to_cpu(desc.retval);
5865 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5866 HCLGE_MAC_VLAN_REMOVE);
5869 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5870 struct hclge_mac_vlan_tbl_entry_cmd *req,
5871 struct hclge_desc *desc,
5874 struct hclge_dev *hdev = vport->back;
5879 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5881 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5882 memcpy(desc[0].data,
5884 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5885 hclge_cmd_setup_basic_desc(&desc[1],
5886 HCLGE_OPC_MAC_VLAN_ADD,
5888 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5889 hclge_cmd_setup_basic_desc(&desc[2],
5890 HCLGE_OPC_MAC_VLAN_ADD,
5892 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5894 memcpy(desc[0].data,
5896 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5897 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5900 dev_err(&hdev->pdev->dev,
5901 "lookup mac addr failed for cmd_send, ret =%d.\n",
5905 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5906 retval = le16_to_cpu(desc[0].retval);
5908 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5909 HCLGE_MAC_VLAN_LKUP);
5912 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5913 struct hclge_mac_vlan_tbl_entry_cmd *req,
5914 struct hclge_desc *mc_desc)
5916 struct hclge_dev *hdev = vport->back;
5923 struct hclge_desc desc;
5925 hclge_cmd_setup_basic_desc(&desc,
5926 HCLGE_OPC_MAC_VLAN_ADD,
5928 memcpy(desc.data, req,
5929 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5930 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5931 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5932 retval = le16_to_cpu(desc.retval);
5934 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5936 HCLGE_MAC_VLAN_ADD);
5938 hclge_cmd_reuse_desc(&mc_desc[0], false);
5939 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5940 hclge_cmd_reuse_desc(&mc_desc[1], false);
5941 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5942 hclge_cmd_reuse_desc(&mc_desc[2], false);
5943 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5944 memcpy(mc_desc[0].data, req,
5945 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5946 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5947 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5948 retval = le16_to_cpu(mc_desc[0].retval);
5950 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5952 HCLGE_MAC_VLAN_ADD);
5956 dev_err(&hdev->pdev->dev,
5957 "add mac addr failed for cmd_send, ret =%d.\n",
5965 static int hclge_init_umv_space(struct hclge_dev *hdev)
5967 u16 allocated_size = 0;
5970 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5975 if (allocated_size < hdev->wanted_umv_size)
5976 dev_warn(&hdev->pdev->dev,
5977 "Alloc umv space failed, want %d, get %d\n",
5978 hdev->wanted_umv_size, allocated_size);
5980 mutex_init(&hdev->umv_mutex);
5981 hdev->max_umv_size = allocated_size;
5982 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5983 hdev->share_umv_size = hdev->priv_umv_size +
5984 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5989 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5993 if (hdev->max_umv_size > 0) {
5994 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5998 hdev->max_umv_size = 0;
6000 mutex_destroy(&hdev->umv_mutex);
6005 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6006 u16 *allocated_size, bool is_alloc)
6008 struct hclge_umv_spc_alc_cmd *req;
6009 struct hclge_desc desc;
6012 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6013 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6014 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6015 req->space_size = cpu_to_le32(space_size);
6017 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6019 dev_err(&hdev->pdev->dev,
6020 "%s umv space failed for cmd_send, ret =%d\n",
6021 is_alloc ? "allocate" : "free", ret);
6025 if (is_alloc && allocated_size)
6026 *allocated_size = le32_to_cpu(desc.data[1]);
6031 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6033 struct hclge_vport *vport;
6036 for (i = 0; i < hdev->num_alloc_vport; i++) {
6037 vport = &hdev->vport[i];
6038 vport->used_umv_num = 0;
6041 mutex_lock(&hdev->umv_mutex);
6042 hdev->share_umv_size = hdev->priv_umv_size +
6043 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6044 mutex_unlock(&hdev->umv_mutex);
6047 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6049 struct hclge_dev *hdev = vport->back;
6052 mutex_lock(&hdev->umv_mutex);
6053 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6054 hdev->share_umv_size == 0);
6055 mutex_unlock(&hdev->umv_mutex);
6060 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6062 struct hclge_dev *hdev = vport->back;
6064 mutex_lock(&hdev->umv_mutex);
6066 if (vport->used_umv_num > hdev->priv_umv_size)
6067 hdev->share_umv_size++;
6069 if (vport->used_umv_num > 0)
6070 vport->used_umv_num--;
6072 if (vport->used_umv_num >= hdev->priv_umv_size &&
6073 hdev->share_umv_size > 0)
6074 hdev->share_umv_size--;
6075 vport->used_umv_num++;
6077 mutex_unlock(&hdev->umv_mutex);
6080 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6081 const unsigned char *addr)
6083 struct hclge_vport *vport = hclge_get_vport(handle);
6085 return hclge_add_uc_addr_common(vport, addr);
6088 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6089 const unsigned char *addr)
6091 struct hclge_dev *hdev = vport->back;
6092 struct hclge_mac_vlan_tbl_entry_cmd req;
6093 struct hclge_desc desc;
6094 u16 egress_port = 0;
6097 /* mac addr check */
6098 if (is_zero_ether_addr(addr) ||
6099 is_broadcast_ether_addr(addr) ||
6100 is_multicast_ether_addr(addr)) {
6101 dev_err(&hdev->pdev->dev,
6102 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6104 is_zero_ether_addr(addr),
6105 is_broadcast_ether_addr(addr),
6106 is_multicast_ether_addr(addr));
6110 memset(&req, 0, sizeof(req));
6112 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6113 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6115 req.egress_port = cpu_to_le16(egress_port);
6117 hclge_prepare_mac_addr(&req, addr, false);
6119 /* Look up the mac address in the mac_vlan table, and add
6120 * it if the entry does not exist. Repeated unicast entries
6121 * are not allowed in the mac vlan table.
6122 */
6123 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6124 if (ret == -ENOENT) {
6125 if (!hclge_is_umv_space_full(vport)) {
6126 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6128 hclge_update_umv_space(vport, false);
6132 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6133 hdev->priv_umv_size);
6138 /* check if we just hit the duplicate */
6140 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6141 vport->vport_id, addr);
6145 dev_err(&hdev->pdev->dev,
6146 "PF failed to add unicast entry(%pM) in the MAC table\n",
6152 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6153 const unsigned char *addr)
6155 struct hclge_vport *vport = hclge_get_vport(handle);
6157 return hclge_rm_uc_addr_common(vport, addr);
6160 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6161 const unsigned char *addr)
6163 struct hclge_dev *hdev = vport->back;
6164 struct hclge_mac_vlan_tbl_entry_cmd req;
6167 /* mac addr check */
6168 if (is_zero_ether_addr(addr) ||
6169 is_broadcast_ether_addr(addr) ||
6170 is_multicast_ether_addr(addr)) {
6171 dev_dbg(&hdev->pdev->dev,
6172 "Remove mac err! invalid mac:%pM.\n",
6177 memset(&req, 0, sizeof(req));
6178 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6179 hclge_prepare_mac_addr(&req, addr, false);
6180 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6182 hclge_update_umv_space(vport, true);
6187 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6188 const unsigned char *addr)
6190 struct hclge_vport *vport = hclge_get_vport(handle);
6192 return hclge_add_mc_addr_common(vport, addr);
6195 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6196 const unsigned char *addr)
6198 struct hclge_dev *hdev = vport->back;
6199 struct hclge_mac_vlan_tbl_entry_cmd req;
6200 struct hclge_desc desc[3];
6203 /* mac addr check */
6204 if (!is_multicast_ether_addr(addr)) {
6205 dev_err(&hdev->pdev->dev,
6206 "Add mc mac err! invalid mac:%pM.\n",
6210 memset(&req, 0, sizeof(req));
6211 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6212 hclge_prepare_mac_addr(&req, addr, true);
6213 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6215 /* This mac addr exists, update VFID for it */
6216 hclge_update_desc_vfid(desc, vport->vport_id, false);
6217 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6219 /* This mac addr does not exist, add new entry for it */
6220 memset(desc[0].data, 0, sizeof(desc[0].data));
6221 memset(desc[1].data, 0, sizeof(desc[0].data));
6222 memset(desc[2].data, 0, sizeof(desc[0].data));
6223 hclge_update_desc_vfid(desc, vport->vport_id, false);
6224 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6227 if (status == -ENOSPC)
6228 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6233 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6234 const unsigned char *addr)
6236 struct hclge_vport *vport = hclge_get_vport(handle);
6238 return hclge_rm_mc_addr_common(vport, addr);
6241 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6242 const unsigned char *addr)
6244 struct hclge_dev *hdev = vport->back;
6245 struct hclge_mac_vlan_tbl_entry_cmd req;
6246 enum hclge_cmd_status status;
6247 struct hclge_desc desc[3];
6249 /* mac addr check */
6250 if (!is_multicast_ether_addr(addr)) {
6251 dev_dbg(&hdev->pdev->dev,
6252 "Remove mc mac err! invalid mac:%pM.\n",
6257 memset(&req, 0, sizeof(req));
6258 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6259 hclge_prepare_mac_addr(&req, addr, true);
6260 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6262 /* This mac addr exists, remove this handle's VFID for it */
6263 hclge_update_desc_vfid(desc, vport->vport_id, true);
6265 if (hclge_is_all_function_id_zero(desc))
6266 /* All the vfids are zero, so delete this entry */
6267 status = hclge_remove_mac_vlan_tbl(vport, &req);
6269 /* Not all the vfids are zero, just update the vfid bitmap */
6270 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6273 /* Maybe this mac address is in mta table, but it cannot be
6274 * deleted here because an entry of mta represents an address
6275 * range rather than a specific address. The delete action to
6276 * all entries will take effect in update_mta_status called by
6277 * hns3_nic_set_rx_mode.
6278 */
6285 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6286 enum HCLGE_MAC_ADDR_TYPE mac_type)
6288 struct hclge_vport_mac_addr_cfg *mac_cfg;
6289 struct list_head *list;
6291 if (!vport->vport_id)
6294 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6298 mac_cfg->hd_tbl_status = true;
6299 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6301 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6302 &vport->uc_mac_list : &vport->mc_mac_list;
6304 list_add_tail(&mac_cfg->node, list);
6307 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6309 enum HCLGE_MAC_ADDR_TYPE mac_type)
6311 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6312 struct list_head *list;
6313 bool uc_flag, mc_flag;
6315 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6316 &vport->uc_mac_list : &vport->mc_mac_list;
6318 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6319 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6321 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6322 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6323 if (uc_flag && mac_cfg->hd_tbl_status)
6324 hclge_rm_uc_addr_common(vport, mac_addr);
6326 if (mc_flag && mac_cfg->hd_tbl_status)
6327 hclge_rm_mc_addr_common(vport, mac_addr);
6329 list_del(&mac_cfg->node);
6336 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6337 enum HCLGE_MAC_ADDR_TYPE mac_type)
6339 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6340 struct list_head *list;
6342 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6343 &vport->uc_mac_list : &vport->mc_mac_list;
6345 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6346 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6347 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6349 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6350 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6352 mac_cfg->hd_tbl_status = false;
6354 list_del(&mac_cfg->node);
6360 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6362 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6363 struct hclge_vport *vport;
6366 mutex_lock(&hdev->vport_cfg_mutex);
6367 for (i = 0; i < hdev->num_alloc_vport; i++) {
6368 vport = &hdev->vport[i];
6369 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6370 list_del(&mac->node);
6374 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6375 list_del(&mac->node);
6379 mutex_unlock(&hdev->vport_cfg_mutex);
6382 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6383 u16 cmdq_resp, u8 resp_code)
6385 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6386 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6387 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6388 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6393 dev_err(&hdev->pdev->dev,
6394 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6399 switch (resp_code) {
6400 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6401 case HCLGE_ETHERTYPE_ALREADY_ADD:
6404 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6405 dev_err(&hdev->pdev->dev,
6406 "add mac ethertype failed for manager table overflow.\n");
6407 return_status = -EIO;
6409 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6410 dev_err(&hdev->pdev->dev,
6411 "add mac ethertype failed for key conflict.\n");
6412 return_status = -EIO;
6415 dev_err(&hdev->pdev->dev,
6416 "add mac ethertype failed for undefined, code=%d.\n",
6418 return_status = -EIO;
6421 return return_status;
6424 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6425 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6427 struct hclge_desc desc;
6432 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6433 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6435 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6437 dev_err(&hdev->pdev->dev,
6438 "add mac ethertype failed for cmd_send, ret =%d.\n",
6443 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6444 retval = le16_to_cpu(desc.retval);
6446 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6449 static int init_mgr_tbl(struct hclge_dev *hdev)
6454 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6455 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6457 dev_err(&hdev->pdev->dev,
6458 "add mac ethertype failed, ret =%d.\n",
6467 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6469 struct hclge_vport *vport = hclge_get_vport(handle);
6470 struct hclge_dev *hdev = vport->back;
6472 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6475 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6478 const unsigned char *new_addr = (const unsigned char *)p;
6479 struct hclge_vport *vport = hclge_get_vport(handle);
6480 struct hclge_dev *hdev = vport->back;
6483 /* mac addr check */
6484 if (is_zero_ether_addr(new_addr) ||
6485 is_broadcast_ether_addr(new_addr) ||
6486 is_multicast_ether_addr(new_addr)) {
6487 dev_err(&hdev->pdev->dev,
6488 "Change uc mac err! invalid mac:%p.\n",
6493 if ((!is_first || is_kdump_kernel()) &&
6494 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6495 dev_warn(&hdev->pdev->dev,
6496 "remove old uc mac address fail.\n");
6498 ret = hclge_add_uc_addr(handle, new_addr);
6500 dev_err(&hdev->pdev->dev,
6501 "add uc mac address fail, ret =%d.\n",
6505 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6506 dev_err(&hdev->pdev->dev,
6507 "restore uc mac address fail.\n");
6512 ret = hclge_pause_addr_cfg(hdev, new_addr);
6514 dev_err(&hdev->pdev->dev,
6515 "configure mac pause address fail, ret =%d.\n",
6520 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6525 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6528 struct hclge_vport *vport = hclge_get_vport(handle);
6529 struct hclge_dev *hdev = vport->back;
6531 if (!hdev->hw.mac.phydev)
6534 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6537 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6538 u8 fe_type, bool filter_en, u8 vf_id)
6540 struct hclge_vlan_filter_ctrl_cmd *req;
6541 struct hclge_desc desc;
6544 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6546 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6547 req->vlan_type = vlan_type;
6548 req->vlan_fe = filter_en ? fe_type : 0;
6551 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6553 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6559 #define HCLGE_FILTER_TYPE_VF 0
6560 #define HCLGE_FILTER_TYPE_PORT 1
6561 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6562 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6563 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6564 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6565 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6566 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6567 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6568 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6569 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6571 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6573 struct hclge_vport *vport = hclge_get_vport(handle);
6574 struct hclge_dev *hdev = vport->back;
6576 if (hdev->pdev->revision >= 0x21) {
6577 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6578 HCLGE_FILTER_FE_EGRESS, enable, 0);
6579 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6580 HCLGE_FILTER_FE_INGRESS, enable, 0);
6582 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6583 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6587 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6589 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
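/* Program the per-VF vlan filter table. The 256-bit VF bitmap spans two
 * descriptors of HCLGE_MAX_VF_BYTES each, and the firmware response code
 * distinguishes success, a full table on add, and a missing entry on
 * kill.
 */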
6592 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6593 bool is_kill, u16 vlan, u8 qos,
6596 #define HCLGE_MAX_VF_BYTES 16
6597 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6598 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6599 struct hclge_desc desc[2];
6604 hclge_cmd_setup_basic_desc(&desc[0],
6605 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6606 hclge_cmd_setup_basic_desc(&desc[1],
6607 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6609 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6611 vf_byte_off = vfid / 8;
6612 vf_byte_val = 1 << (vfid % 8);
6614 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6615 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6617 req0->vlan_id = cpu_to_le16(vlan);
6618 req0->vlan_cfg = is_kill;
6620 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6621 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6623 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6625 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6627 dev_err(&hdev->pdev->dev,
6628 "Send vf vlan command fail, ret =%d.\n",
6634 #define HCLGE_VF_VLAN_NO_ENTRY 2
6635 if (!req0->resp_code || req0->resp_code == 1)
6638 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6639 dev_warn(&hdev->pdev->dev,
6640 "vf vlan table is full, vf vlan filter is disabled\n");
6644 dev_err(&hdev->pdev->dev,
6645 "Add vf vlan filter fail, ret =%d.\n",
6648 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6649 if (!req0->resp_code)
6652 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6653 dev_warn(&hdev->pdev->dev,
6654 "vlan %d filter is not in vf vlan table\n",
6659 dev_err(&hdev->pdev->dev,
6660 "Kill vf vlan filter fail, ret =%d.\n",
6667 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6668 u16 vlan_id, bool is_kill)
6670 struct hclge_vlan_filter_pf_cfg_cmd *req;
6671 struct hclge_desc desc;
6672 u8 vlan_offset_byte_val;
6673 u8 vlan_offset_byte;
6677 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6679 vlan_offset_160 = vlan_id / 160;
6680 vlan_offset_byte = (vlan_id % 160) / 8;
6681 vlan_offset_byte_val = 1 << (vlan_id % 8);
6683 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6684 req->vlan_offset = vlan_offset_160;
6685 req->vlan_cfg = is_kill;
6686 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6688 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6690 dev_err(&hdev->pdev->dev,
6691 "port vlan command, send fail, ret =%d.\n", ret);
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret = 0;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
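
/* TX VLAN offload is expressed to hardware as two tag slots, tag1 and tag2
 * (the accept_tag2/accept_untag2 knobs only exist on revisions newer than
 * 0x20). The accept_* bits select which tagged/untagged combinations a
 * vport may transmit; insert_*_en plus def_vlan_tag* control hardware tag
 * insertion, and port based VLAN uses the tag1 slot for its inserted tag.
 */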
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on pdev
	 * revision 0x20; newer revisions support them, but these two
	 * fields cannot be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
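
/* Each vport tracks its VLANs in vport->vlan_list so they can be replayed
 * after a reset or after port based VLAN is disabled. hd_tbl_status records
 * whether the entry is currently present in the hardware VLAN filter table
 * as well, or only in this software list.
 */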
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, 0, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
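
/* When port based VLAN is being enabled, all entries from the vport VLAN
 * list are pulled out of the hardware filter and replaced by the single
 * port based entry; on the way back (disable) the old port based entry is
 * killed and the saved list is written back, as implemented below.
 */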
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						new_info->qos, false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       old_info->qos, true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       vlan_info->qos, false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       old_vlan_info->qos, true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}
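
/* Map the (current state, requested vlan) pair onto the action the caller
 * must take:
 *   DISABLED + vlan 0     -> NOCHANGE (nothing to do)
 *   DISABLED + vlan X     -> ENABLE   (install port based VLAN X)
 *   ENABLED  + vlan 0     -> DISABLE  (remove port based VLAN)
 *   ENABLED  + same vlan  -> NOCHANGE
 *   ENABLED  + other vlan -> MODIFY   (replace the tag)
 */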
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		return HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	if (!vlan)
		return HNAE3_PORT_BASE_VLAN_DISABLE;
	else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
		return HNAE3_PORT_BASE_VLAN_NOCHANGE;

	return HNAE3_PORT_BASE_VLAN_MODIFY;
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3 bits value, so can not be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							(u8)vfid, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When port based VLAN is enabled, we use the port based VLAN as the
	 * VLAN filter entry. In this case, we don't update the VLAN filter
	 * table when the user adds a new VLAN or removes an existing one,
	 * just update the vport VLAN list. The VLAN ids in the VLAN list
	 * won't be written into the VLAN filter table until port based VLAN
	 * is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, 0, is_kill);
		writen_to_tbl = true;
	}

	if (ret)
		return ret;

	if (is_kill)
		hclge_rm_vport_vlan_table(vport, vlan_id, false);
	else
		hclge_add_vport_vlan_table(vport, vlan_id,
					   writen_to_tbl);

	return 0;
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
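
/* The hardware works on the maximum frame size (MPS), not the MTU, so the
 * requested MTU is padded with the Ethernet header, FCS and room for two
 * VLAN tags (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN = 26 bytes) before it
 * is range checked and programmed.
 */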
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
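
/* A TQP reset is a three step handshake with firmware: assert the reset
 * request (enable = true), poll hclge_get_reset_status() until the queue
 * reports ready, then deassert the request (enable = false). The callers
 * below, hclge_reset_tqp() and hclge_reset_vf_queue(), both follow this
 * sequence.
 */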
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret = 0;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (netif_msg_drv(&hdev->vport->nic))
				hclge_info_show(hdev);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}
			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);
			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
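
/* Before an FLR is executed, request a function reset and wait for the
 * reset path to bring the function down, polling in HCLGE_FLR_WAIT_MS
 * steps up to HCLGE_FLR_WAIT_CNT times (100 ms * 50 = 5 s budget).
 */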
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_config_mac_tnl_int(hdev, false);
	hclge_hw_error_set_state(hdev, false);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
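
/* Register dump helpers: firmware returns register values packed into
 * command descriptors, HCLGE_32_BIT_REG_RTN_DATANUM u32 values per
 * descriptor. The "+ 2" in the descriptor count below accounts for the
 * first descriptor, whose first two data words appear to be reserved for
 * the query header rather than register values (hence the "- 2" when
 * walking descriptor 0). The 64-bit variant follows the same scheme with
 * one reserved word.
 */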
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
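
/* Layout of the ethtool register dump: each block of register values is
 * padded with SEPARATOR_VALUE words so it ends on a REG_NUM_PER_LINE u32
 * boundary (a full separator line when already aligned), which lets user
 * space tools find block boundaries.
 */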
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
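
/* Glue the hclge PF implementation into the hnae3 framework: every op the
 * client layer may invoke on a PF is routed through this table.
 */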
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);