1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
23 #include "hclge_err.h"
26 #define HCLGE_NAME "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
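/* Example (illustrative) of how the two macros above combine: read one
 * named counter out of struct hclge_mac_stats by its byte offset, e.g.
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * The g_mac_stats_string[] table below uses the same offsets to map
 * ethtool stat names to fields.
 */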
30 #define HCLGE_BUF_SIZE_UNIT 256
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
34 #define HCLGE_RESET_MAX_FAIL_CNT 5
36 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
37 static int hclge_init_vlan_config(struct hclge_dev *hdev);
38 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
39 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
40 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
41 u16 *allocated_size, bool is_alloc);
42 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
43 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
45 static struct hnae3_ae_algo ae_algo;
47 static const struct pci_device_id ae_algo_pci_tbl[] = {
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
61 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
62 HCLGE_CMDQ_TX_ADDR_H_REG,
63 HCLGE_CMDQ_TX_DEPTH_REG,
64 HCLGE_CMDQ_TX_TAIL_REG,
65 HCLGE_CMDQ_TX_HEAD_REG,
66 HCLGE_CMDQ_RX_ADDR_L_REG,
67 HCLGE_CMDQ_RX_ADDR_H_REG,
68 HCLGE_CMDQ_RX_DEPTH_REG,
69 HCLGE_CMDQ_RX_TAIL_REG,
70 HCLGE_CMDQ_RX_HEAD_REG,
71 HCLGE_VECTOR0_CMDQ_SRC_REG,
72 HCLGE_CMDQ_INTR_STS_REG,
73 HCLGE_CMDQ_INTR_EN_REG,
74 HCLGE_CMDQ_INTR_GEN_REG};
76 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
77 HCLGE_VECTOR0_OTER_EN_REG,
78 HCLGE_MISC_RESET_STS_REG,
79 HCLGE_MISC_VECTOR_INT_STS,
80 HCLGE_GLOBAL_RESET_REG,
84 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
85 HCLGE_RING_RX_ADDR_H_REG,
86 HCLGE_RING_RX_BD_NUM_REG,
87 HCLGE_RING_RX_BD_LENGTH_REG,
88 HCLGE_RING_RX_MERGE_EN_REG,
89 HCLGE_RING_RX_TAIL_REG,
90 HCLGE_RING_RX_HEAD_REG,
91 HCLGE_RING_RX_FBD_NUM_REG,
92 HCLGE_RING_RX_OFFSET_REG,
93 HCLGE_RING_RX_FBD_OFFSET_REG,
94 HCLGE_RING_RX_STASH_REG,
95 HCLGE_RING_RX_BD_ERR_REG,
96 HCLGE_RING_TX_ADDR_L_REG,
97 HCLGE_RING_TX_ADDR_H_REG,
98 HCLGE_RING_TX_BD_NUM_REG,
99 HCLGE_RING_TX_PRIORITY_REG,
100 HCLGE_RING_TX_TC_REG,
101 HCLGE_RING_TX_MERGE_EN_REG,
102 HCLGE_RING_TX_TAIL_REG,
103 HCLGE_RING_TX_HEAD_REG,
104 HCLGE_RING_TX_FBD_NUM_REG,
105 HCLGE_RING_TX_OFFSET_REG,
106 HCLGE_RING_TX_EBD_NUM_REG,
107 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
111 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
112 HCLGE_TQP_INTR_GL0_REG,
113 HCLGE_TQP_INTR_GL1_REG,
114 HCLGE_TQP_INTR_GL2_REG,
115 HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};
124 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
125 {"mac_tx_mac_pause_num",
126 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
127 {"mac_rx_mac_pause_num",
128 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
129 {"mac_tx_control_pkt_num",
130 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
131 {"mac_rx_control_pkt_num",
132 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
133 {"mac_tx_pfc_pkt_num",
134 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
135 {"mac_tx_pfc_pri0_pkt_num",
136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
137 {"mac_tx_pfc_pri1_pkt_num",
138 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
139 {"mac_tx_pfc_pri2_pkt_num",
140 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
141 {"mac_tx_pfc_pri3_pkt_num",
142 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
143 {"mac_tx_pfc_pri4_pkt_num",
144 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
145 {"mac_tx_pfc_pri5_pkt_num",
146 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
147 {"mac_tx_pfc_pri6_pkt_num",
148 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
149 {"mac_tx_pfc_pri7_pkt_num",
150 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
151 {"mac_rx_pfc_pkt_num",
152 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
153 {"mac_rx_pfc_pri0_pkt_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
155 {"mac_rx_pfc_pri1_pkt_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
157 {"mac_rx_pfc_pri2_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
159 {"mac_rx_pfc_pri3_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
161 {"mac_rx_pfc_pri4_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
163 {"mac_rx_pfc_pri5_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
165 {"mac_rx_pfc_pri6_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
167 {"mac_rx_pfc_pri7_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
169 {"mac_tx_total_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
171 {"mac_tx_total_oct_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
173 {"mac_tx_good_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
175 {"mac_tx_bad_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
177 {"mac_tx_good_oct_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
179 {"mac_tx_bad_oct_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
181 {"mac_tx_uni_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
183 {"mac_tx_multi_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
185 {"mac_tx_broad_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
187 {"mac_tx_undersize_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
189 {"mac_tx_oversize_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
191 {"mac_tx_64_oct_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
193 {"mac_tx_65_127_oct_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
195 {"mac_tx_128_255_oct_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
197 {"mac_tx_256_511_oct_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
199 {"mac_tx_512_1023_oct_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
201 {"mac_tx_1024_1518_oct_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
203 {"mac_tx_1519_2047_oct_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
205 {"mac_tx_2048_4095_oct_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
207 {"mac_tx_4096_8191_oct_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
209 {"mac_tx_8192_9216_oct_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
211 {"mac_tx_9217_12287_oct_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
213 {"mac_tx_12288_16383_oct_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
215 {"mac_tx_1519_max_good_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
217 {"mac_tx_1519_max_bad_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
219 {"mac_rx_total_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
221 {"mac_rx_total_oct_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
223 {"mac_rx_good_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
225 {"mac_rx_bad_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
227 {"mac_rx_good_oct_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
229 {"mac_rx_bad_oct_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
231 {"mac_rx_uni_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
233 {"mac_rx_multi_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
235 {"mac_rx_broad_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
237 {"mac_rx_undersize_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
239 {"mac_rx_oversize_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
241 {"mac_rx_64_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
243 {"mac_rx_65_127_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
245 {"mac_rx_128_255_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
247 {"mac_rx_256_511_oct_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
249 {"mac_rx_512_1023_oct_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
251 {"mac_rx_1024_1518_oct_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
253 {"mac_rx_1519_2047_oct_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
255 {"mac_rx_2048_4095_oct_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
257 {"mac_rx_4096_8191_oct_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
259 {"mac_rx_8192_9216_oct_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
261 {"mac_rx_9217_12287_oct_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
263 {"mac_rx_12288_16383_oct_pkt_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
265 {"mac_rx_1519_max_good_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
267 {"mac_rx_1519_max_bad_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
270 {"mac_tx_fragment_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
272 {"mac_tx_undermin_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
274 {"mac_tx_jabber_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
276 {"mac_tx_err_all_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
278 {"mac_tx_from_app_good_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
280 {"mac_tx_from_app_bad_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
282 {"mac_rx_fragment_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
284 {"mac_rx_undermin_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
286 {"mac_rx_jabber_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
288 {"mac_rx_fcs_err_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
290 {"mac_rx_send_app_good_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
292 {"mac_rx_send_app_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
306 static const u8 hclge_hash_key[] = {
307 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
308 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
309 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
310 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
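/* hclge_hash_key is the 40-byte default RSS hash key; it appears to be
 * the well-known example Toeplitz key from the Microsoft RSS spec.
 */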
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n, ret;
324 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}
	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}
		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	int i, k, n, ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}
		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);
408 *desc_num = 1 + ((reg_num - 3) >> 2) +
409 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
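	/* i.e. the first descriptor carries 3 stats registers and every
	 * further descriptor carries 4, so e.g. reg_num = 21 gives
	 * 1 + (18 >> 2) + 1 = 6 descriptors.
	 */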
414 static int hclge_mac_update_stats(struct hclge_dev *hdev)
	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
432 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
434 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
435 struct hclge_vport *vport = hclge_get_vport(handle);
436 struct hclge_dev *hdev = vport->back;
437 struct hnae3_queue *queue;
438 struct hclge_desc desc[1];
439 struct hclge_tqp *tqp;
442 for (i = 0; i < kinfo->num_tqps; i++) {
443 queue = handle->kinfo.tqp[i];
444 tqp = container_of(queue, struct hclge_tqp, q);
445 /* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}
462 for (i = 0; i < kinfo->num_tqps; i++) {
463 queue = handle->kinfo.tqp[i];
464 tqp = container_of(queue, struct hclge_tqp, q);
465 /* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
493 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
497 for (i = 0; i < kinfo->num_tqps; i++) {
498 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
505 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
507 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * 2;
}
512 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
514 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
518 for (i = 0; i < kinfo->num_tqps; i++) {
519 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
520 struct hclge_tqp, q);
521 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
523 buff = buff + ETH_GSTRING_LEN;
526 for (i = 0; i < kinfo->num_tqps; i++) {
527 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
528 struct hclge_tqp, q);
529 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
531 buff = buff + ETH_GSTRING_LEN;
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
569 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
571 struct hnae3_handle *handle;
574 handle = &hdev->vport[0].nic;
575 if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}
	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
590 static void hclge_update_stats(struct hnae3_handle *handle,
591 struct net_device_stats *net_stats)
593 struct hclge_vport *vport = hclge_get_vport(handle);
594 struct hclge_dev *hdev = vport->back;
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;
	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);
612 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
615 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
617 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
618 HNAE3_SUPPORT_PHY_LOOPBACK |\
619 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
620 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
622 struct hclge_vport *vport = hclge_get_vport(handle);
623 struct hclge_dev *hdev = vport->back;
	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
631 if (stringset == ETH_SS_TEST) {
632 /* clear loopback bit flags at first */
633 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
634 if (hdev->pdev->revision >= 0x21 ||
635 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
636 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
637 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
639 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
643 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
644 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
645 } else if (stringset == ETH_SS_STATS) {
646 count = ARRAY_SIZE(g_mac_stats_string) +
647 hclge_tqps_get_sset_count(handle, stringset);
653 static void hclge_get_strings(struct hnae3_handle *handle,
657 u8 *p = (char *)data;
660 if (stringset == ETH_SS_STATS) {
661 size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
666 p = hclge_tqps_get_strings(handle, p);
667 } else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
695 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
697 struct hclge_vport *vport = hclge_get_vport(handle);
698 struct hclge_dev *hdev = vport->back;
	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}
708 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
711 struct hclge_vport *vport = hclge_get_vport(handle);
712 struct hclge_dev *hdev = vport->back;
714 *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
715 *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
718 static int hclge_parse_func_status(struct hclge_dev *hdev,
719 struct hclge_func_status_cmd *status)
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;
742 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
743 req = (struct hclge_func_status_cmd *)desc.data;
	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check if pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
761 ret = hclge_parse_func_status(hdev, req);
766 static int hclge_query_pf_resource(struct hclge_dev *hdev)
768 struct hclge_pf_res_cmd *req;
769 struct hclge_desc desc;
772 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}
780 req = (struct hclge_pf_res_cmd *)desc.data;
781 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
782 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
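	/* Illustrative: the firmware reports buffer sizes in units of
	 * 1 << HCLGE_BUF_UNIT_S bytes (128-byte units, assuming
	 * HCLGE_BUF_UNIT_S == 7), so req->buf_size = 0x100 would mean
	 * 0x100 << 7 = 32 KiB of packet buffer.
	 */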
800 if (hnae3_dev_roce_supported(hdev)) {
801 hdev->roce_base_msix_offset =
802 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
803 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
			hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
					HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
			hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
					HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
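/* Illustrative vector layout implied by the above, e.g. with
 * roce_base_msix_offset = 65 and num_roce_msi = 64:
 *   vectors [0, 64]   - NIC (misc vector 0 plus TQP vectors)
 *   vectors [65, 128] - RoCE
 * giving num_msi = 129 to cover both ranges.
 */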
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	/* speed codes here match the HCLGE_CFG_SPEED_S values used by
	 * hclge_cfg_mac_speed_dup_hw() below
	 */
	switch (speed_cmd) {
	case 6: *speed = HCLGE_MAC_SPEED_10M; break;
	case 7: *speed = HCLGE_MAC_SPEED_100M; break;
	case 0: *speed = HCLGE_MAC_SPEED_1G; break;
	case 1: *speed = HCLGE_MAC_SPEED_10G; break;
	case 2: *speed = HCLGE_MAC_SPEED_25G; break;
	case 3: *speed = HCLGE_MAC_SPEED_40G; break;
	case 4: *speed = HCLGE_MAC_SPEED_50G; break;
	case 5: *speed = HCLGE_MAC_SPEED_100G; break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	/* no 25G LR link mode bit exists in ethtool, so SR is used here */
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
977 static void hclge_convert_setting_fec(struct hclge_mac *mac)
979 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
980 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
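/* Summary of the mapping above: 10G/40G support BaseR (+ auto), 25G/50G
 * support both BaseR and RS (+ auto), 100G supports RS (+ auto), and all
 * other speeds have no FEC ability.
 */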
1008 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1011 struct hclge_mac *mac = &hdev->hw.mac;
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);
1017 hclge_convert_setting_sr(mac, speed_ability);
1018 hclge_convert_setting_lr(mac, speed_ability);
1019 hclge_convert_setting_cr(mac, speed_ability);
1020 if (hdev->pdev->revision >= 0x21)
1021 hclge_convert_setting_fec(mac);
1023 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1024 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1025 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1028 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1031 struct hclge_mac *mac = &hdev->hw.mac;
1033 hclge_convert_setting_kr(mac, speed_ability);
1034 if (hdev->pdev->revision >= 0x21)
1035 hclge_convert_setting_fec(mac);
1036 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1037 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1038 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1041 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1044 unsigned long *supported = hdev->hw.mac.supported;
	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);
	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}
1061 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1062 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}
1066 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1067 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1068 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1071 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1073 u8 media_type = hdev->hw.mac.media_type;
1075 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1076 hclge_parse_fiber_link_mode(hdev, speed_ability);
1077 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1078 hclge_parse_copper_link_mode(hdev, speed_ability);
1079 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1080 hclge_parse_backplane_link_mode(hdev, speed_ability);
1082 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1084 struct hclge_cfg_param_cmd *req;
1085 u64 mac_addr_tmp_high;
1089 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1091 /* get the configuration */
1092 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1095 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1096 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1097 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1098 HCLGE_CFG_TQP_DESC_N_M,
1099 HCLGE_CFG_TQP_DESC_N_S);
1101 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102 HCLGE_CFG_PHY_ADDR_M,
1103 HCLGE_CFG_PHY_ADDR_S);
1104 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1105 HCLGE_CFG_MEDIA_TP_M,
1106 HCLGE_CFG_MEDIA_TP_S);
1107 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1108 HCLGE_CFG_RX_BUF_LEN_M,
1109 HCLGE_CFG_RX_BUF_LEN_S);
1110 /* get mac_address */
1111 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1112 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113 HCLGE_CFG_MAC_ADDR_H_M,
1114 HCLGE_CFG_MAC_ADDR_H_S);
1116 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1118 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1119 HCLGE_CFG_DEFAULT_SPEED_M,
1120 HCLGE_CFG_DEFAULT_SPEED_S);
1121 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1122 HCLGE_CFG_RSS_SIZE_M,
1123 HCLGE_CFG_RSS_SIZE_S);
1125 for (i = 0; i < ETH_ALEN; i++)
1126 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
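	/* Worked example (illustrative): param[2] = 0x44332211 and the low
	 * 16 bits of param[3] = 0x6655 give mac_addr_tmp = 0x665544332211,
	 * so cfg->mac_addr[] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66}, i.e.
	 * the MAC address 11:22:33:44:55:66.
	 */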
1128 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1129 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1131 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1132 HCLGE_CFG_SPEED_ABILITY_M,
1133 HCLGE_CFG_SPEED_ABILITY_S);
1134 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1135 HCLGE_CFG_UMV_TBL_SPACE_M,
1136 HCLGE_CFG_UMV_TBL_SPACE_S);
1137 if (!cfg->umv_space)
1138 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
1145 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1147 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1148 struct hclge_cfg_param_cmd *req;
1151 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1154 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1155 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1157 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1158 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length field is in units of 4 bytes when sent to hardware */
1160 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1161 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1162 req->offset = cpu_to_le32(offset);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
1176 static int hclge_get_cap(struct hclge_dev *hdev)
	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
1195 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1197 #define HCLGE_MIN_TX_DESC 64
1198 #define HCLGE_MIN_RX_DESC 64
1200 if (!is_kdump_kernel())
1203 dev_info(&hdev->pdev->dev,
1204 "Running kdump kernel. Using minimal resources\n");
	/* the minimal number of queue pairs equals the number of vports */
1207 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1208 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1209 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1212 static int hclge_configure(struct hclge_dev *hdev)
1214 struct hclge_cfg cfg;
	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}
1223 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1224 hdev->base_tqp_pid = 0;
1225 hdev->rss_size_max = cfg.rss_size_max;
1226 hdev->rx_buf_len = cfg.rx_buf_len;
1227 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1228 hdev->hw.mac.media_type = cfg.media_type;
1229 hdev->hw.mac.phy_addr = cfg.phy_addr;
1230 hdev->num_tx_desc = cfg.tqp_desc_num;
1231 hdev->num_rx_desc = cfg.tqp_desc_num;
1232 hdev->tm_info.num_pg = 1;
1233 hdev->tc_max = cfg.tc_num;
1234 hdev->tm_info.hw_pfc_map = 0;
1235 hdev->wanted_umv_size = cfg.umv_space;
1237 if (hnae3_dev_fd_supported(hdev)) {
1239 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}
1248 hclge_parse_link_mode(hdev, cfg.speed_ability);
	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}
	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;
	/* non-contiguous TCs are currently not supported */
1268 for (i = 0; i < hdev->tm_info.num_tc; i++)
1269 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1271 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1273 hclge_init_kdump_kernel_config(hdev);
1278 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1281 struct hclge_cfg_tso_status_cmd *req;
1282 struct hclge_desc desc;
1285 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1287 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1290 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1291 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1292 req->tso_mss_min = cpu_to_le16(tso_mss);
1295 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1296 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1297 req->tso_mss_max = cpu_to_le16(tso_mss);
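	/* note: the HCLGE_TSO_MSS_MIN_M/_S mask is reused for the max field
	 * above; both 16-bit words appear to share the same bit layout.
	 */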
1299 return hclge_cmd_send(&hdev->hw, &desc, 1);
1302 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1304 struct hclge_cfg_gro_status_cmd *req;
1305 struct hclge_desc desc;
1308 if (!hnae3_dev_gro_supported(hdev))
1311 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1312 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1314 req->gro_en = cpu_to_le16(en ? 1 : 0);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);
	return ret;
}
1324 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1326 struct hclge_tqp *tqp;
1329 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1330 sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;
	for (i = 0; i < hdev->num_tqps; i++, tqp++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;
1340 tqp->q.ae_algo = &ae_algo;
1341 tqp->q.buf_size = hdev->rx_buf_len;
1342 tqp->q.tx_desc_num = hdev->num_tx_desc;
1343 tqp->q.rx_desc_num = hdev->num_rx_desc;
1344 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1345 i * HCLGE_TQP_REG_SIZE;
1353 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1354 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1356 struct hclge_tqp_map_cmd *req;
1357 struct hclge_desc desc;
1360 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1362 req = (struct hclge_tqp_map_cmd *)desc.data;
1363 req->tqp_id = cpu_to_le16(tqp_pid);
1364 req->tqp_vf = func_id;
1365 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1366 1 << HCLGE_TQP_MAP_EN_B;
1367 req->tqp_vid = cpu_to_le16(tqp_vid);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
1376 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1378 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1379 struct hclge_dev *hdev = vport->back;
1382 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1383 alloced < num_tqps; i++) {
1384 if (!hdev->htqp[i].alloced) {
1385 hdev->htqp[i].q.handle = &vport->nic;
1386 hdev->htqp[i].q.tqp_index = alloced;
1387 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1388 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1389 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1390 hdev->htqp[i].alloced = true;
1394 vport->alloc_tqps = alloced;
1395 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1396 vport->alloc_tqps / hdev->tm_info.num_tc);
1401 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1402 u16 num_tx_desc, u16 num_rx_desc)
1405 struct hnae3_handle *nic = &vport->nic;
1406 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1407 struct hclge_dev *hdev = vport->back;
1410 kinfo->num_tx_desc = num_tx_desc;
1411 kinfo->num_rx_desc = num_rx_desc;
1413 kinfo->rx_buf_len = hdev->rx_buf_len;
1415 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1416 sizeof(struct hnae3_queue *), GFP_KERNEL);
	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
1427 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1428 struct hclge_vport *vport)
1430 struct hnae3_handle *nic = &vport->nic;
1431 struct hnae3_knic_private_info *kinfo;
1434 kinfo = &nic->kinfo;
1435 for (i = 0; i < vport->alloc_tqps; i++) {
1436 struct hclge_tqp *q =
1437 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1441 is_pf = !(vport->vport_id);
1442 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1451 static int hclge_map_tqp(struct hclge_dev *hdev)
1453 struct hclge_vport *vport = hdev->vport;
1456 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1457 for (i = 0; i < num_vport; i++) {
1460 ret = hclge_map_tqp_to_vport(hdev, vport);
1470 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1472 struct hnae3_handle *nic = &vport->nic;
1473 struct hclge_dev *hdev = vport->back;
1476 nic->pdev = hdev->pdev;
1477 nic->ae_algo = &ae_algo;
1478 nic->numa_node_mask = hdev->numa_node_mask;
	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
1488 static int hclge_alloc_vport(struct hclge_dev *hdev)
1490 struct pci_dev *pdev = hdev->pdev;
1491 struct hclge_vport *vport;
1497 /* We need to alloc a vport for main NIC of PF */
1498 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1500 if (hdev->num_tqps < num_vport) {
1501 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1502 hdev->num_tqps, num_vport);
1506 /* Alloc the same number of TQPs for every vport */
1507 tqp_per_vport = hdev->num_tqps / num_vport;
1508 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
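	/* e.g. (illustrative) num_tqps = 10 and num_vport = 3 give
	 * tqp_per_vport = 3 and tqp_main_vport = 3 + 1 = 4; the main vport
	 * absorbs the remainder.
	 */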
1510 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1515 hdev->vport = vport;
1516 hdev->num_alloc_vport = num_vport;
1518 if (IS_ENABLED(CONFIG_PCI_IOV))
1519 hdev->num_alloc_vfs = hdev->num_req_vfs;
1521 for (i = 0; i < num_vport; i++) {
1523 vport->vport_id = i;
1524 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1525 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1526 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1527 INIT_LIST_HEAD(&vport->vlan_list);
1528 INIT_LIST_HEAD(&vport->uc_mac_list);
1529 INIT_LIST_HEAD(&vport->mc_mac_list);
		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}
1548 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1549 struct hclge_pkt_buf_alloc *buf_alloc)
/* TX buffer size is in units of 128 bytes */
1552 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1553 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1554 struct hclge_tx_buff_alloc_cmd *req;
1555 struct hclge_desc desc;
1559 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1561 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1562 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1563 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1565 req->tx_pkt_buff[i] =
1566 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1567 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
1578 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1579 struct hclge_pkt_buf_alloc *buf_alloc)
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
1589 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1593 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1594 if (hdev->hw_tc_map & BIT(i))
/* Get the number of PFC-enabled TCs that have a private buffer */
1600 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1601 struct hclge_pkt_buf_alloc *buf_alloc)
1603 struct hclge_priv_buf *priv;
1606 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1607 priv = &buf_alloc->priv_buf[i];
1608 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
/* Get the number of PFC-disabled TCs that have a private buffer */
1617 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1618 struct hclge_pkt_buf_alloc *buf_alloc)
1620 struct hclge_priv_buf *priv;
1623 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1624 priv = &buf_alloc->priv_buf[i];
1625 if (hdev->hw_tc_map & BIT(i) &&
1626 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1634 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1636 struct hclge_priv_buf *priv;
1640 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1641 priv = &buf_alloc->priv_buf[i];
1643 rx_priv += priv->buf_size;
1648 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1650 u32 i, total_tx_size = 0;
1652 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1653 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1655 return total_tx_size;
1658 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1659 struct hclge_pkt_buf_alloc *buf_alloc,
1662 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1663 u32 tc_num = hclge_get_tc_num(hdev);
1664 u32 shared_buf, aligned_mps;
1668 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1670 if (hnae3_dev_dcb_supported(hdev))
1671 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1674 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1675 + hdev->dv_buf_size;
1677 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1678 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1679 HCLGE_BUF_SIZE_UNIT);
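	/* Worked example (illustrative): mps = 1500 gives aligned_mps =
	 * roundup(1500, 256) = 1536; with tc_num = 4, shared_buf_tc =
	 * 4 * 1536 + 1536 = 7680, and shared_std is the larger of
	 * shared_buf_min and shared_buf_tc, rounded up to 256 bytes.
	 */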
1681 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1682 if (rx_all < rx_priv + shared_std)
1685 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1686 buf_alloc->s_buf.buf_size = shared_buf;
1687 if (hnae3_dev_dcb_supported(hdev)) {
1688 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1689 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1690 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1691 HCLGE_BUF_SIZE_UNIT);
1693 buf_alloc->s_buf.self.high = aligned_mps +
1694 HCLGE_NON_DCB_ADDITIONAL_BUF;
1695 buf_alloc->s_buf.self.low = aligned_mps;
1698 if (hnae3_dev_dcb_supported(hdev)) {
1700 hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1702 hi_thrd = shared_buf - hdev->dv_buf_size;
1704 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1705 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1706 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1708 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1709 lo_thrd = aligned_mps;
1712 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1713 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1714 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1721 struct hclge_pkt_buf_alloc *buf_alloc)
1725 total_size = hdev->pkt_buf_size;
1727 /* alloc tx buffer for all enabled tc */
1728 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1729 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1731 if (hdev->hw_tc_map & BIT(i)) {
1732 if (total_size < hdev->tx_buf_size)
1735 priv->tx_buf_size = hdev->tx_buf_size;
1737 priv->tx_buf_size = 0;
1740 total_size -= priv->tx_buf_size;
1746 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1747 struct hclge_pkt_buf_alloc *buf_alloc)
1749 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1750 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1753 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1754 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1761 if (!(hdev->hw_tc_map & BIT(i)))
1766 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1767 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1768 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1769 HCLGE_BUF_SIZE_UNIT);
1772 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1776 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1779 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1782 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1783 struct hclge_pkt_buf_alloc *buf_alloc)
1785 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1786 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	/* start clearing from the last TC */
1790 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1791 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1793 if (hdev->hw_tc_map & BIT(i) &&
1794 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1795 /* Clear the no pfc TC private buffer */
1803 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1804 no_pfc_priv_num == 0)
1808 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1811 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1812 struct hclge_pkt_buf_alloc *buf_alloc)
1814 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1815 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	/* start clearing from the last TC */
1819 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1820 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1822 if (hdev->hw_tc_map & BIT(i) &&
1823 hdev->tm_info.hw_pfc_map & BIT(i)) {
1824 /* Reduce the number of pfc TC with private buffer */
1832 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1837 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
1845 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1846 struct hclge_pkt_buf_alloc *buf_alloc)
1848 /* When DCB is not supported, rx private buffer is not allocated. */
1849 if (!hnae3_dev_dcb_supported(hdev)) {
1850 u32 rx_all = hdev->pkt_buf_size;
1852 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1853 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1859 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1862 /* try to decrease the buffer size */
1863 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1866 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1869 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1875 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1876 struct hclge_pkt_buf_alloc *buf_alloc)
1878 struct hclge_rx_priv_buff_cmd *req;
1879 struct hclge_desc desc;
1883 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1884 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1886 /* Alloc private buffer TCs */
1887 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1888 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1891 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1893 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1897 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1898 (1 << HCLGE_TC0_PRI_BUF_EN_B));
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
1908 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1909 struct hclge_pkt_buf_alloc *buf_alloc)
1911 struct hclge_rx_priv_wl_buf *req;
1912 struct hclge_priv_buf *priv;
1913 struct hclge_desc desc[2];
1917 for (i = 0; i < 2; i++) {
1918 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1920 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
		/* The first descriptor sets the NEXT bit to 1 */
1924 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1926 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1928 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1929 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1931 priv = &buf_alloc->priv_buf[idx];
1932 req->tc_wl[j].high =
1933 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1934 req->tc_wl[j].high |=
1935 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1937 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1938 req->tc_wl[j].low |=
1939 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
1952 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1953 struct hclge_pkt_buf_alloc *buf_alloc)
1955 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1956 struct hclge_rx_com_thrd *req;
1957 struct hclge_desc desc[2];
1958 struct hclge_tc_thrd *tc;
1962 for (i = 0; i < 2; i++) {
1963 hclge_cmd_setup_basic_desc(&desc[i],
1964 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1965 req = (struct hclge_rx_com_thrd *)&desc[i].data;
		/* The first descriptor sets the NEXT bit to 1 */
1969 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1971 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1973 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1974 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1976 req->com_thrd[j].high =
1977 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1978 req->com_thrd[j].high |=
1979 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1980 req->com_thrd[j].low =
1981 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1982 req->com_thrd[j].low |=
1983 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1987 /* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
1995 static int hclge_common_wl_config(struct hclge_dev *hdev,
1996 struct hclge_pkt_buf_alloc *buf_alloc)
1998 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1999 struct hclge_rx_com_wl *req;
2000 struct hclge_desc desc;
2003 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2005 req = (struct hclge_rx_com_wl *)desc.data;
2006 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2007 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2009 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2010 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
	return ret;
}
2020 int hclge_buffer_alloc(struct hclge_dev *hdev)
2022 struct hclge_pkt_buf_alloc *pkt_buf;
2025 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
2086 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2088 struct hnae3_handle *roce = &vport->roce;
2089 struct hnae3_handle *nic = &vport->nic;
2091 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2093 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2094 vport->back->num_msi_left == 0)
2097 roce->rinfo.base_vector = vport->back->roce_base_vector;
2099 roce->rinfo.netdev = nic->kinfo.netdev;
2100 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2102 roce->pdev = nic->pdev;
2103 roce->ae_algo = nic->ae_algo;
2104 roce->numa_node_mask = nic->numa_node_mask;
2109 static int hclge_init_msi(struct hclge_dev *hdev)
2111 struct pci_dev *pdev = hdev->pdev;
	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
2124 dev_warn(&hdev->pdev->dev,
2125 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2126 hdev->num_msi, vectors);
2128 hdev->num_msi = vectors;
2129 hdev->num_msi_left = vectors;
2130 hdev->base_msi_vector = pdev->irq;
2131 hdev->roce_base_vector = hdev->base_msi_vector +
2132 hdev->roce_base_msix_offset;
2134 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2135 sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}
2141 for (i = 0; i < hdev->num_msi; i++)
2142 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2144 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2145 sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* only 10M and 100M support half duplex; force full duplex otherwise */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}
2163 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2166 struct hclge_config_mac_speed_dup_cmd *req;
2167 struct hclge_desc desc;
2170 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2172 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
2179 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2180 HCLGE_CFG_SPEED_S, 6);
2182 case HCLGE_MAC_SPEED_100M:
2183 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2184 HCLGE_CFG_SPEED_S, 7);
2186 case HCLGE_MAC_SPEED_1G:
2187 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2188 HCLGE_CFG_SPEED_S, 0);
2190 case HCLGE_MAC_SPEED_10G:
2191 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2192 HCLGE_CFG_SPEED_S, 1);
2194 case HCLGE_MAC_SPEED_25G:
2195 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2196 HCLGE_CFG_SPEED_S, 2);
2198 case HCLGE_MAC_SPEED_40G:
2199 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2200 HCLGE_CFG_SPEED_S, 3);
2202 case HCLGE_MAC_SPEED_50G:
2203 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2204 HCLGE_CFG_SPEED_S, 4);
2206 case HCLGE_MAC_SPEED_100G:
2207 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2208 HCLGE_CFG_SPEED_S, 5);
2211 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2215 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2220 dev_err(&hdev->pdev->dev,
2221 "mac speed/duplex config cmd failed %d.\n", ret);
2228 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2232 duplex = hclge_check_speed_dup(duplex, speed);
2233 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2236 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2240 hdev->hw.mac.speed = speed;
2241 hdev->hw.mac.duplex = duplex;
2246 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2249 struct hclge_vport *vport = hclge_get_vport(handle);
2250 struct hclge_dev *hdev = vport->back;
2252 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2255 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2257 struct hclge_config_auto_neg_cmd *req;
2258 struct hclge_desc desc;
2262 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2264 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2265 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2266 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2268 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2270 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2276 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2278 struct hclge_vport *vport = hclge_get_vport(handle);
2279 struct hclge_dev *hdev = vport->back;
2281 if (!hdev->hw.mac.support_autoneg) {
2283 dev_err(&hdev->pdev->dev,
2284 "autoneg is not supported by current port\n");
2291 return hclge_set_autoneg_en(hdev, enable);
2294 static int hclge_get_autoneg(struct hnae3_handle *handle)
2296 struct hclge_vport *vport = hclge_get_vport(handle);
2297 struct hclge_dev *hdev = vport->back;
2298 struct phy_device *phydev = hdev->hw.mac.phydev;
2301 return phydev->autoneg;
2303 return hdev->hw.mac.autoneg;
2306 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2308 struct hclge_vport *vport = hclge_get_vport(handle);
2309 struct hclge_dev *hdev = vport->back;
2312 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2314 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2317 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2320 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2322 struct hclge_config_fec_cmd *req;
2323 struct hclge_desc desc;
2326 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2328 req = (struct hclge_config_fec_cmd *)desc.data;
2329 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2330 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2331 if (fec_mode & BIT(HNAE3_FEC_RS))
2332 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2333 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2334 if (fec_mode & BIT(HNAE3_FEC_BASER))
2335 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2338 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2340 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2345 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2347 struct hclge_vport *vport = hclge_get_vport(handle);
2348 struct hclge_dev *hdev = vport->back;
2349 struct hclge_mac *mac = &hdev->hw.mac;
2352 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2353 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2357 ret = hclge_set_fec_hw(hdev, fec_mode);
2361 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2365 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2368 struct hclge_vport *vport = hclge_get_vport(handle);
2369 struct hclge_dev *hdev = vport->back;
2370 struct hclge_mac *mac = &hdev->hw.mac;
2373 *fec_ability = mac->fec_ability;
2375 *fec_mode = mac->fec_mode;
2378 static int hclge_mac_init(struct hclge_dev *hdev)
2380 struct hclge_mac *mac = &hdev->hw.mac;
2383 hdev->support_sfp_query = true;
2384 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2385 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2386 hdev->hw.mac.duplex);
2388 dev_err(&hdev->pdev->dev,
2389 "Config mac speed dup fail ret=%d\n", ret);
2395 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2396 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2398 dev_err(&hdev->pdev->dev,
2399 "Fec mode init fail, ret = %d\n", ret);
2404 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2406 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2410 ret = hclge_buffer_alloc(hdev);
2412 dev_err(&hdev->pdev->dev,
2413 "allocate buffer fail, ret=%d\n", ret);
2418 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2420 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2421 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2422 schedule_work(&hdev->mbx_service_task);
2425 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2427 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2428 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2429 schedule_work(&hdev->rst_service_task);
2432 static void hclge_task_schedule(struct hclge_dev *hdev)
2434 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2435 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2436 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2437 (void)schedule_work(&hdev->service_task);
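/* The three *_task_schedule() helpers above share one pattern:
 * test_and_set_bit() on a *_SERVICE_SCHED state bit makes scheduling
 * idempotent, so an already-queued work item is never queued twice,
 * while the guard bits (CMD_DISABLE/REMOVING/DOWN) keep work off the
 * queue while the device is resetting or being torn down.
 */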
2440 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2442 struct hclge_link_status_cmd *req;
2443 struct hclge_desc desc;
2447 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2448 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2450 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2455 req = (struct hclge_link_status_cmd *)desc.data;
2456 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2458 return !!link_status;
2461 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2466 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2469 mac_state = hclge_get_mac_link_status(hdev);
2471 if (hdev->hw.mac.phydev) {
2472 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2473 link_stat = mac_state &
2474 hdev->hw.mac.phydev->link;
2479 link_stat = mac_state;
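/* Net effect of the above: with an attached PHY the reported link is
 * the logical AND of the MAC link status and the PHY link state (the
 * PHY must also be in PHY_RUNNING); without a PHY the MAC status alone
 * is used, and a device marked HCLGE_STATE_DOWN reports link down.
 */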
2485 static void hclge_update_link_status(struct hclge_dev *hdev)
2487 struct hnae3_client *rclient = hdev->roce_client;
2488 struct hnae3_client *client = hdev->nic_client;
2489 struct hnae3_handle *rhandle;
2490 struct hnae3_handle *handle;
2496 state = hclge_get_mac_phy_link(hdev);
2497 if (state != hdev->hw.mac.link) {
2498 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2499 handle = &hdev->vport[i].nic;
2500 client->ops->link_status_change(handle, state);
2501 hclge_config_mac_tnl_int(hdev, state);
2502 rhandle = &hdev->vport[i].roce;
2503 if (rclient && rclient->ops->link_status_change)
2504 rclient->ops->link_status_change(rhandle,
2507 hdev->hw.mac.link = state;
2511 static void hclge_update_port_capability(struct hclge_mac *mac)
2513 /* update fec ability by speed */
2514 hclge_convert_setting_fec(mac);
2516 /* firmware cannot identify the backplane type; the media type
2517 * read from the configuration can help handle it
2519 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2520 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2521 mac->module_type = HNAE3_MODULE_TYPE_KR;
2522 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2523 mac->module_type = HNAE3_MODULE_TYPE_TP;
2525 if (mac->support_autoneg) {
2526 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2527 linkmode_copy(mac->advertising, mac->supported);
2529 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2531 linkmode_zero(mac->advertising);
2535 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2537 struct hclge_sfp_info_cmd *resp;
2538 struct hclge_desc desc;
2541 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2542 resp = (struct hclge_sfp_info_cmd *)desc.data;
2543 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2544 if (ret == -EOPNOTSUPP) {
2545 dev_warn(&hdev->pdev->dev,
2546 "IMP do not support get SFP speed %d\n", ret);
2549 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2553 *speed = le32_to_cpu(resp->speed);
2558 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2560 struct hclge_sfp_info_cmd *resp;
2561 struct hclge_desc desc;
2564 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2565 resp = (struct hclge_sfp_info_cmd *)desc.data;
2567 resp->query_type = QUERY_ACTIVE_SPEED;
2569 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2570 if (ret == -EOPNOTSUPP) {
2571 dev_warn(&hdev->pdev->dev,
2572 "IMP does not support get SFP info %d\n", ret);
2575 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2579 mac->speed = le32_to_cpu(resp->speed);
2580 /* if resp->speed_ability is 0, it means the firmware is an old
2581 * version, so do not update these params
2583 if (resp->speed_ability) {
2584 mac->module_type = le32_to_cpu(resp->module_type);
2585 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2586 mac->autoneg = resp->autoneg;
2587 mac->support_autoneg = resp->autoneg_ability;
2588 if (!resp->active_fec)
2591 mac->fec_mode = BIT(resp->active_fec);
2593 mac->speed_type = QUERY_SFP_SPEED;
2599 static int hclge_update_port_info(struct hclge_dev *hdev)
2601 struct hclge_mac *mac = &hdev->hw.mac;
2602 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2605 /* get the port info from SFP cmd if not copper port */
2606 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2609 /* if IMP does not support getting SFP/qSFP info, return directly */
2610 if (!hdev->support_sfp_query)
2613 if (hdev->pdev->revision >= 0x21)
2614 ret = hclge_get_sfp_info(hdev, mac);
2616 ret = hclge_get_sfp_speed(hdev, &speed);
2618 if (ret == -EOPNOTSUPP) {
2619 hdev->support_sfp_query = false;
2625 if (hdev->pdev->revision >= 0x21) {
2626 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2627 hclge_update_port_capability(mac);
2630 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2633 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2634 return 0; /* do nothing if no SFP */
2636 /* must config full duplex for SFP */
2637 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2641 static int hclge_get_status(struct hnae3_handle *handle)
2643 struct hclge_vport *vport = hclge_get_vport(handle);
2644 struct hclge_dev *hdev = vport->back;
2646 hclge_update_link_status(hdev);
2648 return hdev->hw.mac.link;
2651 static void hclge_service_timer(struct timer_list *t)
2653 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2655 mod_timer(&hdev->service_timer, jiffies + HZ);
2656 hdev->hw_stats.stats_timer++;
2657 hdev->fd_arfs_expire_timer++;
2658 hclge_task_schedule(hdev);
2661 static void hclge_service_complete(struct hclge_dev *hdev)
2663 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2665 /* Flush memory before next watchdog */
2666 smp_mb__before_atomic();
2667 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2670 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2672 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2674 /* fetch the events from their corresponding regs */
2675 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2676 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2677 msix_src_reg = hclge_read_dev(&hdev->hw,
2678 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2680 /* Assumption: If by any chance reset and mailbox events are reported
2681 * together then we will only process reset event in this go and will
2682 * defer the processing of the mailbox events. Since we would not have
2683 * cleared the RX CMDQ event this time, we would receive another
2684 * interrupt from H/W just for the mailbox.
2687 /* check for vector0 reset event sources */
2688 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2689 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2690 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2691 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2692 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2693 hdev->rst_stats.imp_rst_cnt++;
2694 return HCLGE_VECTOR0_EVENT_RST;
2697 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2698 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2699 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2700 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2701 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2702 hdev->rst_stats.global_rst_cnt++;
2703 return HCLGE_VECTOR0_EVENT_RST;
2706 /* check for vector0 msix event source */
2707 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2708 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2710 return HCLGE_VECTOR0_EVENT_ERR;
2713 /* check for vector0 mailbox(=CMDQ RX) event source */
2714 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2715 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2716 *clearval = cmdq_src_reg;
2717 return HCLGE_VECTOR0_EVENT_MBX;
2720 /* print other vector0 event source */
2721 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2722 cmdq_src_reg, msix_src_reg);
2723 return HCLGE_VECTOR0_EVENT_OTHER;
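/* Event priority implemented above, highest first: IMP reset, global
 * reset, MSI-X hardware error, then mailbox (CMDQ RX). Returning on
 * the first match implements the "defer mailbox while a reset is
 * pending" assumption documented at the top of this function: the
 * uncleared CMDQ RX bit simply re-raises the interrupt later.
 */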
2726 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2729 switch (event_type) {
2730 case HCLGE_VECTOR0_EVENT_RST:
2731 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2733 case HCLGE_VECTOR0_EVENT_MBX:
2734 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2741 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2743 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2744 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2745 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2746 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2747 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2750 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2752 writel(enable ? 1 : 0, vector->addr);
2755 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2757 struct hclge_dev *hdev = data;
2761 hclge_enable_vector(&hdev->misc_vector, false);
2762 event_cause = hclge_check_event_cause(hdev, &clearval);
2764 /* vector 0 interrupt is shared with reset and mailbox source events. */
2765 switch (event_cause) {
2766 case HCLGE_VECTOR0_EVENT_ERR:
2767 /* we do not know what type of reset is required now. This could
2768 * only be decided after we fetch the type of errors which
2769 * caused this event. Therefore, we will do below for now:
2770 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2771 *    have deferred the type of reset to be used.
2772 * 2. Schedule the reset service task.
2773 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2774 * will fetch the correct type of reset. This would be done
2775 * by first decoding the types of errors.
2777 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2779 case HCLGE_VECTOR0_EVENT_RST:
2780 hclge_reset_task_schedule(hdev);
2782 case HCLGE_VECTOR0_EVENT_MBX:
2783 /* If we are here then,
2784 * 1. Either we are not handling any mbx task and no task is scheduled, or
2787 * 2. we could be handling a mbx task but nothing more is scheduled.
2789 * In both cases, we should schedule the mbx task as there are more
2790 * mbx messages reported by this interrupt.
2792 hclge_mbx_task_schedule(hdev);
2795 dev_warn(&hdev->pdev->dev,
2796 "received unknown or unhandled event of vector0\n");
2800 /* clear the source of interrupt if it is not caused by reset */
2801 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2802 hclge_clear_event_cause(hdev, event_cause, clearval);
2803 hclge_enable_vector(&hdev->misc_vector, true);
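/* Note: for reset events the interrupt source is deliberately left
 * set and the misc vector stays masked at this point; it is re-enabled
 * from hclge_clear_reset_cause() once the reset cause has been
 * cleared, so no further vector0 interrupts fire mid-reset.
 */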
2809 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2811 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2812 dev_warn(&hdev->pdev->dev,
2813 "vector(vector_id %d) has been freed.\n", vector_id);
2817 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2818 hdev->num_msi_left += 1;
2819 hdev->num_msi_used -= 1;
2822 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2824 struct hclge_misc_vector *vector = &hdev->misc_vector;
2826 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2828 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2829 hdev->vector_status[0] = 0;
2831 hdev->num_msi_left -= 1;
2832 hdev->num_msi_used += 1;
2835 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2839 hclge_get_misc_vector(hdev);
2841 /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2842 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2843 0, "hclge_misc", hdev);
2845 hclge_free_vector(hdev, 0);
2846 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2847 hdev->misc_vector.vector_irq);
2853 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2855 free_irq(hdev->misc_vector.vector_irq, hdev);
2856 hclge_free_vector(hdev, 0);
2859 int hclge_notify_client(struct hclge_dev *hdev,
2860 enum hnae3_reset_notify_type type)
2862 struct hnae3_client *client = hdev->nic_client;
2865 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2869 if (!client->ops->reset_notify)
2872 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2873 struct hnae3_handle *handle = &hdev->vport[i].nic;
2876 ret = client->ops->reset_notify(handle, type);
2878 dev_err(&hdev->pdev->dev,
2879 "notify nic client failed %d(%d)\n", type, ret);
2887 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2888 enum hnae3_reset_notify_type type)
2890 struct hnae3_client *client = hdev->roce_client;
2894 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2898 if (!client->ops->reset_notify)
2901 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2902 struct hnae3_handle *handle = &hdev->vport[i].roce;
2904 ret = client->ops->reset_notify(handle, type);
2906 dev_err(&hdev->pdev->dev,
2907 "notify roce client failed %d(%d)",
2916 static int hclge_reset_wait(struct hclge_dev *hdev)
2918 #define HCLGE_RESET_WAIT_MS 100
2919 #define HCLGE_RESET_WAIT_CNT 200
2920 u32 val, reg, reg_bit;
2923 switch (hdev->reset_type) {
2924 case HNAE3_IMP_RESET:
2925 reg = HCLGE_GLOBAL_RESET_REG;
2926 reg_bit = HCLGE_IMP_RESET_BIT;
2928 case HNAE3_GLOBAL_RESET:
2929 reg = HCLGE_GLOBAL_RESET_REG;
2930 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2932 case HNAE3_FUNC_RESET:
2933 reg = HCLGE_FUN_RST_ING;
2934 reg_bit = HCLGE_FUN_RST_ING_B;
2936 case HNAE3_FLR_RESET:
2939 dev_err(&hdev->pdev->dev,
2940 "Wait for unsupported reset type: %d\n",
2945 if (hdev->reset_type == HNAE3_FLR_RESET) {
2946 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2947 cnt++ < HCLGE_RESET_WAIT_CNT)
2948 msleep(HCLGE_RESET_WAIT_MS);
2950 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2951 dev_err(&hdev->pdev->dev,
2952 "flr wait timeout: %d\n", cnt);
2959 val = hclge_read_dev(&hdev->hw, reg);
2960 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2961 msleep(HCLGE_RESET_WAIT_MS);
2962 val = hclge_read_dev(&hdev->hw, reg);
2966 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2967 dev_warn(&hdev->pdev->dev,
2968 "Wait for reset timeout: %d\n", hdev->reset_type);
2975 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2977 struct hclge_vf_rst_cmd *req;
2978 struct hclge_desc desc;
2980 req = (struct hclge_vf_rst_cmd *)desc.data;
2981 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2982 req->dest_vfid = func_id;
2987 return hclge_cmd_send(&hdev->hw, &desc, 1);
2990 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2994 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2995 struct hclge_vport *vport = &hdev->vport[i];
2998 /* Send cmd to set/clear VF's FUNC_RST_ING */
2999 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3001 dev_err(&hdev->pdev->dev,
3002 "set vf(%d) rst failed %d!\n",
3003 vport->vport_id, ret);
3007 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3010 /* Inform VF to process the reset.
3011 * hclge_inform_reset_assert_to_vf may fail if VF
3012 * driver is not loaded.
3014 ret = hclge_inform_reset_assert_to_vf(vport);
3016 dev_warn(&hdev->pdev->dev,
3017 "inform reset to vf(%d) failed %d!\n",
3018 vport->vport_id, ret);
3024 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3026 struct hclge_desc desc;
3027 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3030 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3031 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3032 req->fun_reset_vfid = func_id;
3034 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3036 dev_err(&hdev->pdev->dev,
3037 "send function reset cmd fail, status =%d\n", ret);
3042 static void hclge_do_reset(struct hclge_dev *hdev)
3044 struct hnae3_handle *handle = &hdev->vport[0].nic;
3045 struct pci_dev *pdev = hdev->pdev;
3048 if (hclge_get_hw_reset_stat(handle)) {
3049 dev_info(&pdev->dev, "Hardware reset not finished\n");
3050 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3051 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3052 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3056 switch (hdev->reset_type) {
3057 case HNAE3_GLOBAL_RESET:
3058 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3059 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3060 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3061 dev_info(&pdev->dev, "Global Reset requested\n");
3063 case HNAE3_FUNC_RESET:
3064 dev_info(&pdev->dev, "PF Reset requested\n");
3065 /* schedule again to check later */
3066 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3067 hclge_reset_task_schedule(hdev);
3069 case HNAE3_FLR_RESET:
3070 dev_info(&pdev->dev, "FLR requested\n");
3071 /* schedule again to check later */
3072 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3073 hclge_reset_task_schedule(hdev);
3076 dev_warn(&pdev->dev,
3077 "Unsupported reset type: %d\n", hdev->reset_type);
3082 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3083 unsigned long *addr)
3085 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3087 /* first, resolve any unknown reset type to the known type(s) */
3088 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3089 /* we will intentionally ignore any errors from this function
3090 * as we will end up in *some* reset request in any case
3092 hclge_handle_hw_msix_error(hdev, addr);
3093 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3094 /* We deferred the clearing of the error event which caused the
3095 * interrupt, since it was not possible to do that in
3096 * interrupt context (and this is the reason we introduced the
3097 * new UNKNOWN reset type). Now that the errors have been
3098 * handled and cleared in hardware, we can safely enable
3099 * interrupts. This is an exception to the norm.
3101 hclge_enable_vector(&hdev->misc_vector, true);
3104 /* return the highest priority reset level amongst all */
3105 if (test_bit(HNAE3_IMP_RESET, addr)) {
3106 rst_level = HNAE3_IMP_RESET;
3107 clear_bit(HNAE3_IMP_RESET, addr);
3108 clear_bit(HNAE3_GLOBAL_RESET, addr);
3109 clear_bit(HNAE3_FUNC_RESET, addr);
3110 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3111 rst_level = HNAE3_GLOBAL_RESET;
3112 clear_bit(HNAE3_GLOBAL_RESET, addr);
3113 clear_bit(HNAE3_FUNC_RESET, addr);
3114 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3115 rst_level = HNAE3_FUNC_RESET;
3116 clear_bit(HNAE3_FUNC_RESET, addr);
3117 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3118 rst_level = HNAE3_FLR_RESET;
3119 clear_bit(HNAE3_FLR_RESET, addr);
3122 if (hdev->reset_type != HNAE3_NONE_RESET &&
3123 rst_level < hdev->reset_type)
3124 return HNAE3_NONE_RESET;
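/* Example of the priority resolution above: if both HNAE3_GLOBAL_RESET
 * and HNAE3_FUNC_RESET are pending in *addr, GLOBAL wins and the FUNC
 * bit is cleared too, since a global reset implicitly covers every
 * function; the final check then downgrades the result to NONE when a
 * higher-level reset (hdev->reset_type) is already being handled.
 */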
3129 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3133 switch (hdev->reset_type) {
3134 case HNAE3_IMP_RESET:
3135 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3137 case HNAE3_GLOBAL_RESET:
3138 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3147 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3148 hclge_enable_vector(&hdev->misc_vector, true);
3151 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3155 switch (hdev->reset_type) {
3156 case HNAE3_FUNC_RESET:
3158 case HNAE3_FLR_RESET:
3159 ret = hclge_set_all_vf_rst(hdev, true);
3168 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3170 #define HCLGE_RESET_SYNC_TIME 100
3175 switch (hdev->reset_type) {
3176 case HNAE3_FUNC_RESET:
3177 /* There is no mechanism for the PF to know if the VF has stopped IO;
3178 * for now, just wait 100 ms for the VF to stop IO
3180 msleep(HCLGE_RESET_SYNC_TIME);
3181 ret = hclge_func_reset_cmd(hdev, 0);
3183 dev_err(&hdev->pdev->dev,
3184 "asserting function reset fail %d!\n", ret);
3188 /* After performing PF reset, it is not necessary to do the
3189 * mailbox handling or send any command to firmware, because
3190 * any mailbox handling or command to firmware is only valid
3191 * after hclge_cmd_init is called.
3193 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3194 hdev->rst_stats.pf_rst_cnt++;
3196 case HNAE3_FLR_RESET:
3197 /* There is no mechanism for the PF to know if the VF has stopped IO;
3198 * for now, just wait 100 ms for the VF to stop IO
3200 msleep(HCLGE_RESET_SYNC_TIME);
3201 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3202 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3203 hdev->rst_stats.flr_rst_cnt++;
3205 case HNAE3_IMP_RESET:
3206 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3207 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3208 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3214 /* inform hardware that preparatory work is done */
3215 msleep(HCLGE_RESET_SYNC_TIME);
3216 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3217 HCLGE_NIC_CMQ_ENABLE);
3218 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3223 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3225 #define MAX_RESET_FAIL_CNT 5
3227 if (hdev->reset_pending) {
3228 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3229 hdev->reset_pending);
3231 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3232 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3233 BIT(HCLGE_IMP_RESET_BIT))) {
3234 dev_info(&hdev->pdev->dev,
3235 "reset failed because IMP Reset is pending\n");
3236 hclge_clear_reset_cause(hdev);
3238 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3239 hdev->reset_fail_cnt++;
3241 set_bit(hdev->reset_type, &hdev->reset_pending);
3242 dev_info(&hdev->pdev->dev,
3243 "re-schedule to wait for hw reset done\n");
3247 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3248 hclge_clear_reset_cause(hdev);
3249 mod_timer(&hdev->reset_timer,
3250 jiffies + HCLGE_RESET_INTERVAL);
3255 hclge_clear_reset_cause(hdev);
3256 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3260 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3264 switch (hdev->reset_type) {
3265 case HNAE3_FUNC_RESET:
3267 case HNAE3_FLR_RESET:
3268 ret = hclge_set_all_vf_rst(hdev, false);
3277 static int hclge_reset_stack(struct hclge_dev *hdev)
3281 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3285 ret = hclge_reset_ae_dev(hdev->ae_dev);
3289 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3293 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3296 static void hclge_reset(struct hclge_dev *hdev)
3298 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3299 bool is_timeout = false;
3302 /* Initialize ae_dev reset status as well, in case enet layer wants to
3303 * know if the device is undergoing reset
3305 ae_dev->reset_type = hdev->reset_type;
3306 hdev->rst_stats.reset_cnt++;
3307 /* perform reset of the stack & ae device for a client */
3308 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3312 ret = hclge_reset_prepare_down(hdev);
3317 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3319 goto err_reset_lock;
3323 ret = hclge_reset_prepare_wait(hdev);
3327 if (hclge_reset_wait(hdev)) {
3332 hdev->rst_stats.hw_reset_done_cnt++;
3334 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3340 ret = hclge_reset_stack(hdev);
3342 goto err_reset_lock;
3344 hclge_clear_reset_cause(hdev);
3346 ret = hclge_reset_prepare_up(hdev);
3348 goto err_reset_lock;
3352 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3353 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 times */
3356 if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3361 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3363 goto err_reset_lock;
3367 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3371 hdev->last_reset_time = jiffies;
3372 hdev->reset_fail_cnt = 0;
3373 hdev->rst_stats.reset_done_cnt++;
3374 ae_dev->reset_type = HNAE3_NONE_RESET;
3375 del_timer(&hdev->reset_timer);
3382 if (hclge_reset_err_handle(hdev, is_timeout))
3383 hclge_reset_task_schedule(hdev);
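/* High-level shape of hclge_reset() above: notify clients DOWN ->
 * prepare and assert the reset -> wait for hardware completion ->
 * rebuild the stack (uninit/init/restore clients) -> notify clients
 * UP. On any failure, hclge_reset_err_handle() decides between
 * re-scheduling the reset task and giving up.
 */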
3386 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3388 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3389 struct hclge_dev *hdev = ae_dev->priv;
3391 /* We might end up getting called broadly because of 2 below cases:
3392 * 1. Recoverable error was conveyed through APEI and only way to bring
3393 * normalcy is to reset.
3394 * 2. A new reset request from the stack due to timeout
3396 * For the first case, the error event might not have an ae handle available.
3397 * check if this is a new reset request and we are not here just because
3398 * last reset attempt did not succeed and watchdog hit us again. We will
3399 * know this if last reset request did not occur very recently (watchdog
3400 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ)
3401 * In case of new request we reset the "reset level" to PF reset.
3402 * And if it is a repeat reset request of the most recent one then we
3403 * want to make sure we throttle the reset request. Therefore, we will
3404 * not allow it again before 3*HZ times.
3407 handle = &hdev->vport[0].nic;
3409 if (time_before(jiffies, (hdev->last_reset_time +
3410 HCLGE_RESET_INTERVAL)))
3412 else if (hdev->default_reset_request)
3414 hclge_get_reset_level(hdev,
3415 &hdev->default_reset_request);
3416 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3417 hdev->reset_level = HNAE3_FUNC_RESET;
3419 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3422 /* request reset & schedule reset task */
3423 set_bit(hdev->reset_level, &hdev->reset_request);
3424 hclge_reset_task_schedule(hdev);
3426 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3427 hdev->reset_level++;
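/* Escalation policy above: each accepted request bumps
 * hdev->reset_level one step (up to HNAE3_GLOBAL_RESET), so a PF reset
 * that keeps failing is retried at a wider scope next time, while the
 * time_before() check throttles requests arriving within
 * HCLGE_RESET_INTERVAL of the previous one.
 */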
3430 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3431 enum hnae3_reset_type rst_type)
3433 struct hclge_dev *hdev = ae_dev->priv;
3435 set_bit(rst_type, &hdev->default_reset_request);
3438 static void hclge_reset_timer(struct timer_list *t)
3440 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3442 dev_info(&hdev->pdev->dev,
3443 "triggering global reset in reset timer\n");
3444 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3445 hclge_reset_event(hdev->pdev, NULL);
3448 static void hclge_reset_subtask(struct hclge_dev *hdev)
3450 /* check if there is any ongoing reset in the hardware. This status can
3451 * be checked from reset_pending. If there is, then we need to wait for
3452 * hardware to complete reset.
3453 * a. If we are able to figure out in reasonable time that hardware
3454 * has fully reset, then we can proceed with driver, client
3456 * b. else, we can come back later to check this status so re-sched
3459 hdev->last_reset_time = jiffies;
3460 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3461 if (hdev->reset_type != HNAE3_NONE_RESET)
3464 /* check if we got any *new* reset requests to be honored */
3465 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3466 if (hdev->reset_type != HNAE3_NONE_RESET)
3467 hclge_do_reset(hdev);
3469 hdev->reset_type = HNAE3_NONE_RESET;
3472 static void hclge_reset_service_task(struct work_struct *work)
3474 struct hclge_dev *hdev =
3475 container_of(work, struct hclge_dev, rst_service_task);
3477 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3480 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3482 hclge_reset_subtask(hdev);
3484 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3487 static void hclge_mailbox_service_task(struct work_struct *work)
3489 struct hclge_dev *hdev =
3490 container_of(work, struct hclge_dev, mbx_service_task);
3492 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3495 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3497 hclge_mbx_handler(hdev);
3499 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3502 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3506 /* start from vport 1 since the PF (vport 0) is always alive */
3507 for (i = 1; i < hdev->num_alloc_vport; i++) {
3508 struct hclge_vport *vport = &hdev->vport[i];
3510 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3511 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3513 /* If vf is not alive, set to default value */
3514 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3515 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3519 static void hclge_service_task(struct work_struct *work)
3521 struct hclge_dev *hdev =
3522 container_of(work, struct hclge_dev, service_task);
3524 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3525 hclge_update_stats_for_all(hdev);
3526 hdev->hw_stats.stats_timer = 0;
3529 hclge_update_port_info(hdev);
3530 hclge_update_link_status(hdev);
3531 hclge_update_vport_alive(hdev);
3532 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3533 hclge_rfs_filter_expire(hdev);
3534 hdev->fd_arfs_expire_timer = 0;
3536 hclge_service_complete(hdev);
3539 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3541 /* VF handle has no client */
3542 if (!handle->client)
3543 return container_of(handle, struct hclge_vport, nic);
3544 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3545 return container_of(handle, struct hclge_vport, roce);
3547 return container_of(handle, struct hclge_vport, nic);
3550 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3551 struct hnae3_vector_info *vector_info)
3553 struct hclge_vport *vport = hclge_get_vport(handle);
3554 struct hnae3_vector_info *vector = vector_info;
3555 struct hclge_dev *hdev = vport->back;
3559 vector_num = min(hdev->num_msi_left, vector_num);
3561 for (j = 0; j < vector_num; j++) {
3562 for (i = 1; i < hdev->num_msi; i++) {
3563 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3564 vector->vector = pci_irq_vector(hdev->pdev, i);
3565 vector->io_addr = hdev->hw.io_base +
3566 HCLGE_VECTOR_REG_BASE +
3567 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3569 HCLGE_VECTOR_VF_OFFSET;
3570 hdev->vector_status[i] = vport->vport_id;
3571 hdev->vector_irq[i] = vector->vector;
3580 hdev->num_msi_left -= alloc;
3581 hdev->num_msi_used += alloc;
3586 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3590 for (i = 0; i < hdev->num_msi; i++)
3591 if (vector == hdev->vector_irq[i])
3597 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3599 struct hclge_vport *vport = hclge_get_vport(handle);
3600 struct hclge_dev *hdev = vport->back;
3603 vector_id = hclge_get_vector_index(hdev, vector);
3604 if (vector_id < 0) {
3605 dev_err(&hdev->pdev->dev,
3606 "Get vector index fail. vector_id =%d\n", vector_id);
3610 hclge_free_vector(hdev, vector_id);
3615 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3617 return HCLGE_RSS_KEY_SIZE;
3620 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3622 return HCLGE_RSS_IND_TBL_SIZE;
3625 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3626 const u8 hfunc, const u8 *key)
3628 struct hclge_rss_config_cmd *req;
3629 struct hclge_desc desc;
3635 key_counts = HCLGE_RSS_KEY_SIZE;
3636 req = (struct hclge_rss_config_cmd *)desc.data;
3638 while (key_counts) {
3639 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3642 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3643 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3645 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3646 memcpy(req->hash_key,
3647 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3649 key_counts -= key_size;
3651 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3653 dev_err(&hdev->pdev->dev,
3654 "Configure RSS config fail, status = %d\n",
3662 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3664 struct hclge_rss_indirection_table_cmd *req;
3665 struct hclge_desc desc;
3669 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3671 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3672 hclge_cmd_setup_basic_desc
3673 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3675 req->start_table_index =
3676 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3677 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3679 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3680 req->rss_result[j] =
3681 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3683 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3685 dev_err(&hdev->pdev->dev,
3686 "Configure rss indir table fail,status = %d\n",
3694 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3695 u16 *tc_size, u16 *tc_offset)
3697 struct hclge_rss_tc_mode_cmd *req;
3698 struct hclge_desc desc;
3702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3703 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3705 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3708 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3709 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3710 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3711 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3712 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3714 req->rss_tc_mode[i] = cpu_to_le16(mode);
3717 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3719 dev_err(&hdev->pdev->dev,
3720 "Configure rss tc mode fail, status = %d\n", ret);
3725 static void hclge_get_rss_type(struct hclge_vport *vport)
3727 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3728 vport->rss_tuple_sets.ipv4_udp_en ||
3729 vport->rss_tuple_sets.ipv4_sctp_en ||
3730 vport->rss_tuple_sets.ipv6_tcp_en ||
3731 vport->rss_tuple_sets.ipv6_udp_en ||
3732 vport->rss_tuple_sets.ipv6_sctp_en)
3733 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3734 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3735 vport->rss_tuple_sets.ipv6_fragment_en)
3736 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3738 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3741 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3743 struct hclge_rss_input_tuple_cmd *req;
3744 struct hclge_desc desc;
3747 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3749 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3751 /* Get the tuple cfg from pf */
3752 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3753 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3754 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3755 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3756 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3757 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3758 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3759 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3760 hclge_get_rss_type(&hdev->vport[0]);
3761 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3763 dev_err(&hdev->pdev->dev,
3764 "Configure rss input fail, status = %d\n", ret);
3768 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3771 struct hclge_vport *vport = hclge_get_vport(handle);
3774 /* Get hash algorithm */
3776 switch (vport->rss_algo) {
3777 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3778 *hfunc = ETH_RSS_HASH_TOP;
3780 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3781 *hfunc = ETH_RSS_HASH_XOR;
3784 *hfunc = ETH_RSS_HASH_UNKNOWN;
3789 /* Get the RSS Key required by the user */
3791 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3793 /* Get indirect table */
3795 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3796 indir[i] = vport->rss_indirection_tbl[i];
3801 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3802 const u8 *key, const u8 hfunc)
3804 struct hclge_vport *vport = hclge_get_vport(handle);
3805 struct hclge_dev *hdev = vport->back;
3809 /* Set the RSS Hash Key if specified by the user */
3812 case ETH_RSS_HASH_TOP:
3813 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3815 case ETH_RSS_HASH_XOR:
3816 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3818 case ETH_RSS_HASH_NO_CHANGE:
3819 hash_algo = vport->rss_algo;
3825 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3829 /* Update the shadow RSS key with the user-specified key */
3830 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3831 vport->rss_algo = hash_algo;
3834 /* Update the shadow RSS table with user specified qids */
3835 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3836 vport->rss_indirection_tbl[i] = indir[i];
3838 /* Update the hardware */
3839 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3842 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3844 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3846 if (nfc->data & RXH_L4_B_2_3)
3847 hash_sets |= HCLGE_D_PORT_BIT;
3849 hash_sets &= ~HCLGE_D_PORT_BIT;
3851 if (nfc->data & RXH_IP_SRC)
3852 hash_sets |= HCLGE_S_IP_BIT;
3854 hash_sets &= ~HCLGE_S_IP_BIT;
3856 if (nfc->data & RXH_IP_DST)
3857 hash_sets |= HCLGE_D_IP_BIT;
3859 hash_sets &= ~HCLGE_D_IP_BIT;
3861 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3862 hash_sets |= HCLGE_V_TAG_BIT;
3867 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3868 struct ethtool_rxnfc *nfc)
3870 struct hclge_vport *vport = hclge_get_vport(handle);
3871 struct hclge_dev *hdev = vport->back;
3872 struct hclge_rss_input_tuple_cmd *req;
3873 struct hclge_desc desc;
3877 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3878 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3881 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3882 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3884 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3885 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3886 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3887 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3888 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3889 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3890 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3891 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3893 tuple_sets = hclge_get_rss_hash_bits(nfc);
3894 switch (nfc->flow_type) {
3896 req->ipv4_tcp_en = tuple_sets;
3899 req->ipv6_tcp_en = tuple_sets;
3902 req->ipv4_udp_en = tuple_sets;
3905 req->ipv6_udp_en = tuple_sets;
3908 req->ipv4_sctp_en = tuple_sets;
3911 if ((nfc->data & RXH_L4_B_0_1) ||
3912 (nfc->data & RXH_L4_B_2_3))
3915 req->ipv6_sctp_en = tuple_sets;
3918 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3921 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3927 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3929 dev_err(&hdev->pdev->dev,
3930 "Set rss tuple fail, status = %d\n", ret);
3934 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3935 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3936 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3937 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3938 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3939 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3940 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3941 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3942 hclge_get_rss_type(vport);
3946 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3947 struct ethtool_rxnfc *nfc)
3949 struct hclge_vport *vport = hclge_get_vport(handle);
3954 switch (nfc->flow_type) {
3956 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3959 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3962 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3965 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3968 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3971 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3975 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3984 if (tuple_sets & HCLGE_D_PORT_BIT)
3985 nfc->data |= RXH_L4_B_2_3;
3986 if (tuple_sets & HCLGE_S_PORT_BIT)
3987 nfc->data |= RXH_L4_B_0_1;
3988 if (tuple_sets & HCLGE_D_IP_BIT)
3989 nfc->data |= RXH_IP_DST;
3990 if (tuple_sets & HCLGE_S_IP_BIT)
3991 nfc->data |= RXH_IP_SRC;
3996 static int hclge_get_tc_size(struct hnae3_handle *handle)
3998 struct hclge_vport *vport = hclge_get_vport(handle);
3999 struct hclge_dev *hdev = vport->back;
4001 return hdev->rss_size_max;
4004 int hclge_rss_init_hw(struct hclge_dev *hdev)
4006 struct hclge_vport *vport = hdev->vport;
4007 u8 *rss_indir = vport[0].rss_indirection_tbl;
4008 u16 rss_size = vport[0].alloc_rss_size;
4009 u8 *key = vport[0].rss_hash_key;
4010 u8 hfunc = vport[0].rss_algo;
4011 u16 tc_offset[HCLGE_MAX_TC_NUM];
4012 u16 tc_valid[HCLGE_MAX_TC_NUM];
4013 u16 tc_size[HCLGE_MAX_TC_NUM];
4017 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4021 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4025 ret = hclge_set_rss_input_tuple(hdev);
4029 /* Each TC has the same queue size, and the tc_size set to hardware is
4030 * the log2 of the roundup power of two of rss_size; the actual queue
4031 * size is limited by the indirection table.
4033 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4034 dev_err(&hdev->pdev->dev,
4035 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4040 roundup_size = roundup_pow_of_two(rss_size);
4041 roundup_size = ilog2(roundup_size);
4043 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4046 if (!(hdev->hw_tc_map & BIT(i)))
4050 tc_size[i] = roundup_size;
4051 tc_offset[i] = rss_size * i;
4054 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
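/* Worked example for the TC sizing above, assuming rss_size = 16 with
 * TCs 0 and 1 set in hw_tc_map: roundup_pow_of_two(16) = 16 and
 * ilog2(16) = 4, so both TCs get tc_size = 4 (i.e. 2^4 queues), with
 * tc_offset[0] = 0 and tc_offset[1] = 16.
 */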
4057 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4059 struct hclge_vport *vport = hdev->vport;
4062 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4063 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4064 vport[j].rss_indirection_tbl[i] =
4065 i % vport[j].alloc_rss_size;
4069 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4071 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4072 struct hclge_vport *vport = hdev->vport;
4074 if (hdev->pdev->revision >= 0x21)
4075 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4077 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4078 vport[i].rss_tuple_sets.ipv4_tcp_en =
4079 HCLGE_RSS_INPUT_TUPLE_OTHER;
4080 vport[i].rss_tuple_sets.ipv4_udp_en =
4081 HCLGE_RSS_INPUT_TUPLE_OTHER;
4082 vport[i].rss_tuple_sets.ipv4_sctp_en =
4083 HCLGE_RSS_INPUT_TUPLE_SCTP;
4084 vport[i].rss_tuple_sets.ipv4_fragment_en =
4085 HCLGE_RSS_INPUT_TUPLE_OTHER;
4086 vport[i].rss_tuple_sets.ipv6_tcp_en =
4087 HCLGE_RSS_INPUT_TUPLE_OTHER;
4088 vport[i].rss_tuple_sets.ipv6_udp_en =
4089 HCLGE_RSS_INPUT_TUPLE_OTHER;
4090 vport[i].rss_tuple_sets.ipv6_sctp_en =
4091 HCLGE_RSS_INPUT_TUPLE_SCTP;
4092 vport[i].rss_tuple_sets.ipv6_fragment_en =
4093 HCLGE_RSS_INPUT_TUPLE_OTHER;
4095 vport[i].rss_algo = rss_algo;
4097 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4098 HCLGE_RSS_KEY_SIZE);
4101 hclge_rss_indir_init_cfg(hdev);
4104 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4105 int vector_id, bool en,
4106 struct hnae3_ring_chain_node *ring_chain)
4108 struct hclge_dev *hdev = vport->back;
4109 struct hnae3_ring_chain_node *node;
4110 struct hclge_desc desc;
4111 struct hclge_ctrl_vector_chain_cmd *req
4112 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4113 enum hclge_cmd_status status;
4114 enum hclge_opcode_type op;
4115 u16 tqp_type_and_id;
4118 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4119 hclge_cmd_setup_basic_desc(&desc, op, false);
4120 req->int_vector_id = vector_id;
4123 for (node = ring_chain; node; node = node->next) {
4124 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4125 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4127 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4128 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4129 HCLGE_TQP_ID_S, node->tqp_index);
4130 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4132 hnae3_get_field(node->int_gl_idx,
4133 HNAE3_RING_GL_IDX_M,
4134 HNAE3_RING_GL_IDX_S));
4135 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4136 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4137 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4138 req->vfid = vport->vport_id;
4140 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4142 dev_err(&hdev->pdev->dev,
4143 "Map TQP fail, status is %d.\n",
4149 hclge_cmd_setup_basic_desc(&desc,
4152 req->int_vector_id = vector_id;
4157 req->int_cause_num = i;
4158 req->vfid = vport->vport_id;
4159 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4161 dev_err(&hdev->pdev->dev,
4162 "Map TQP fail, status is %d.\n", status);
4170 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4172 struct hnae3_ring_chain_node *ring_chain)
4174 struct hclge_vport *vport = hclge_get_vport(handle);
4175 struct hclge_dev *hdev = vport->back;
4178 vector_id = hclge_get_vector_index(hdev, vector);
4179 if (vector_id < 0) {
4180 dev_err(&hdev->pdev->dev,
4181 "Get vector index fail. vector_id =%d\n", vector_id);
4185 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4188 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4190 struct hnae3_ring_chain_node *ring_chain)
4192 struct hclge_vport *vport = hclge_get_vport(handle);
4193 struct hclge_dev *hdev = vport->back;
4196 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4199 vector_id = hclge_get_vector_index(hdev, vector);
4200 if (vector_id < 0) {
4201 dev_err(&handle->pdev->dev,
4202 "Get vector index fail. ret =%d\n", vector_id);
4206 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4208 dev_err(&handle->pdev->dev,
4209 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4216 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4217 struct hclge_promisc_param *param)
4219 struct hclge_promisc_cfg_cmd *req;
4220 struct hclge_desc desc;
4223 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4225 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4226 req->vf_id = param->vf_id;
4228 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4229 * pdev revision(0x20); newer revisions support them. Setting
4230 * these two fields does not return an error when the driver
4231 * sends the command to firmware on revision(0x20).
4233 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4234 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4236 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4238 dev_err(&hdev->pdev->dev,
4239 "Set promisc mode fail, status is %d.\n", ret);
4244 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4245 bool en_mc, bool en_bc, int vport_id)
4250 memset(param, 0, sizeof(struct hclge_promisc_param));
4252 param->enable = HCLGE_PROMISC_EN_UC;
4254 param->enable |= HCLGE_PROMISC_EN_MC;
4256 param->enable |= HCLGE_PROMISC_EN_BC;
4257 param->vf_id = vport_id;
4260 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4263 struct hclge_vport *vport = hclge_get_vport(handle);
4264 struct hclge_dev *hdev = vport->back;
4265 struct hclge_promisc_param param;
4266 bool en_bc_pmc = true;
4268 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4269 * is always bypassed. So broadcast promisc should be disabled until
4270 * the user enables promisc mode
4272 if (handle->pdev->revision == 0x20)
4273 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4275 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4277 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4280 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4282 struct hclge_get_fd_mode_cmd *req;
4283 struct hclge_desc desc;
4286 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4288 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4290 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4292 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4296 *fd_mode = req->mode;
4301 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4302 u32 *stage1_entry_num,
4303 u32 *stage2_entry_num,
4304 u16 *stage1_counter_num,
4305 u16 *stage2_counter_num)
4307 struct hclge_get_fd_allocation_cmd *req;
4308 struct hclge_desc desc;
4311 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4313 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4315 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4317 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4322 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4323 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4324 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4325 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4330 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4332 struct hclge_set_fd_key_config_cmd *req;
4333 struct hclge_fd_key_cfg *stage;
4334 struct hclge_desc desc;
4337 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4339 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4340 stage = &hdev->fd_cfg.key_cfg[stage_num];
4341 req->stage = stage_num;
4342 req->key_select = stage->key_sel;
4343 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4344 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4345 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4346 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4347 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4348 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4350 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4352 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4357 static int hclge_init_fd_config(struct hclge_dev *hdev)
4359 #define LOW_2_WORDS 0x03
4360 struct hclge_fd_key_cfg *key_cfg;
4363 if (!hnae3_dev_fd_supported(hdev))
4366 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4370 switch (hdev->fd_cfg.fd_mode) {
4371 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4372 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4374 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4375 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4378 dev_err(&hdev->pdev->dev,
4379 "Unsupported flow director mode %d\n",
4380 hdev->fd_cfg.fd_mode);
4384 hdev->fd_cfg.proto_support =
4385 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4386 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4387 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4388 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4389 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4390 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4391 key_cfg->outer_sipv6_word_en = 0;
4392 key_cfg->outer_dipv6_word_en = 0;
4394 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4395 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4396 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4397 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4399 /* If using the max 400-bit key, we can support tuples for ether type */
4400 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4401 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4402 key_cfg->tuple_active |=
4403 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4406 /* roce_type is used to filter roce frames;
4407 * dst_vport is used to specify the rule
4409 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4411 ret = hclge_get_fd_allocation(hdev,
4412 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4413 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4414 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4415 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4419 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	/* key may be NULL when invalidating an entry */
	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->next_input_key);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}
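
/* Illustrative only: for a typical "forward to queue" action with
 * rule->location == 5 and rule->queue_id == 3, hclge_config_action() below
 * builds ad_data with drop_packet = 0, forward_to_direct_queue = 1,
 * queue_id = 3, write_rule_id_to_bd = 1 and rule_id = 5, so matched packets
 * land on RX queue 3 and carry rule id 5 in their buffer descriptor. The
 * exact bit positions are those encoded by the HCLGE_FD_AD_* masks above.
 */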
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}
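
/* Worked example of the X/Y TCAM encoding used above, assuming the
 * calc_x()/calc_y() helpers defined earlier in this file compute
 * x = value & mask and y = ~value & mask: for ip_proto value 0x06 (TCP)
 * with mask 0xFF, key_x = 0x06 and key_y = 0xF9; a fully masked-out tuple
 * (mask 0x00) yields x = y = 0, i.e. "don't care" bits in the TCAM.
 */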
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
/* A complete key is combined with the meta data key and the tuple key.
 * The meta data key is stored at the MSB region, the tuple key at the
 * LSB region, and unused bits are filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	int i, ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);
	return ret;
}
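
/* Key layout sketch (illustration, not authoritative): with a 400-bit key
 * (max_key_length / 8 == 50 bytes) and MAX_META_DATA_LENGTH == 32 bits,
 * meta_data_region is byte 46, so key_x/key_y look like
 *
 *	[ tuple key, packed from byte 0 ... unused = 0 ][ meta data ]
 *	byte 0                                          bytes 46..49
 *
 * The exact sizes depend on the MAX_* macros in hclge_main.h; the numbers
 * here only demonstrate the arithmetic above.
 */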
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		/* check whether src/dst ip address used */
		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		/* check whether src/dst ip address used */
		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}

static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}

/* the caller must hold hdev->fd_rule_lock */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d does not exist\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if (fs->flow_type & FLOW_EXT) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}

/* the caller must hold hdev->fd_rule_lock */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* this call never fails here, so there is no need to check its
	 * return value
	 */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%d) > max vf num (%d)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%d) > max tqp num (%d)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* when the user configures a rule via ethtool, we need to clear
	 * all arfs rules to avoid rule conflicts
	 */
	hclge_clear_arfs_rules(handle);

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
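
/* Example usage from user space (illustrative; "eth0" and the values are
 * placeholders): the ethtool flow below reaches this handler via the
 * ETHTOOL_SRXCLSRLINS ioctl and steers matching packets to the PF's RX
 * queue 3 at TCAM location 0:
 *
 *	ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.2 dst-port 80 \
 *		action 3 loc 0
 */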
static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d does not exist\n",
			fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				   fs->location, NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);
	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, should not restore it when reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %d failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				cpu_to_be16(VLAN_VID_MASK) :
				cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[IPV4_INDEX] =
				be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[IPV4_INDEX] =
				be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		memcpy(tuples->src_ip,
		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
		       sizeof(tuples->src_ip));
		memcpy(tuples->dst_ip,
		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
		       sizeof(tuples->dst_ip));
	}
}

/* traverse all rules and check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}

static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples;
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	memset(&new_tuples, 0, sizeof(new_tuples));
	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	spin_lock_bh(&hdev->fd_rule_lock);

	/* when there is already an fd rule added by the user,
	 * arfs should not work
	 */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -EOPNOTSUPP;
	}

	/* check whether a flow director filter exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOSPC;
		}

		/* fd_rule_lock is held, so the allocation must not sleep */
		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}

static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
	if (!enable)
		hclge_del_all_fd_entries(handle, clear);
	else
		hclge_restore_fd_entries(handle);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret = %d.\n", ret);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1. Read out the MAC mode config first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret = %d.\n", ret);
		return ret;
	}

	/* 2. Then set up the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3. Configure the MAC working mode with the loopback flag
	 * and its original configuration parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret = %d.\n", ret);
	return ret;
}

static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100

#define HCLGE_MAC_LINK_STATUS_MS   10
#define HCLGE_MAC_LINK_STATUS_NUM  100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP   1

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int mac_link_ret = 0;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
	} else {
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);

	i = 0;
	do {
		/* serdes internal loopback is independent of the network
		 * cable
		 */
		msleep(HCLGE_MAC_LINK_STATUS_MS);
		ret = hclge_get_mac_link_status(hdev);
		if (ret == mac_link_ret)
			return 0;
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");

	return -EBUSY;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status = %d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	/* mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1) {
			return_status = 0;
		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n",
			op);
	}

	return return_status;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
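
/* Worked example of the mapping above: vfid 40 stays within the first 192
 * function ids, so it lands in desc[1] at word_num = 40 / 32 = 1,
 * bit_num = 40 % 32 = 8; vfid 200 spills into desc[2] at
 * word_num = (200 - 192) / 32 = 0, bit_num = 200 % 32 = 8, i.e. bit 8 of
 * desc[2].data[0].
 */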
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
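
/* Packing example (illustration): for addr 00:11:22:33:44:55,
 * high_val = 0x22 << 16 | 0x33 << 24 | 0x00 | 0x11 << 8 = 0x33221100 and
 * low_val = 0x44 | 0x55 << 8 = 0x5544, so the first four address bytes go
 * to mac_addr_hi32 in byte-reversed order and the last two to
 * mac_addr_lo16.
 */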
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret = %d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret = %d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret = %d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %d, get %d\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;
	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
	 * preserve some unicast mac vlan table entries shared by pf
	 * and its vfs.
	 */
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);

	return 0;
}
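
/* Arithmetic example (hypothetical sizes): with max_umv_size = 256 and
 * num_req_vfs = 6, the divisor is 8 (PF + 6 VFs + 1 shared pool), so
 * priv_umv_size = 256 / 8 = 32 private entries per function and
 * share_umv_size starts at 32 + 256 % 8 = 32 entries, usable by any
 * function once its private quota is exhausted.
 */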
static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;

		/* free umv space */
		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}

static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	if (!is_alloc)
		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret = %d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}
6529 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6530 const unsigned char *addr)
6532 struct hclge_vport *vport = hclge_get_vport(handle);
6534 return hclge_add_uc_addr_common(vport, addr);
6537 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6538 const unsigned char *addr)
6540 struct hclge_dev *hdev = vport->back;
6541 struct hclge_mac_vlan_tbl_entry_cmd req;
6542 struct hclge_desc desc;
6543 u16 egress_port = 0;
6546 /* mac addr check */
6547 if (is_zero_ether_addr(addr) ||
6548 is_broadcast_ether_addr(addr) ||
6549 is_multicast_ether_addr(addr)) {
6550 dev_err(&hdev->pdev->dev,
6551 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6553 is_zero_ether_addr(addr),
6554 is_broadcast_ether_addr(addr),
6555 is_multicast_ether_addr(addr));
6559 memset(&req, 0, sizeof(req));
6561 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6562 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6564 req.egress_port = cpu_to_le16(egress_port);
6566 hclge_prepare_mac_addr(&req, addr, false);
6568 /* Lookup the mac address in the mac_vlan table, and add
6569 * it if the entry is inexistent. Repeated unicast entry
6570 * is not allowed in the mac vlan table.
6572 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6573 if (ret == -ENOENT) {
6574 if (!hclge_is_umv_space_full(vport)) {
6575 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6577 hclge_update_umv_space(vport, false);
6581 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6582 hdev->priv_umv_size);
6587 /* check if we just hit the duplicate */
6589 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6590 vport->vport_id, addr);
6594 dev_err(&hdev->pdev->dev,
6595 "PF failed to add unicast entry(%pM) in the MAC table\n",
6601 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6602 const unsigned char *addr)
6604 struct hclge_vport *vport = hclge_get_vport(handle);
6606 return hclge_rm_uc_addr_common(vport, addr);
6609 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6610 const unsigned char *addr)
6612 struct hclge_dev *hdev = vport->back;
6613 struct hclge_mac_vlan_tbl_entry_cmd req;
6616 /* mac addr check */
6617 if (is_zero_ether_addr(addr) ||
6618 is_broadcast_ether_addr(addr) ||
6619 is_multicast_ether_addr(addr)) {
6620 dev_dbg(&hdev->pdev->dev,
6621 "Remove mac err! invalid mac:%pM.\n",
6626 memset(&req, 0, sizeof(req));
6627 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6628 hclge_prepare_mac_addr(&req, addr, false);
6629 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6631 hclge_update_umv_space(vport, true);
6636 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6637 const unsigned char *addr)
6639 struct hclge_vport *vport = hclge_get_vport(handle);
6641 return hclge_add_mc_addr_common(vport, addr);
6644 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6645 const unsigned char *addr)
6647 struct hclge_dev *hdev = vport->back;
6648 struct hclge_mac_vlan_tbl_entry_cmd req;
6649 struct hclge_desc desc[3];
6652 /* mac addr check */
6653 if (!is_multicast_ether_addr(addr)) {
6654 dev_err(&hdev->pdev->dev,
6655 "Add mc mac err! invalid mac:%pM.\n",
6659 memset(&req, 0, sizeof(req));
6660 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6661 hclge_prepare_mac_addr(&req, addr, true);
6662 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6664 /* This mac addr do not exist, add new entry for it */
6665 memset(desc[0].data, 0, sizeof(desc[0].data));
6666 memset(desc[1].data, 0, sizeof(desc[0].data));
6667 memset(desc[2].data, 0, sizeof(desc[0].data));
6669 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6672 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6674 if (status == -ENOSPC)
6675 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6680 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6681 const unsigned char *addr)
6683 struct hclge_vport *vport = hclge_get_vport(handle);
6685 return hclge_rm_mc_addr_common(vport, addr);
6688 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6689 const unsigned char *addr)
6691 struct hclge_dev *hdev = vport->back;
6692 struct hclge_mac_vlan_tbl_entry_cmd req;
6693 enum hclge_cmd_status status;
6694 struct hclge_desc desc[3];
6696 /* mac addr check */
6697 if (!is_multicast_ether_addr(addr)) {
6698 dev_dbg(&hdev->pdev->dev,
6699 "Remove mc mac err! invalid mac:%pM.\n",
6704 memset(&req, 0, sizeof(req));
6705 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6706 hclge_prepare_mac_addr(&req, addr, true);
6707 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6709 /* This mac addr exists, remove this handle's VFID from it */
6710 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6714 if (hclge_is_all_function_id_zero(desc))
6715 /* All the vfids are zero, so delete this entry */
6716 status = hclge_remove_mac_vlan_tbl(vport, &req);
6718 /* Not all the vfids are zero, just update the vfid bitmap */
6719 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6722 /* This mac address may also be in the mta table, but it cannot
6723 * be deleted here because an mta entry represents an address
6724 * range rather than a specific address. The delete action for
6725 * all entries will take effect in update_mta_status, called by
6726 * hns3_nic_set_rx_mode.
6734 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6735 enum HCLGE_MAC_ADDR_TYPE mac_type)
6737 struct hclge_vport_mac_addr_cfg *mac_cfg;
6738 struct list_head *list;
6740 if (!vport->vport_id)
6743 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6747 mac_cfg->hd_tbl_status = true;
6748 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6750 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6751 &vport->uc_mac_list : &vport->mc_mac_list;
6753 list_add_tail(&mac_cfg->node, list);
6756 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6758 enum HCLGE_MAC_ADDR_TYPE mac_type)
6760 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6761 struct list_head *list;
6762 bool uc_flag, mc_flag;
6764 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6765 &vport->uc_mac_list : &vport->mc_mac_list;
6767 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6768 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6770 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6771 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6772 if (uc_flag && mac_cfg->hd_tbl_status)
6773 hclge_rm_uc_addr_common(vport, mac_addr);
6775 if (mc_flag && mac_cfg->hd_tbl_status)
6776 hclge_rm_mc_addr_common(vport, mac_addr);
6778 list_del(&mac_cfg->node);
6785 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6786 enum HCLGE_MAC_ADDR_TYPE mac_type)
6788 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6789 struct list_head *list;
6791 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6792 &vport->uc_mac_list : &vport->mc_mac_list;
6794 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6795 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6796 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6798 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6799 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6801 mac_cfg->hd_tbl_status = false;
6803 list_del(&mac_cfg->node);
6809 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6811 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6812 struct hclge_vport *vport;
6815 mutex_lock(&hdev->vport_cfg_mutex);
6816 for (i = 0; i < hdev->num_alloc_vport; i++) {
6817 vport = &hdev->vport[i];
6818 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6819 list_del(&mac->node);
6823 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6824 list_del(&mac->node);
6828 mutex_unlock(&hdev->vport_cfg_mutex);
6831 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6832 u16 cmdq_resp, u8 resp_code)
6834 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6835 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6836 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6837 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6842 dev_err(&hdev->pdev->dev,
6843 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6848 switch (resp_code) {
6849 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6850 case HCLGE_ETHERTYPE_ALREADY_ADD:
6853 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6854 dev_err(&hdev->pdev->dev,
6855 "add mac ethertype failed for manager table overflow.\n");
6856 return_status = -EIO;
6858 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6859 dev_err(&hdev->pdev->dev,
6860 "add mac ethertype failed for key conflict.\n");
6861 return_status = -EIO;
6864 dev_err(&hdev->pdev->dev,
6865 "add mac ethertype failed for undefined, code=%d.\n",
6867 return_status = -EIO;
6870 return return_status;
6873 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6874 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6876 struct hclge_desc desc;
6881 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6882 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6884 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6886 dev_err(&hdev->pdev->dev,
6887 "add mac ethertype failed for cmd_send, ret =%d.\n",
6892 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6893 retval = le16_to_cpu(desc.retval);
6895 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6898 static int init_mgr_tbl(struct hclge_dev *hdev)
6903 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6904 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6906 dev_err(&hdev->pdev->dev,
6907 "add mac ethertype failed, ret =%d.\n",
6916 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6918 struct hclge_vport *vport = hclge_get_vport(handle);
6919 struct hclge_dev *hdev = vport->back;
6921 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6924 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6927 const unsigned char *new_addr = (const unsigned char *)p;
6928 struct hclge_vport *vport = hclge_get_vport(handle);
6929 struct hclge_dev *hdev = vport->back;
6932 /* mac addr check */
6933 if (is_zero_ether_addr(new_addr) ||
6934 is_broadcast_ether_addr(new_addr) ||
6935 is_multicast_ether_addr(new_addr)) {
6936 dev_err(&hdev->pdev->dev,
6937 "Change uc mac err! invalid mac:%p.\n",
6942 if ((!is_first || is_kdump_kernel()) &&
6943 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6944 dev_warn(&hdev->pdev->dev,
6945 "remove old uc mac address fail.\n");
6947 ret = hclge_add_uc_addr(handle, new_addr);
6949 dev_err(&hdev->pdev->dev,
6950 "add uc mac address fail, ret =%d.\n",
6954 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6955 dev_err(&hdev->pdev->dev,
6956 "restore uc mac address fail.\n");
6961 ret = hclge_pause_addr_cfg(hdev, new_addr);
6963 dev_err(&hdev->pdev->dev,
6964 "configure mac pause address fail, ret =%d.\n",
6969 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6974 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6977 struct hclge_vport *vport = hclge_get_vport(handle);
6978 struct hclge_dev *hdev = vport->back;
6980 if (!hdev->hw.mac.phydev)
6983 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6986 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6987 u8 fe_type, bool filter_en, u8 vf_id)
6989 struct hclge_vlan_filter_ctrl_cmd *req;
6990 struct hclge_desc desc;
6993 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6995 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6996 req->vlan_type = vlan_type;
6997 req->vlan_fe = filter_en ? fe_type : 0;
7000 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7002 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7008 #define HCLGE_FILTER_TYPE_VF 0
7009 #define HCLGE_FILTER_TYPE_PORT 1
7010 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7011 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7012 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7013 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7014 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7015 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7016 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7017 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7018 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7020 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7022 struct hclge_vport *vport = hclge_get_vport(handle);
7023 struct hclge_dev *hdev = vport->back;
7025 if (hdev->pdev->revision >= 0x21) {
7026 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7027 HCLGE_FILTER_FE_EGRESS, enable, 0);
7028 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7029 HCLGE_FILTER_FE_INGRESS, enable, 0);
7031 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7032 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7036 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7038 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7041 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7042 bool is_kill, u16 vlan, u8 qos,
7045 #define HCLGE_MAX_VF_BYTES 16
7046 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7047 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7048 struct hclge_desc desc[2];
7053 /* If the vf vlan table is full, firmware will disable the vf vlan
7054 * filter; it is useless and unnecessary to add a new vlan id to it
7056 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7059 hclge_cmd_setup_basic_desc(&desc[0],
7060 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7061 hclge_cmd_setup_basic_desc(&desc[1],
7062 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7064 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
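/* The VF bitmap spans two descriptors: desc[0] holds the first
* HCLGE_MAX_VF_BYTES bytes and desc[1] the rest. vfid / 8 selects the
* byte and 1 << (vfid % 8) the bit for this VF within that bitmap.
*/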
7066 vf_byte_off = vfid / 8;
7067 vf_byte_val = 1 << (vfid % 8);
7069 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7070 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7072 req0->vlan_id = cpu_to_le16(vlan);
7073 req0->vlan_cfg = is_kill;
7075 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7076 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7078 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7080 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7082 dev_err(&hdev->pdev->dev,
7083 "Send vf vlan command fail, ret =%d.\n",
7089 #define HCLGE_VF_VLAN_NO_ENTRY 2
7090 if (!req0->resp_code || req0->resp_code == 1)
7093 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7094 set_bit(vfid, hdev->vf_vlan_full);
7095 dev_warn(&hdev->pdev->dev,
7096 "vf vlan table is full, vf vlan filter is disabled\n");
7100 dev_err(&hdev->pdev->dev,
7101 "Add vf vlan filter fail, ret =%d.\n",
7104 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7105 if (!req0->resp_code)
7108 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7109 dev_warn(&hdev->pdev->dev,
7110 "vlan %d filter is not in vf vlan table\n",
7115 dev_err(&hdev->pdev->dev,
7116 "Kill vf vlan filter fail, ret =%d.\n",
7123 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7124 u16 vlan_id, bool is_kill)
7126 struct hclge_vlan_filter_pf_cfg_cmd *req;
7127 struct hclge_desc desc;
7128 u8 vlan_offset_byte_val;
7129 u8 vlan_offset_byte;
7133 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
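/* Each PF VLAN filter command covers a block of 160 VLAN ids as a
* 20-byte bitmap: vlan_id / 160 selects the block, and the remainder
* picks the byte and the bit within that block's bitmap.
*/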
7135 vlan_offset_160 = vlan_id / 160;
7136 vlan_offset_byte = (vlan_id % 160) / 8;
7137 vlan_offset_byte_val = 1 << (vlan_id % 8);
7139 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7140 req->vlan_offset = vlan_offset_160;
7141 req->vlan_cfg = is_kill;
7142 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7144 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7146 dev_err(&hdev->pdev->dev,
7147 "port vlan command, send fail, ret =%d.\n", ret);
7151 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7152 u16 vport_id, u16 vlan_id, u8 qos,
7155 u16 vport_idx, vport_num = 0;
7158 if (is_kill && !vlan_id)
7159 return 0;
7161 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7164 dev_err(&hdev->pdev->dev,
7165 "Set %d vport vlan filter config fail, ret =%d.\n",
7170 /* vlan 0 may be added twice when 8021q module is enabled */
7171 if (!is_kill && !vlan_id &&
7172 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7175 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7176 dev_err(&hdev->pdev->dev,
7177 "Add port vlan failed, vport %d is already in vlan %d\n",
7183 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7184 dev_err(&hdev->pdev->dev,
7185 "Delete port vlan failed, vport %d is not in vlan %d\n",
7190 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7193 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7194 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7200 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7202 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7203 struct hclge_vport_vtag_tx_cfg_cmd *req;
7204 struct hclge_dev *hdev = vport->back;
7205 struct hclge_desc desc;
7208 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7210 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7211 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7212 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7213 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7214 vcfg->accept_tag1 ? 1 : 0);
7215 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7216 vcfg->accept_untag1 ? 1 : 0);
7217 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7218 vcfg->accept_tag2 ? 1 : 0);
7219 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7220 vcfg->accept_untag2 ? 1 : 0);
7221 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7222 vcfg->insert_tag1_en ? 1 : 0);
7223 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7224 vcfg->insert_tag2_en ? 1 : 0);
7225 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7227 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7228 req->vf_bitmap[req->vf_offset] =
7229 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7231 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7233 dev_err(&hdev->pdev->dev,
7234 "Send port txvlan cfg command fail, ret =%d\n",
7240 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7242 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7243 struct hclge_vport_vtag_rx_cfg_cmd *req;
7244 struct hclge_dev *hdev = vport->back;
7245 struct hclge_desc desc;
7248 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7250 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7251 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7252 vcfg->strip_tag1_en ? 1 : 0);
7253 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7254 vcfg->strip_tag2_en ? 1 : 0);
7255 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7256 vcfg->vlan1_vlan_prionly ? 1 : 0);
7257 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7258 vcfg->vlan2_vlan_prionly ? 1 : 0);
7260 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7261 req->vf_bitmap[req->vf_offset] =
7262 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7264 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7266 dev_err(&hdev->pdev->dev,
7267 "Send port rxvlan cfg command fail, ret =%d\n",
7273 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7274 u16 port_base_vlan_state,
7279 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7280 vport->txvlan_cfg.accept_tag1 = true;
7281 vport->txvlan_cfg.insert_tag1_en = false;
7282 vport->txvlan_cfg.default_tag1 = 0;
7284 vport->txvlan_cfg.accept_tag1 = false;
7285 vport->txvlan_cfg.insert_tag1_en = true;
7286 vport->txvlan_cfg.default_tag1 = vlan_tag;
7289 vport->txvlan_cfg.accept_untag1 = true;
7291 /* accept_tag2 and accept_untag2 are not supported on
7292 * pdev revision 0x20; newer revisions support them, but
7293 * these two fields cannot be configured by the user.
7295 vport->txvlan_cfg.accept_tag2 = true;
7296 vport->txvlan_cfg.accept_untag2 = true;
7297 vport->txvlan_cfg.insert_tag2_en = false;
7298 vport->txvlan_cfg.default_tag2 = 0;
7300 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7301 vport->rxvlan_cfg.strip_tag1_en = false;
7302 vport->rxvlan_cfg.strip_tag2_en =
7303 vport->rxvlan_cfg.rx_vlan_offload_en;
7305 vport->rxvlan_cfg.strip_tag1_en =
7306 vport->rxvlan_cfg.rx_vlan_offload_en;
7307 vport->rxvlan_cfg.strip_tag2_en = true;
7309 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7310 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7312 ret = hclge_set_vlan_tx_offload_cfg(vport);
7316 return hclge_set_vlan_rx_offload_cfg(vport);
7319 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7321 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7322 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7323 struct hclge_desc desc;
7326 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7327 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7328 rx_req->ot_fst_vlan_type =
7329 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7330 rx_req->ot_sec_vlan_type =
7331 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7332 rx_req->in_fst_vlan_type =
7333 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7334 rx_req->in_sec_vlan_type =
7335 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7337 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7339 dev_err(&hdev->pdev->dev,
7340 "Send rxvlan protocol type command fail, ret =%d\n",
7345 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7347 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7348 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7349 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7351 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7353 dev_err(&hdev->pdev->dev,
7354 "Send txvlan protocol type command fail, ret =%d\n",
7360 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7362 #define HCLGE_DEF_VLAN_TYPE 0x8100
7364 struct hnae3_handle *handle = &hdev->vport[0].nic;
7365 struct hclge_vport *vport;
7369 if (hdev->pdev->revision >= 0x21) {
7370 /* for revision 0x21, vf vlan filter is per function */
7371 for (i = 0; i < hdev->num_alloc_vport; i++) {
7372 vport = &hdev->vport[i];
7373 ret = hclge_set_vlan_filter_ctrl(hdev,
7374 HCLGE_FILTER_TYPE_VF,
7375 HCLGE_FILTER_FE_EGRESS,
7382 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7383 HCLGE_FILTER_FE_INGRESS, true,
7388 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7389 HCLGE_FILTER_FE_EGRESS_V1_B,
7395 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7397 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7398 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7399 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7400 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7401 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7402 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7404 ret = hclge_set_vlan_protocol_type(hdev);
7408 for (i = 0; i < hdev->num_alloc_vport; i++) {
7411 vport = &hdev->vport[i];
7412 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7414 ret = hclge_vlan_offload_cfg(vport,
7415 vport->port_base_vlan_cfg.state,
7421 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7424 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7427 struct hclge_vport_vlan_cfg *vlan;
7429 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7433 vlan->hd_tbl_status = writen_to_tbl;
7434 vlan->vlan_id = vlan_id;
7436 list_add_tail(&vlan->node, &vport->vlan_list);
7439 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7441 struct hclge_vport_vlan_cfg *vlan, *tmp;
7442 struct hclge_dev *hdev = vport->back;
7445 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7446 if (!vlan->hd_tbl_status) {
7447 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7449 vlan->vlan_id, 0, false);
7451 dev_err(&hdev->pdev->dev,
7452 "restore vport vlan list failed, ret=%d\n",
7457 vlan->hd_tbl_status = true;
7463 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7466 struct hclge_vport_vlan_cfg *vlan, *tmp;
7467 struct hclge_dev *hdev = vport->back;
7469 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7470 if (vlan->vlan_id == vlan_id) {
7471 if (is_write_tbl && vlan->hd_tbl_status)
7472 hclge_set_vlan_filter_hw(hdev,
7478 list_del(&vlan->node);
7485 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7487 struct hclge_vport_vlan_cfg *vlan, *tmp;
7488 struct hclge_dev *hdev = vport->back;
7490 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7491 if (vlan->hd_tbl_status)
7492 hclge_set_vlan_filter_hw(hdev,
7498 vlan->hd_tbl_status = false;
7500 list_del(&vlan->node);
7506 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7508 struct hclge_vport_vlan_cfg *vlan, *tmp;
7509 struct hclge_vport *vport;
7512 mutex_lock(&hdev->vport_cfg_mutex);
7513 for (i = 0; i < hdev->num_alloc_vport; i++) {
7514 vport = &hdev->vport[i];
7515 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7516 list_del(&vlan->node);
7520 mutex_unlock(&hdev->vport_cfg_mutex);
7523 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7525 struct hclge_vport *vport = hclge_get_vport(handle);
7526 struct hclge_vport_vlan_cfg *vlan, *tmp;
7527 struct hclge_dev *hdev = vport->back;
7528 u16 vlan_proto, qos;
7532 mutex_lock(&hdev->vport_cfg_mutex);
7533 for (i = 0; i < hdev->num_alloc_vport; i++) {
7534 vport = &hdev->vport[i];
7535 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7536 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7537 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7538 state = vport->port_base_vlan_cfg.state;
7540 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7541 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7542 vport->vport_id, vlan_id, qos,
7547 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7548 if (vlan->hd_tbl_status)
7549 hclge_set_vlan_filter_hw(hdev,
7557 mutex_unlock(&hdev->vport_cfg_mutex);
7560 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7562 struct hclge_vport *vport = hclge_get_vport(handle);
7564 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7565 vport->rxvlan_cfg.strip_tag1_en = false;
7566 vport->rxvlan_cfg.strip_tag2_en = enable;
7568 vport->rxvlan_cfg.strip_tag1_en = enable;
7569 vport->rxvlan_cfg.strip_tag2_en = true;
7571 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7572 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7573 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7575 return hclge_set_vlan_rx_offload_cfg(vport);
7578 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7579 u16 port_base_vlan_state,
7580 struct hclge_vlan_info *new_info,
7581 struct hclge_vlan_info *old_info)
7583 struct hclge_dev *hdev = vport->back;
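/* When enabling port based VLAN, drop the user-configured VLAN entries
* from hardware and install the new port VLAN; when disabling, remove
* the old port VLAN and restore the vport VLAN list to hardware.
*/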
7586 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7587 hclge_rm_vport_all_vlan_table(vport, false);
7588 return hclge_set_vlan_filter_hw(hdev,
7589 htons(new_info->vlan_proto),
7592 new_info->qos, false);
7595 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7596 vport->vport_id, old_info->vlan_tag,
7597 old_info->qos, true);
7601 return hclge_add_vport_all_vlan_table(vport);
7604 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7605 struct hclge_vlan_info *vlan_info)
7607 struct hnae3_handle *nic = &vport->nic;
7608 struct hclge_vlan_info *old_vlan_info;
7609 struct hclge_dev *hdev = vport->back;
7612 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7614 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7618 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7619 /* add new VLAN tag */
7620 ret = hclge_set_vlan_filter_hw(hdev,
7621 htons(vlan_info->vlan_proto),
7623 vlan_info->vlan_tag,
7624 vlan_info->qos, false);
7628 /* remove old VLAN tag */
7629 ret = hclge_set_vlan_filter_hw(hdev,
7630 htons(old_vlan_info->vlan_proto),
7632 old_vlan_info->vlan_tag,
7633 old_vlan_info->qos, true);
7640 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7645 /* update state only when disabling/enabling port based VLAN */
7646 vport->port_base_vlan_cfg.state = state;
7647 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7648 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7650 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7653 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7654 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7655 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7660 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7661 enum hnae3_port_base_vlan_state state,
7664 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7666 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7668 return HNAE3_PORT_BASE_VLAN_ENABLE;
7671 return HNAE3_PORT_BASE_VLAN_DISABLE;
7672 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7673 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7675 return HNAE3_PORT_BASE_VLAN_MODIFY;
7679 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7680 u16 vlan, u8 qos, __be16 proto)
7682 struct hclge_vport *vport = hclge_get_vport(handle);
7683 struct hclge_dev *hdev = vport->back;
7684 struct hclge_vlan_info vlan_info;
7688 if (hdev->pdev->revision == 0x20)
7691 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7692 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7694 if (proto != htons(ETH_P_8021Q))
7695 return -EPROTONOSUPPORT;
7697 vport = &hdev->vport[vfid];
7698 state = hclge_get_port_base_vlan_state(vport,
7699 vport->port_base_vlan_cfg.state,
7701 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7704 vlan_info.vlan_tag = vlan;
7705 vlan_info.qos = qos;
7706 vlan_info.vlan_proto = ntohs(proto);
7708 /* update port based VLAN for PF */
7710 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7711 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7712 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7717 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7718 return hclge_update_port_base_vlan_cfg(vport, state,
7721 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7729 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7730 u16 vlan_id, bool is_kill)
7732 struct hclge_vport *vport = hclge_get_vport(handle);
7733 struct hclge_dev *hdev = vport->back;
7734 bool writen_to_tbl = false;
7737 /* When port based VLAN is enabled, we use the port based VLAN as the
7738 * VLAN filter entry. In this case we don't update the VLAN filter
7739 * table when the user adds a new VLAN or removes an existing one, we
7740 * just update the vport VLAN list. The VLAN ids in the VLAN list are
7741 * not written to the VLAN filter table until port based VLAN is disabled
7743 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7744 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7745 vlan_id, 0, is_kill);
7746 writen_to_tbl = true;
7753 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7755 hclge_add_vport_vlan_table(vport, vlan_id,
7761 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7763 struct hclge_config_max_frm_size_cmd *req;
7764 struct hclge_desc desc;
7766 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7768 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7769 req->max_frm_size = cpu_to_le16(new_mps);
7770 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7772 return hclge_cmd_send(&hdev->hw, &desc, 1);
7775 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7777 struct hclge_vport *vport = hclge_get_vport(handle);
7779 return hclge_set_vport_mtu(vport, new_mtu);
7782 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7784 struct hclge_dev *hdev = vport->back;
7785 int i, max_frm_size, ret;
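/* The hardware frame size is the MTU plus the Ethernet header, the FCS
* and room for two VLAN tags (double-tagged frames).
*/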
7787 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7788 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7789 max_frm_size > HCLGE_MAC_MAX_FRAME)
7792 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7793 mutex_lock(&hdev->vport_lock);
7794 /* VF's mps must fit within hdev->mps */
7795 if (vport->vport_id && max_frm_size > hdev->mps) {
7796 mutex_unlock(&hdev->vport_lock);
7798 } else if (vport->vport_id) {
7799 vport->mps = max_frm_size;
7800 mutex_unlock(&hdev->vport_lock);
7804 /* PF's mps must be greater than every VF's mps */
7805 for (i = 1; i < hdev->num_alloc_vport; i++)
7806 if (max_frm_size < hdev->vport[i].mps) {
7807 mutex_unlock(&hdev->vport_lock);
7811 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7813 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7815 dev_err(&hdev->pdev->dev,
7816 "Change mtu fail, ret =%d\n", ret);
7820 hdev->mps = max_frm_size;
7821 vport->mps = max_frm_size;
7823 ret = hclge_buffer_alloc(hdev);
7825 dev_err(&hdev->pdev->dev,
7826 "Allocate buffer fail, ret =%d\n", ret);
7829 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7830 mutex_unlock(&hdev->vport_lock);
7834 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7837 struct hclge_reset_tqp_queue_cmd *req;
7838 struct hclge_desc desc;
7841 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7843 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7844 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7845 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7847 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7849 dev_err(&hdev->pdev->dev,
7850 "Send tqp reset cmd error, status =%d\n", ret);
7857 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7859 struct hclge_reset_tqp_queue_cmd *req;
7860 struct hclge_desc desc;
7863 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7865 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7866 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7868 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7870 dev_err(&hdev->pdev->dev,
7871 "Get reset status error, status =%d\n", ret);
7875 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7878 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7880 struct hnae3_queue *queue;
7881 struct hclge_tqp *tqp;
7883 queue = handle->kinfo.tqp[queue_id];
7884 tqp = container_of(queue, struct hclge_tqp, q);
7889 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7891 struct hclge_vport *vport = hclge_get_vport(handle);
7892 struct hclge_dev *hdev = vport->back;
7893 int reset_try_times = 0;
7898 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7900 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7902 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7906 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7908 dev_err(&hdev->pdev->dev,
7909 "Send reset tqp cmd fail, ret = %d\n", ret);
7913 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7914 /* Wait for tqp hw reset */
7916 reset_status = hclge_get_reset_status(hdev, queue_gid);
7921 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7922 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7926 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7928 dev_err(&hdev->pdev->dev,
7929 "Deassert the soft reset fail, ret = %d\n", ret);
7934 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7936 struct hclge_dev *hdev = vport->back;
7937 int reset_try_times = 0;
7942 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7944 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7946 dev_warn(&hdev->pdev->dev,
7947 "Send reset tqp cmd fail, ret = %d\n", ret);
7951 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7952 /* Wait for tqp hw reset */
7954 reset_status = hclge_get_reset_status(hdev, queue_gid);
7959 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7960 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7964 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7966 dev_warn(&hdev->pdev->dev,
7967 "Deassert the soft reset fail, ret = %d\n", ret);
7970 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7972 struct hclge_vport *vport = hclge_get_vport(handle);
7973 struct hclge_dev *hdev = vport->back;
7975 return hdev->fw_version;
7978 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7980 struct phy_device *phydev = hdev->hw.mac.phydev;
7985 phy_set_asym_pause(phydev, rx_en, tx_en);
7988 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7993 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7994 else if (rx_en && !tx_en)
7995 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7996 else if (!rx_en && tx_en)
7997 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7999 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8001 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8004 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8006 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8011 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8016 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8018 struct phy_device *phydev = hdev->hw.mac.phydev;
8019 u16 remote_advertising = 0;
8020 u16 local_advertising;
8021 u32 rx_pause, tx_pause;
8024 if (!phydev->link || !phydev->autoneg)
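/* Resolve the pause configuration from the local advertisement and the
* link partner's pause/asym-pause capabilities, then apply it.
*/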
8027 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8030 remote_advertising = LPA_PAUSE_CAP;
8032 if (phydev->asym_pause)
8033 remote_advertising |= LPA_PAUSE_ASYM;
8035 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8036 remote_advertising);
8037 tx_pause = flowctl & FLOW_CTRL_TX;
8038 rx_pause = flowctl & FLOW_CTRL_RX;
8040 if (phydev->duplex == HCLGE_MAC_HALF) {
8045 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8048 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8049 u32 *rx_en, u32 *tx_en)
8051 struct hclge_vport *vport = hclge_get_vport(handle);
8052 struct hclge_dev *hdev = vport->back;
8054 *auto_neg = hclge_get_autoneg(handle);
8056 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8062 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8065 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8068 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8077 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8078 u32 rx_en, u32 tx_en)
8080 struct hclge_vport *vport = hclge_get_vport(handle);
8081 struct hclge_dev *hdev = vport->back;
8082 struct phy_device *phydev = hdev->hw.mac.phydev;
8085 fc_autoneg = hclge_get_autoneg(handle);
8086 if (auto_neg != fc_autoneg) {
8087 dev_info(&hdev->pdev->dev,
8088 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8092 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8093 dev_info(&hdev->pdev->dev,
8094 "Priority flow control enabled. Cannot set link flow control.\n");
8098 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8101 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8104 return phy_start_aneg(phydev);
8106 if (hdev->pdev->revision == 0x20)
8109 return hclge_restart_autoneg(handle);
8112 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8113 u8 *auto_neg, u32 *speed, u8 *duplex)
8115 struct hclge_vport *vport = hclge_get_vport(handle);
8116 struct hclge_dev *hdev = vport->back;
8119 *speed = hdev->hw.mac.speed;
8121 *duplex = hdev->hw.mac.duplex;
8123 *auto_neg = hdev->hw.mac.autoneg;
8126 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8129 struct hclge_vport *vport = hclge_get_vport(handle);
8130 struct hclge_dev *hdev = vport->back;
8133 *media_type = hdev->hw.mac.media_type;
8136 *module_type = hdev->hw.mac.module_type;
8139 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8140 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8142 struct hclge_vport *vport = hclge_get_vport(handle);
8143 struct hclge_dev *hdev = vport->back;
8144 struct phy_device *phydev = hdev->hw.mac.phydev;
8145 int mdix_ctrl, mdix, retval, is_resolved;
8148 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8149 *tp_mdix = ETH_TP_MDI_INVALID;
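/* Switch the PHY to the MDI(X) page to read the control and status
* registers, then restore the copper page.
*/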
8153 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8155 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8156 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8157 HCLGE_PHY_MDIX_CTRL_S);
8159 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8160 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8161 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8163 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8165 switch (mdix_ctrl) {
8167 *tp_mdix_ctrl = ETH_TP_MDI;
8170 *tp_mdix_ctrl = ETH_TP_MDI_X;
8173 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8176 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8181 *tp_mdix = ETH_TP_MDI_INVALID;
8183 *tp_mdix = ETH_TP_MDI_X;
8185 *tp_mdix = ETH_TP_MDI;
8188 static void hclge_info_show(struct hclge_dev *hdev)
8190 struct device *dev = &hdev->pdev->dev;
8192 dev_info(dev, "PF info begin:\n");
8194 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8195 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8196 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8197 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8198 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8199 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8200 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8201 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8202 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8203 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8204 dev_info(dev, "This is %s PF\n",
8205 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8206 dev_info(dev, "DCB %s\n",
8207 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8208 dev_info(dev, "MQPRIO %s\n",
8209 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8211 dev_info(dev, "PF info end.\n");
8214 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8215 struct hclge_vport *vport)
8217 struct hnae3_client *client = vport->nic.client;
8218 struct hclge_dev *hdev = ae_dev->priv;
8221 ret = client->ops->init_instance(&vport->nic);
8225 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8226 hnae3_set_client_init_flag(client, ae_dev, 1);
8228 /* Enable nic hw error interrupts */
8229 ret = hclge_config_nic_hw_error(hdev, true);
8231 dev_err(&ae_dev->pdev->dev,
8232 "fail(%d) to enable hw error interrupts\n", ret);
8234 if (netif_msg_drv(&hdev->vport->nic))
8235 hclge_info_show(hdev);
8240 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8241 struct hclge_vport *vport)
8243 struct hnae3_client *client = vport->roce.client;
8244 struct hclge_dev *hdev = ae_dev->priv;
8247 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8251 client = hdev->roce_client;
8252 ret = hclge_init_roce_base_info(vport);
8256 ret = client->ops->init_instance(&vport->roce);
8260 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8261 hnae3_set_client_init_flag(client, ae_dev, 1);
8266 static int hclge_init_client_instance(struct hnae3_client *client,
8267 struct hnae3_ae_dev *ae_dev)
8269 struct hclge_dev *hdev = ae_dev->priv;
8270 struct hclge_vport *vport;
8273 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8274 vport = &hdev->vport[i];
8276 switch (client->type) {
8277 case HNAE3_CLIENT_KNIC:
8279 hdev->nic_client = client;
8280 vport->nic.client = client;
8281 ret = hclge_init_nic_client_instance(ae_dev, vport);
8285 ret = hclge_init_roce_client_instance(ae_dev, vport);
8290 case HNAE3_CLIENT_ROCE:
8291 if (hnae3_dev_roce_supported(hdev)) {
8292 hdev->roce_client = client;
8293 vport->roce.client = client;
8296 ret = hclge_init_roce_client_instance(ae_dev, vport);
8306 /* Enable roce ras interrupts */
8307 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8309 dev_err(&ae_dev->pdev->dev,
8310 "fail(%d) to enable roce ras interrupts\n", ret);
8315 hdev->nic_client = NULL;
8316 vport->nic.client = NULL;
8319 hdev->roce_client = NULL;
8320 vport->roce.client = NULL;
8324 static void hclge_uninit_client_instance(struct hnae3_client *client,
8325 struct hnae3_ae_dev *ae_dev)
8327 struct hclge_dev *hdev = ae_dev->priv;
8328 struct hclge_vport *vport;
8331 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8332 vport = &hdev->vport[i];
8333 if (hdev->roce_client) {
8334 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8335 hdev->roce_client->ops->uninit_instance(&vport->roce,
8337 hdev->roce_client = NULL;
8338 vport->roce.client = NULL;
8340 if (client->type == HNAE3_CLIENT_ROCE)
8342 if (hdev->nic_client && client->ops->uninit_instance) {
8343 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8344 client->ops->uninit_instance(&vport->nic, 0);
8345 hdev->nic_client = NULL;
8346 vport->nic.client = NULL;
8351 static int hclge_pci_init(struct hclge_dev *hdev)
8353 struct pci_dev *pdev = hdev->pdev;
8354 struct hclge_hw *hw;
8357 ret = pci_enable_device(pdev);
8359 dev_err(&pdev->dev, "failed to enable PCI device\n");
8363 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8365 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8368 "can't set consistent PCI DMA");
8369 goto err_disable_device;
8371 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8374 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8376 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8377 goto err_disable_device;
8380 pci_set_master(pdev);
8382 hw->io_base = pcim_iomap(pdev, 2, 0);
8384 dev_err(&pdev->dev, "Can't map configuration register space\n");
8386 goto err_clr_master;
8389 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8393 pci_clear_master(pdev);
8394 pci_release_regions(pdev);
8396 pci_disable_device(pdev);
8401 static void hclge_pci_uninit(struct hclge_dev *hdev)
8403 struct pci_dev *pdev = hdev->pdev;
8405 pcim_iounmap(pdev, hdev->hw.io_base);
8406 pci_free_irq_vectors(pdev);
8407 pci_clear_master(pdev);
8408 pci_release_mem_regions(pdev);
8409 pci_disable_device(pdev);
8412 static void hclge_state_init(struct hclge_dev *hdev)
8414 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8415 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8416 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8417 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8418 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8419 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8422 static void hclge_state_uninit(struct hclge_dev *hdev)
8424 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8425 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8427 if (hdev->service_timer.function)
8428 del_timer_sync(&hdev->service_timer);
8429 if (hdev->reset_timer.function)
8430 del_timer_sync(&hdev->reset_timer);
8431 if (hdev->service_task.func)
8432 cancel_work_sync(&hdev->service_task);
8433 if (hdev->rst_service_task.func)
8434 cancel_work_sync(&hdev->rst_service_task);
8435 if (hdev->mbx_service_task.func)
8436 cancel_work_sync(&hdev->mbx_service_task);
8439 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8441 #define HCLGE_FLR_WAIT_MS 100
8442 #define HCLGE_FLR_WAIT_CNT 50
8443 struct hclge_dev *hdev = ae_dev->priv;
8446 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8447 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8448 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8449 hclge_reset_event(hdev->pdev, NULL);
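/* Wait for the functions to go down: up to HCLGE_FLR_WAIT_CNT polls of
* HCLGE_FLR_WAIT_MS each, i.e. about 5 seconds in total.
*/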
8451 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8452 cnt++ < HCLGE_FLR_WAIT_CNT)
8453 msleep(HCLGE_FLR_WAIT_MS);
8455 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8456 dev_err(&hdev->pdev->dev,
8457 "flr wait down timeout: %d\n", cnt);
8460 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8462 struct hclge_dev *hdev = ae_dev->priv;
8464 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8467 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8469 struct pci_dev *pdev = ae_dev->pdev;
8470 struct hclge_dev *hdev;
8473 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8480 hdev->ae_dev = ae_dev;
8481 hdev->reset_type = HNAE3_NONE_RESET;
8482 hdev->reset_level = HNAE3_FUNC_RESET;
8483 ae_dev->priv = hdev;
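/* Default MPS: a standard Ethernet frame (ETH_FRAME_LEN) plus the FCS
* and room for two VLAN tags.
*/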
8484 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8486 mutex_init(&hdev->vport_lock);
8487 mutex_init(&hdev->vport_cfg_mutex);
8488 spin_lock_init(&hdev->fd_rule_lock);
8490 ret = hclge_pci_init(hdev);
8492 dev_err(&pdev->dev, "PCI init failed\n");
8496 /* Initialize the firmware command queue */
8497 ret = hclge_cmd_queue_init(hdev);
8499 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8500 goto err_pci_uninit;
8503 /* Initialize the firmware command interface */
8504 ret = hclge_cmd_init(hdev);
8506 goto err_cmd_uninit;
8508 ret = hclge_get_cap(hdev);
8510 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8512 goto err_cmd_uninit;
8515 ret = hclge_configure(hdev);
8517 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8518 goto err_cmd_uninit;
8521 ret = hclge_init_msi(hdev);
8523 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8524 goto err_cmd_uninit;
8527 ret = hclge_misc_irq_init(hdev);
8530 "Misc IRQ(vector0) init error, ret = %d.\n",
8532 goto err_msi_uninit;
8535 ret = hclge_alloc_tqps(hdev);
8537 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8538 goto err_msi_irq_uninit;
8541 ret = hclge_alloc_vport(hdev);
8543 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8544 goto err_msi_irq_uninit;
8547 ret = hclge_map_tqp(hdev);
8549 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8550 goto err_msi_irq_uninit;
8553 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8554 ret = hclge_mac_mdio_config(hdev);
8556 dev_err(&hdev->pdev->dev,
8557 "mdio config fail ret=%d\n", ret);
8558 goto err_msi_irq_uninit;
8562 ret = hclge_init_umv_space(hdev);
8564 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8565 goto err_mdiobus_unreg;
8568 ret = hclge_mac_init(hdev);
8570 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8571 goto err_mdiobus_unreg;
8574 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8576 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8577 goto err_mdiobus_unreg;
8580 ret = hclge_config_gro(hdev, true);
8582 goto err_mdiobus_unreg;
8584 ret = hclge_init_vlan_config(hdev);
8586 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8587 goto err_mdiobus_unreg;
8590 ret = hclge_tm_schd_init(hdev);
8592 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8593 goto err_mdiobus_unreg;
8596 hclge_rss_init_cfg(hdev);
8597 ret = hclge_rss_init_hw(hdev);
8599 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8600 goto err_mdiobus_unreg;
8603 ret = init_mgr_tbl(hdev);
8605 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8606 goto err_mdiobus_unreg;
8609 ret = hclge_init_fd_config(hdev);
8612 "fd table init fail, ret=%d\n", ret);
8613 goto err_mdiobus_unreg;
8616 INIT_KFIFO(hdev->mac_tnl_log);
8618 hclge_dcb_ops_set(hdev);
8620 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8621 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8622 INIT_WORK(&hdev->service_task, hclge_service_task);
8623 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8624 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8626 hclge_clear_all_event_cause(hdev);
8628 /* Enable MISC vector(vector0) */
8629 hclge_enable_vector(&hdev->misc_vector, true);
8631 hclge_state_init(hdev);
8632 hdev->last_reset_time = jiffies;
8634 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8638 if (hdev->hw.mac.phydev)
8639 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8641 hclge_misc_irq_uninit(hdev);
8643 pci_free_irq_vectors(pdev);
8645 hclge_cmd_uninit(hdev);
8647 pcim_iounmap(pdev, hdev->hw.io_base);
8648 pci_clear_master(pdev);
8649 pci_release_regions(pdev);
8650 pci_disable_device(pdev);
8655 static void hclge_stats_clear(struct hclge_dev *hdev)
8657 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8660 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8662 struct hclge_vport *vport = hdev->vport;
8665 for (i = 0; i < hdev->num_alloc_vport; i++) {
8666 hclge_vport_stop(vport);
8671 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8673 struct hclge_dev *hdev = ae_dev->priv;
8674 struct pci_dev *pdev = ae_dev->pdev;
8677 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8679 hclge_stats_clear(hdev);
8680 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8681 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8683 ret = hclge_cmd_init(hdev);
8685 dev_err(&pdev->dev, "Cmd queue init failed\n");
8689 ret = hclge_map_tqp(hdev);
8691 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8695 hclge_reset_umv_space(hdev);
8697 ret = hclge_mac_init(hdev);
8699 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8703 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8705 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8709 ret = hclge_config_gro(hdev, true);
8713 ret = hclge_init_vlan_config(hdev);
8715 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8719 ret = hclge_tm_init_hw(hdev, true);
8721 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8725 ret = hclge_rss_init_hw(hdev);
8727 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8731 ret = hclge_init_fd_config(hdev);
8734 "fd table init fail, ret=%d\n", ret);
8738 /* Re-enable the hw error interrupts because
8739 * the interrupts get disabled on global reset.
8741 ret = hclge_config_nic_hw_error(hdev, true);
8744 "fail(%d) to re-enable NIC hw error interrupts\n",
8749 if (hdev->roce_client) {
8750 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8753 "fail(%d) to re-enable roce ras interrupts\n",
8759 hclge_reset_vport_state(hdev);
8761 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8767 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8769 struct hclge_dev *hdev = ae_dev->priv;
8770 struct hclge_mac *mac = &hdev->hw.mac;
8772 hclge_state_uninit(hdev);
8775 mdiobus_unregister(mac->mdio_bus);
8777 hclge_uninit_umv_space(hdev);
8779 /* Disable MISC vector(vector0) */
8780 hclge_enable_vector(&hdev->misc_vector, false);
8781 synchronize_irq(hdev->misc_vector.vector_irq);
8783 /* Disable all hw interrupts */
8784 hclge_config_mac_tnl_int(hdev, false);
8785 hclge_config_nic_hw_error(hdev, false);
8786 hclge_config_rocee_ras_interrupt(hdev, false);
8788 hclge_cmd_uninit(hdev);
8789 hclge_misc_irq_uninit(hdev);
8790 hclge_pci_uninit(hdev);
8791 mutex_destroy(&hdev->vport_lock);
8792 hclge_uninit_vport_mac_table(hdev);
8793 hclge_uninit_vport_vlan_table(hdev);
8794 mutex_destroy(&hdev->vport_cfg_mutex);
8795 ae_dev->priv = NULL;
8798 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8800 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8801 struct hclge_vport *vport = hclge_get_vport(handle);
8802 struct hclge_dev *hdev = vport->back;
8804 return min_t(u32, hdev->rss_size_max,
8805 vport->alloc_tqps / kinfo->num_tc);
8808 static void hclge_get_channels(struct hnae3_handle *handle,
8809 struct ethtool_channels *ch)
8811 ch->max_combined = hclge_get_max_channels(handle);
8812 ch->other_count = 1;
8814 ch->combined_count = handle->kinfo.rss_size;
8817 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8818 u16 *alloc_tqps, u16 *max_rss_size)
8820 struct hclge_vport *vport = hclge_get_vport(handle);
8821 struct hclge_dev *hdev = vport->back;
8823 *alloc_tqps = vport->alloc_tqps;
8824 *max_rss_size = hdev->rss_size_max;
8827 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8828 bool rxfh_configured)
8830 struct hclge_vport *vport = hclge_get_vport(handle);
8831 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8832 struct hclge_dev *hdev = vport->back;
8833 int cur_rss_size = kinfo->rss_size;
8834 int cur_tqps = kinfo->num_tqps;
8835 u16 tc_offset[HCLGE_MAX_TC_NUM];
8836 u16 tc_valid[HCLGE_MAX_TC_NUM];
8837 u16 tc_size[HCLGE_MAX_TC_NUM];
8842 kinfo->req_rss_size = new_tqps_num;
8844 ret = hclge_tm_vport_map_update(hdev);
8846 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
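/* The RSS TC mode command takes the per-TC queue count as a log2
* value, so round rss_size up to a power of two and store its exponent.
*/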
8850 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8851 roundup_size = ilog2(roundup_size);
8852 /* Set the RSS TC mode according to the new RSS size */
8853 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8856 if (!(hdev->hw_tc_map & BIT(i)))
8860 tc_size[i] = roundup_size;
8861 tc_offset[i] = kinfo->rss_size * i;
8863 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8867 /* RSS indirection table has been configured by the user */
8868 if (rxfh_configured)
8871 /* Reinitialize the rss indirection table according to the new RSS size */
8872 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8876 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8877 rss_indir[i] = i % kinfo->rss_size;
8879 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8881 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8888 dev_info(&hdev->pdev->dev,
8889 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8890 cur_rss_size, kinfo->rss_size,
8891 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8896 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8897 u32 *regs_num_64_bit)
8899 struct hclge_desc desc;
8903 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8904 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8906 dev_err(&hdev->pdev->dev,
8907 "Query register number cmd failed, ret = %d.\n", ret);
8911 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8912 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8914 total_num = *regs_num_32_bit + *regs_num_64_bit;
8921 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8924 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8925 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
8927 struct hclge_desc *desc;
8928 u32 *reg_val = data;
8938 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
8939 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
8940 HCLGE_32_BIT_REG_RTN_DATANUM);
8941 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8945 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8946 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8948 dev_err(&hdev->pdev->dev,
8949 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8954 for (i = 0; i < cmd_num; i++) {
8956 desc_data = (__le32 *)(&desc[i].data[0]);
8957 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
8959 desc_data = (__le32 *)(&desc[i]);
8960 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8962 for (k = 0; k < n; k++) {
8963 *reg_val++ = le32_to_cpu(*desc_data++);
8975 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8978 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8979 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
8981 struct hclge_desc *desc;
8982 u64 *reg_val = data;
8992 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
8993 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
8994 HCLGE_64_BIT_REG_RTN_DATANUM);
8995 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8999 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9000 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9002 dev_err(&hdev->pdev->dev,
9003 "Query 64 bit register cmd failed, ret = %d.\n", ret);
9008 for (i = 0; i < cmd_num; i++) {
9010 desc_data = (__le64 *)(&desc[i].data[0]);
9011 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9013 desc_data = (__le64 *)(&desc[i]);
9014 n = HCLGE_64_BIT_REG_RTN_DATANUM;
9016 for (k = 0; k < n; k++) {
9017 *reg_val++ = le64_to_cpu(*desc_data++);
9029 #define MAX_SEPARATE_NUM 4
9030 #define SEPARATOR_VALUE 0xFFFFFFFF
9031 #define REG_NUM_PER_LINE 4
9032 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
9034 static int hclge_get_regs_len(struct hnae3_handle *handle)
9036 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9037 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9038 struct hclge_vport *vport = hclge_get_vport(handle);
9039 struct hclge_dev *hdev = vport->back;
9040 u32 regs_num_32_bit, regs_num_64_bit;
9043 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9045 dev_err(&hdev->pdev->dev,
9046 "Get register number failed, ret = %d.\n", ret);
9050 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9051 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9052 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9053 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9055 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9056 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9057 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9060 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9063 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9064 struct hclge_vport *vport = hclge_get_vport(handle);
9065 struct hclge_dev *hdev = vport->back;
9066 u32 regs_num_32_bit, regs_num_64_bit;
9067 int i, j, reg_um, separator_num;
9071 *version = hdev->fw_version;
9073 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9075 dev_err(&hdev->pdev->dev,
9076 "Get register number failed, ret = %d.\n", ret);
9080 /* fetch per-PF register values from the PF PCIe register space */
9081 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9082 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9083 for (i = 0; i < reg_um; i++)
9084 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9085 for (i = 0; i < separator_num; i++)
9086 *reg++ = SEPARATOR_VALUE;
9088 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9089 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9090 for (i = 0; i < reg_um; i++)
9091 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9092 for (i = 0; i < separator_num; i++)
9093 *reg++ = SEPARATOR_VALUE;
9095 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9096 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9097 for (j = 0; j < kinfo->num_tqps; j++) {
9098 for (i = 0; i < reg_um; i++)
9099 *reg++ = hclge_read_dev(&hdev->hw,
9100 ring_reg_addr_list[i] +
9102 for (i = 0; i < separator_num; i++)
9103 *reg++ = SEPARATOR_VALUE;
9106 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9107 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9108 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9109 for (i = 0; i < reg_um; i++)
9110 *reg++ = hclge_read_dev(&hdev->hw,
9111 tqp_intr_reg_addr_list[i] +
9113 for (i = 0; i < separator_num; i++)
9114 *reg++ = SEPARATOR_VALUE;
9117 /* fetching PF common registers values from firmware */
9118 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9120 dev_err(&hdev->pdev->dev,
9121 "Get 32 bit register failed, ret = %d.\n", ret);
9125 reg += regs_num_32_bit;
9126 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9128 dev_err(&hdev->pdev->dev,
9129 "Get 64 bit register failed, ret = %d.\n", ret);
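/* Locate-LED support: the LED is not toggled through a PF register but
 * requested from firmware with the HCLGE_OPC_LED_STATUS_CFG command.
 */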
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}
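/* LED states accepted by the firmware command; HCLGE_LED_NO_CHANGE
 * presumably tells firmware to leave the current state untouched.
 */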
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
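/* hclge_ops is the PF-side implementation of the hnae3 framework hooks;
 * it is handed to the hnae3 core through the ae_algo registration below.
 */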
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
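/* Module entry points simply register/unregister the algo with the hnae3
 * framework; binding to matching PCI devices is driven by the hnae3 core.
 */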
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);