// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
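
/* The two macros above work as a pair: HCLGE_MAC_STATS_FIELD_OFF() yields
 * the byte offset of a counter inside struct hclge_mac_stats, and
 * HCLGE_STATS_READ() returns the u64 counter stored at that offset. A
 * minimal usage sketch (the g_mac_stats_string table below is built from
 * exactly such offsets):
 *
 *	u64 cnt = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */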

#define HCLGE_BUF_SIZE_UNIT	256
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2

#define HCLGE_RESET_MAX_FAIL_CNT	5

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
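
/* The four register tables above (cmdq, common, ring and tqp interrupt
 * registers) are kept as plain address lists so the register-dump path
 * (e.g. ethtool -d) can simply iterate over them and read each register
 * without further decoding.
 */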

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
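
/* Two firmware interfaces exist for reading the MAC statistics block: the
 * original HCLGE_OPC_STATS_MAC command, which always transfers a fixed
 * HCLGE_MAC_CMD_NUM descriptors, and the newer HCLGE_OPC_STATS_MAC_ALL
 * command, whose descriptor count is queried from firmware at runtime.
 * hclge_mac_update_stats() below probes for the newer method and falls
 * back to the fixed-size one on -EOPNOTSUPP.
 */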

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n, ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);
	return 0;
}
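
/* Each descriptor carries HCLGE_RD_OTHER_STATS_NUM stats values, except
 * the first, which loses space to the command header and carries only
 * HCLGE_RD_FIRST_STATS_NUM. So for reg_num counters the descriptor count
 * computed below is 1 + ceil((reg_num - 3) / 4); e.g. reg_num = 12 gives
 * 1 + 2 + 1 = 4 descriptors.
 */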
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has both a TX and an RX queue */
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	/* serialize concurrent updaters via the UPDATING state bit */
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
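
/* Firmware encodes link speed as a small integer. The same encoding is
 * used in both directions: hclge_parse_speed() below decodes it from the
 * configuration block, and hclge_cfg_mac_speed_dup_hw() writes it back:
 *
 *	0 -> 1G    1 -> 10G   2 -> 25G   3 -> 40G
 *	4 -> 50G   5 -> 100G  6 -> 10M   7 -> 100M
 */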
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	/* ethtool has no dedicated 25G LR link mode, so SR is reused here */
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	/* the two-step shift is simply (mac_addr_tmp_high << 32) */
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
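
/* The PF configuration is fetched in HCLGE_PF_CFG_DESC_NUM chunks. Each
 * request descriptor packs two fields into its 32-bit offset word: the
 * byte offset of the chunk (HCLGE_CFG_OFFSET_M/S) and the read length in
 * 4-byte units (HCLGE_CFG_RD_LEN_M/S), as set up in hclge_get_cfg() below.
 */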
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is passed to hardware in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to get config %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n",
			ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* uncontiguous tc is currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
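
/* hclge_is_rx_buf_ok() checks whether rx_all (total RX packet buffer minus
 * TX allocations) can hold the per-TC private buffers plus a shared
 * buffer, and if so computes the shared buffer thresholds. Roughly, with
 * mps aligned up to HCLGE_BUF_SIZE_UNIT:
 *
 *	shared_std = max(shared_buf_min, (tc_num + 1) * aligned_mps)
 *	ok iff rx_all >= rx_priv + shared_std
 *
 * e.g. with mps = 1500 (aligned to 1536) and 4 TCs, the TC term alone
 * requires 5 * 1536 = 7680 bytes of shared buffer.
 */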
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
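
/* The RX buffer calculation below degrades gracefully: first try generous
 * per-TC watermarks, then minimal ones, then give up the private buffers
 * of TCs without PFC, and finally those of PFC-enabled TCs, accepting the
 * first layout that hclge_is_rx_buf_ok() approves.
 */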
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
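
/* Vector layout assumed by hclge_init_msi(): the NIC (and misc) vectors
 * occupy the first roce_base_msix_offset entries and the RoCE vectors
 * follow, matching what firmware reported in hclge_query_pf_resource().
 */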
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}
2217 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2221 duplex = hclge_check_speed_dup(duplex, speed);
2222 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2225 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2229 hdev->hw.mac.speed = speed;
2230 hdev->hw.mac.duplex = duplex;
2235 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2238 struct hclge_vport *vport = hclge_get_vport(handle);
2239 struct hclge_dev *hdev = vport->back;
2241 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2244 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2246 struct hclge_config_auto_neg_cmd *req;
2247 struct hclge_desc desc;
2251 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2253 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2254 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2255 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2257 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2259 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2265 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2267 struct hclge_vport *vport = hclge_get_vport(handle);
2268 struct hclge_dev *hdev = vport->back;
2270 if (!hdev->hw.mac.support_autoneg) {
2272 dev_err(&hdev->pdev->dev,
2273 "autoneg is not supported by current port\n");
2280 return hclge_set_autoneg_en(hdev, enable);
2283 static int hclge_get_autoneg(struct hnae3_handle *handle)
2285 struct hclge_vport *vport = hclge_get_vport(handle);
2286 struct hclge_dev *hdev = vport->back;
2287 struct phy_device *phydev = hdev->hw.mac.phydev;
2290 return phydev->autoneg;
2292 return hdev->hw.mac.autoneg;
2295 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2297 struct hclge_vport *vport = hclge_get_vport(handle);
2298 struct hclge_dev *hdev = vport->back;
2301 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2303 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2306 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2309 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2311 struct hclge_config_fec_cmd *req;
2312 struct hclge_desc desc;
2315 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2317 req = (struct hclge_config_fec_cmd *)desc.data;
2318 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2319 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2320 if (fec_mode & BIT(HNAE3_FEC_RS))
2321 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2322 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2323 if (fec_mode & BIT(HNAE3_FEC_BASER))
2324 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2325 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2327 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2329 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2334 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2336 struct hclge_vport *vport = hclge_get_vport(handle);
2337 struct hclge_dev *hdev = vport->back;
2338 struct hclge_mac *mac = &hdev->hw.mac;
2341 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2342 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2346 ret = hclge_set_fec_hw(hdev, fec_mode);
2350 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2354 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2357 struct hclge_vport *vport = hclge_get_vport(handle);
2358 struct hclge_dev *hdev = vport->back;
2359 struct hclge_mac *mac = &hdev->hw.mac;
2362 *fec_ability = mac->fec_ability;
2364 *fec_mode = mac->fec_mode;
2367 static int hclge_mac_init(struct hclge_dev *hdev)
2369 struct hclge_mac *mac = &hdev->hw.mac;
2372 hdev->support_sfp_query = true;
2373 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2374 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2375 hdev->hw.mac.duplex);
2377 dev_err(&hdev->pdev->dev,
2378 "Config mac speed dup fail ret=%d\n", ret);
2384 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2385 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2387 dev_err(&hdev->pdev->dev,
2388 "Fec mode init fail, ret = %d\n", ret);
2393 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2395 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2399 ret = hclge_buffer_alloc(hdev);
2401 dev_err(&hdev->pdev->dev,
2402 "allocate buffer fail, ret=%d\n", ret);
2407 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2409 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2410 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2411 schedule_work(&hdev->mbx_service_task);
2414 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2416 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2417 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2418 schedule_work(&hdev->rst_service_task);
2421 static void hclge_task_schedule(struct hclge_dev *hdev)
2423 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2424 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2425 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2426 (void)schedule_work(&hdev->service_task);
2429 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2431 struct hclge_link_status_cmd *req;
2432 struct hclge_desc desc;
2436 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2437 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2439 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2444 req = (struct hclge_link_status_cmd *)desc.data;
2445 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2447 return !!link_status;
2450 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2455 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2458 mac_state = hclge_get_mac_link_status(hdev);
2460 if (hdev->hw.mac.phydev) {
2461 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2462 link_stat = mac_state &
2463 hdev->hw.mac.phydev->link;
2468 link_stat = mac_state;
2474 static void hclge_update_link_status(struct hclge_dev *hdev)
2476 struct hnae3_client *rclient = hdev->roce_client;
2477 struct hnae3_client *client = hdev->nic_client;
2478 struct hnae3_handle *rhandle;
2479 struct hnae3_handle *handle;
2485 state = hclge_get_mac_phy_link(hdev);
2486 if (state != hdev->hw.mac.link) {
2487 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2488 handle = &hdev->vport[i].nic;
2489 client->ops->link_status_change(handle, state);
2490 hclge_config_mac_tnl_int(hdev, state);
2491 rhandle = &hdev->vport[i].roce;
2492 if (rclient && rclient->ops->link_status_change)
2493 rclient->ops->link_status_change(rhandle,
2496 hdev->hw.mac.link = state;
2500 static void hclge_update_port_capability(struct hclge_mac *mac)
2502 /* update fec ability by speed */
2503 hclge_convert_setting_fec(mac);
/* the firmware cannot identify the backplane type; the media type
 * read from the configuration helps to deal with it
2508 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2509 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2510 mac->module_type = HNAE3_MODULE_TYPE_KR;
2511 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2512 mac->module_type = HNAE3_MODULE_TYPE_TP;
if (mac->support_autoneg) {
2515 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2516 linkmode_copy(mac->advertising, mac->supported);
2518 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2520 linkmode_zero(mac->advertising);
2524 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2526 struct hclge_sfp_info_cmd *resp;
2527 struct hclge_desc desc;
2530 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2531 resp = (struct hclge_sfp_info_cmd *)desc.data;
2532 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2533 if (ret == -EOPNOTSUPP) {
2534 dev_warn(&hdev->pdev->dev,
2535 "IMP do not support get SFP speed %d\n", ret);
2538 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2542 *speed = le32_to_cpu(resp->speed);
2547 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2549 struct hclge_sfp_info_cmd *resp;
2550 struct hclge_desc desc;
2553 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2554 resp = (struct hclge_sfp_info_cmd *)desc.data;
2556 resp->query_type = QUERY_ACTIVE_SPEED;
2558 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2559 if (ret == -EOPNOTSUPP) {
2560 dev_warn(&hdev->pdev->dev,
2561 "IMP does not support get SFP info %d\n", ret);
2564 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2568 mac->speed = le32_to_cpu(resp->speed);
/* if resp->speed_ability is 0, it means the firmware is an old
 * version, so do not update these params
2572 if (resp->speed_ability) {
2573 mac->module_type = le32_to_cpu(resp->module_type);
2574 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2575 mac->autoneg = resp->autoneg;
2576 mac->support_autoneg = resp->autoneg_ability;
2577 if (!resp->active_fec)
2580 mac->fec_mode = BIT(resp->active_fec);
2582 mac->speed_type = QUERY_SFP_SPEED;
2588 static int hclge_update_port_info(struct hclge_dev *hdev)
2590 struct hclge_mac *mac = &hdev->hw.mac;
2591 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2594 /* get the port info from SFP cmd if not copper port */
2595 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
/* if IMP does not support getting SFP/qSFP info, return directly */
2599 if (!hdev->support_sfp_query)
2602 if (hdev->pdev->revision >= 0x21)
2603 ret = hclge_get_sfp_info(hdev, mac);
2605 ret = hclge_get_sfp_speed(hdev, &speed);
2607 if (ret == -EOPNOTSUPP) {
2608 hdev->support_sfp_query = false;
2614 if (hdev->pdev->revision >= 0x21) {
2615 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2616 hclge_update_port_capability(mac);
2619 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2622 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2623 return 0; /* do nothing if no SFP */
2625 /* must config full duplex for SFP */
2626 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2630 static int hclge_get_status(struct hnae3_handle *handle)
2632 struct hclge_vport *vport = hclge_get_vport(handle);
2633 struct hclge_dev *hdev = vport->back;
2635 hclge_update_link_status(hdev);
2637 return hdev->hw.mac.link;
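/* 1 Hz watchdog: re-arms itself, bumps the stats and aRFS expiry
 * timers, and schedules the main service task.
 */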
2640 static void hclge_service_timer(struct timer_list *t)
2642 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2644 mod_timer(&hdev->service_timer, jiffies + HZ);
2645 hdev->hw_stats.stats_timer++;
2646 hdev->fd_arfs_expire_timer++;
2647 hclge_task_schedule(hdev);
2650 static void hclge_service_complete(struct hclge_dev *hdev)
2652 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2654 /* Flush memory before next watchdog */
2655 smp_mb__before_atomic();
2656 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2659 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2661 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2663 /* fetch the events from their corresponding regs */
2664 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2665 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2666 msix_src_reg = hclge_read_dev(&hdev->hw,
2667 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
/* Assumption: If by any chance reset and mailbox events are reported
 * together then we will only process the reset event in this go and will
 * defer the processing of the mailbox events. Since we would not have
 * cleared the RX CMDQ event this time, we would receive another
 * interrupt from the H/W just for the mailbox.
2676 /* check for vector0 reset event sources */
2677 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2678 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2679 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2680 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2681 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2682 hdev->rst_stats.imp_rst_cnt++;
2683 return HCLGE_VECTOR0_EVENT_RST;
2686 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2687 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2688 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2689 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2690 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2691 hdev->rst_stats.global_rst_cnt++;
2692 return HCLGE_VECTOR0_EVENT_RST;
2695 /* check for vector0 msix event source */
2696 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2697 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2699 return HCLGE_VECTOR0_EVENT_ERR;
2702 /* check for vector0 mailbox(=CMDQ RX) event source */
2703 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2704 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2705 *clearval = cmdq_src_reg;
2706 return HCLGE_VECTOR0_EVENT_MBX;
2709 /* print other vector0 event source */
2710 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2711 cmdq_src_reg, msix_src_reg);
2712 return HCLGE_VECTOR0_EVENT_OTHER;
2715 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2718 switch (event_type) {
2719 case HCLGE_VECTOR0_EVENT_RST:
2720 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2722 case HCLGE_VECTOR0_EVENT_MBX:
2723 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2730 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2732 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2733 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2734 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2735 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2736 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2739 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2741 writel(enable ? 1 : 0, vector->addr);
2744 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2746 struct hclge_dev *hdev = data;
2750 hclge_enable_vector(&hdev->misc_vector, false);
2751 event_cause = hclge_check_event_cause(hdev, &clearval);
/* vector 0 interrupt is shared with reset and mailbox source events. */
2754 switch (event_cause) {
2755 case HCLGE_VECTOR0_EVENT_ERR:
/* we do not know what type of reset is required now. This could
 * only be decided after we fetch the type of errors which
 * caused this event. Therefore, we will do below for now:
 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
 *    have deferred the type of reset to be used.
 * 2. Schedule the reset service task.
 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
 *    will fetch the correct type of reset. This would be done
 *    by first decoding the types of errors.
2766 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2768 case HCLGE_VECTOR0_EVENT_RST:
2769 hclge_reset_task_schedule(hdev);
2771 case HCLGE_VECTOR0_EVENT_MBX:
/* If we are here then,
 * 1. Either we are not handling any mbx task and we are not
 *    scheduled as well
 * 2. We could be handling a mbx task but nothing more is
 *    scheduled.
 * In both cases, we should schedule the mbx task as there are more
 * mbx messages reported by this interrupt.
2781 hclge_mbx_task_schedule(hdev);
2784 dev_warn(&hdev->pdev->dev,
2785 "received unknown or unhandled event of vector0\n");
/* clear the source of the interrupt if it is not caused by reset */
2790 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2791 hclge_clear_event_cause(hdev, event_cause, clearval);
2792 hclge_enable_vector(&hdev->misc_vector, true);
2798 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2800 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2801 dev_warn(&hdev->pdev->dev,
2802 "vector(vector_id %d) has been freed.\n", vector_id);
2806 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2807 hdev->num_msi_left += 1;
2808 hdev->num_msi_used -= 1;
2811 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2813 struct hclge_misc_vector *vector = &hdev->misc_vector;
2815 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2817 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2818 hdev->vector_status[0] = 0;
2820 hdev->num_msi_left -= 1;
2821 hdev->num_msi_used += 1;
2824 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2828 hclge_get_misc_vector(hdev);
2830 /* this would be explicitly freed in the end */
2831 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2832 0, "hclge_misc", hdev);
2834 hclge_free_vector(hdev, 0);
2835 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2836 hdev->misc_vector.vector_irq);
2842 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2844 free_irq(hdev->misc_vector.vector_irq, hdev);
2845 hclge_free_vector(hdev, 0);
2848 int hclge_notify_client(struct hclge_dev *hdev,
2849 enum hnae3_reset_notify_type type)
2851 struct hnae3_client *client = hdev->nic_client;
2854 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
2857 if (!client->ops->reset_notify)
2860 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2861 struct hnae3_handle *handle = &hdev->vport[i].nic;
2864 ret = client->ops->reset_notify(handle, type);
2866 dev_err(&hdev->pdev->dev,
2867 "notify nic client failed %d(%d)\n", type, ret);
2875 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2876 enum hnae3_reset_notify_type type)
2878 struct hnae3_client *client = hdev->roce_client;
2882 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
2885 if (!client->ops->reset_notify)
2888 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2889 struct hnae3_handle *handle = &hdev->vport[i].roce;
2891 ret = client->ops->reset_notify(handle, type);
2893 dev_err(&hdev->pdev->dev,
2894 "notify roce client failed %d(%d)",
2903 static int hclge_reset_wait(struct hclge_dev *hdev)
#define HCLGE_RESET_WAIT_MS 100
2906 #define HCLGE_RESET_WAIT_CNT 200
2907 u32 val, reg, reg_bit;
2910 switch (hdev->reset_type) {
2911 case HNAE3_IMP_RESET:
2912 reg = HCLGE_GLOBAL_RESET_REG;
2913 reg_bit = HCLGE_IMP_RESET_BIT;
2915 case HNAE3_GLOBAL_RESET:
2916 reg = HCLGE_GLOBAL_RESET_REG;
2917 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2919 case HNAE3_FUNC_RESET:
2920 reg = HCLGE_FUN_RST_ING;
2921 reg_bit = HCLGE_FUN_RST_ING_B;
2923 case HNAE3_FLR_RESET:
2926 dev_err(&hdev->pdev->dev,
2927 "Wait for unsupported reset type: %d\n",
2932 if (hdev->reset_type == HNAE3_FLR_RESET) {
2933 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2934 cnt++ < HCLGE_RESET_WAIT_CNT)
msleep(HCLGE_RESET_WAIT_MS);
2937 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2938 dev_err(&hdev->pdev->dev,
2939 "flr wait timeout: %d\n", cnt);
2946 val = hclge_read_dev(&hdev->hw, reg);
2947 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WAIT_MS);
2949 val = hclge_read_dev(&hdev->hw, reg);
2953 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2954 dev_warn(&hdev->pdev->dev,
2955 "Wait for reset timeout: %d\n", hdev->reset_type);
2962 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2964 struct hclge_vf_rst_cmd *req;
2965 struct hclge_desc desc;
2967 req = (struct hclge_vf_rst_cmd *)desc.data;
2968 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2969 req->dest_vfid = func_id;
2974 return hclge_cmd_send(&hdev->hw, &desc, 1);
2977 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2981 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2982 struct hclge_vport *vport = &hdev->vport[i];
2985 /* Send cmd to set/clear VF's FUNC_RST_ING */
2986 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2988 dev_err(&hdev->pdev->dev,
2989 "set vf(%d) rst failed %d!\n",
2990 vport->vport_id, ret);
2994 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2997 /* Inform VF to process the reset.
2998 * hclge_inform_reset_assert_to_vf may fail if VF
2999 * driver is not loaded.
3001 ret = hclge_inform_reset_assert_to_vf(vport);
3003 dev_warn(&hdev->pdev->dev,
3004 "inform reset to vf(%d) failed %d!\n",
3005 vport->vport_id, ret);
3011 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3013 struct hclge_desc desc;
3014 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3017 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3018 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3019 req->fun_reset_vfid = func_id;
3021 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3023 dev_err(&hdev->pdev->dev,
3024 "send function reset cmd fail, status =%d\n", ret);
3029 static void hclge_do_reset(struct hclge_dev *hdev)
3031 struct hnae3_handle *handle = &hdev->vport[0].nic;
3032 struct pci_dev *pdev = hdev->pdev;
3035 if (hclge_get_hw_reset_stat(handle)) {
3036 dev_info(&pdev->dev, "Hardware reset not finish\n");
3037 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3038 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3039 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3043 switch (hdev->reset_type) {
3044 case HNAE3_GLOBAL_RESET:
3045 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3046 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3047 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3048 dev_info(&pdev->dev, "Global Reset requested\n");
3050 case HNAE3_FUNC_RESET:
3051 dev_info(&pdev->dev, "PF Reset requested\n");
3052 /* schedule again to check later */
3053 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3054 hclge_reset_task_schedule(hdev);
3056 case HNAE3_FLR_RESET:
3057 dev_info(&pdev->dev, "FLR requested\n");
3058 /* schedule again to check later */
3059 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3060 hclge_reset_task_schedule(hdev);
3063 dev_warn(&pdev->dev,
3064 "Unsupported reset type: %d\n", hdev->reset_type);
3069 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3070 unsigned long *addr)
3072 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3074 /* first, resolve any unknown reset type to the known type(s) */
3075 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3076 /* we will intentionally ignore any errors from this function
3077 * as we will end up in *some* reset request in any case
3079 hclge_handle_hw_msix_error(hdev, addr);
3080 clear_bit(HNAE3_UNKNOWN_RESET, addr);
/* We deferred the clearing of the error event which caused the
 * interrupt since it was not possible to do that in
 * interrupt context (and this is the reason we introduced the
 * new UNKNOWN reset type). Now that the errors have been
 * handled and cleared in hardware, we can safely enable
 * interrupts. This is an exception to the norm.
3088 hclge_enable_vector(&hdev->misc_vector, true);
3091 /* return the highest priority reset level amongst all */
3092 if (test_bit(HNAE3_IMP_RESET, addr)) {
3093 rst_level = HNAE3_IMP_RESET;
3094 clear_bit(HNAE3_IMP_RESET, addr);
3095 clear_bit(HNAE3_GLOBAL_RESET, addr);
3096 clear_bit(HNAE3_FUNC_RESET, addr);
3097 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3098 rst_level = HNAE3_GLOBAL_RESET;
3099 clear_bit(HNAE3_GLOBAL_RESET, addr);
3100 clear_bit(HNAE3_FUNC_RESET, addr);
3101 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3102 rst_level = HNAE3_FUNC_RESET;
3103 clear_bit(HNAE3_FUNC_RESET, addr);
3104 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3105 rst_level = HNAE3_FLR_RESET;
3106 clear_bit(HNAE3_FLR_RESET, addr);
3109 if (hdev->reset_type != HNAE3_NONE_RESET &&
3110 rst_level < hdev->reset_type)
3111 return HNAE3_NONE_RESET;
3116 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3120 switch (hdev->reset_type) {
3121 case HNAE3_IMP_RESET:
3122 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3124 case HNAE3_GLOBAL_RESET:
3125 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3134 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3135 hclge_enable_vector(&hdev->misc_vector, true);
3138 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3142 switch (hdev->reset_type) {
3143 case HNAE3_FUNC_RESET:
3145 case HNAE3_FLR_RESET:
3146 ret = hclge_set_all_vf_rst(hdev, true);
3155 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3157 #define HCLGE_RESET_SYNC_TIME 100
3162 switch (hdev->reset_type) {
3163 case HNAE3_FUNC_RESET:
/* There is no mechanism for the PF to know if the VF has stopped IO;
 * for now, just wait 100 ms for the VF to stop IO
3167 msleep(HCLGE_RESET_SYNC_TIME);
3168 ret = hclge_func_reset_cmd(hdev, 0);
3170 dev_err(&hdev->pdev->dev,
3171 "asserting function reset fail %d!\n", ret);
/* After performing a PF reset, it is not necessary to do the
3176 * mailbox handling or send any command to firmware, because
3177 * any mailbox handling or command to firmware is only valid
3178 * after hclge_cmd_init is called.
3180 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3181 hdev->rst_stats.pf_rst_cnt++;
3183 case HNAE3_FLR_RESET:
/* There is no mechanism for the PF to know if the VF has stopped IO;
 * for now, just wait 100 ms for the VF to stop IO
3187 msleep(HCLGE_RESET_SYNC_TIME);
3188 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3189 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3190 hdev->rst_stats.flr_rst_cnt++;
3192 case HNAE3_IMP_RESET:
3193 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3194 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3195 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3201 /* inform hardware that preparatory work is done */
3202 msleep(HCLGE_RESET_SYNC_TIME);
3203 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3204 HCLGE_NIC_CMQ_ENABLE);
3205 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3210 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3212 #define MAX_RESET_FAIL_CNT 5
3214 if (hdev->reset_pending) {
3215 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3216 hdev->reset_pending);
3218 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3219 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3220 BIT(HCLGE_IMP_RESET_BIT))) {
3221 dev_info(&hdev->pdev->dev,
3222 "reset failed because IMP Reset is pending\n");
3223 hclge_clear_reset_cause(hdev);
3225 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3226 hdev->reset_fail_cnt++;
3228 set_bit(hdev->reset_type, &hdev->reset_pending);
3229 dev_info(&hdev->pdev->dev,
3230 "re-schedule to wait for hw reset done\n");
3234 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3235 hclge_clear_reset_cause(hdev);
3236 mod_timer(&hdev->reset_timer,
3237 jiffies + HCLGE_RESET_INTERVAL);
3242 hclge_clear_reset_cause(hdev);
3243 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3247 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3251 switch (hdev->reset_type) {
3252 case HNAE3_FUNC_RESET:
3254 case HNAE3_FLR_RESET:
3255 ret = hclge_set_all_vf_rst(hdev, false);
3264 static int hclge_reset_stack(struct hclge_dev *hdev)
3268 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3272 ret = hclge_reset_ae_dev(hdev->ae_dev);
3276 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3280 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3283 static void hclge_reset(struct hclge_dev *hdev)
3285 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3286 bool is_timeout = false;
/* Initialize the ae_dev reset status as well, in case the enet layer
 * wants to know if the device is undergoing reset
3292 ae_dev->reset_type = hdev->reset_type;
3293 hdev->rst_stats.reset_cnt++;
3294 /* perform reset of the stack & ae device for a client */
3295 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3299 ret = hclge_reset_prepare_down(hdev);
3304 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3306 goto err_reset_lock;
3310 ret = hclge_reset_prepare_wait(hdev);
3314 if (hclge_reset_wait(hdev)) {
3319 hdev->rst_stats.hw_reset_done_cnt++;
3321 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3327 ret = hclge_reset_stack(hdev);
3329 goto err_reset_lock;
3331 hclge_clear_reset_cause(hdev);
3333 ret = hclge_reset_prepare_up(hdev);
3335 goto err_reset_lock;
3339 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3340 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3343 if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3348 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3350 goto err_reset_lock;
3354 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3358 hdev->last_reset_time = jiffies;
3359 hdev->reset_fail_cnt = 0;
3360 hdev->rst_stats.reset_done_cnt++;
3361 ae_dev->reset_type = HNAE3_NONE_RESET;
3362 del_timer(&hdev->reset_timer);
3369 if (hclge_reset_err_handle(hdev, is_timeout))
3370 hclge_reset_task_schedule(hdev);
3373 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3375 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3376 struct hclge_dev *hdev = ae_dev->priv;
/* We might end up getting called broadly because of the 2 cases below:
 * 1. A recoverable error was conveyed through APEI and the only way
 *    to bring normalcy is to reset.
 * 2. A new reset request from the stack due to timeout
 *
 * For the first case, the error event might not have an ae handle
 * available. Check if this is a new reset request and we are not here
 * just because the last reset attempt did not succeed and the watchdog
 * hit us again. We will know this if the last reset request did not
 * occur very recently (watchdog timer = 5*HZ, let us check after a
 * sufficiently large time, say 4*5*HZ).
 * In case of a new request we reset the "reset level" to PF reset.
 * And if it is a repeat reset request of the most recent one then we
 * want to make sure we throttle the reset request. Therefore, we will
 * not allow it again before 3*HZ times.
3394 handle = &hdev->vport[0].nic;
3396 if (time_before(jiffies, (hdev->last_reset_time +
3397 HCLGE_RESET_INTERVAL)))
3399 else if (hdev->default_reset_request)
3401 hclge_get_reset_level(hdev,
3402 &hdev->default_reset_request);
3403 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3404 hdev->reset_level = HNAE3_FUNC_RESET;
3406 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3409 /* request reset & schedule reset task */
3410 set_bit(hdev->reset_level, &hdev->reset_request);
3411 hclge_reset_task_schedule(hdev);
3413 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3414 hdev->reset_level++;
3417 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3418 enum hnae3_reset_type rst_type)
3420 struct hclge_dev *hdev = ae_dev->priv;
3422 set_bit(rst_type, &hdev->default_reset_request);
3425 static void hclge_reset_timer(struct timer_list *t)
3427 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3429 dev_info(&hdev->pdev->dev,
3430 "triggering global reset in reset timer\n");
3431 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3432 hclge_reset_event(hdev->pdev, NULL);
3435 static void hclge_reset_subtask(struct hclge_dev *hdev)
/* check if there is any ongoing reset in the hardware. This status can
 * be checked from reset_pending. If there is, then we need to wait for
 * the hardware to complete the reset.
 *    a. If we are able to figure out in reasonable time that the
 *       hardware has fully reset, then we can proceed with driver and
 *       client initialization.
 *    b. else, we can come back later to check this status so re-sched
3446 hdev->last_reset_time = jiffies;
3447 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3448 if (hdev->reset_type != HNAE3_NONE_RESET)
3451 /* check if we got any *new* reset requests to be honored */
3452 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3453 if (hdev->reset_type != HNAE3_NONE_RESET)
3454 hclge_do_reset(hdev);
3456 hdev->reset_type = HNAE3_NONE_RESET;
3459 static void hclge_reset_service_task(struct work_struct *work)
3461 struct hclge_dev *hdev =
3462 container_of(work, struct hclge_dev, rst_service_task);
3464 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3467 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3469 hclge_reset_subtask(hdev);
3471 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3474 static void hclge_mailbox_service_task(struct work_struct *work)
3476 struct hclge_dev *hdev =
3477 container_of(work, struct hclge_dev, mbx_service_task);
3479 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3482 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3484 hclge_mbx_handler(hdev);
3486 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3489 static void hclge_update_vport_alive(struct hclge_dev *hdev)
/* start from vport 1, since the PF is always alive */
3494 for (i = 1; i < hdev->num_alloc_vport; i++) {
3495 struct hclge_vport *vport = &hdev->vport[i];
3497 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3498 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
/* If the vf is not alive, set its mps to the default value */
3501 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3502 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3506 static void hclge_service_task(struct work_struct *work)
3508 struct hclge_dev *hdev =
3509 container_of(work, struct hclge_dev, service_task);
3511 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3512 hclge_update_stats_for_all(hdev);
3513 hdev->hw_stats.stats_timer = 0;
3516 hclge_update_port_info(hdev);
3517 hclge_update_link_status(hdev);
3518 hclge_update_vport_alive(hdev);
3519 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3520 hclge_rfs_filter_expire(hdev);
3521 hdev->fd_arfs_expire_timer = 0;
3523 hclge_service_complete(hdev);
3526 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3528 /* VF handle has no client */
3529 if (!handle->client)
3530 return container_of(handle, struct hclge_vport, nic);
3531 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3532 return container_of(handle, struct hclge_vport, roce);
3534 return container_of(handle, struct hclge_vport, nic);
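/* Allocate up to vector_num MSI-X vectors for a vport. Vector 0 is
 * reserved for the misc interrupt, so the search starts at index 1.
 */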
3537 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3538 struct hnae3_vector_info *vector_info)
3540 struct hclge_vport *vport = hclge_get_vport(handle);
3541 struct hnae3_vector_info *vector = vector_info;
3542 struct hclge_dev *hdev = vport->back;
3546 vector_num = min(hdev->num_msi_left, vector_num);
3548 for (j = 0; j < vector_num; j++) {
3549 for (i = 1; i < hdev->num_msi; i++) {
3550 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3551 vector->vector = pci_irq_vector(hdev->pdev, i);
3552 vector->io_addr = hdev->hw.io_base +
3553 HCLGE_VECTOR_REG_BASE +
3554 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3556 HCLGE_VECTOR_VF_OFFSET;
3557 hdev->vector_status[i] = vport->vport_id;
3558 hdev->vector_irq[i] = vector->vector;
3567 hdev->num_msi_left -= alloc;
3568 hdev->num_msi_used += alloc;
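/* Translate a Linux IRQ number back into the driver's vector index */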
3573 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3577 for (i = 0; i < hdev->num_msi; i++)
3578 if (vector == hdev->vector_irq[i])
3584 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3586 struct hclge_vport *vport = hclge_get_vport(handle);
3587 struct hclge_dev *hdev = vport->back;
3590 vector_id = hclge_get_vector_index(hdev, vector);
3591 if (vector_id < 0) {
3592 dev_err(&hdev->pdev->dev,
3593 "Get vector index fail. vector_id =%d\n", vector_id);
3597 hclge_free_vector(hdev, vector_id);
3602 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3604 return HCLGE_RSS_KEY_SIZE;
3607 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3609 return HCLGE_RSS_IND_TBL_SIZE;
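/* The RSS hash key does not fit into one descriptor, so it is written
 * in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command per chunk.
 */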
3612 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3613 const u8 hfunc, const u8 *key)
3615 struct hclge_rss_config_cmd *req;
3616 struct hclge_desc desc;
3622 key_counts = HCLGE_RSS_KEY_SIZE;
3623 req = (struct hclge_rss_config_cmd *)desc.data;
3625 while (key_counts) {
3626 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3629 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3630 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3632 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3633 memcpy(req->hash_key,
3634 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3636 key_counts -= key_size;
3638 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3640 dev_err(&hdev->pdev->dev,
3641 "Configure RSS config fail, status = %d\n",
3649 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3651 struct hclge_rss_indirection_table_cmd *req;
3652 struct hclge_desc desc;
3656 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3658 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3659 hclge_cmd_setup_basic_desc
3660 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3662 req->start_table_index =
3663 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3664 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3666 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3667 req->rss_result[j] =
3668 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3670 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3672 dev_err(&hdev->pdev->dev,
3673 "Configure rss indir table fail,status = %d\n",
3681 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3682 u16 *tc_size, u16 *tc_offset)
3684 struct hclge_rss_tc_mode_cmd *req;
3685 struct hclge_desc desc;
3689 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3690 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3692 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3695 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3696 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3697 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3698 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3699 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3701 req->rss_tc_mode[i] = cpu_to_le16(mode);
3704 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3706 dev_err(&hdev->pdev->dev,
3707 "Configure rss tc mode fail, status = %d\n", ret);
3712 static void hclge_get_rss_type(struct hclge_vport *vport)
3714 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3715 vport->rss_tuple_sets.ipv4_udp_en ||
3716 vport->rss_tuple_sets.ipv4_sctp_en ||
3717 vport->rss_tuple_sets.ipv6_tcp_en ||
3718 vport->rss_tuple_sets.ipv6_udp_en ||
3719 vport->rss_tuple_sets.ipv6_sctp_en)
3720 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3721 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3722 vport->rss_tuple_sets.ipv6_fragment_en)
3723 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3725 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3728 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3730 struct hclge_rss_input_tuple_cmd *req;
3731 struct hclge_desc desc;
3734 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3736 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3738 /* Get the tuple cfg from pf */
3739 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3740 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3741 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3742 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3743 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3744 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3745 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3746 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3747 hclge_get_rss_type(&hdev->vport[0]);
3748 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3750 dev_err(&hdev->pdev->dev,
3751 "Configure rss input fail, status = %d\n", ret);
3755 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3758 struct hclge_vport *vport = hclge_get_vport(handle);
3761 /* Get hash algorithm */
3763 switch (vport->rss_algo) {
3764 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3765 *hfunc = ETH_RSS_HASH_TOP;
3767 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3768 *hfunc = ETH_RSS_HASH_XOR;
3771 *hfunc = ETH_RSS_HASH_UNKNOWN;
3776 /* Get the RSS Key required by the user */
3778 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3780 /* Get indirect table */
3782 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3783 indir[i] = vport->rss_indirection_tbl[i];
3788 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3789 const u8 *key, const u8 hfunc)
3791 struct hclge_vport *vport = hclge_get_vport(handle);
3792 struct hclge_dev *hdev = vport->back;
/* Set the RSS Hash Key if specified by the user */
3799 case ETH_RSS_HASH_TOP:
3800 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3802 case ETH_RSS_HASH_XOR:
3803 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3805 case ETH_RSS_HASH_NO_CHANGE:
3806 hash_algo = vport->rss_algo;
3812 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
/* Update the shadow RSS key with the user specified key */
3817 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3818 vport->rss_algo = hash_algo;
3821 /* Update the shadow RSS table with user specified qids */
3822 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3823 vport->rss_indirection_tbl[i] = indir[i];
3825 /* Update the hardware */
3826 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
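/* Translate the ethtool RXH_* hash flags into the driver's tuple-field
 * bits.
 */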
3829 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3831 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3833 if (nfc->data & RXH_L4_B_2_3)
3834 hash_sets |= HCLGE_D_PORT_BIT;
3836 hash_sets &= ~HCLGE_D_PORT_BIT;
3838 if (nfc->data & RXH_IP_SRC)
3839 hash_sets |= HCLGE_S_IP_BIT;
3841 hash_sets &= ~HCLGE_S_IP_BIT;
3843 if (nfc->data & RXH_IP_DST)
3844 hash_sets |= HCLGE_D_IP_BIT;
3846 hash_sets &= ~HCLGE_D_IP_BIT;
3848 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3849 hash_sets |= HCLGE_V_TAG_BIT;
3854 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3855 struct ethtool_rxnfc *nfc)
3857 struct hclge_vport *vport = hclge_get_vport(handle);
3858 struct hclge_dev *hdev = vport->back;
3859 struct hclge_rss_input_tuple_cmd *req;
3860 struct hclge_desc desc;
3864 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3865 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3868 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3869 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3871 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3872 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3873 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3874 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3875 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3876 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3877 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3878 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3880 tuple_sets = hclge_get_rss_hash_bits(nfc);
3881 switch (nfc->flow_type) {
3883 req->ipv4_tcp_en = tuple_sets;
3886 req->ipv6_tcp_en = tuple_sets;
3889 req->ipv4_udp_en = tuple_sets;
3892 req->ipv6_udp_en = tuple_sets;
3895 req->ipv4_sctp_en = tuple_sets;
3898 if ((nfc->data & RXH_L4_B_0_1) ||
3899 (nfc->data & RXH_L4_B_2_3))
3902 req->ipv6_sctp_en = tuple_sets;
3905 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3908 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3914 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3916 dev_err(&hdev->pdev->dev,
3917 "Set rss tuple fail, status = %d\n", ret);
3921 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3922 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3923 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3924 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3925 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3926 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3927 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3928 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3929 hclge_get_rss_type(vport);
3933 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3934 struct ethtool_rxnfc *nfc)
3936 struct hclge_vport *vport = hclge_get_vport(handle);
3941 switch (nfc->flow_type) {
3943 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3946 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3949 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3952 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3955 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3958 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3962 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3971 if (tuple_sets & HCLGE_D_PORT_BIT)
3972 nfc->data |= RXH_L4_B_2_3;
3973 if (tuple_sets & HCLGE_S_PORT_BIT)
3974 nfc->data |= RXH_L4_B_0_1;
3975 if (tuple_sets & HCLGE_D_IP_BIT)
3976 nfc->data |= RXH_IP_DST;
3977 if (tuple_sets & HCLGE_S_IP_BIT)
3978 nfc->data |= RXH_IP_SRC;
3983 static int hclge_get_tc_size(struct hnae3_handle *handle)
3985 struct hclge_vport *vport = hclge_get_vport(handle);
3986 struct hclge_dev *hdev = vport->back;
3988 return hdev->rss_size_max;
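/* Write vport 0's cached RSS configuration (indirection table, hash
 * key and algorithm, input tuples and per-TC mode) to the hardware.
 */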
3991 int hclge_rss_init_hw(struct hclge_dev *hdev)
3993 struct hclge_vport *vport = hdev->vport;
3994 u8 *rss_indir = vport[0].rss_indirection_tbl;
3995 u16 rss_size = vport[0].alloc_rss_size;
3996 u8 *key = vport[0].rss_hash_key;
3997 u8 hfunc = vport[0].rss_algo;
3998 u16 tc_offset[HCLGE_MAX_TC_NUM];
3999 u16 tc_valid[HCLGE_MAX_TC_NUM];
4000 u16 tc_size[HCLGE_MAX_TC_NUM];
4004 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4008 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4012 ret = hclge_set_rss_input_tuple(hdev);
/* Each TC has the same queue size, and the tc_size set to hardware is
 * the log2 of the roundup power of two of rss_size; the actual queue
 * size is limited by the indirection table.
4020 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4021 dev_err(&hdev->pdev->dev,
4022 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4027 roundup_size = roundup_pow_of_two(rss_size);
4028 roundup_size = ilog2(roundup_size);
4030 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4033 if (!(hdev->hw_tc_map & BIT(i)))
4037 tc_size[i] = roundup_size;
4038 tc_offset[i] = rss_size * i;
4041 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
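/* Initialize each vport's indirection table so that flows are spread
 * round-robin across its allocated RSS queues.
 */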
4044 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4046 struct hclge_vport *vport = hdev->vport;
4049 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4050 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4051 vport[j].rss_indirection_tbl[i] =
4052 i % vport[j].alloc_rss_size;
4056 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4058 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4059 struct hclge_vport *vport = hdev->vport;
4061 if (hdev->pdev->revision >= 0x21)
4062 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4064 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4065 vport[i].rss_tuple_sets.ipv4_tcp_en =
4066 HCLGE_RSS_INPUT_TUPLE_OTHER;
4067 vport[i].rss_tuple_sets.ipv4_udp_en =
4068 HCLGE_RSS_INPUT_TUPLE_OTHER;
4069 vport[i].rss_tuple_sets.ipv4_sctp_en =
4070 HCLGE_RSS_INPUT_TUPLE_SCTP;
4071 vport[i].rss_tuple_sets.ipv4_fragment_en =
4072 HCLGE_RSS_INPUT_TUPLE_OTHER;
4073 vport[i].rss_tuple_sets.ipv6_tcp_en =
4074 HCLGE_RSS_INPUT_TUPLE_OTHER;
4075 vport[i].rss_tuple_sets.ipv6_udp_en =
4076 HCLGE_RSS_INPUT_TUPLE_OTHER;
4077 vport[i].rss_tuple_sets.ipv6_sctp_en =
4078 HCLGE_RSS_INPUT_TUPLE_SCTP;
4079 vport[i].rss_tuple_sets.ipv6_fragment_en =
4080 HCLGE_RSS_INPUT_TUPLE_OTHER;
4082 vport[i].rss_algo = rss_algo;
4084 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4085 HCLGE_RSS_KEY_SIZE);
4088 hclge_rss_indir_init_cfg(hdev);
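/* Walk the ring chain and map (en = true) or unmap each ring to the
 * given vector, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings into
 * each command sent to the firmware.
 */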
4091 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4092 int vector_id, bool en,
4093 struct hnae3_ring_chain_node *ring_chain)
4095 struct hclge_dev *hdev = vport->back;
4096 struct hnae3_ring_chain_node *node;
4097 struct hclge_desc desc;
4098 struct hclge_ctrl_vector_chain_cmd *req
4099 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4100 enum hclge_cmd_status status;
4101 enum hclge_opcode_type op;
4102 u16 tqp_type_and_id;
4105 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4106 hclge_cmd_setup_basic_desc(&desc, op, false);
4107 req->int_vector_id = vector_id;
4110 for (node = ring_chain; node; node = node->next) {
4111 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4112 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4114 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4115 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4116 HCLGE_TQP_ID_S, node->tqp_index);
4117 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4119 hnae3_get_field(node->int_gl_idx,
4120 HNAE3_RING_GL_IDX_M,
4121 HNAE3_RING_GL_IDX_S));
4122 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4123 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4124 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4125 req->vfid = vport->vport_id;
4127 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4129 dev_err(&hdev->pdev->dev,
4130 "Map TQP fail, status is %d.\n",
4136 hclge_cmd_setup_basic_desc(&desc,
4139 req->int_vector_id = vector_id;
4144 req->int_cause_num = i;
4145 req->vfid = vport->vport_id;
4146 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4148 dev_err(&hdev->pdev->dev,
4149 "Map TQP fail, status is %d.\n", status);
4157 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4158 struct hnae3_ring_chain_node *ring_chain)
4160 struct hclge_vport *vport = hclge_get_vport(handle);
4161 struct hclge_dev *hdev = vport->back;
4164 vector_id = hclge_get_vector_index(hdev, vector);
4165 if (vector_id < 0) {
4166 dev_err(&hdev->pdev->dev,
4167 "Get vector index fail. vector_id =%d\n", vector_id);
4171 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4174 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4175 struct hnae3_ring_chain_node *ring_chain)
4177 struct hclge_vport *vport = hclge_get_vport(handle);
4178 struct hclge_dev *hdev = vport->back;
4181 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4184 vector_id = hclge_get_vector_index(hdev, vector);
4185 if (vector_id < 0) {
4186 dev_err(&handle->pdev->dev,
4187 "Get vector index fail. ret =%d\n", vector_id);
4191 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4193 dev_err(&handle->pdev->dev,
4194 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4200 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4201 struct hclge_promisc_param *param)
4203 struct hclge_promisc_cfg_cmd *req;
4204 struct hclge_desc desc;
4207 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4209 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4210 req->vf_id = param->vf_id;
/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
 * pdev revision(0x20); newer revisions support them. The
 * values of these two fields will not return an error when the driver
 * sends a command to the firmware in revision(0x20).
4217 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4218 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4220 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4222 dev_err(&hdev->pdev->dev,
4223 "Set promisc mode fail, status is %d.\n", ret);
4228 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4229 bool en_mc, bool en_bc, int vport_id)
4234 memset(param, 0, sizeof(struct hclge_promisc_param));
4236 param->enable = HCLGE_PROMISC_EN_UC;
4238 param->enable |= HCLGE_PROMISC_EN_MC;
4240 param->enable |= HCLGE_PROMISC_EN_BC;
4241 param->vf_id = vport_id;
4244 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4247 struct hclge_vport *vport = hclge_get_vport(handle);
4248 struct hclge_dev *hdev = vport->back;
4249 struct hclge_promisc_param param;
4250 bool en_bc_pmc = true;
/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
 * is always bypassed. So broadcast promisc should be disabled until
 * the user enables promisc mode
4256 if (handle->pdev->revision == 0x20)
4257 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4259 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4261 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4264 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4266 struct hclge_get_fd_mode_cmd *req;
4267 struct hclge_desc desc;
4270 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4272 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4274 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4276 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4280 *fd_mode = req->mode;
4285 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4286 u32 *stage1_entry_num,
4287 u32 *stage2_entry_num,
4288 u16 *stage1_counter_num,
4289 u16 *stage2_counter_num)
4291 struct hclge_get_fd_allocation_cmd *req;
4292 struct hclge_desc desc;
4295 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4297 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4299 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4301 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4306 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4307 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4308 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4309 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4314 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4316 struct hclge_set_fd_key_config_cmd *req;
4317 struct hclge_fd_key_cfg *stage;
4318 struct hclge_desc desc;
4321 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4323 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4324 stage = &hdev->fd_cfg.key_cfg[stage_num];
4325 req->stage = stage_num;
4326 req->key_select = stage->key_sel;
4327 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4328 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4329 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4330 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4331 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4332 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4334 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4336 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4341 static int hclge_init_fd_config(struct hclge_dev *hdev)
4343 #define LOW_2_WORDS 0x03
4344 struct hclge_fd_key_cfg *key_cfg;
4347 if (!hnae3_dev_fd_supported(hdev))
4350 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4354 switch (hdev->fd_cfg.fd_mode) {
4355 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4356 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4358 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4359 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4362 dev_err(&hdev->pdev->dev,
4363 "Unsupported flow director mode %d\n",
4364 hdev->fd_cfg.fd_mode);
4368 hdev->fd_cfg.proto_support =
4369 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4370 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4371 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4373 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4374 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4375 key_cfg->outer_sipv6_word_en = 0;
4376 key_cfg->outer_dipv6_word_en = 0;
4378 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4379 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4380 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4381 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If using the max 400-bit key, we can support tuples for ether type */
4384 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4385 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4386 key_cfg->tuple_active |=
4387 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4390 /* roce_type is used to filter roce frames
4391 * dst_vport is used to specify the rule
4393 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4395 ret = hclge_get_fd_allocation(hdev,
4396 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4397 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4398 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4399 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4403 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
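/* A flow director TCAM entry spans three descriptors chained with
 * HCLGE_CMD_FLAG_NEXT; the key bytes are split across their tcam_data
 * areas.
 */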
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->next_input_key);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}

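/* A sketch of the X/Y encoding, assuming the conventional TCAM scheme (the
 * calc_x()/calc_y() macros themselves are defined elsewhere in this file):
 *
 *   x = value & mask;	// bits that must be 1
 *   y = ~value & mask;	// bits that must be 0
 *
 * A bit with mask 0 then has x = y = 0, i.e. "don't care". For example,
 * value 0b1010 with mask 0b1110 gives x = 0b1010 and y = 0b0100.
 */
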
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

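/* Worked example (illustrative values): for HOST_PORT with pf_id = 0 and
 * vf_id = 3, the HCLGE_VF_ID_M field holds 3 and HCLGE_PORT_TYPE_B is set
 * to HOST_PORT; the exact bit positions come from the *_M/*_S macros in the
 * driver headers.
 */
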
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size - 1,
						cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

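/* Note on the final shift: cur_pos counts meta-data bits up from bit 0, but
 * the meta-data key is stored left-aligned in its 32-bit word. As a purely
 * hypothetical example, if the active meta-data fields consumed 13 bits,
 * shift_bits would be 32 - 13 = 19, moving those 13 bits to the MSB end.
 */
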
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region, unused bits will be filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	int i, ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);
	return ret;
}

static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		/* check whether the src/dst ip address is used */
		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		/* check whether the src/dst ip address is used */
		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}

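/* In short: *unused collects the tuples this rule does not match on, and
 * hclge_fd_convert_tuple() later leaves their key bits zero (wildcard).
 * E.g. a tcp4 spec with only ip4dst set ends up with the MAC tuples,
 * INNER_SRC_IP, both ports and the TOS field in *unused.
 */
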
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}

/* This function must be called with fd_rule_lock held */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d does not exist\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}

static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}

/* This function must be called with fd_rule_lock held */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* This never fails here, so there is no need to check the
	 * return value.
	 */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}

static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%d) > max vf num (%d)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%d) > max tqp num (%d)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* To avoid rule conflicts, clear all aRFS rules when the user
	 * configures a rule via ethtool.
	 */
	hclge_clear_arfs_rules(handle);

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

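/* Userspace example (standard ethtool syntax, not from this file); this
 * path is reached via the ETHTOOL_SRXCLSRLINS ioctl:
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 \
 *           action 4 loc 0
 *
 * "action -1" (RX_CLS_FLOW_DISC) maps to HCLGE_FD_ACTION_DROP_PACKET.
 */
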
static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d does not exist\n", fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);
	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* If fd is disabled, it should not be restored during reset. */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %d failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}

static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				cpu_to_be16(VLAN_VID_MASK) :
				cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}

static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		memcpy(tuples->src_ip,
		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
		       sizeof(tuples->src_ip));
		memcpy(tuples->dst_ip,
		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
		       sizeof(tuples->dst_ip));
	}
}

/* traverse all rules, check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}

static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}

static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples;
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	memset(&new_tuples, 0, sizeof(new_tuples));
	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	spin_lock_bh(&hdev->fd_rule_lock);

	/* When an fd rule added by the user already exists, aRFS
	 * should not work.
	 */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -EOPNOTSUPP;
	}

	/* Check whether a flow director filter exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing.
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOSPC;
		}

		/* GFP_ATOMIC: fd_rule_lock is held, so we must not sleep */
		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}

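/* This is the aRFS (CONFIG_RFS_ACCEL) steering entry point, reached from
 * the netdev's ndo_rx_flow_steer path. The returned rule location is the
 * identifier later handed to rps_may_expire_flow() in
 * hclge_rfs_filter_expire().
 */
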
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}

static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}

static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
	if (!enable)
		hclge_del_all_fd_entries(handle, clear);
	else
		hclge_restore_fd_entries(handle);
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}

static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100

#define HCLGE_MAC_LINK_STATUS_MS   10
#define HCLGE_MAC_LINK_STATUS_NUM  100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP   1

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int mac_link_ret = 0;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
	} else {
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);

	i = 0;
	do {
		/* serdes internal loopback is independent of the network
		 * cable.
		 */
		msleep(HCLGE_MAC_LINK_STATUS_MS);
		ret = hclge_get_mac_link_status(hdev);
		if (ret == mac_link_ret)
			return 0;
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");

	return -EBUSY;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}

static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1U << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1U << bit_num);
	}

	return 0;
}

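/* Worked example: for vfid 200 the entry falls into the second bitmap
 * descriptor: word_num = (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8,
 * so bit 8 of desc[2].data[0] is set or cleared; vfid 0..191 land in
 * desc[1] at word vfid / 32, bit vfid % 32.
 */
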
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

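/* Worked example: for MAC 00:11:22:33:44:55 the packing above yields
 * high_val = 0x33221100 (bytes 3,2,1,0) and low_val = 0x5544 (bytes 5,4),
 * matching the hi32/lo16 fields of the table entry.
 */
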
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %d, get %d\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;
	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
	 * preserve some unicast mac vlan table entries shared by pf
	 * and its vfs.
	 */
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);

	return 0;
}

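/* Worked example (illustrative numbers): with max_umv_size = 256 and
 * num_req_vfs = 6, the divisor is 6 + 2 = 8, so priv_umv_size = 256 / 8 = 32
 * private entries per function and share_umv_size starts at
 * 32 + 256 % 8 = 32 shared entries.
 */
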
static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;
		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}

static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	if (!is_alloc)
		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret =%d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Look up the mac address in the mac_vlan table, and add it if
	 * the entry does not exist. Repeated unicast entries are not
	 * allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret)
		hclge_update_umv_space(vport, true);

	return ret;
}

static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	if (status == -ENOSPC)
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}

6661 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6662 const unsigned char *addr)
6664 struct hclge_vport *vport = hclge_get_vport(handle);
6666 return hclge_rm_mc_addr_common(vport, addr);
6669 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6670 const unsigned char *addr)
6672 struct hclge_dev *hdev = vport->back;
6673 struct hclge_mac_vlan_tbl_entry_cmd req;
6674 enum hclge_cmd_status status;
6675 struct hclge_desc desc[3];
6677 /* mac addr check */
6678 if (!is_multicast_ether_addr(addr)) {
6679 dev_dbg(&hdev->pdev->dev,
6680 "Remove mc mac err! invalid mac:%pM.\n",
6685 memset(&req, 0, sizeof(req));
6686 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6687 hclge_prepare_mac_addr(&req, addr, true);
6688 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6690 /* This mac addr exists, remove this handle's VFID for it */
6691 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6695 if (hclge_is_all_function_id_zero(desc))
6696 /* All the vfids are zero, so this entry needs to be deleted */
6697 status = hclge_remove_mac_vlan_tbl(vport, &req);
6699 /* Not all the vfids are zero, so just update the vfid bitmap */
6700 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6703 /* This mac address may also be in the mta table, but it cannot be
6704 * deleted here because an mta entry represents an address range
6705 * rather than a specific address. The delete action for all
6706 * entries will take effect in update_mta_status, called by
6707 * hns3_nic_set_rx_mode.
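/* Record a VF MAC address in the vport's software list (uc_mac_list or
 * mc_mac_list) so that the matching hardware entries can be cleaned up
 * later; vport 0 (the PF itself) is deliberately skipped.
 */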
6715 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6716 enum HCLGE_MAC_ADDR_TYPE mac_type)
6718 struct hclge_vport_mac_addr_cfg *mac_cfg;
6719 struct list_head *list;
6721 if (!vport->vport_id)
6724 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6728 mac_cfg->hd_tbl_status = true;
6729 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6731 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6732 &vport->uc_mac_list : &vport->mc_mac_list;
6734 list_add_tail(&mac_cfg->node, list);
6737 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6739 enum HCLGE_MAC_ADDR_TYPE mac_type)
6741 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6742 struct list_head *list;
6743 bool uc_flag, mc_flag;
6745 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6746 &vport->uc_mac_list : &vport->mc_mac_list;
6748 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6749 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6751 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6752 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6753 if (uc_flag && mac_cfg->hd_tbl_status)
6754 hclge_rm_uc_addr_common(vport, mac_addr);
6756 if (mc_flag && mac_cfg->hd_tbl_status)
6757 hclge_rm_mc_addr_common(vport, mac_addr);
6759 list_del(&mac_cfg->node);
6766 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6767 enum HCLGE_MAC_ADDR_TYPE mac_type)
6769 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6770 struct list_head *list;
6772 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6773 &vport->uc_mac_list : &vport->mc_mac_list;
6775 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6776 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6777 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6779 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6780 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6782 mac_cfg->hd_tbl_status = false;
6784 list_del(&mac_cfg->node);
6790 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6792 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6793 struct hclge_vport *vport;
6796 mutex_lock(&hdev->vport_cfg_mutex);
6797 for (i = 0; i < hdev->num_alloc_vport; i++) {
6798 vport = &hdev->vport[i];
6799 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6800 list_del(&mac->node);
6804 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6805 list_del(&mac->node);
6809 mutex_unlock(&hdev->vport_cfg_mutex);
6812 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6813 u16 cmdq_resp, u8 resp_code)
6815 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6816 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6817 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6818 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6823 dev_err(&hdev->pdev->dev,
6824 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6829 switch (resp_code) {
6830 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6831 case HCLGE_ETHERTYPE_ALREADY_ADD:
6834 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6835 dev_err(&hdev->pdev->dev,
6836 "add mac ethertype failed for manager table overflow.\n");
6837 return_status = -EIO;
6839 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6840 dev_err(&hdev->pdev->dev,
6841 "add mac ethertype failed for key conflict.\n");
6842 return_status = -EIO;
6845 dev_err(&hdev->pdev->dev,
6846 "add mac ethertype failed for undefined, code=%d.\n",
6848 return_status = -EIO;
6851 return return_status;
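/* Write one MAC/ethertype manager table entry to firmware. The per-entry
 * result comes back in byte 1 of the first descriptor data word and is
 * decoded by hclge_get_mac_ethertype_cmd_status() above.
 */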
6854 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6855 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6857 struct hclge_desc desc;
6862 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6863 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6865 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6867 dev_err(&hdev->pdev->dev,
6868 "add mac ethertype failed for cmd_send, ret =%d.\n",
6873 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6874 retval = le16_to_cpu(desc.retval);
6876 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6879 static int init_mgr_tbl(struct hclge_dev *hdev)
6884 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6885 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6887 dev_err(&hdev->pdev->dev,
6888 "add mac ethertype failed, ret =%d.\n",
6897 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6899 struct hclge_vport *vport = hclge_get_vport(handle);
6900 struct hclge_dev *hdev = vport->back;
6902 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6905 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6908 const unsigned char *new_addr = (const unsigned char *)p;
6909 struct hclge_vport *vport = hclge_get_vport(handle);
6910 struct hclge_dev *hdev = vport->back;
6913 /* mac addr check */
6914 if (is_zero_ether_addr(new_addr) ||
6915 is_broadcast_ether_addr(new_addr) ||
6916 is_multicast_ether_addr(new_addr)) {
6917 dev_err(&hdev->pdev->dev,
6918 "Change uc mac err! invalid mac:%p.\n",
6923 if ((!is_first || is_kdump_kernel()) &&
6924 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6925 dev_warn(&hdev->pdev->dev,
6926 "remove old uc mac address fail.\n");
6928 ret = hclge_add_uc_addr(handle, new_addr);
6930 dev_err(&hdev->pdev->dev,
6931 "add uc mac address fail, ret =%d.\n",
6935 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6936 dev_err(&hdev->pdev->dev,
6937 "restore uc mac address fail.\n");
6942 ret = hclge_pause_addr_cfg(hdev, new_addr);
6944 dev_err(&hdev->pdev->dev,
6945 "configure mac pause address fail, ret =%d.\n",
6950 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6955 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6958 struct hclge_vport *vport = hclge_get_vport(handle);
6959 struct hclge_dev *hdev = vport->back;
6961 if (!hdev->hw.mac.phydev)
6964 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
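/* Enable or disable a VLAN filter stage in hardware: vlan_type selects
 * the VF or port filter, and fe_type is a bitmap of the NIC/RoCE
 * ingress/egress stages (see the HCLGE_FILTER_FE_* defines below).
 */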
6967 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6968 u8 fe_type, bool filter_en, u8 vf_id)
6970 struct hclge_vlan_filter_ctrl_cmd *req;
6971 struct hclge_desc desc;
6974 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6976 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6977 req->vlan_type = vlan_type;
6978 req->vlan_fe = filter_en ? fe_type : 0;
6981 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6983 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6989 #define HCLGE_FILTER_TYPE_VF 0
6990 #define HCLGE_FILTER_TYPE_PORT 1
6991 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6992 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6993 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6994 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6995 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6996 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6997 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6998 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6999 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7001 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7003 struct hclge_vport *vport = hclge_get_vport(handle);
7004 struct hclge_dev *hdev = vport->back;
7006 if (hdev->pdev->revision >= 0x21) {
7007 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7008 HCLGE_FILTER_FE_EGRESS, enable, 0);
7009 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7010 HCLGE_FILTER_FE_INGRESS, enable, 0);
7012 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7013 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7017 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7019 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7022 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7023 bool is_kill, u16 vlan, u8 qos,
7026 #define HCLGE_MAX_VF_BYTES 16
7027 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7028 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7029 struct hclge_desc desc[2];
7034 /* If the vf vlan table is full, firmware will close the vf vlan filter;
7035 * it is then impossible and unnecessary to add a new vlan id
7037 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7040 hclge_cmd_setup_basic_desc(&desc[0],
7041 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7042 hclge_cmd_setup_basic_desc(&desc[1],
7043 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7045 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7047 vf_byte_off = vfid / 8;
7048 vf_byte_val = 1 << (vfid % 8);
7050 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7051 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7053 req0->vlan_id = cpu_to_le16(vlan);
7054 req0->vlan_cfg = is_kill;
7056 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7057 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7059 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7061 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7063 dev_err(&hdev->pdev->dev,
7064 "Send vf vlan command fail, ret =%d.\n",
7070 #define HCLGE_VF_VLAN_NO_ENTRY 2
7071 if (!req0->resp_code || req0->resp_code == 1)
7074 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7075 set_bit(vfid, hdev->vf_vlan_full);
7076 dev_warn(&hdev->pdev->dev,
7077 "vf vlan table is full, vf vlan filter is disabled\n");
7081 dev_err(&hdev->pdev->dev,
7082 "Add vf vlan filter fail, ret =%d.\n",
7085 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7086 if (!req0->resp_code)
7089 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7090 dev_warn(&hdev->pdev->dev,
7091 "vlan %d filter is not in vf vlan table\n",
7096 dev_err(&hdev->pdev->dev,
7097 "Kill vf vlan filter fail, ret =%d.\n",
7104 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7105 u16 vlan_id, bool is_kill)
7107 struct hclge_vlan_filter_pf_cfg_cmd *req;
7108 struct hclge_desc desc;
7109 u8 vlan_offset_byte_val;
7110 u8 vlan_offset_byte;
7114 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7116 vlan_offset_160 = vlan_id / 160;
7117 vlan_offset_byte = (vlan_id % 160) / 8;
7118 vlan_offset_byte_val = 1 << (vlan_id % 8);
7120 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7121 req->vlan_offset = vlan_offset_160;
7122 req->vlan_cfg = is_kill;
7123 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7125 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7127 dev_err(&hdev->pdev->dev,
7128 "port vlan command, send fail, ret =%d.\n", ret);
7132 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7133 u16 vport_id, u16 vlan_id, u8 qos,
7136 u16 vport_idx, vport_num = 0;
7139 if (is_kill && !vlan_id)
7142 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7145 dev_err(&hdev->pdev->dev,
7146 "Set %d vport vlan filter config fail, ret =%d.\n",
7151 /* vlan 0 may be added twice when 8021q module is enabled */
7152 if (!is_kill && !vlan_id &&
7153 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7156 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7157 dev_err(&hdev->pdev->dev,
7158 "Add port vlan failed, vport %d is already in vlan %d\n",
7164 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7165 dev_err(&hdev->pdev->dev,
7166 "Delete port vlan failed, vport %d is not in vlan %d\n",
7171 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7174 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7175 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7181 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7183 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7184 struct hclge_vport_vtag_tx_cfg_cmd *req;
7185 struct hclge_dev *hdev = vport->back;
7186 struct hclge_desc desc;
7189 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7191 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7192 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7193 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7194 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7195 vcfg->accept_tag1 ? 1 : 0);
7196 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7197 vcfg->accept_untag1 ? 1 : 0);
7198 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7199 vcfg->accept_tag2 ? 1 : 0);
7200 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7201 vcfg->accept_untag2 ? 1 : 0);
7202 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7203 vcfg->insert_tag1_en ? 1 : 0);
7204 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7205 vcfg->insert_tag2_en ? 1 : 0);
7206 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7208 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7209 req->vf_bitmap[req->vf_offset] =
7210 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7212 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7214 dev_err(&hdev->pdev->dev,
7215 "Send port txvlan cfg command fail, ret =%d\n",
7221 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7223 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7224 struct hclge_vport_vtag_rx_cfg_cmd *req;
7225 struct hclge_dev *hdev = vport->back;
7226 struct hclge_desc desc;
7229 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7231 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7232 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7233 vcfg->strip_tag1_en ? 1 : 0);
7234 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7235 vcfg->strip_tag2_en ? 1 : 0);
7236 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7237 vcfg->vlan1_vlan_prionly ? 1 : 0);
7238 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7239 vcfg->vlan2_vlan_prionly ? 1 : 0);
7241 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7242 req->vf_bitmap[req->vf_offset] =
7243 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7245 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7247 dev_err(&hdev->pdev->dev,
7248 "Send port rxvlan cfg command fail, ret =%d\n",
7254 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7255 u16 port_base_vlan_state,
7260 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7261 vport->txvlan_cfg.accept_tag1 = true;
7262 vport->txvlan_cfg.insert_tag1_en = false;
7263 vport->txvlan_cfg.default_tag1 = 0;
7265 vport->txvlan_cfg.accept_tag1 = false;
7266 vport->txvlan_cfg.insert_tag1_en = true;
7267 vport->txvlan_cfg.default_tag1 = vlan_tag;
7270 vport->txvlan_cfg.accept_untag1 = true;
7272 /* accept_tag2 and accept_untag2 are not supported on
7273 * pdev revision 0x20; newer revisions support them, but
7274 * these two fields cannot be configured by the user.
7276 vport->txvlan_cfg.accept_tag2 = true;
7277 vport->txvlan_cfg.accept_untag2 = true;
7278 vport->txvlan_cfg.insert_tag2_en = false;
7279 vport->txvlan_cfg.default_tag2 = 0;
7281 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7282 vport->rxvlan_cfg.strip_tag1_en = false;
7283 vport->rxvlan_cfg.strip_tag2_en =
7284 vport->rxvlan_cfg.rx_vlan_offload_en;
7286 vport->rxvlan_cfg.strip_tag1_en =
7287 vport->rxvlan_cfg.rx_vlan_offload_en;
7288 vport->rxvlan_cfg.strip_tag2_en = true;
7290 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7291 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7293 ret = hclge_set_vlan_tx_offload_cfg(vport);
7297 return hclge_set_vlan_rx_offload_cfg(vport);
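/* Configure the VLAN TPIDs that hardware matches when parsing received
 * tags and uses when inserting tags on transmit; they all default to
 * 0x8100 in hclge_init_vlan_config() below.
 */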
7300 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7302 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7303 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7304 struct hclge_desc desc;
7307 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7308 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7309 rx_req->ot_fst_vlan_type =
7310 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7311 rx_req->ot_sec_vlan_type =
7312 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7313 rx_req->in_fst_vlan_type =
7314 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7315 rx_req->in_sec_vlan_type =
7316 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7318 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7320 dev_err(&hdev->pdev->dev,
7321 "Send rxvlan protocol type command fail, ret =%d\n",
7326 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7328 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7329 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7330 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7332 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7334 dev_err(&hdev->pdev->dev,
7335 "Send txvlan protocol type command fail, ret =%d\n",
7341 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7343 #define HCLGE_DEF_VLAN_TYPE 0x8100
7345 struct hnae3_handle *handle = &hdev->vport[0].nic;
7346 struct hclge_vport *vport;
7350 if (hdev->pdev->revision >= 0x21) {
7351 /* for revision 0x21, vf vlan filter is per function */
7352 for (i = 0; i < hdev->num_alloc_vport; i++) {
7353 vport = &hdev->vport[i];
7354 ret = hclge_set_vlan_filter_ctrl(hdev,
7355 HCLGE_FILTER_TYPE_VF,
7356 HCLGE_FILTER_FE_EGRESS,
7363 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7364 HCLGE_FILTER_FE_INGRESS, true,
7369 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7370 HCLGE_FILTER_FE_EGRESS_V1_B,
7376 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7378 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7379 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7380 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7381 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7382 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7383 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7385 ret = hclge_set_vlan_protocol_type(hdev);
7389 for (i = 0; i < hdev->num_alloc_vport; i++) {
7392 vport = &hdev->vport[i];
7393 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7395 ret = hclge_vlan_offload_cfg(vport,
7396 vport->port_base_vlan_cfg.state,
7402 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7405 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7408 struct hclge_vport_vlan_cfg *vlan;
7410 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7414 vlan->hd_tbl_status = writen_to_tbl;
7415 vlan->vlan_id = vlan_id;
7417 list_add_tail(&vlan->node, &vport->vlan_list);
7420 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7422 struct hclge_vport_vlan_cfg *vlan, *tmp;
7423 struct hclge_dev *hdev = vport->back;
7426 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7427 if (!vlan->hd_tbl_status) {
7428 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7430 vlan->vlan_id, 0, false);
7432 dev_err(&hdev->pdev->dev,
7433 "restore vport vlan list failed, ret=%d\n",
7438 vlan->hd_tbl_status = true;
7444 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7447 struct hclge_vport_vlan_cfg *vlan, *tmp;
7448 struct hclge_dev *hdev = vport->back;
7450 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7451 if (vlan->vlan_id == vlan_id) {
7452 if (is_write_tbl && vlan->hd_tbl_status)
7453 hclge_set_vlan_filter_hw(hdev,
7459 list_del(&vlan->node);
7466 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7468 struct hclge_vport_vlan_cfg *vlan, *tmp;
7469 struct hclge_dev *hdev = vport->back;
7471 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7472 if (vlan->hd_tbl_status)
7473 hclge_set_vlan_filter_hw(hdev,
7479 vlan->hd_tbl_status = false;
7481 list_del(&vlan->node);
7487 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7489 struct hclge_vport_vlan_cfg *vlan, *tmp;
7490 struct hclge_vport *vport;
7493 mutex_lock(&hdev->vport_cfg_mutex);
7494 for (i = 0; i < hdev->num_alloc_vport; i++) {
7495 vport = &hdev->vport[i];
7496 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7497 list_del(&vlan->node);
7501 mutex_unlock(&hdev->vport_cfg_mutex);
7504 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7506 struct hclge_vport *vport = hclge_get_vport(handle);
7507 struct hclge_vport_vlan_cfg *vlan, *tmp;
7508 struct hclge_dev *hdev = vport->back;
7509 u16 vlan_proto, qos;
7513 mutex_lock(&hdev->vport_cfg_mutex);
7514 for (i = 0; i < hdev->num_alloc_vport; i++) {
7515 vport = &hdev->vport[i];
7516 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7517 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7518 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7519 state = vport->port_base_vlan_cfg.state;
7521 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7522 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7523 vport->vport_id, vlan_id, qos,
7528 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7529 if (vlan->hd_tbl_status)
7530 hclge_set_vlan_filter_hw(hdev,
7538 mutex_unlock(&hdev->vport_cfg_mutex);
7541 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7543 struct hclge_vport *vport = hclge_get_vport(handle);
7545 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7546 vport->rxvlan_cfg.strip_tag1_en = false;
7547 vport->rxvlan_cfg.strip_tag2_en = enable;
7549 vport->rxvlan_cfg.strip_tag1_en = enable;
7550 vport->rxvlan_cfg.strip_tag2_en = true;
7552 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7553 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7554 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7556 return hclge_set_vlan_rx_offload_cfg(vport);
7559 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7560 u16 port_base_vlan_state,
7561 struct hclge_vlan_info *new_info,
7562 struct hclge_vlan_info *old_info)
7564 struct hclge_dev *hdev = vport->back;
7567 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7568 hclge_rm_vport_all_vlan_table(vport, false);
7569 return hclge_set_vlan_filter_hw(hdev,
7570 htons(new_info->vlan_proto),
7573 new_info->qos, false);
7576 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7577 vport->vport_id, old_info->vlan_tag,
7578 old_info->qos, true);
7582 return hclge_add_vport_all_vlan_table(vport);
7585 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7586 struct hclge_vlan_info *vlan_info)
7588 struct hnae3_handle *nic = &vport->nic;
7589 struct hclge_vlan_info *old_vlan_info;
7590 struct hclge_dev *hdev = vport->back;
7593 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7595 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7599 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7600 /* add new VLAN tag */
7601 ret = hclge_set_vlan_filter_hw(hdev,
7602 htons(vlan_info->vlan_proto),
7604 vlan_info->vlan_tag,
7605 vlan_info->qos, false);
7609 /* remove old VLAN tag */
7610 ret = hclge_set_vlan_filter_hw(hdev,
7611 htons(old_vlan_info->vlan_proto),
7613 old_vlan_info->vlan_tag,
7614 old_vlan_info->qos, true);
7621 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7626 /* update state only when disabling/enabling port based VLAN */
7627 vport->port_base_vlan_cfg.state = state;
7628 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7629 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7631 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7634 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7635 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7636 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7641 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7642 enum hnae3_port_base_vlan_state state,
7645 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7647 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7649 return HNAE3_PORT_BASE_VLAN_ENABLE;
7652 return HNAE3_PORT_BASE_VLAN_DISABLE;
7653 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7654 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7656 return HNAE3_PORT_BASE_VLAN_MODIFY;
7660 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7661 u16 vlan, u8 qos, __be16 proto)
7663 struct hclge_vport *vport = hclge_get_vport(handle);
7664 struct hclge_dev *hdev = vport->back;
7665 struct hclge_vlan_info vlan_info;
7669 if (hdev->pdev->revision == 0x20)
7672 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7673 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7675 if (proto != htons(ETH_P_8021Q))
7676 return -EPROTONOSUPPORT;
7678 vport = &hdev->vport[vfid];
7679 state = hclge_get_port_base_vlan_state(vport,
7680 vport->port_base_vlan_cfg.state,
7682 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7685 vlan_info.vlan_tag = vlan;
7686 vlan_info.qos = qos;
7687 vlan_info.vlan_proto = ntohs(proto);
7689 /* update port based VLAN for PF */
7691 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7692 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7693 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7698 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7699 return hclge_update_port_base_vlan_cfg(vport, state,
7702 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7710 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7711 u16 vlan_id, bool is_kill)
7713 struct hclge_vport *vport = hclge_get_vport(handle);
7714 struct hclge_dev *hdev = vport->back;
7715 bool writen_to_tbl = false;
7718 /* When port based VLAN is enabled, we use the port based VLAN as the
7719 * VLAN filter entry. In this case, we don't update the VLAN filter table
7720 * when the user adds a new VLAN or removes an existing one; we just
7721 * update the vport VLAN list. The VLAN ids in the VLAN list won't be
7722 * written to the VLAN filter table until port based VLAN is disabled.
7724 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7725 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7726 vlan_id, 0, is_kill);
7727 writen_to_tbl = true;
7734 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7736 hclge_add_vport_vlan_table(vport, vlan_id,
7742 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7744 struct hclge_config_max_frm_size_cmd *req;
7745 struct hclge_desc desc;
7747 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7749 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7750 req->max_frm_size = cpu_to_le16(new_mps);
7751 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7753 return hclge_cmd_send(&hdev->hw, &desc, 1);
7756 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7758 struct hclge_vport *vport = hclge_get_vport(handle);
7760 return hclge_set_vport_mtu(vport, new_mtu);
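/* The MTU is programmed as a max frame size: MTU plus Ethernet header,
 * FCS and two VLAN tags. A VF's frame size only has to fit under the
 * PF's mps; only the PF path below reprograms the MAC and reallocates
 * the packet buffers.
 */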
7763 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7765 struct hclge_dev *hdev = vport->back;
7766 int i, max_frm_size, ret;
7768 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7769 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7770 max_frm_size > HCLGE_MAC_MAX_FRAME)
7773 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7774 mutex_lock(&hdev->vport_lock);
7775 /* VF's mps must fit within hdev->mps */
7776 if (vport->vport_id && max_frm_size > hdev->mps) {
7777 mutex_unlock(&hdev->vport_lock);
7779 } else if (vport->vport_id) {
7780 vport->mps = max_frm_size;
7781 mutex_unlock(&hdev->vport_lock);
7785 /* PF's mps must be greater than VF's mps */
7786 for (i = 1; i < hdev->num_alloc_vport; i++)
7787 if (max_frm_size < hdev->vport[i].mps) {
7788 mutex_unlock(&hdev->vport_lock);
7792 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7794 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7796 dev_err(&hdev->pdev->dev,
7797 "Change mtu fail, ret =%d\n", ret);
7801 hdev->mps = max_frm_size;
7802 vport->mps = max_frm_size;
7804 ret = hclge_buffer_alloc(hdev);
7806 dev_err(&hdev->pdev->dev,
7807 "Allocate buffer fail, ret =%d\n", ret);
7810 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7811 mutex_unlock(&hdev->vport_lock);
7815 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7818 struct hclge_reset_tqp_queue_cmd *req;
7819 struct hclge_desc desc;
7822 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7824 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7825 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7826 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7828 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7830 dev_err(&hdev->pdev->dev,
7831 "Send tqp reset cmd error, status =%d\n", ret);
7838 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7840 struct hclge_reset_tqp_queue_cmd *req;
7841 struct hclge_desc desc;
7844 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7846 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7847 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7849 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7851 dev_err(&hdev->pdev->dev,
7852 "Get reset status error, status =%d\n", ret);
7856 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
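/* Translate a queue id local to this handle into the device-global TQP
 * id expected by the queue reset commands above.
 */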
7859 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7861 struct hnae3_queue *queue;
7862 struct hclge_tqp *tqp;
7864 queue = handle->kinfo.tqp[queue_id];
7865 tqp = container_of(queue, struct hclge_tqp, q);
7870 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7872 struct hclge_vport *vport = hclge_get_vport(handle);
7873 struct hclge_dev *hdev = vport->back;
7874 int reset_try_times = 0;
7879 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7881 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7883 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7887 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7889 dev_err(&hdev->pdev->dev,
7890 "Send reset tqp cmd fail, ret = %d\n", ret);
7894 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7895 /* Wait for tqp hw reset */
7897 reset_status = hclge_get_reset_status(hdev, queue_gid);
7902 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7903 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7907 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7909 dev_err(&hdev->pdev->dev,
7910 "Deassert the soft reset fail, ret = %d\n", ret);
7915 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7917 struct hclge_dev *hdev = vport->back;
7918 int reset_try_times = 0;
7923 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7925 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7927 dev_warn(&hdev->pdev->dev,
7928 "Send reset tqp cmd fail, ret = %d\n", ret);
7932 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7933 /* Wait for tqp hw reset */
7935 reset_status = hclge_get_reset_status(hdev, queue_gid);
7940 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7941 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7945 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7947 dev_warn(&hdev->pdev->dev,
7948 "Deassert the soft reset fail, ret = %d\n", ret);
7951 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7953 struct hclge_vport *vport = hclge_get_vport(handle);
7954 struct hclge_dev *hdev = vport->back;
7956 return hdev->fw_version;
7959 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7961 struct phy_device *phydev = hdev->hw.mac.phydev;
7966 phy_set_asym_pause(phydev, rx_en, tx_en);
7969 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7974 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7975 else if (rx_en && !tx_en)
7976 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7977 else if (!rx_en && tx_en)
7978 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7980 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7982 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7985 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7987 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7992 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7997 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7999 struct phy_device *phydev = hdev->hw.mac.phydev;
8000 u16 remote_advertising = 0;
8001 u16 local_advertising;
8002 u32 rx_pause, tx_pause;
8005 if (!phydev->link || !phydev->autoneg)
8008 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8011 remote_advertising = LPA_PAUSE_CAP;
8013 if (phydev->asym_pause)
8014 remote_advertising |= LPA_PAUSE_ASYM;
8016 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8017 remote_advertising);
8018 tx_pause = flowctl & FLOW_CTRL_TX;
8019 rx_pause = flowctl & FLOW_CTRL_RX;
8021 if (phydev->duplex == HCLGE_MAC_HALF) {
8026 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8029 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8030 u32 *rx_en, u32 *tx_en)
8032 struct hclge_vport *vport = hclge_get_vport(handle);
8033 struct hclge_dev *hdev = vport->back;
8035 *auto_neg = hclge_get_autoneg(handle);
8037 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8043 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8046 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8049 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8058 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8059 u32 rx_en, u32 tx_en)
8061 struct hclge_vport *vport = hclge_get_vport(handle);
8062 struct hclge_dev *hdev = vport->back;
8063 struct phy_device *phydev = hdev->hw.mac.phydev;
8066 fc_autoneg = hclge_get_autoneg(handle);
8067 if (auto_neg != fc_autoneg) {
8068 dev_info(&hdev->pdev->dev,
8069 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8073 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8074 dev_info(&hdev->pdev->dev,
8075 "Priority flow control enabled. Cannot set link flow control.\n");
8079 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8082 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8085 return phy_start_aneg(phydev);
8087 if (hdev->pdev->revision == 0x20)
8090 return hclge_restart_autoneg(handle);
8093 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8094 u8 *auto_neg, u32 *speed, u8 *duplex)
8096 struct hclge_vport *vport = hclge_get_vport(handle);
8097 struct hclge_dev *hdev = vport->back;
8100 *speed = hdev->hw.mac.speed;
8102 *duplex = hdev->hw.mac.duplex;
8104 *auto_neg = hdev->hw.mac.autoneg;
8107 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8110 struct hclge_vport *vport = hclge_get_vport(handle);
8111 struct hclge_dev *hdev = vport->back;
8114 *media_type = hdev->hw.mac.media_type;
8117 *module_type = hdev->hw.mac.module_type;
8120 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8121 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8123 struct hclge_vport *vport = hclge_get_vport(handle);
8124 struct hclge_dev *hdev = vport->back;
8125 struct phy_device *phydev = hdev->hw.mac.phydev;
8126 int mdix_ctrl, mdix, retval, is_resolved;
8129 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8130 *tp_mdix = ETH_TP_MDI_INVALID;
8134 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8136 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8137 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8138 HCLGE_PHY_MDIX_CTRL_S);
8140 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8141 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8142 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8144 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8146 switch (mdix_ctrl) {
8148 *tp_mdix_ctrl = ETH_TP_MDI;
8151 *tp_mdix_ctrl = ETH_TP_MDI_X;
8154 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8157 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8162 *tp_mdix = ETH_TP_MDI_INVALID;
8164 *tp_mdix = ETH_TP_MDI_X;
8166 *tp_mdix = ETH_TP_MDI;
8169 static void hclge_info_show(struct hclge_dev *hdev)
8171 struct device *dev = &hdev->pdev->dev;
8173 dev_info(dev, "PF info begin:\n");
8175 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8176 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8177 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8178 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8179 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8180 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8181 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8182 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8183 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8184 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8185 dev_info(dev, "This is %s PF\n",
8186 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8187 dev_info(dev, "DCB %s\n",
8188 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8189 dev_info(dev, "MQPRIO %s\n",
8190 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8192 dev_info(dev, "PF info end.\n");
8195 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8196 struct hclge_vport *vport)
8198 struct hnae3_client *client = vport->nic.client;
8199 struct hclge_dev *hdev = ae_dev->priv;
8202 ret = client->ops->init_instance(&vport->nic);
8206 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8207 hnae3_set_client_init_flag(client, ae_dev, 1);
8209 /* Enable nic hw error interrupts */
8210 ret = hclge_config_nic_hw_error(hdev, true);
8212 dev_err(&ae_dev->pdev->dev,
8213 "fail(%d) to enable hw error interrupts\n", ret);
8215 if (netif_msg_drv(&hdev->vport->nic))
8216 hclge_info_show(hdev);
8221 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8222 struct hclge_vport *vport)
8224 struct hnae3_client *client = vport->roce.client;
8225 struct hclge_dev *hdev = ae_dev->priv;
8228 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8232 client = hdev->roce_client;
8233 ret = hclge_init_roce_base_info(vport);
8237 ret = client->ops->init_instance(&vport->roce);
8241 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8242 hnae3_set_client_init_flag(client, ae_dev, 1);
8247 static int hclge_init_client_instance(struct hnae3_client *client,
8248 struct hnae3_ae_dev *ae_dev)
8250 struct hclge_dev *hdev = ae_dev->priv;
8251 struct hclge_vport *vport;
8254 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8255 vport = &hdev->vport[i];
8257 switch (client->type) {
8258 case HNAE3_CLIENT_KNIC:
8260 hdev->nic_client = client;
8261 vport->nic.client = client;
8262 ret = hclge_init_nic_client_instance(ae_dev, vport);
8266 ret = hclge_init_roce_client_instance(ae_dev, vport);
8271 case HNAE3_CLIENT_ROCE:
8272 if (hnae3_dev_roce_supported(hdev)) {
8273 hdev->roce_client = client;
8274 vport->roce.client = client;
8277 ret = hclge_init_roce_client_instance(ae_dev, vport);
8287 /* Enable roce ras interrupts */
8288 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8290 dev_err(&ae_dev->pdev->dev,
8291 "fail(%d) to enable roce ras interrupts\n", ret);
8296 hdev->nic_client = NULL;
8297 vport->nic.client = NULL;
8300 hdev->roce_client = NULL;
8301 vport->roce.client = NULL;
8305 static void hclge_uninit_client_instance(struct hnae3_client *client,
8306 struct hnae3_ae_dev *ae_dev)
8308 struct hclge_dev *hdev = ae_dev->priv;
8309 struct hclge_vport *vport;
8312 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8313 vport = &hdev->vport[i];
8314 if (hdev->roce_client) {
8315 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8316 hdev->roce_client->ops->uninit_instance(&vport->roce,
8318 hdev->roce_client = NULL;
8319 vport->roce.client = NULL;
8321 if (client->type == HNAE3_CLIENT_ROCE)
8323 if (hdev->nic_client && client->ops->uninit_instance) {
8324 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8325 client->ops->uninit_instance(&vport->nic, 0);
8326 hdev->nic_client = NULL;
8327 vport->nic.client = NULL;
8332 static int hclge_pci_init(struct hclge_dev *hdev)
8334 struct pci_dev *pdev = hdev->pdev;
8335 struct hclge_hw *hw;
8338 ret = pci_enable_device(pdev);
8340 dev_err(&pdev->dev, "failed to enable PCI device\n");
8344 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8346 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8349 "can't set consistent PCI DMA");
8350 goto err_disable_device;
8352 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8355 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8357 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8358 goto err_disable_device;
8361 pci_set_master(pdev);
8363 hw->io_base = pcim_iomap(pdev, 2, 0);
8365 dev_err(&pdev->dev, "Can't map configuration register space\n");
8367 goto err_clr_master;
8370 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8374 pci_clear_master(pdev);
8375 pci_release_regions(pdev);
8377 pci_disable_device(pdev);
8382 static void hclge_pci_uninit(struct hclge_dev *hdev)
8384 struct pci_dev *pdev = hdev->pdev;
8386 pcim_iounmap(pdev, hdev->hw.io_base);
8387 pci_free_irq_vectors(pdev);
8388 pci_clear_master(pdev);
8389 pci_release_mem_regions(pdev);
8390 pci_disable_device(pdev);
8393 static void hclge_state_init(struct hclge_dev *hdev)
8395 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8396 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8397 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8398 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8399 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8400 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8403 static void hclge_state_uninit(struct hclge_dev *hdev)
8405 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8406 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8408 if (hdev->service_timer.function)
8409 del_timer_sync(&hdev->service_timer);
8410 if (hdev->reset_timer.function)
8411 del_timer_sync(&hdev->reset_timer);
8412 if (hdev->service_task.func)
8413 cancel_work_sync(&hdev->service_task);
8414 if (hdev->rst_service_task.func)
8415 cancel_work_sync(&hdev->rst_service_task);
8416 if (hdev->mbx_service_task.func)
8417 cancel_work_sync(&hdev->mbx_service_task);
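/* Prepare for an FLR: request a function-level reset and poll for the
 * reset task to reach the "down" state, waiting up to HCLGE_FLR_WAIT_CNT
 * * HCLGE_FLR_WAIT_MS (about 5 seconds) before giving up.
 */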
8420 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8422 #define HCLGE_FLR_WAIT_MS 100
8423 #define HCLGE_FLR_WAIT_CNT 50
8424 struct hclge_dev *hdev = ae_dev->priv;
8427 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8428 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8429 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8430 hclge_reset_event(hdev->pdev, NULL);
8432 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8433 cnt++ < HCLGE_FLR_WAIT_CNT)
8434 msleep(HCLGE_FLR_WAIT_MS);
8436 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8437 dev_err(&hdev->pdev->dev,
8438 "flr wait down timeout: %d\n", cnt);
8441 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8443 struct hclge_dev *hdev = ae_dev->priv;
8445 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8448 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8450 struct pci_dev *pdev = ae_dev->pdev;
8451 struct hclge_dev *hdev;
8454 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8461 hdev->ae_dev = ae_dev;
8462 hdev->reset_type = HNAE3_NONE_RESET;
8463 hdev->reset_level = HNAE3_FUNC_RESET;
8464 ae_dev->priv = hdev;
8465 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8467 mutex_init(&hdev->vport_lock);
8468 mutex_init(&hdev->vport_cfg_mutex);
8469 spin_lock_init(&hdev->fd_rule_lock);
8471 ret = hclge_pci_init(hdev);
8473 dev_err(&pdev->dev, "PCI init failed\n");
8477 /* Firmware command queue initialize */
8478 ret = hclge_cmd_queue_init(hdev);
8480 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8481 goto err_pci_uninit;
8484 /* Firmware command initialize */
8485 ret = hclge_cmd_init(hdev);
8487 goto err_cmd_uninit;
8489 ret = hclge_get_cap(hdev);
8491 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8493 goto err_cmd_uninit;
8496 ret = hclge_configure(hdev);
8498 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8499 goto err_cmd_uninit;
8502 ret = hclge_init_msi(hdev);
8504 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8505 goto err_cmd_uninit;
8508 ret = hclge_misc_irq_init(hdev);
8511 "Misc IRQ(vector0) init error, ret = %d.\n",
8513 goto err_msi_uninit;
8516 ret = hclge_alloc_tqps(hdev);
8518 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8519 goto err_msi_irq_uninit;
8522 ret = hclge_alloc_vport(hdev);
8524 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8525 goto err_msi_irq_uninit;
8528 ret = hclge_map_tqp(hdev);
8530 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8531 goto err_msi_irq_uninit;
8534 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8535 ret = hclge_mac_mdio_config(hdev);
8537 dev_err(&hdev->pdev->dev,
8538 "mdio config fail ret=%d\n", ret);
8539 goto err_msi_irq_uninit;
8543 ret = hclge_init_umv_space(hdev);
8545 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8546 goto err_mdiobus_unreg;
8549 ret = hclge_mac_init(hdev);
8551 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8552 goto err_mdiobus_unreg;
8555 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8557 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8558 goto err_mdiobus_unreg;
8561 ret = hclge_config_gro(hdev, true);
8563 goto err_mdiobus_unreg;
8565 ret = hclge_init_vlan_config(hdev);
8567 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8568 goto err_mdiobus_unreg;
8571 ret = hclge_tm_schd_init(hdev);
8573 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8574 goto err_mdiobus_unreg;
8577 hclge_rss_init_cfg(hdev);
8578 ret = hclge_rss_init_hw(hdev);
8580 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8581 goto err_mdiobus_unreg;
8584 ret = init_mgr_tbl(hdev);
8586 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8587 goto err_mdiobus_unreg;
8590 ret = hclge_init_fd_config(hdev);
8593 "fd table init fail, ret=%d\n", ret);
8594 goto err_mdiobus_unreg;
8597 INIT_KFIFO(hdev->mac_tnl_log);
8599 hclge_dcb_ops_set(hdev);
8601 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8602 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8603 INIT_WORK(&hdev->service_task, hclge_service_task);
8604 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8605 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8607 hclge_clear_all_event_cause(hdev);
8609 /* Enable MISC vector(vector0) */
8610 hclge_enable_vector(&hdev->misc_vector, true);
8612 hclge_state_init(hdev);
8613 hdev->last_reset_time = jiffies;
8615 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8619 if (hdev->hw.mac.phydev)
8620 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8622 hclge_misc_irq_uninit(hdev);
8624 pci_free_irq_vectors(pdev);
8626 hclge_cmd_uninit(hdev);
8628 pcim_iounmap(pdev, hdev->hw.io_base);
8629 pci_clear_master(pdev);
8630 pci_release_regions(pdev);
8631 pci_disable_device(pdev);
8636 static void hclge_stats_clear(struct hclge_dev *hdev)
8638 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8641 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8643 struct hclge_vport *vport = hdev->vport;
8646 for (i = 0; i < hdev->num_alloc_vport; i++) {
8647 hclge_vport_stop(vport);
8652 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8654 struct hclge_dev *hdev = ae_dev->priv;
8655 struct pci_dev *pdev = ae_dev->pdev;
8658 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8660 hclge_stats_clear(hdev);
8661 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8662 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8664 ret = hclge_cmd_init(hdev);
8666 dev_err(&pdev->dev, "Cmd queue init failed\n");
8670 ret = hclge_map_tqp(hdev);
8672 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8676 hclge_reset_umv_space(hdev);
8678 ret = hclge_mac_init(hdev);
8680 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8684 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8686 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8690 ret = hclge_config_gro(hdev, true);
8694 ret = hclge_init_vlan_config(hdev);
8696 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8700 ret = hclge_tm_init_hw(hdev, true);
8702 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8706 ret = hclge_rss_init_hw(hdev);
8708 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8712 ret = hclge_init_fd_config(hdev);
8714 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8718 /* Re-enable the hw error interrupts because
8719 * the interrupts get disabled on global reset.
8721 ret = hclge_config_nic_hw_error(hdev, true);
8724 "fail(%d) to re-enable NIC hw error interrupts\n",
8729 if (hdev->roce_client) {
8730 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8733 "fail(%d) to re-enable roce ras interrupts\n",
8739 hclge_reset_vport_state(hdev);
8741 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8747 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8749 struct hclge_dev *hdev = ae_dev->priv;
8750 struct hclge_mac *mac = &hdev->hw.mac;
8752 hclge_state_uninit(hdev);
8755 mdiobus_unregister(mac->mdio_bus);
8757 hclge_uninit_umv_space(hdev);
8759 /* Disable MISC vector(vector0) */
8760 hclge_enable_vector(&hdev->misc_vector, false);
8761 synchronize_irq(hdev->misc_vector.vector_irq);
8763 /* Disable all hw interrupts */
8764 hclge_config_mac_tnl_int(hdev, false);
8765 hclge_config_nic_hw_error(hdev, false);
8766 hclge_config_rocee_ras_interrupt(hdev, false);
8768 hclge_cmd_uninit(hdev);
8769 hclge_misc_irq_uninit(hdev);
8770 hclge_pci_uninit(hdev);
8771 mutex_destroy(&hdev->vport_lock);
8772 hclge_uninit_vport_mac_table(hdev);
8773 hclge_uninit_vport_vlan_table(hdev);
8774 mutex_destroy(&hdev->vport_cfg_mutex);
8775 ae_dev->priv = NULL;
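/* The maximum combined channel count reported to ethtool is bounded both
 * by the device's RSS table size and by this vport's TQPs divided evenly
 * among its TCs.
 */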
8778 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8780 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8781 struct hclge_vport *vport = hclge_get_vport(handle);
8782 struct hclge_dev *hdev = vport->back;
8784 return min_t(u32, hdev->rss_size_max,
8785 vport->alloc_tqps / kinfo->num_tc);
8788 static void hclge_get_channels(struct hnae3_handle *handle,
8789 struct ethtool_channels *ch)
8791 ch->max_combined = hclge_get_max_channels(handle);
8792 ch->other_count = 1;
8794 ch->combined_count = handle->kinfo.rss_size;
8797 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8798 u16 *alloc_tqps, u16 *max_rss_size)
8800 struct hclge_vport *vport = hclge_get_vport(handle);
8801 struct hclge_dev *hdev = vport->back;
8803 *alloc_tqps = vport->alloc_tqps;
8804 *max_rss_size = hdev->rss_size_max;
8807 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8808 bool rxfh_configured)
8810 struct hclge_vport *vport = hclge_get_vport(handle);
8811 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8812 struct hclge_dev *hdev = vport->back;
8813 int cur_rss_size = kinfo->rss_size;
8814 int cur_tqps = kinfo->num_tqps;
8815 u16 tc_offset[HCLGE_MAX_TC_NUM];
8816 u16 tc_valid[HCLGE_MAX_TC_NUM];
8817 u16 tc_size[HCLGE_MAX_TC_NUM];
8822 kinfo->req_rss_size = new_tqps_num;
8824 ret = hclge_tm_vport_map_update(hdev);
8826 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8830 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8831 roundup_size = ilog2(roundup_size);
8832 /* Set the RSS TC mode according to the new RSS size */
8833 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8836 if (!(hdev->hw_tc_map & BIT(i)))
8840 tc_size[i] = roundup_size;
8841 tc_offset[i] = kinfo->rss_size * i;
8843 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8847 /* RSS indirection table has been configured by the user */
8848 if (rxfh_configured)
8851 /* Reinitialize the RSS indirection table according to the new RSS size */
8852 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8856 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8857 rss_indir[i] = i % kinfo->rss_size;
8859 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8861 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8868 dev_info(&hdev->pdev->dev,
8869 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8870 cur_rss_size, kinfo->rss_size,
8871 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8876 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8877 u32 *regs_num_64_bit)
8879 struct hclge_desc desc;
8883 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8884 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8886 dev_err(&hdev->pdev->dev,
8887 "Query register number cmd failed, ret = %d.\n", ret);
8891 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8892 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8894 total_num = *regs_num_32_bit + *regs_num_64_bit;
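/* Firmware returns register dumps as a chain of descriptors. Each
 * descriptor carries HCLGE_32_BIT_REG_RTN_DATANUM 32-bit words, except
 * that the first one loses HCLGE_32_BIT_DESC_NODATA_LEN words to the
 * header, hence the nodata adjustment when computing cmd_num below.
 */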
8901 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8904 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8905 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
8907 struct hclge_desc *desc;
8908 u32 *reg_val = data;
8918 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
8919 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
8920 HCLGE_32_BIT_REG_RTN_DATANUM);
8921 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8925 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8926 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8928 dev_err(&hdev->pdev->dev,
8929 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8934 for (i = 0; i < cmd_num; i++) {
8936 desc_data = (__le32 *)(&desc[i].data[0]);
8937 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
8939 desc_data = (__le32 *)(&desc[i]);
8940 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8942 for (k = 0; k < n; k++) {
8943 *reg_val++ = le32_to_cpu(*desc_data++);
8955 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8958 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8959 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
8961 struct hclge_desc *desc;
8962 u64 *reg_val = data;
8972 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
8973 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
8974 HCLGE_64_BIT_REG_RTN_DATANUM);
8975 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8979 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8980 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8982 dev_err(&hdev->pdev->dev,
8983 "Query 64 bit register cmd failed, ret = %d.\n", ret);
8988 for (i = 0; i < cmd_num; i++) {
8990 desc_data = (__le64 *)(&desc[i].data[0]);
8991 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
8993 desc_data = (__le64 *)(&desc[i]);
8994 n = HCLGE_64_BIT_REG_RTN_DATANUM;
8996 for (k = 0; k < n; k++) {
8997 *reg_val++ = le64_to_cpu(*desc_data++);
9009 #define MAX_SEPARATE_NUM 4
9010 #define SEPARATOR_VALUE 0xFFFFFFFF
9011 #define REG_NUM_PER_LINE 4
9012 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
9014 static int hclge_get_regs_len(struct hnae3_handle *handle)
9016 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9017 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9018 struct hclge_vport *vport = hclge_get_vport(handle);
9019 struct hclge_dev *hdev = vport->back;
9020 u32 regs_num_32_bit, regs_num_64_bit;
9023 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9025 dev_err(&hdev->pdev->dev,
9026 "Get register number failed, ret = %d.\n", ret);
9030 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9031 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9032 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9033 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9035 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9036 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9037 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9040 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9043 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9044 struct hclge_vport *vport = hclge_get_vport(handle);
9045 struct hclge_dev *hdev = vport->back;
9046 u32 regs_num_32_bit, regs_num_64_bit;
9047 int i, j, reg_um, separator_num;
9051 *version = hdev->fw_version;
9053 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9055 dev_err(&hdev->pdev->dev,
9056 "Get register number failed, ret = %d.\n", ret);
9060 /* fetching per-PF registers valus from PF PCIe register space */
9061 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9062 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9063 for (i = 0; i < reg_um; i++)
9064 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9065 for (i = 0; i < separator_num; i++)
9066 *reg++ = SEPARATOR_VALUE;
9068 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9069 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9070 for (i = 0; i < reg_um; i++)
9071 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9072 for (i = 0; i < separator_num; i++)
9073 *reg++ = SEPARATOR_VALUE;
9075 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9076 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9077 for (j = 0; j < kinfo->num_tqps; j++) {
9078 for (i = 0; i < reg_um; i++)
9079 *reg++ = hclge_read_dev(&hdev->hw,
9080 ring_reg_addr_list[i] +
9082 for (i = 0; i < separator_num; i++)
9083 *reg++ = SEPARATOR_VALUE;
9086 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9087 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9088 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9089 for (i = 0; i < reg_um; i++)
9090 *reg++ = hclge_read_dev(&hdev->hw,
9091 tqp_intr_reg_addr_list[i] +
9093 for (i = 0; i < separator_num; i++)
9094 *reg++ = SEPARATOR_VALUE;
9097 /* fetching PF common registers values from firmware */
9098 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9100 dev_err(&hdev->pdev->dev,
9101 "Get 32 bit register failed, ret = %d.\n", ret);
9105 reg += regs_num_32_bit;
9106 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9108 dev_err(&hdev->pdev->dev,
9109 "Get 64 bit register failed, ret = %d.\n", ret);
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
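
/* ethtool -p (port identification) callback: light the locate LED while
 * identification is active and switch it off again when it ends.
 */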
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
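
/* Copy the MAC's supported/advertising link-mode masks into the
 * caller's ethtool bitmaps (__ETHTOOL_LINK_MODE_MASK_NBITS bits each).
 */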
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
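
/* .set_gro_en callback: forward the hardware GRO on/off request. */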
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
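
/* Operations table exported to the hnae3 framework; the hns3 ENET and
 * client drivers reach the PF hardware through these callbacks.
 */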
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}

module_init(hclge_init);
module_exit(hclge_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);