// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_STATUS_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

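/* Newer firmware exposes the MAC statistics as one variable-length
 * register block (opcode 0034); the required BD count is discovered
 * at runtime via hclge_mac_query_reg_num() below.
 */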
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

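/* Ask the firmware how many MAC statistics registers it implements.
 * Judging from the arithmetic below, the first BD carries three stats
 * words and each following BD carries four, hence the round-up.
 */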
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

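/* Pick the statistics method: firmware that implements
 * HCLGE_OPC_QUERY_MAC_REG_NUM takes the variable-length path, while
 * -EOPNOTSUPP falls back to the fixed 21-BD layout.
 */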
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

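/* Accumulate per-queue RX and TX packet counters into the software
 * tqp_stats; the hardware counters are read one queue per command.
 */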
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each TQP contributes one TX and one RX queue */
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

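/* Copy one u64 per entry of @strs out of a stats structure, using the
 * field offsets recorded in the table (see HCLGE_STATS_READ above).
 */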
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

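/* ethtool string-set sizes. For ETH_SS_TEST the count doubles as
 * capability discovery: the supported-loopback flags are recomputed
 * here before the strings are emitted by hclge_get_strings().
 */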
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
	 * phy: supported only when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

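/* Decode the HCLGE_OPC_QUERY_FUNC_STATUS response: record whether this
 * PF is the main PF and which MAC it is bound to.
 */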
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

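/* Query the TQP, packet-buffer and MSI-X resources assigned to this PF.
 * The NIC vector count is validated against HNAE3_MIN_VECTOR_NUM; RoCE
 * vectors, when supported, are counted after the NIC vectors.
 */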
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case 8:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

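/* Map a driver speed value back to its HCLGE_SUPPORT_*_BIT so a user
 * request can be checked against the MAC's advertised ability mask.
 */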
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}

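/* FEC ability depends on the link speed: 10G/40G use BASE-R, 25G/50G
 * may use BASE-R or RS, and 100G/200G use RS only.
 */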
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

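/* Unpack the two-BD HCLGE_OPC_GET_CFG_PARAM response. Note the MAC
 * address is split across params: the low 32 bits live in param[2] and
 * the high 16 bits in param[3]; the "(high << 31) << 1" below is simply
 * a 32-bit left shift written in two steps.
 */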
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT	4096
#define SPEED_ABILITY_EXT_SHIFT		8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M encodes the PF max rss size as a power
	 * of 2 instead of the value itself, which is more flexible for
	 * future changes and expansions. When the PF field is 0, PF and
	 * VF share the same max rss size field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from the
	 * configuration file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes,
	 * so a conversion is needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length must be given in units of 4 bytes when sent
		 * to hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}

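/* Device specifications: V3+ firmware reports them through
 * HCLGE_OPC_QUERY_DEV_SPECS; older devices fall back to the
 * compile-time defaults, and any zero field reported by firmware is
 * backfilled as well.
 */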
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->kinfo.io_base = hdev->hw.io_base;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is configured in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	u8 i;
	int ret;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	u32 cnt = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

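/* Check whether @rx_all bytes can cover the private RX buffers plus a
 * shared buffer; if so, size the shared buffer and its per-TC high/low
 * waterlines. DCB-capable devices reserve dv_buf_size of headroom, and
 * an extra margin is kept when at most NEED_RESERVE_TC_NUM TCs are
 * active.
 */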
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

2071 struct hclge_pkt_buf_alloc *buf_alloc)
2075 total_size = hdev->pkt_buf_size;
2077 /* alloc tx buffer for all enabled TCs */
2078 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2079 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2081 if (hdev->hw_tc_map & BIT(i)) {
2082 if (total_size < hdev->tx_buf_size)
2085 priv->tx_buf_size = hdev->tx_buf_size;
2087 priv->tx_buf_size = 0;
2090 total_size -= priv->tx_buf_size;
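/* Compute each TC's rx private buffer and waterlines. When @max is true,
 * generous MPS-based waterlines are used; when false, the minimal
 * HCLGE_BUF_SIZE_UNIT-based ones, so the caller can retry with smaller
 * buffers if the first attempt does not fit.
 */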
2096 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2097 struct hclge_pkt_buf_alloc *buf_alloc)
2099 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2100 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2103 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2104 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2111 if (!(hdev->hw_tc_map & BIT(i)))
2116 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2117 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2118 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2119 HCLGE_BUF_SIZE_UNIT);
2122 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2126 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2129 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2132 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2133 struct hclge_pkt_buf_alloc *buf_alloc)
2135 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2136 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2139 /* clear the last TC's buffer first */
2140 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2141 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2142 unsigned int mask = BIT((unsigned int)i);
2144 if (hdev->hw_tc_map & mask &&
2145 !(hdev->tm_info.hw_pfc_map & mask)) {
2146 /* Clear the private buffer of TCs without PFC */
2154 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2155 no_pfc_priv_num == 0)
2159 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2162 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2163 struct hclge_pkt_buf_alloc *buf_alloc)
2165 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2166 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2169 /* clear the last TC's buffer first */
2170 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2171 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2172 unsigned int mask = BIT((unsigned int)i);
2174 if (hdev->hw_tc_map & mask &&
2175 hdev->tm_info.hw_pfc_map & mask) {
2176 /* Reduce the number of PFC TCs with a private buffer */
2184 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2189 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2192 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2193 struct hclge_pkt_buf_alloc *buf_alloc)
2195 #define COMPENSATE_BUFFER 0x3C00
2196 #define COMPENSATE_HALF_MPS_NUM 5
2197 #define PRIV_WL_GAP 0x1800
2199 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2200 u32 tc_num = hclge_get_tc_num(hdev);
2201 u32 half_mps = hdev->mps >> 1;
2206 rx_priv = rx_priv / tc_num;
2208 if (tc_num <= NEED_RESERVE_TC_NUM)
2209 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2211 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2212 COMPENSATE_HALF_MPS_NUM * half_mps;
2213 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2214 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2215 if (rx_priv < min_rx_priv)
2218 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2219 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2226 if (!(hdev->hw_tc_map & BIT(i)))
2230 priv->buf_size = rx_priv;
2231 priv->wl.high = rx_priv - hdev->dv_buf_size;
2232 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2235 buf_alloc->s_buf.buf_size = 0;
2240 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2241 * @hdev: pointer to struct hclge_dev
2242 * @buf_alloc: pointer to buffer calculation data
2243 * @return: 0 if the calculation succeeds, a negative errno otherwise
*/
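/* The calculation tries progressively less demanding layouts: private
 * buffers only, then full waterlines for all TCs, then reduced
 * waterlines, then dropping private buffers for non-PFC TCs, and
 * finally dropping them for PFC TCs as well.
 */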
2245 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2246 struct hclge_pkt_buf_alloc *buf_alloc)
2248 /* When DCB is not supported, rx private buffer is not allocated. */
2249 if (!hnae3_dev_dcb_supported(hdev)) {
2250 u32 rx_all = hdev->pkt_buf_size;
2252 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2253 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2259 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2262 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2265 /* try to decrease the buffer size */
2266 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2269 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2272 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2278 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2279 struct hclge_pkt_buf_alloc *buf_alloc)
2281 struct hclge_rx_priv_buff_cmd *req;
2282 struct hclge_desc desc;
2286 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2287 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2289 /* Allocate the private buffer for each TC */
2290 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2291 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2294 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2296 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2300 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2301 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2303 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2305 dev_err(&hdev->pdev->dev,
2306 "rx private buffer alloc cmd failed %d\n", ret);
2311 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2312 struct hclge_pkt_buf_alloc *buf_alloc)
2314 struct hclge_rx_priv_wl_buf *req;
2315 struct hclge_priv_buf *priv;
2316 struct hclge_desc desc[2];
2320 for (i = 0; i < 2; i++) {
2321 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2323 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2325 /* The first descriptor sets the NEXT bit to 1 */
2327 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2329 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2331 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2332 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2334 priv = &buf_alloc->priv_buf[idx];
2335 req->tc_wl[j].high =
2336 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2337 req->tc_wl[j].high |=
2338 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2341 req->tc_wl[j].low |=
2342 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2346 /* Send 2 descriptors at one time */
2347 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2349 dev_err(&hdev->pdev->dev,
2350 "rx private waterline config cmd failed %d\n",
2355 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2356 struct hclge_pkt_buf_alloc *buf_alloc)
2358 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2359 struct hclge_rx_com_thrd *req;
2360 struct hclge_desc desc[2];
2361 struct hclge_tc_thrd *tc;
2365 for (i = 0; i < 2; i++) {
2366 hclge_cmd_setup_basic_desc(&desc[i],
2367 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2368 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2370 /* The first descriptor sets the NEXT bit to 1 */
2372 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2374 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2376 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2377 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2379 req->com_thrd[j].high =
2380 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2381 req->com_thrd[j].high |=
2382 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2383 req->com_thrd[j].low =
2384 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2385 req->com_thrd[j].low |=
2386 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2390 /* Send 2 descriptors at one time */
2391 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2393 dev_err(&hdev->pdev->dev,
2394 "common threshold config cmd failed %d\n", ret);
2398 static int hclge_common_wl_config(struct hclge_dev *hdev,
2399 struct hclge_pkt_buf_alloc *buf_alloc)
2401 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2402 struct hclge_rx_com_wl *req;
2403 struct hclge_desc desc;
2406 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2408 req = (struct hclge_rx_com_wl *)desc.data;
2409 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2410 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2412 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2413 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2415 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2417 dev_err(&hdev->pdev->dev,
2418 "common waterline config cmd failed %d\n", ret);
2423 int hclge_buffer_alloc(struct hclge_dev *hdev)
2425 struct hclge_pkt_buf_alloc *pkt_buf;
2428 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2432 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2434 dev_err(&hdev->pdev->dev,
2435 "could not calc tx buffer size for all TCs %d\n", ret);
2439 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2441 dev_err(&hdev->pdev->dev,
2442 "could not alloc tx buffers %d\n", ret);
2446 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2448 dev_err(&hdev->pdev->dev,
2449 "could not calc rx priv buffer size for all TCs %d\n",
2454 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2456 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2461 if (hnae3_dev_dcb_supported(hdev)) {
2462 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2464 dev_err(&hdev->pdev->dev,
2465 "could not configure rx private waterline %d\n",
2470 ret = hclge_common_thrd_config(hdev, pkt_buf);
2472 dev_err(&hdev->pdev->dev,
2473 "could not configure common threshold %d\n",
2479 ret = hclge_common_wl_config(hdev, pkt_buf);
2481 dev_err(&hdev->pdev->dev,
2482 "could not configure common waterline %d\n", ret);
2489 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2491 struct hnae3_handle *roce = &vport->roce;
2492 struct hnae3_handle *nic = &vport->nic;
2493 struct hclge_dev *hdev = vport->back;
2495 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2497 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2500 roce->rinfo.base_vector = hdev->roce_base_vector;
2502 roce->rinfo.netdev = nic->kinfo.netdev;
2503 roce->rinfo.roce_io_base = hdev->hw.io_base;
2504 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2506 roce->pdev = nic->pdev;
2507 roce->ae_algo = nic->ae_algo;
2508 roce->numa_node_mask = nic->numa_node_mask;
2513 static int hclge_init_msi(struct hclge_dev *hdev)
2515 struct pci_dev *pdev = hdev->pdev;
2519 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2521 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2524 "failed(%d) to allocate MSI/MSI-X vectors\n",
2528 if (vectors < hdev->num_msi)
2529 dev_warn(&hdev->pdev->dev,
2530 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2531 hdev->num_msi, vectors);
2533 hdev->num_msi = vectors;
2534 hdev->num_msi_left = vectors;
2536 hdev->base_msi_vector = pdev->irq;
2537 hdev->roce_base_vector = hdev->base_msi_vector +
2540 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2541 sizeof(u16), GFP_KERNEL);
2542 if (!hdev->vector_status) {
2543 pci_free_irq_vectors(pdev);
2547 for (i = 0; i < hdev->num_msi; i++)
2548 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2550 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2551 sizeof(int), GFP_KERNEL);
2552 if (!hdev->vector_irq) {
2553 pci_free_irq_vectors(pdev);
2560 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2562 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2563 duplex = HCLGE_MAC_FULL;
2568 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2571 struct hclge_config_mac_speed_dup_cmd *req;
2572 struct hclge_desc desc;
2575 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2577 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2580 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
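/* Map the MAC speed to the firmware speed-field encoding below; note the
 * values are not monotonic with speed: 1G..100G map to 0..5, 10M and
 * 100M map to 6 and 7, and 200G maps to 8.
 */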
2583 case HCLGE_MAC_SPEED_10M:
2584 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2585 HCLGE_CFG_SPEED_S, 6);
2587 case HCLGE_MAC_SPEED_100M:
2588 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2589 HCLGE_CFG_SPEED_S, 7);
2591 case HCLGE_MAC_SPEED_1G:
2592 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2593 HCLGE_CFG_SPEED_S, 0);
2595 case HCLGE_MAC_SPEED_10G:
2596 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2597 HCLGE_CFG_SPEED_S, 1);
2599 case HCLGE_MAC_SPEED_25G:
2600 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2601 HCLGE_CFG_SPEED_S, 2);
2603 case HCLGE_MAC_SPEED_40G:
2604 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2605 HCLGE_CFG_SPEED_S, 3);
2607 case HCLGE_MAC_SPEED_50G:
2608 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2609 HCLGE_CFG_SPEED_S, 4);
2611 case HCLGE_MAC_SPEED_100G:
2612 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2613 HCLGE_CFG_SPEED_S, 5);
2615 case HCLGE_MAC_SPEED_200G:
2616 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2617 HCLGE_CFG_SPEED_S, 8);
2620 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2624 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2627 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2629 dev_err(&hdev->pdev->dev,
2630 "mac speed/duplex config cmd failed %d.\n", ret);
2637 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2639 struct hclge_mac *mac = &hdev->hw.mac;
2642 duplex = hclge_check_speed_dup(duplex, speed);
2643 if (!mac->support_autoneg && mac->speed == speed &&
2644 mac->duplex == duplex)
2647 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2651 hdev->hw.mac.speed = speed;
2652 hdev->hw.mac.duplex = duplex;
2657 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2660 struct hclge_vport *vport = hclge_get_vport(handle);
2661 struct hclge_dev *hdev = vport->back;
2663 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2666 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2668 struct hclge_config_auto_neg_cmd *req;
2669 struct hclge_desc desc;
2673 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2675 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2677 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2678 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2680 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2682 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2688 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2690 struct hclge_vport *vport = hclge_get_vport(handle);
2691 struct hclge_dev *hdev = vport->back;
2693 if (!hdev->hw.mac.support_autoneg) {
2695 dev_err(&hdev->pdev->dev,
2696 "autoneg is not supported by current port\n");
2703 return hclge_set_autoneg_en(hdev, enable);
2706 static int hclge_get_autoneg(struct hnae3_handle *handle)
2708 struct hclge_vport *vport = hclge_get_vport(handle);
2709 struct hclge_dev *hdev = vport->back;
2710 struct phy_device *phydev = hdev->hw.mac.phydev;
2713 return phydev->autoneg;
2715 return hdev->hw.mac.autoneg;
2718 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2720 struct hclge_vport *vport = hclge_get_vport(handle);
2721 struct hclge_dev *hdev = vport->back;
2724 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2726 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2729 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2732 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2734 struct hclge_vport *vport = hclge_get_vport(handle);
2735 struct hclge_dev *hdev = vport->back;
2737 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2738 return hclge_set_autoneg_en(hdev, !halt);
2743 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2745 struct hclge_config_fec_cmd *req;
2746 struct hclge_desc desc;
2749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2751 req = (struct hclge_config_fec_cmd *)desc.data;
2752 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2753 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2754 if (fec_mode & BIT(HNAE3_FEC_RS))
2755 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2756 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2757 if (fec_mode & BIT(HNAE3_FEC_BASER))
2758 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2759 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2761 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2763 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2768 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2770 struct hclge_vport *vport = hclge_get_vport(handle);
2771 struct hclge_dev *hdev = vport->back;
2772 struct hclge_mac *mac = &hdev->hw.mac;
2775 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2776 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2780 ret = hclge_set_fec_hw(hdev, fec_mode);
2784 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2788 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2791 struct hclge_vport *vport = hclge_get_vport(handle);
2792 struct hclge_dev *hdev = vport->back;
2793 struct hclge_mac *mac = &hdev->hw.mac;
2796 *fec_ability = mac->fec_ability;
2798 *fec_mode = mac->fec_mode;
2801 static int hclge_mac_init(struct hclge_dev *hdev)
2803 struct hclge_mac *mac = &hdev->hw.mac;
2806 hdev->support_sfp_query = true;
2807 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2808 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2809 hdev->hw.mac.duplex);
2813 if (hdev->hw.mac.support_autoneg) {
2814 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2821 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2822 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2827 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2829 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2833 ret = hclge_set_default_loopback(hdev);
2837 ret = hclge_buffer_alloc(hdev);
2839 dev_err(&hdev->pdev->dev,
2840 "allocate buffer fail, ret=%d\n", ret);
2845 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2847 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2848 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2849 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2850 hclge_wq, &hdev->service_task, 0);
2853 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2855 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2856 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2857 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2858 hclge_wq, &hdev->service_task, 0);
2861 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2863 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2864 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2865 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2866 hclge_wq, &hdev->service_task, 0);
2869 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2871 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2872 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2873 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2874 hclge_wq, &hdev->service_task,
2878 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2880 struct hclge_link_status_cmd *req;
2881 struct hclge_desc desc;
2884 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2885 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2887 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2892 req = (struct hclge_link_status_cmd *)desc.data;
2893 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2894 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2899 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2901 struct phy_device *phydev = hdev->hw.mac.phydev;
2903 *link_status = HCLGE_LINK_STATUS_DOWN;
2905 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2908 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2911 return hclge_get_mac_link_status(hdev, link_status);
2914 static void hclge_push_link_status(struct hclge_dev *hdev)
2916 struct hclge_vport *vport;
2920 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2921 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2923 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2924 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2927 ret = hclge_push_vf_link_status(vport);
2929 dev_err(&hdev->pdev->dev,
2930 "failed to push link status to vf%u, ret = %d\n",
2936 static void hclge_update_link_status(struct hclge_dev *hdev)
2938 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2939 struct hnae3_handle *handle = &hdev->vport[0].nic;
2940 struct hnae3_client *rclient = hdev->roce_client;
2941 struct hnae3_client *client = hdev->nic_client;
2948 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2951 ret = hclge_get_mac_phy_link(hdev, &state);
2953 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2957 if (state != hdev->hw.mac.link) {
2958 hdev->hw.mac.link = state;
2959 client->ops->link_status_change(handle, state);
2960 hclge_config_mac_tnl_int(hdev, state);
2961 if (rclient && rclient->ops->link_status_change)
2962 rclient->ops->link_status_change(rhandle, state);
2964 hclge_push_link_status(hdev);
2967 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2970 static void hclge_update_port_capability(struct hclge_dev *hdev,
2971 struct hclge_mac *mac)
2973 if (hnae3_dev_fec_supported(hdev))
2974 /* update fec ability by speed */
2975 hclge_convert_setting_fec(mac);
2977 /* firmware cannot identify the backplane type; the media type
2978 * read from the configuration can help to handle it
*/
2980 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2981 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2982 mac->module_type = HNAE3_MODULE_TYPE_KR;
2983 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2984 mac->module_type = HNAE3_MODULE_TYPE_TP;
2986 if (mac->support_autoneg) {
2987 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2988 linkmode_copy(mac->advertising, mac->supported);
2990 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2992 linkmode_zero(mac->advertising);
2996 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2998 struct hclge_sfp_info_cmd *resp;
2999 struct hclge_desc desc;
3002 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3003 resp = (struct hclge_sfp_info_cmd *)desc.data;
3004 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3005 if (ret == -EOPNOTSUPP) {
3006 dev_warn(&hdev->pdev->dev,
3007 "IMP do not support get SFP speed %d\n", ret);
3010 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3014 *speed = le32_to_cpu(resp->speed);
3019 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3021 struct hclge_sfp_info_cmd *resp;
3022 struct hclge_desc desc;
3025 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3026 resp = (struct hclge_sfp_info_cmd *)desc.data;
3028 resp->query_type = QUERY_ACTIVE_SPEED;
3030 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3031 if (ret == -EOPNOTSUPP) {
3032 dev_warn(&hdev->pdev->dev,
3033 "IMP does not support get SFP info %d\n", ret);
3036 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3040 /* In some cases, the mac speed fetched from the IMP may be 0; it
3041 * shouldn't be assigned to mac->speed.
*/
3043 if (!le32_to_cpu(resp->speed))
3046 mac->speed = le32_to_cpu(resp->speed);
3047 /* if resp->speed_ability is 0, it means the firmware is an old
3048 * version; do not update these parameters
*/
3050 if (resp->speed_ability) {
3051 mac->module_type = le32_to_cpu(resp->module_type);
3052 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3053 mac->autoneg = resp->autoneg;
3054 mac->support_autoneg = resp->autoneg_ability;
3055 mac->speed_type = QUERY_ACTIVE_SPEED;
3056 if (!resp->active_fec)
3059 mac->fec_mode = BIT(resp->active_fec);
3061 mac->speed_type = QUERY_SFP_SPEED;
3067 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3068 struct ethtool_link_ksettings *cmd)
3070 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3071 struct hclge_vport *vport = hclge_get_vport(handle);
3072 struct hclge_phy_link_ksetting_0_cmd *req0;
3073 struct hclge_phy_link_ksetting_1_cmd *req1;
3074 u32 supported, advertising, lp_advertising;
3075 struct hclge_dev *hdev = vport->back;
3078 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3080 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3081 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3084 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3086 dev_err(&hdev->pdev->dev,
3087 "failed to get phy link ksetting, ret = %d.\n", ret);
3091 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3092 cmd->base.autoneg = req0->autoneg;
3093 cmd->base.speed = le32_to_cpu(req0->speed);
3094 cmd->base.duplex = req0->duplex;
3095 cmd->base.port = req0->port;
3096 cmd->base.transceiver = req0->transceiver;
3097 cmd->base.phy_address = req0->phy_address;
3098 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3099 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3100 supported = le32_to_cpu(req0->supported);
3101 advertising = le32_to_cpu(req0->advertising);
3102 lp_advertising = le32_to_cpu(req0->lp_advertising);
3103 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3105 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3107 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3110 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3111 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3112 cmd->base.master_slave_state = req1->master_slave_state;
3118 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3119 const struct ethtool_link_ksettings *cmd)
3121 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3122 struct hclge_vport *vport = hclge_get_vport(handle);
3123 struct hclge_phy_link_ksetting_0_cmd *req0;
3124 struct hclge_phy_link_ksetting_1_cmd *req1;
3125 struct hclge_dev *hdev = vport->back;
3129 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3130 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3131 (cmd->base.duplex != DUPLEX_HALF &&
3132 cmd->base.duplex != DUPLEX_FULL)))
3135 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3137 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3138 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3141 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3142 req0->autoneg = cmd->base.autoneg;
3143 req0->speed = cpu_to_le32(cmd->base.speed);
3144 req0->duplex = cmd->base.duplex;
3145 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3146 cmd->link_modes.advertising);
3147 req0->advertising = cpu_to_le32(advertising);
3148 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3150 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3151 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3153 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3155 dev_err(&hdev->pdev->dev,
3156 "failed to set phy link ksettings, ret = %d.\n", ret);
3160 hdev->hw.mac.autoneg = cmd->base.autoneg;
3161 hdev->hw.mac.speed = cmd->base.speed;
3162 hdev->hw.mac.duplex = cmd->base.duplex;
3163 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3168 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3170 struct ethtool_link_ksettings cmd;
3173 if (!hnae3_dev_phy_imp_supported(hdev))
3176 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3180 hdev->hw.mac.autoneg = cmd.base.autoneg;
3181 hdev->hw.mac.speed = cmd.base.speed;
3182 hdev->hw.mac.duplex = cmd.base.duplex;
3187 static int hclge_tp_port_init(struct hclge_dev *hdev)
3189 struct ethtool_link_ksettings cmd;
3191 if (!hnae3_dev_phy_imp_supported(hdev))
3194 cmd.base.autoneg = hdev->hw.mac.autoneg;
3195 cmd.base.speed = hdev->hw.mac.speed;
3196 cmd.base.duplex = hdev->hw.mac.duplex;
3197 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3199 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3202 static int hclge_update_port_info(struct hclge_dev *hdev)
3204 struct hclge_mac *mac = &hdev->hw.mac;
3205 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3208 /* get the port info from SFP cmd if not copper port */
3209 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3210 return hclge_update_tp_port_info(hdev);
3212 /* if the IMP does not support getting SFP/qSFP info, return directly */
3213 if (!hdev->support_sfp_query)
3216 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3217 ret = hclge_get_sfp_info(hdev, mac);
3219 ret = hclge_get_sfp_speed(hdev, &speed);
3221 if (ret == -EOPNOTSUPP) {
3222 hdev->support_sfp_query = false;
3228 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3229 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3230 hclge_update_port_capability(hdev, mac);
3233 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3236 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3237 return 0; /* do nothing if no SFP */
3239 /* must configure full duplex for SFP */
3240 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3244 static int hclge_get_status(struct hnae3_handle *handle)
3246 struct hclge_vport *vport = hclge_get_vport(handle);
3247 struct hclge_dev *hdev = vport->back;
3249 hclge_update_link_status(hdev);
3251 return hdev->hw.mac.link;
3254 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3256 if (!pci_num_vf(hdev->pdev)) {
3257 dev_err(&hdev->pdev->dev,
3258 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3262 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3263 dev_err(&hdev->pdev->dev,
3264 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3265 vf, pci_num_vf(hdev->pdev));
3269 /* VFs start from index 1 in the vport array */
3270 vf += HCLGE_VF_VPORT_START_NUM;
3271 return &hdev->vport[vf];
3274 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3275 struct ifla_vf_info *ivf)
3277 struct hclge_vport *vport = hclge_get_vport(handle);
3278 struct hclge_dev *hdev = vport->back;
3280 vport = hclge_get_vf_vport(hdev, vf);
3285 ivf->linkstate = vport->vf_info.link_state;
3286 ivf->spoofchk = vport->vf_info.spoofchk;
3287 ivf->trusted = vport->vf_info.trusted;
3288 ivf->min_tx_rate = 0;
3289 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3290 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3291 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3292 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3293 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3298 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3301 struct hclge_vport *vport = hclge_get_vport(handle);
3302 struct hclge_dev *hdev = vport->back;
3306 vport = hclge_get_vf_vport(hdev, vf);
3310 link_state_old = vport->vf_info.link_state;
3311 vport->vf_info.link_state = link_state;
3313 ret = hclge_push_vf_link_status(vport);
3315 vport->vf_info.link_state = link_state_old;
3316 dev_err(&hdev->pdev->dev,
3317 "failed to push vf%d link status, ret = %d\n", vf, ret);
3323 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3325 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3327 /* fetch the events from their corresponding regs */
3328 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3329 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3330 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3331 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3333 /* Assumption: if reset and mailbox events happen to be reported
3334 * together, then we only process the reset event in this pass and
3335 * defer the processing of the mailbox events. Since we would not have
3336 * cleared the RX CMDQ event this time, we would receive another
3337 * interrupt from H/W just for the mailbox.
3339 * check for vector0 reset event sources
3341 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3342 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3343 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3344 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3345 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3346 hdev->rst_stats.imp_rst_cnt++;
3347 return HCLGE_VECTOR0_EVENT_RST;
3350 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3351 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3352 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3353 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3354 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3355 hdev->rst_stats.global_rst_cnt++;
3356 return HCLGE_VECTOR0_EVENT_RST;
3359 /* check for vector0 msix event and hardware error event source */
3360 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3361 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3362 return HCLGE_VECTOR0_EVENT_ERR;
3364 /* check for vector0 ptp event source */
3365 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3366 *clearval = msix_src_reg;
3367 return HCLGE_VECTOR0_EVENT_PTP;
3370 /* check for vector0 mailbox(=CMDQ RX) event source */
3371 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3372 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3373 *clearval = cmdq_src_reg;
3374 return HCLGE_VECTOR0_EVENT_MBX;
3377 /* print other vector0 event source */
3378 dev_info(&hdev->pdev->dev,
3379 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3380 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3382 return HCLGE_VECTOR0_EVENT_OTHER;
3385 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3388 switch (event_type) {
3389 case HCLGE_VECTOR0_EVENT_PTP:
3390 case HCLGE_VECTOR0_EVENT_RST:
3391 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3393 case HCLGE_VECTOR0_EVENT_MBX:
3394 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3401 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3403 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3404 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3405 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3406 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3407 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3410 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3412 writel(enable ? 1 : 0, vector->addr);
3415 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3417 struct hclge_dev *hdev = data;
3418 unsigned long flags;
3422 hclge_enable_vector(&hdev->misc_vector, false);
3423 event_cause = hclge_check_event_cause(hdev, &clearval);
3425 /* vector 0 interrupt is shared with reset and mailbox source events. */
3426 switch (event_cause) {
3427 case HCLGE_VECTOR0_EVENT_ERR:
3428 hclge_errhand_task_schedule(hdev);
3430 case HCLGE_VECTOR0_EVENT_RST:
3431 hclge_reset_task_schedule(hdev);
3433 case HCLGE_VECTOR0_EVENT_PTP:
3434 spin_lock_irqsave(&hdev->ptp->lock, flags);
3435 hclge_ptp_clean_tx_hwts(hdev);
3436 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3438 case HCLGE_VECTOR0_EVENT_MBX:
3439 /* If we are here then,
3440 * 1. either we are not handling any mbx task and none is scheduled, or
3443 * 2. we could be handling a mbx task but nothing more is scheduled.
3445 * In both cases, we should schedule the mbx task, as there are more
3446 * mbx messages reported by this interrupt.
*/
3448 hclge_mbx_task_schedule(hdev);
3451 dev_warn(&hdev->pdev->dev,
3452 "received unknown or unhandled event of vector0\n");
3456 hclge_clear_event_cause(hdev, event_cause, clearval);
3458 /* Enable interrupt if it is not caused by reset event or error event */
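/* For reset and error events the misc vector stays masked here; it is
 * re-enabled once handling completes, e.g. in hclge_clear_reset_cause()
 * and hclge_handle_err_reset_request() further below.
 */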
3459 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3460 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3461 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3462 hclge_enable_vector(&hdev->misc_vector, true);
3467 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3469 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3470 dev_warn(&hdev->pdev->dev,
3471 "vector(vector_id %d) has been freed.\n", vector_id);
3475 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3476 hdev->num_msi_left += 1;
3477 hdev->num_msi_used -= 1;
3480 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3482 struct hclge_misc_vector *vector = &hdev->misc_vector;
3484 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3486 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3487 hdev->vector_status[0] = 0;
3489 hdev->num_msi_left -= 1;
3490 hdev->num_msi_used += 1;
3493 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3494 const cpumask_t *mask)
3496 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3499 cpumask_copy(&hdev->affinity_mask, mask);
3502 static void hclge_irq_affinity_release(struct kref *ref)
3506 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3508 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3509 &hdev->affinity_mask);
3511 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3512 hdev->affinity_notify.release = hclge_irq_affinity_release;
3513 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3514 &hdev->affinity_notify);
3517 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3519 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3520 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3523 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3527 hclge_get_misc_vector(hdev);
3529 /* this would be explicitly freed in the end */
3530 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3531 HCLGE_NAME, pci_name(hdev->pdev));
3532 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3533 0, hdev->misc_vector.name, hdev);
3535 hclge_free_vector(hdev, 0);
3536 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3537 hdev->misc_vector.vector_irq);
3543 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3545 free_irq(hdev->misc_vector.vector_irq, hdev);
3546 hclge_free_vector(hdev, 0);
3549 int hclge_notify_client(struct hclge_dev *hdev,
3550 enum hnae3_reset_notify_type type)
3552 struct hnae3_handle *handle = &hdev->vport[0].nic;
3553 struct hnae3_client *client = hdev->nic_client;
3556 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3559 if (!client->ops->reset_notify)
3562 ret = client->ops->reset_notify(handle, type);
3564 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3570 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3571 enum hnae3_reset_notify_type type)
3573 struct hnae3_handle *handle = &hdev->vport[0].roce;
3574 struct hnae3_client *client = hdev->roce_client;
3577 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3580 if (!client->ops->reset_notify)
3583 ret = client->ops->reset_notify(handle, type);
3585 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3591 static int hclge_reset_wait(struct hclge_dev *hdev)
3593 #define HCLGE_RESET_WAIT_MS 100
3594 #define HCLGE_RESET_WAIT_CNT 350
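/* worst-case wait: HCLGE_RESET_WAIT_CNT * HCLGE_RESET_WAIT_MS
 * = 350 * 100 ms = 35 s
 */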
3596 u32 val, reg, reg_bit;
3599 switch (hdev->reset_type) {
3600 case HNAE3_IMP_RESET:
3601 reg = HCLGE_GLOBAL_RESET_REG;
3602 reg_bit = HCLGE_IMP_RESET_BIT;
3604 case HNAE3_GLOBAL_RESET:
3605 reg = HCLGE_GLOBAL_RESET_REG;
3606 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3608 case HNAE3_FUNC_RESET:
3609 reg = HCLGE_FUN_RST_ING;
3610 reg_bit = HCLGE_FUN_RST_ING_B;
3613 dev_err(&hdev->pdev->dev,
3614 "Wait for unsupported reset type: %d\n",
3619 val = hclge_read_dev(&hdev->hw, reg);
3620 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3621 msleep(HCLGE_RESET_WAIT_MS);
3622 val = hclge_read_dev(&hdev->hw, reg);
3626 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3627 dev_warn(&hdev->pdev->dev,
3628 "Wait for reset timeout: %d\n", hdev->reset_type);
3635 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3637 struct hclge_vf_rst_cmd *req;
3638 struct hclge_desc desc;
3640 req = (struct hclge_vf_rst_cmd *)desc.data;
3641 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3642 req->dest_vfid = func_id;
3647 return hclge_cmd_send(&hdev->hw, &desc, 1);
3650 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3654 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3655 struct hclge_vport *vport = &hdev->vport[i];
3658 /* Send cmd to set/clear VF's FUNC_RST_ING */
3659 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3661 dev_err(&hdev->pdev->dev,
3662 "set vf(%u) rst failed %d!\n",
3663 vport->vport_id, ret);
3667 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3670 /* Inform VF to process the reset.
3671 * hclge_inform_reset_assert_to_vf may fail if VF
3672 * driver is not loaded.
3674 ret = hclge_inform_reset_assert_to_vf(vport);
3676 dev_warn(&hdev->pdev->dev,
3677 "inform reset to vf(%u) failed %d!\n",
3678 vport->vport_id, ret);
3684 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3686 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3687 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3688 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3691 hclge_mbx_handler(hdev);
3693 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
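/* Poll all VFs' reset-ready status before asserting PF reset: keep
 * sending HCLGE_OPC_QUERY_VF_RST_RDY and sleeping
 * HCLGE_PF_RESET_SYNC_TIME ms until all_vf_ready is reported or
 * HCLGE_PF_RESET_SYNC_CNT iterations have elapsed.
 */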
3696 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3698 struct hclge_pf_rst_sync_cmd *req;
3699 struct hclge_desc desc;
3703 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3704 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3707 /* VFs need to bring the netdev down via mbx during PF or FLR reset */
3708 hclge_mailbox_service_task(hdev);
3710 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3711 /* for compatibility with old firmware, wait
3712 * 100 ms for the VF to stop IO
*/
3714 if (ret == -EOPNOTSUPP) {
3715 msleep(HCLGE_RESET_SYNC_TIME);
3718 dev_warn(&hdev->pdev->dev, "sync with VF failed %d!\n",
3721 } else if (req->all_vf_ready) {
3724 msleep(HCLGE_PF_RESET_SYNC_TIME);
3725 hclge_cmd_reuse_desc(&desc, true);
3726 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3728 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3731 void hclge_report_hw_error(struct hclge_dev *hdev,
3732 enum hnae3_hw_error_type type)
3734 struct hnae3_client *client = hdev->nic_client;
3736 if (!client || !client->ops->process_hw_error ||
3737 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3740 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3743 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3747 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3748 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3749 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3750 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3751 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3754 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3755 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3756 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3757 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3761 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3763 struct hclge_desc desc;
3764 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3767 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3768 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3769 req->fun_reset_vfid = func_id;
3771 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3773 dev_err(&hdev->pdev->dev,
3774 "send function reset cmd fail, status =%d\n", ret);
3779 static void hclge_do_reset(struct hclge_dev *hdev)
3781 struct hnae3_handle *handle = &hdev->vport[0].nic;
3782 struct pci_dev *pdev = hdev->pdev;
3785 if (hclge_get_hw_reset_stat(handle)) {
3786 dev_info(&pdev->dev, "hardware reset not finished\n");
3787 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3788 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3789 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3793 switch (hdev->reset_type) {
3794 case HNAE3_IMP_RESET:
3795 dev_info(&pdev->dev, "IMP reset requested\n");
3796 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3797 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3798 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3800 case HNAE3_GLOBAL_RESET:
3801 dev_info(&pdev->dev, "global reset requested\n");
3802 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3803 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3804 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3806 case HNAE3_FUNC_RESET:
3807 dev_info(&pdev->dev, "PF reset requested\n");
3808 /* schedule again to check later */
3809 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3810 hclge_reset_task_schedule(hdev);
3813 dev_warn(&pdev->dev,
3814 "unsupported reset type: %d\n", hdev->reset_type);
3819 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3820 unsigned long *addr)
3822 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3823 struct hclge_dev *hdev = ae_dev->priv;
3825 /* return the highest priority reset level amongst all */
3826 if (test_bit(HNAE3_IMP_RESET, addr)) {
3827 rst_level = HNAE3_IMP_RESET;
3828 clear_bit(HNAE3_IMP_RESET, addr);
3829 clear_bit(HNAE3_GLOBAL_RESET, addr);
3830 clear_bit(HNAE3_FUNC_RESET, addr);
3831 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3832 rst_level = HNAE3_GLOBAL_RESET;
3833 clear_bit(HNAE3_GLOBAL_RESET, addr);
3834 clear_bit(HNAE3_FUNC_RESET, addr);
3835 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3836 rst_level = HNAE3_FUNC_RESET;
3837 clear_bit(HNAE3_FUNC_RESET, addr);
3838 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3839 rst_level = HNAE3_FLR_RESET;
3840 clear_bit(HNAE3_FLR_RESET, addr);
3843 if (hdev->reset_type != HNAE3_NONE_RESET &&
3844 rst_level < hdev->reset_type)
3845 return HNAE3_NONE_RESET;
3850 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3854 switch (hdev->reset_type) {
3855 case HNAE3_IMP_RESET:
3856 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3858 case HNAE3_GLOBAL_RESET:
3859 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3868 /* For revision 0x20, the reset interrupt source
3869 * can only be cleared after the hardware reset is done
*/
3871 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3872 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3875 hclge_enable_vector(&hdev->misc_vector, true);
3878 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3882 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3884 reg_val |= HCLGE_NIC_SW_RST_RDY;
3886 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3888 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3891 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3895 ret = hclge_set_all_vf_rst(hdev, true);
3899 hclge_func_reset_sync_vf(hdev);
3904 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3909 switch (hdev->reset_type) {
3910 case HNAE3_FUNC_RESET:
3911 ret = hclge_func_reset_notify_vf(hdev);
3915 ret = hclge_func_reset_cmd(hdev, 0);
3917 dev_err(&hdev->pdev->dev,
3918 "asserting function reset fail %d!\n", ret);
3922 /* After performing PF reset, it is not necessary to do the
3923 * mailbox handling or send any command to firmware, because
3924 * any mailbox handling or command to firmware is only valid
3925 * after hclge_cmd_init is called.
*/
3927 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3928 hdev->rst_stats.pf_rst_cnt++;
3930 case HNAE3_FLR_RESET:
3931 ret = hclge_func_reset_notify_vf(hdev);
3935 case HNAE3_IMP_RESET:
3936 hclge_handle_imp_error(hdev);
3937 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3938 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3939 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3945 /* inform hardware that preparatory work is done */
3946 msleep(HCLGE_RESET_SYNC_TIME);
3947 hclge_reset_handshake(hdev, true);
3948 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3953 static void hclge_show_rst_info(struct hclge_dev *hdev)
3957 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3961 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3963 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3968 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3970 #define MAX_RESET_FAIL_CNT 5
3972 if (hdev->reset_pending) {
3973 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3974 hdev->reset_pending);
3976 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3977 HCLGE_RESET_INT_M) {
3978 dev_info(&hdev->pdev->dev,
3979 "reset failed because new reset interrupt\n");
3980 hclge_clear_reset_cause(hdev);
3982 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3983 hdev->rst_stats.reset_fail_cnt++;
3984 set_bit(hdev->reset_type, &hdev->reset_pending);
3985 dev_info(&hdev->pdev->dev,
3986 "re-schedule reset task(%u)\n",
3987 hdev->rst_stats.reset_fail_cnt);
3991 hclge_clear_reset_cause(hdev);
3993 /* recover the handshake status when reset fails */
3994 hclge_reset_handshake(hdev, true);
3996 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3998 hclge_show_rst_info(hdev);
4000 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4005 static void hclge_update_reset_level(struct hclge_dev *hdev)
4007 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4008 enum hnae3_reset_type reset_level;
4010 /* reset request will not be set during reset, so clear
4011 * the pending reset request to avoid an unnecessary reset
4012 * caused by the same reason.
*/
4014 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4016 /* if default_reset_request holds a higher level reset request,
4017 * it should be handled as soon as possible, since some errors
4018 * need this kind of reset to be fixed.
*/
4020 reset_level = hclge_get_reset_level(ae_dev,
4021 &hdev->default_reset_request);
4022 if (reset_level != HNAE3_NONE_RESET)
4023 set_bit(reset_level, &hdev->reset_request);
4026 static int hclge_set_rst_done(struct hclge_dev *hdev)
4028 struct hclge_pf_rst_done_cmd *req;
4029 struct hclge_desc desc;
4032 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4033 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4034 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4036 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4037 /* To be compatible with the old firmware, which does not support
4038 * command HCLGE_OPC_PF_RST_DONE, just print a warning and carry on.
*/
4041 if (ret == -EOPNOTSUPP) {
4042 dev_warn(&hdev->pdev->dev,
4043 "current firmware does not support command(0x%x)!\n",
4044 HCLGE_OPC_PF_RST_DONE);
4047 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4054 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4058 switch (hdev->reset_type) {
4059 case HNAE3_FUNC_RESET:
4060 case HNAE3_FLR_RESET:
4061 ret = hclge_set_all_vf_rst(hdev, false);
4063 case HNAE3_GLOBAL_RESET:
4064 case HNAE3_IMP_RESET:
4065 ret = hclge_set_rst_done(hdev);
4071 /* clear up the handshake status after re-initialize done */
4072 hclge_reset_handshake(hdev, false);
4077 static int hclge_reset_stack(struct hclge_dev *hdev)
4081 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4085 ret = hclge_reset_ae_dev(hdev->ae_dev);
4089 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4092 static int hclge_reset_prepare(struct hclge_dev *hdev)
4096 hdev->rst_stats.reset_cnt++;
4097 /* perform reset of the stack & ae device for a client */
4098 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4103 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4108 return hclge_reset_prepare_wait(hdev);
4111 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4115 hdev->rst_stats.hw_reset_done_cnt++;
4117 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4122 ret = hclge_reset_stack(hdev);
4127 hclge_clear_reset_cause(hdev);
4129 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4130 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
* times
*/
4134 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4137 ret = hclge_reset_prepare_up(hdev);
4142 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4147 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4151 hdev->last_reset_time = jiffies;
4152 hdev->rst_stats.reset_fail_cnt = 0;
4153 hdev->rst_stats.reset_done_cnt++;
4154 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4156 hclge_update_reset_level(hdev);
4161 static void hclge_reset(struct hclge_dev *hdev)
4163 if (hclge_reset_prepare(hdev))
4166 if (hclge_reset_wait(hdev))
4169 if (hclge_reset_rebuild(hdev))
4175 if (hclge_reset_err_handle(hdev))
4176 hclge_reset_task_schedule(hdev);
4179 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4181 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4182 struct hclge_dev *hdev = ae_dev->priv;
4184 /* We might end up getting called broadly because of the 2 cases below:
4185 * 1. A recoverable error was conveyed through APEI and the only way
4186 *    to bring back normalcy is to reset.
4187 * 2. A new reset request from the stack due to a timeout.
4189 * Check if this is a new reset request and we are not here just because
4190 * the last reset attempt did not succeed and the watchdog hit us again.
4191 * We know this if the last reset request did not occur very recently
4192 * (watchdog timer = 5*HZ; check after a sufficiently large time, say
4193 * 4*5*HZ). For a new request we reset the "reset level" to PF reset.
4194 * And if it is a repeat of the most recent reset request, then we want
4195 * to make sure we throttle the reset request. Therefore, we will not
4196 * allow it again before 3*HZ has passed.
*/
4199 if (time_before(jiffies, (hdev->last_reset_time +
4200 HCLGE_RESET_INTERVAL))) {
4201 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4205 if (hdev->default_reset_request) {
4207 hclge_get_reset_level(ae_dev,
4208 &hdev->default_reset_request);
4209 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4210 hdev->reset_level = HNAE3_FUNC_RESET;
4213 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4216 /* request reset & schedule reset task */
4217 set_bit(hdev->reset_level, &hdev->reset_request);
4218 hclge_reset_task_schedule(hdev);
4220 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4221 hdev->reset_level++;
4224 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4225 enum hnae3_reset_type rst_type)
4227 struct hclge_dev *hdev = ae_dev->priv;
4229 set_bit(rst_type, &hdev->default_reset_request);
4232 static void hclge_reset_timer(struct timer_list *t)
4234 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4236 /* if default_reset_request has no value, it means that this reset
4237 * request has already been handled, so just return here
*/
4239 if (!hdev->default_reset_request)
4242 dev_info(&hdev->pdev->dev,
4243 "triggering reset in reset timer\n");
4244 hclge_reset_event(hdev->pdev, NULL);
4247 static void hclge_reset_subtask(struct hclge_dev *hdev)
4249 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4251 /* check if there is any ongoing reset in the hardware. This status can
4252 * be checked from reset_pending. If there is, then we need to wait for
4253 * the hardware to complete the reset.
4254 * a. If we are able to figure out in reasonable time that the hardware
4255 * has fully reset, then we can proceed with the driver and client
4257 * b. else, we can come back later to check this status and re-schedule
*/
4260 hdev->last_reset_time = jiffies;
4261 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4262 if (hdev->reset_type != HNAE3_NONE_RESET)
4265 /* check if we got any *new* reset requests to be honored */
4266 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4267 if (hdev->reset_type != HNAE3_NONE_RESET)
4268 hclge_do_reset(hdev);
4270 hdev->reset_type = HNAE3_NONE_RESET;
4273 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4275 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4276 enum hnae3_reset_type reset_type;
4278 if (ae_dev->hw_err_reset_req) {
4279 reset_type = hclge_get_reset_level(ae_dev,
4280 &ae_dev->hw_err_reset_req);
4281 hclge_set_def_reset_request(ae_dev, reset_type);
4284 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4285 ae_dev->ops->reset_event(hdev->pdev, NULL);
4287 /* enable interrupt after error handling is complete */
4288 hclge_enable_vector(&hdev->misc_vector, true);
4291 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4293 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4295 ae_dev->hw_err_reset_req = 0;
4297 if (hclge_find_error_source(hdev)) {
4298 hclge_handle_error_info_log(ae_dev);
4299 hclge_handle_mac_tnl(hdev);
4302 hclge_handle_err_reset_request(hdev);
4305 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4307 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4308 struct device *dev = &hdev->pdev->dev;
4311 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4312 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4313 if (hclge_handle_hw_msix_error
4314 (hdev, &hdev->default_reset_request))
4315 dev_info(dev, "received msix interrupt 0x%x\n",
4319 hclge_handle_hw_ras_error(ae_dev);
4321 hclge_handle_err_reset_request(hdev);
4324 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4326 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4329 if (hnae3_dev_ras_imp_supported(hdev))
4330 hclge_handle_err_recovery(hdev);
4332 hclge_misc_err_recovery(hdev);
4335 static void hclge_reset_service_task(struct hclge_dev *hdev)
4337 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4340 down(&hdev->reset_sem);
4341 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4343 hclge_reset_subtask(hdev);
4345 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4346 up(&hdev->reset_sem);
4349 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4353 /* start from vport 1, since the PF (vport 0) is always alive */
4354 for (i = 1; i < hdev->num_alloc_vport; i++) {
4355 struct hclge_vport *vport = &hdev->vport[i];
4357 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4358 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4360 /* If the VF is not alive, reset its MPS to the default value */
4361 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4362 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4366 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4368 unsigned long delta = round_jiffies_relative(HZ);
4370 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4373 /* Always handle link updating to make sure the link state is
4374 * updated when it is triggered by mbx.
4376 hclge_update_link_status(hdev);
4377 hclge_sync_mac_table(hdev);
4378 hclge_sync_promisc_mode(hdev);
4379 hclge_sync_fd_table(hdev);
4381 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4382 delta = jiffies - hdev->last_serv_processed;
4384 if (delta < round_jiffies_relative(HZ)) {
4385 delta = round_jiffies_relative(HZ) - delta;
4390 hdev->serv_processed_cnt++;
4391 hclge_update_vport_alive(hdev);
4393 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4394 hdev->last_serv_processed = jiffies;
4398 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4399 hclge_update_stats_for_all(hdev);
4401 hclge_update_port_info(hdev);
4402 hclge_sync_vlan_filter(hdev);
4404 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4405 hclge_rfs_filter_expire(hdev);
4407 hdev->last_serv_processed = jiffies;
4410 hclge_task_schedule(hdev, delta);
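/* Editor's worked example for the re-scheduling logic above: with HZ == 1000,
 * if the previous service pass finished 300 jiffies ago, the
 * time_is_after_jiffies() branch computes delta = ~1000 - 300 = ~700, so the
 * next pass is scheduled roughly 700 jiffies from now. The heavier work
 * (stats, vlan, arfs) thus keeps running about once per second relative to
 * the last completed pass, not once per scheduling attempt.
 */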
4413 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4415 unsigned long flags;
4417 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4418 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4419 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4422 /* to avoid racing with the irq handler */
4423 spin_lock_irqsave(&hdev->ptp->lock, flags);
4425 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4426 * handler may handle it just before spin_lock_irqsave().
4428 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4429 hclge_ptp_clean_tx_hwts(hdev);
4431 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4434 static void hclge_service_task(struct work_struct *work)
4436 struct hclge_dev *hdev =
4437 container_of(work, struct hclge_dev, service_task.work);
4439 hclge_errhand_service_task(hdev);
4440 hclge_reset_service_task(hdev);
4441 hclge_ptp_service_task(hdev);
4442 hclge_mailbox_service_task(hdev);
4443 hclge_periodic_service_task(hdev);
4445 /* Handle error recovery, reset and mbx again in case the periodic task
4446 * delays the handling by calling hclge_task_schedule() in
4447 * hclge_periodic_service_task().
4449 hclge_errhand_service_task(hdev);
4450 hclge_reset_service_task(hdev);
4451 hclge_mailbox_service_task(hdev);
4454 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4456 /* VF handle has no client */
4457 if (!handle->client)
4458 return container_of(handle, struct hclge_vport, nic);
4459 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4460 return container_of(handle, struct hclge_vport, roce);
4462 return container_of(handle, struct hclge_vport, nic);
4465 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4466 struct hnae3_vector_info *vector_info)
4468 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4470 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4472 /* need an extended offset to config vectors >= 64 */
4473 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4474 vector_info->io_addr = hdev->hw.io_base +
4475 HCLGE_VECTOR_REG_BASE +
4476 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4478 vector_info->io_addr = hdev->hw.io_base +
4479 HCLGE_VECTOR_EXT_REG_BASE +
4480 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4481 HCLGE_VECTOR_REG_OFFSET_H +
4482 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4483 HCLGE_VECTOR_REG_OFFSET;
4485 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4486 hdev->vector_irq[idx] = vector_info->vector;
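/* Editor's worked example (symbolic, using the macros above): for idx = 71,
 * idx - 1 = 70 >= 64, so the extended branch yields
 *   io_addr = io_base + HCLGE_VECTOR_EXT_REG_BASE
 *             + (70 / 64) * HCLGE_VECTOR_REG_OFFSET_H   (group 1)
 *             + (70 % 64) * HCLGE_VECTOR_REG_OFFSET     (slot 6)
 * i.e. vectors beyond the first 64 are addressed as (group, slot) pairs.
 */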
4489 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4490 struct hnae3_vector_info *vector_info)
4492 struct hclge_vport *vport = hclge_get_vport(handle);
4493 struct hnae3_vector_info *vector = vector_info;
4494 struct hclge_dev *hdev = vport->back;
4499 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4500 vector_num = min(hdev->num_msi_left, vector_num);
4502 for (j = 0; j < vector_num; j++) {
4503 while (++i < hdev->num_nic_msi) {
4504 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4505 hclge_get_vector_info(hdev, i, vector);
4513 hdev->num_msi_left -= alloc;
4514 hdev->num_msi_used += alloc;
4519 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4523 for (i = 0; i < hdev->num_msi; i++)
4524 if (vector == hdev->vector_irq[i])
4530 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4532 struct hclge_vport *vport = hclge_get_vport(handle);
4533 struct hclge_dev *hdev = vport->back;
4536 vector_id = hclge_get_vector_index(hdev, vector);
4537 if (vector_id < 0) {
4538 dev_err(&hdev->pdev->dev,
4539 "Get vector index fail. vector = %d\n", vector);
4543 hclge_free_vector(hdev, vector_id);
4548 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4550 return HCLGE_RSS_KEY_SIZE;
4553 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4554 const u8 hfunc, const u8 *key)
4556 struct hclge_rss_config_cmd *req;
4557 unsigned int key_offset = 0;
4558 struct hclge_desc desc;
4563 key_counts = HCLGE_RSS_KEY_SIZE;
4564 req = (struct hclge_rss_config_cmd *)desc.data;
4566 while (key_counts) {
4567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4570 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4571 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4573 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4574 memcpy(req->hash_key,
4575 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4577 key_counts -= key_size;
4579 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4581 dev_err(&hdev->pdev->dev,
4582 "Configure RSS config fail, status = %d\n",
4590 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4592 struct hclge_rss_indirection_table_cmd *req;
4593 struct hclge_desc desc;
4594 int rss_cfg_tbl_num;
4602 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4603 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4604 HCLGE_RSS_CFG_TBL_SIZE;
4606 for (i = 0; i < rss_cfg_tbl_num; i++) {
4607 hclge_cmd_setup_basic_desc
4608 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4610 req->start_table_index =
4611 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4612 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4613 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4614 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4615 req->rss_qid_l[j] = qid & 0xff;
4617 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4618 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4619 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4620 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4622 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4624 dev_err(&hdev->pdev->dev,
4625 "Configure rss indir table fail,status = %d\n",
4633 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4634 u16 *tc_size, u16 *tc_offset)
4636 struct hclge_rss_tc_mode_cmd *req;
4637 struct hclge_desc desc;
4641 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4642 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4644 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4647 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4648 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4649 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4650 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4651 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4652 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4653 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4655 req->rss_tc_mode[i] = cpu_to_le16(mode);
4658 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4660 dev_err(&hdev->pdev->dev,
4661 "Configure rss tc mode fail, status = %d\n", ret);
4666 static void hclge_get_rss_type(struct hclge_vport *vport)
4668 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4669 vport->rss_tuple_sets.ipv4_udp_en ||
4670 vport->rss_tuple_sets.ipv4_sctp_en ||
4671 vport->rss_tuple_sets.ipv6_tcp_en ||
4672 vport->rss_tuple_sets.ipv6_udp_en ||
4673 vport->rss_tuple_sets.ipv6_sctp_en)
4674 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4675 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4676 vport->rss_tuple_sets.ipv6_fragment_en)
4677 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4679 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4682 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4684 struct hclge_rss_input_tuple_cmd *req;
4685 struct hclge_desc desc;
4688 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4690 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4692 /* Get the tuple cfg from the PF */
4693 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4694 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4695 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4696 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4697 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4698 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4699 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4700 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4701 hclge_get_rss_type(&hdev->vport[0]);
4702 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4704 dev_err(&hdev->pdev->dev,
4705 "Configure rss input fail, status = %d\n", ret);
4709 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4712 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4713 struct hclge_vport *vport = hclge_get_vport(handle);
4716 /* Get hash algorithm */
4718 switch (vport->rss_algo) {
4719 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4720 *hfunc = ETH_RSS_HASH_TOP;
4722 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4723 *hfunc = ETH_RSS_HASH_XOR;
4726 *hfunc = ETH_RSS_HASH_UNKNOWN;
4731 /* Get the RSS Key required by the user */
4733 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4735 /* Get indirect table */
4737 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4738 indir[i] = vport->rss_indirection_tbl[i];
4743 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4744 const u8 *key, const u8 hfunc)
4746 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4747 struct hclge_vport *vport = hclge_get_vport(handle);
4748 struct hclge_dev *hdev = vport->back;
4752 /* Set the RSS Hash Key if specified by the user */
4755 case ETH_RSS_HASH_TOP:
4756 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4758 case ETH_RSS_HASH_XOR:
4759 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4761 case ETH_RSS_HASH_NO_CHANGE:
4762 hash_algo = vport->rss_algo;
4768 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4772 /* Update the shadow RSS key with the user-specified key */
4773 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4774 vport->rss_algo = hash_algo;
4777 /* Update the shadow RSS table with user specified qids */
4778 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4779 vport->rss_indirection_tbl[i] = indir[i];
4781 /* Update the hardware */
4782 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4785 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4787 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4789 if (nfc->data & RXH_L4_B_2_3)
4790 hash_sets |= HCLGE_D_PORT_BIT;
4792 hash_sets &= ~HCLGE_D_PORT_BIT;
4794 if (nfc->data & RXH_IP_SRC)
4795 hash_sets |= HCLGE_S_IP_BIT;
4797 hash_sets &= ~HCLGE_S_IP_BIT;
4799 if (nfc->data & RXH_IP_DST)
4800 hash_sets |= HCLGE_D_IP_BIT;
4802 hash_sets &= ~HCLGE_D_IP_BIT;
4804 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4805 hash_sets |= HCLGE_V_TAG_BIT;
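/* Editor's note: this mirrors the ethtool rx-flow-hash encoding, e.g.
 * "ethtool -N <dev> rx-flow-hash tcp4 sdfn" requests RXH_IP_SRC |
 * RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which the logic above folds
 * into HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT (SCTP flows additionally get HCLGE_V_TAG_BIT).
 */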
4810 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4811 struct ethtool_rxnfc *nfc,
4812 struct hclge_rss_input_tuple_cmd *req)
4814 struct hclge_dev *hdev = vport->back;
4817 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4818 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4819 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4820 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4821 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4822 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4823 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4824 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4826 tuple_sets = hclge_get_rss_hash_bits(nfc);
4827 switch (nfc->flow_type) {
4829 req->ipv4_tcp_en = tuple_sets;
4832 req->ipv6_tcp_en = tuple_sets;
4835 req->ipv4_udp_en = tuple_sets;
4838 req->ipv6_udp_en = tuple_sets;
4841 req->ipv4_sctp_en = tuple_sets;
4844 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4845 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4848 req->ipv6_sctp_en = tuple_sets;
4851 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4854 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4863 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4864 struct ethtool_rxnfc *nfc)
4866 struct hclge_vport *vport = hclge_get_vport(handle);
4867 struct hclge_dev *hdev = vport->back;
4868 struct hclge_rss_input_tuple_cmd *req;
4869 struct hclge_desc desc;
4872 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4873 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4876 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4877 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4879 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4881 dev_err(&hdev->pdev->dev,
4882 "failed to init rss tuple cmd, ret = %d\n", ret);
4886 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4888 dev_err(&hdev->pdev->dev,
4889 "Set rss tuple fail, status = %d\n", ret);
4893 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4894 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4895 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4896 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4897 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4898 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4899 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4900 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4901 hclge_get_rss_type(vport);
4905 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4908 switch (flow_type) {
4910 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4913 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4916 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4919 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4922 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4925 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4929 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4938 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4942 if (tuple_sets & HCLGE_D_PORT_BIT)
4943 tuple_data |= RXH_L4_B_2_3;
4944 if (tuple_sets & HCLGE_S_PORT_BIT)
4945 tuple_data |= RXH_L4_B_0_1;
4946 if (tuple_sets & HCLGE_D_IP_BIT)
4947 tuple_data |= RXH_IP_DST;
4948 if (tuple_sets & HCLGE_S_IP_BIT)
4949 tuple_data |= RXH_IP_SRC;
4954 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4955 struct ethtool_rxnfc *nfc)
4957 struct hclge_vport *vport = hclge_get_vport(handle);
4963 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4964 if (ret || !tuple_sets)
4967 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4972 static int hclge_get_tc_size(struct hnae3_handle *handle)
4974 struct hclge_vport *vport = hclge_get_vport(handle);
4975 struct hclge_dev *hdev = vport->back;
4977 return hdev->pf_rss_size_max;
4980 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4982 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4983 struct hclge_vport *vport = hdev->vport;
4984 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4985 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4986 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4987 struct hnae3_tc_info *tc_info;
4992 tc_info = &vport->nic.kinfo.tc_info;
4993 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4994 rss_size = tc_info->tqp_count[i];
4997 if (!(hdev->hw_tc_map & BIT(i)))
5000 /* tc_size set to hardware is the log2 of the roundup power of two
5001 * of rss_size; the actual queue size is limited by the indirection table.
5004 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5006 dev_err(&hdev->pdev->dev,
5007 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5012 roundup_size = roundup_pow_of_two(rss_size);
5013 roundup_size = ilog2(roundup_size);
5016 tc_size[i] = roundup_size;
5017 tc_offset[i] = tc_info->tqp_offset[i];
5020 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
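/* Editor's worked example: a TC with rss_size = 100 gives
 * roundup_pow_of_two(100) = 128 and ilog2(128) = 7, so tc_size = 7 is
 * written to hardware; the actual queue spread remains bounded by the
 * contents of the indirection table initialized below.
 */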
5023 int hclge_rss_init_hw(struct hclge_dev *hdev)
5025 struct hclge_vport *vport = hdev->vport;
5026 u16 *rss_indir = vport[0].rss_indirection_tbl;
5027 u8 *key = vport[0].rss_hash_key;
5028 u8 hfunc = vport[0].rss_algo;
5031 ret = hclge_set_rss_indir_table(hdev, rss_indir);
5035 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5039 ret = hclge_set_rss_input_tuple(hdev);
5043 return hclge_init_rss_tc_mode(hdev);
5046 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5048 struct hclge_vport *vport = &hdev->vport[0];
5051 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5052 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
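/* Editor's worked example: with alloc_rss_size = 16 and, say, a 512-entry
 * indirection table, the loop above fills the table with the pattern
 * 0, 1, ..., 15, 0, 1, ... so entry i maps to queue (i % 16), spreading
 * the hash buckets evenly across the allocated RSS queues.
 */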
5055 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5057 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5058 int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5059 struct hclge_vport *vport = &hdev->vport[0];
5062 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5063 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5065 vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5066 vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5067 vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5068 vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5069 vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5070 vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5071 vport->rss_tuple_sets.ipv6_sctp_en =
5072 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5073 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5074 HCLGE_RSS_INPUT_TUPLE_SCTP;
5075 vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5077 vport->rss_algo = rss_algo;
5079 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5080 sizeof(*rss_ind_tbl), GFP_KERNEL);
5084 vport->rss_indirection_tbl = rss_ind_tbl;
5085 memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5087 hclge_rss_indir_init_cfg(hdev);
5092 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5093 int vector_id, bool en,
5094 struct hnae3_ring_chain_node *ring_chain)
5096 struct hclge_dev *hdev = vport->back;
5097 struct hnae3_ring_chain_node *node;
5098 struct hclge_desc desc;
5099 struct hclge_ctrl_vector_chain_cmd *req =
5100 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5101 enum hclge_cmd_status status;
5102 enum hclge_opcode_type op;
5103 u16 tqp_type_and_id;
5106 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5107 hclge_cmd_setup_basic_desc(&desc, op, false);
5108 req->int_vector_id_l = hnae3_get_field(vector_id,
5109 HCLGE_VECTOR_ID_L_M,
5110 HCLGE_VECTOR_ID_L_S);
5111 req->int_vector_id_h = hnae3_get_field(vector_id,
5112 HCLGE_VECTOR_ID_H_M,
5113 HCLGE_VECTOR_ID_H_S);
5116 for (node = ring_chain; node; node = node->next) {
5117 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5118 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5120 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5121 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5122 HCLGE_TQP_ID_S, node->tqp_index);
5123 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5125 hnae3_get_field(node->int_gl_idx,
5126 HNAE3_RING_GL_IDX_M,
5127 HNAE3_RING_GL_IDX_S));
5128 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5129 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5130 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5131 req->vfid = vport->vport_id;
5133 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5135 dev_err(&hdev->pdev->dev,
5136 "Map TQP fail, status is %d.\n",
5142 hclge_cmd_setup_basic_desc(&desc,
5145 req->int_vector_id_l =
5146 hnae3_get_field(vector_id,
5147 HCLGE_VECTOR_ID_L_M,
5148 HCLGE_VECTOR_ID_L_S);
5149 req->int_vector_id_h =
5150 hnae3_get_field(vector_id,
5151 HCLGE_VECTOR_ID_H_M,
5152 HCLGE_VECTOR_ID_H_S);
5157 req->int_cause_num = i;
5158 req->vfid = vport->vport_id;
5159 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5161 dev_err(&hdev->pdev->dev,
5162 "Map TQP fail, status is %d.\n", status);
5170 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5171 struct hnae3_ring_chain_node *ring_chain)
5173 struct hclge_vport *vport = hclge_get_vport(handle);
5174 struct hclge_dev *hdev = vport->back;
5177 vector_id = hclge_get_vector_index(hdev, vector);
5178 if (vector_id < 0) {
5179 dev_err(&hdev->pdev->dev,
5180 "failed to get vector index. vector=%d\n", vector);
5184 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5187 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5188 struct hnae3_ring_chain_node *ring_chain)
5190 struct hclge_vport *vport = hclge_get_vport(handle);
5191 struct hclge_dev *hdev = vport->back;
5194 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5197 vector_id = hclge_get_vector_index(hdev, vector);
5198 if (vector_id < 0) {
5199 dev_err(&handle->pdev->dev,
5200 "Get vector index fail. ret =%d\n", vector_id);
5204 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5206 dev_err(&handle->pdev->dev,
5207 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5213 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5214 bool en_uc, bool en_mc, bool en_bc)
5216 struct hclge_vport *vport = &hdev->vport[vf_id];
5217 struct hnae3_handle *handle = &vport->nic;
5218 struct hclge_promisc_cfg_cmd *req;
5219 struct hclge_desc desc;
5220 bool uc_tx_en = en_uc;
5224 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5226 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5229 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5232 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5233 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5234 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5235 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5236 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5237 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5238 req->extend_promisc = promisc_cfg;
5240 /* to be compatible with DEVICE_VERSION_V1/2 */
5242 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5243 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5244 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5245 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5246 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5247 req->promisc = promisc_cfg;
5249 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5251 dev_err(&hdev->pdev->dev,
5252 "failed to set vport %u promisc mode, ret = %d.\n",
5258 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5259 bool en_mc_pmc, bool en_bc_pmc)
5261 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5262 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5265 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5268 struct hclge_vport *vport = hclge_get_vport(handle);
5269 struct hclge_dev *hdev = vport->back;
5270 bool en_bc_pmc = true;
5272 /* For devices whose version is below V2, if broadcast promisc is enabled,
5273 * the vlan filter is always bypassed. So broadcast promisc should be
5274 * disabled until the user enables promisc mode
5276 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5277 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5279 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5283 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5285 struct hclge_vport *vport = hclge_get_vport(handle);
5287 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5290 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5292 if (hlist_empty(&hdev->fd_rule_list))
5293 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5296 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5298 if (!test_bit(location, hdev->fd_bmap)) {
5299 set_bit(location, hdev->fd_bmap);
5300 hdev->hclge_fd_rule_num++;
5304 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5306 if (test_bit(location, hdev->fd_bmap)) {
5307 clear_bit(location, hdev->fd_bmap);
5308 hdev->hclge_fd_rule_num--;
5312 static void hclge_fd_free_node(struct hclge_dev *hdev,
5313 struct hclge_fd_rule *rule)
5315 hlist_del(&rule->rule_node);
5317 hclge_sync_fd_state(hdev);
5320 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5321 struct hclge_fd_rule *old_rule,
5322 struct hclge_fd_rule *new_rule,
5323 enum HCLGE_FD_NODE_STATE state)
5326 case HCLGE_FD_TO_ADD:
5327 case HCLGE_FD_ACTIVE:
5328 /* 1) if the new state is TO_ADD, just replace the old rule
5329 * with the same location, no matter its state, because the
5330 * new rule will be configured to the hardware.
5331 * 2) if the new state is ACTIVE, it means the new rule
5332 * has been configured to the hardware, so just replace
5333 * the old rule node with the same location.
5334 * 3) since this doesn't add a new node to the list, it's
5335 * unnecessary to update the rule number and fd_bmap.
5337 new_rule->rule_node.next = old_rule->rule_node.next;
5338 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5339 memcpy(old_rule, new_rule, sizeof(*old_rule));
5342 case HCLGE_FD_DELETED:
5343 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5344 hclge_fd_free_node(hdev, old_rule);
5346 case HCLGE_FD_TO_DEL:
5347 /* if the new request is TO_DEL, and the old rule exists:
5348 * 1) if the state of the old rule is TO_DEL, we need do nothing,
5349 * because we delete the rule by location and the other rule content
5351 * 2) if the state of the old rule is ACTIVE, we need to change its
5352 * state to TO_DEL, so the rule will be deleted when the periodic
5353 * task is scheduled.
5354 * 3) if the state of the old rule is TO_ADD, it means the rule hasn't
5355 * been added to the hardware yet, so we just delete the rule node
5356 * from fd_rule_list directly.
5358 if (old_rule->state == HCLGE_FD_TO_ADD) {
5359 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5360 hclge_fd_free_node(hdev, old_rule);
5363 old_rule->state = HCLGE_FD_TO_DEL;
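/* Editor's summary of the transitions handled above
 * (new request x old rule state):
 *
 *   request \ old  | TO_ADD    | ACTIVE      | TO_DEL
 *   ---------------+-----------+-------------+--------------
 *   TO_ADD/ACTIVE  | replace   | replace     | replace
 *   TO_DEL         | free node | mark TO_DEL | stays TO_DEL
 *   DELETED        | free node | free node   | free node
 */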
5368 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5370 struct hclge_fd_rule **parent)
5372 struct hclge_fd_rule *rule;
5373 struct hlist_node *node;
5375 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5376 if (rule->location == location)
5378 else if (rule->location > location)
5380 /* record the parent node, used to keep the nodes in fd_rule_list
5389 /* insert the fd rule node in ascending order according to rule->location */
5390 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5391 struct hclge_fd_rule *rule,
5392 struct hclge_fd_rule *parent)
5394 INIT_HLIST_NODE(&rule->rule_node);
5397 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5399 hlist_add_head(&rule->rule_node, hlist);
5402 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5403 struct hclge_fd_user_def_cfg *cfg)
5405 struct hclge_fd_user_def_cfg_cmd *req;
5406 struct hclge_desc desc;
5410 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5412 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5414 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5415 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5416 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5417 req->ol2_cfg = cpu_to_le16(data);
5420 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5421 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5422 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5423 req->ol3_cfg = cpu_to_le16(data);
5426 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5427 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5428 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5429 req->ol4_cfg = cpu_to_le16(data);
5431 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5433 dev_err(&hdev->pdev->dev,
5434 "failed to set fd user def data, ret= %d\n", ret);
5438 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5442 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5446 spin_lock_bh(&hdev->fd_rule_lock);
5448 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5450 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5453 spin_unlock_bh(&hdev->fd_rule_lock);
5456 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5457 struct hclge_fd_rule *rule)
5459 struct hlist_head *hlist = &hdev->fd_rule_list;
5460 struct hclge_fd_rule *fd_rule, *parent = NULL;
5461 struct hclge_fd_user_def_info *info, *old_info;
5462 struct hclge_fd_user_def_cfg *cfg;
5464 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5465 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5468 /* valid layers start from 1, so subtract 1 to index the cfg array */
5469 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5470 info = &rule->ep.user_def;
5472 if (!cfg->ref_cnt || cfg->offset == info->offset)
5475 if (cfg->ref_cnt > 1)
5478 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5480 old_info = &fd_rule->ep.user_def;
5481 if (info->layer == old_info->layer)
5486 dev_err(&hdev->pdev->dev,
5487 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5492 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5493 struct hclge_fd_rule *rule)
5495 struct hclge_fd_user_def_cfg *cfg;
5497 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5498 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5501 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5502 if (!cfg->ref_cnt) {
5503 cfg->offset = rule->ep.user_def.offset;
5504 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5509 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5510 struct hclge_fd_rule *rule)
5512 struct hclge_fd_user_def_cfg *cfg;
5514 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5515 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5518 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5523 if (!cfg->ref_cnt) {
5525 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5529 static void hclge_update_fd_list(struct hclge_dev *hdev,
5530 enum HCLGE_FD_NODE_STATE state, u16 location,
5531 struct hclge_fd_rule *new_rule)
5533 struct hlist_head *hlist = &hdev->fd_rule_list;
5534 struct hclge_fd_rule *fd_rule, *parent = NULL;
5536 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5538 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5539 if (state == HCLGE_FD_ACTIVE)
5540 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5541 hclge_sync_fd_user_def_cfg(hdev, true);
5543 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5547 /* it's unlikely to fail here, because we have checked the rule
5550 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5551 dev_warn(&hdev->pdev->dev,
5552 "failed to delete fd rule %u, it's inexistent\n",
5557 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5558 hclge_sync_fd_user_def_cfg(hdev, true);
5560 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5561 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5563 if (state == HCLGE_FD_TO_ADD) {
5564 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5565 hclge_task_schedule(hdev, 0);
5569 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5571 struct hclge_get_fd_mode_cmd *req;
5572 struct hclge_desc desc;
5575 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5577 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5579 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5581 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5585 *fd_mode = req->mode;
5590 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5591 u32 *stage1_entry_num,
5592 u32 *stage2_entry_num,
5593 u16 *stage1_counter_num,
5594 u16 *stage2_counter_num)
5596 struct hclge_get_fd_allocation_cmd *req;
5597 struct hclge_desc desc;
5600 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5602 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5604 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5606 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5611 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5612 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5613 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5614 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5619 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5620 enum HCLGE_FD_STAGE stage_num)
5622 struct hclge_set_fd_key_config_cmd *req;
5623 struct hclge_fd_key_cfg *stage;
5624 struct hclge_desc desc;
5627 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5629 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5630 stage = &hdev->fd_cfg.key_cfg[stage_num];
5631 req->stage = stage_num;
5632 req->key_select = stage->key_sel;
5633 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5634 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5635 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5636 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5637 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5638 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5640 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5642 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5647 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5649 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5651 spin_lock_bh(&hdev->fd_rule_lock);
5652 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5653 spin_unlock_bh(&hdev->fd_rule_lock);
5655 hclge_fd_set_user_def_cmd(hdev, cfg);
5658 static int hclge_init_fd_config(struct hclge_dev *hdev)
5660 #define LOW_2_WORDS 0x03
5661 struct hclge_fd_key_cfg *key_cfg;
5664 if (!hnae3_dev_fd_supported(hdev))
5667 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5671 switch (hdev->fd_cfg.fd_mode) {
5672 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5673 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5675 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5676 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5679 dev_err(&hdev->pdev->dev,
5680 "Unsupported flow director mode %u\n",
5681 hdev->fd_cfg.fd_mode);
5685 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5686 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5687 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5688 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5689 key_cfg->outer_sipv6_word_en = 0;
5690 key_cfg->outer_dipv6_word_en = 0;
5692 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5693 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5694 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5695 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5697 /* If using the max 400-bit key, we can also support tuples for the ether type */
5698 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5699 key_cfg->tuple_active |=
5700 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5701 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5702 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5705 /* roce_type is used to filter roce frames
5706 * dst_vport is used to specify the rule
5708 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5710 ret = hclge_get_fd_allocation(hdev,
5711 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5712 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5713 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5714 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5718 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5721 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5722 int loc, u8 *key, bool is_add)
5724 struct hclge_fd_tcam_config_1_cmd *req1;
5725 struct hclge_fd_tcam_config_2_cmd *req2;
5726 struct hclge_fd_tcam_config_3_cmd *req3;
5727 struct hclge_desc desc[3];
5730 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5731 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5732 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5733 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5734 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5736 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5737 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5738 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5740 req1->stage = stage;
5741 req1->xy_sel = sel_x ? 1 : 0;
5742 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5743 req1->index = cpu_to_le32(loc);
5744 req1->entry_vld = sel_x ? is_add : 0;
5747 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5748 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5749 sizeof(req2->tcam_data));
5750 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5751 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5754 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5756 dev_err(&hdev->pdev->dev,
5757 "config tcam key fail, ret=%d\n",
5763 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5764 struct hclge_fd_ad_data *action)
5766 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5767 struct hclge_fd_ad_config_cmd *req;
5768 struct hclge_desc desc;
5772 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5774 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5775 req->index = cpu_to_le32(loc);
5778 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5779 action->write_rule_id_to_bd);
5780 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5782 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5783 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5784 action->override_tc);
5785 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5786 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5789 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5790 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5791 action->forward_to_direct_queue);
5792 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5794 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5795 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5796 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5797 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5798 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5799 action->counter_id);
5801 req->ad_data = cpu_to_le64(ad_data);
5802 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5804 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5809 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5810 struct hclge_fd_rule *rule)
5812 int offset, moffset, ip_offset;
5813 enum HCLGE_FD_KEY_OPT key_opt;
5814 u16 tmp_x_s, tmp_y_s;
5815 u32 tmp_x_l, tmp_y_l;
5819 if (rule->unused_tuple & BIT(tuple_bit))
5822 key_opt = tuple_key_info[tuple_bit].key_opt;
5823 offset = tuple_key_info[tuple_bit].offset;
5824 moffset = tuple_key_info[tuple_bit].moffset;
5828 calc_x(*key_x, p[offset], p[moffset]);
5829 calc_y(*key_y, p[offset], p[moffset]);
5833 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5834 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5835 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5836 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5840 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5841 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5842 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5843 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5847 for (i = 0; i < ETH_ALEN; i++) {
5848 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5850 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5856 ip_offset = IPV4_INDEX * sizeof(u32);
5857 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5858 *(u32 *)(&p[moffset + ip_offset]));
5859 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5860 *(u32 *)(&p[moffset + ip_offset]));
5861 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5862 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
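/* Editor's note (assuming the conventional TCAM x/y encoding): calc_x()
 * and calc_y() derive a key pair from each tuple value and its mask,
 * roughly x = ~value & mask and y = value & mask, so bits cleared in the
 * mask produce x = y = 0 and match any packet content ("don't care").
 */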
5870 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5871 u8 vf_id, u8 network_port_id)
5873 u32 port_number = 0;
5875 if (port_type == HOST_PORT) {
5876 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5878 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5880 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5882 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5883 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5884 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5890 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5891 __le32 *key_x, __le32 *key_y,
5892 struct hclge_fd_rule *rule)
5894 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5895 u8 cur_pos = 0, tuple_size, shift_bits;
5898 for (i = 0; i < MAX_META_DATA; i++) {
5899 tuple_size = meta_data_key_info[i].key_length;
5900 tuple_bit = key_cfg->meta_data_active & BIT(i);
5902 switch (tuple_bit) {
5903 case BIT(ROCE_TYPE):
5904 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5905 cur_pos += tuple_size;
5907 case BIT(DST_VPORT):
5908 port_number = hclge_get_port_number(HOST_PORT, 0,
5910 hnae3_set_field(meta_data,
5911 GENMASK(cur_pos + tuple_size, cur_pos),
5912 cur_pos, port_number);
5913 cur_pos += tuple_size;
5920 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5921 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5922 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5924 *key_x = cpu_to_le32(tmp_x << shift_bits);
5925 *key_y = cpu_to_le32(tmp_y << shift_bits);
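/* Editor's worked example: if ROCE_TYPE (1 bit) and DST_VPORT (n bits, per
 * meta_data_key_info) are the only active meta data tuples, cur_pos ends at
 * 1 + n and shift_bits = 32 - cur_pos, so the shifts above left-align the
 * meta data into the most-significant bits of the 32-bit key word, matching
 * the layout described in the comment below.
 */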
5928 /* A complete key is the combination of a meta data key and a tuple key.
5929 * The meta data key is stored in the MSB region, the tuple key is stored
5930 * in the LSB region, and unused bits are filled with 0.
5932 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5933 struct hclge_fd_rule *rule)
5935 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5936 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5937 u8 *cur_key_x, *cur_key_y;
5938 u8 meta_data_region;
5943 memset(key_x, 0, sizeof(key_x));
5944 memset(key_y, 0, sizeof(key_y));
5948 for (i = 0; i < MAX_TUPLE; i++) {
5951 tuple_size = tuple_key_info[i].key_length / 8;
5952 if (!(key_cfg->tuple_active & BIT(i)))
5955 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5958 cur_key_x += tuple_size;
5959 cur_key_y += tuple_size;
5963 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5964 MAX_META_DATA_LENGTH / 8;
5966 hclge_fd_convert_meta_data(key_cfg,
5967 (__le32 *)(key_x + meta_data_region),
5968 (__le32 *)(key_y + meta_data_region),
5971 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5974 dev_err(&hdev->pdev->dev,
5975 "fd key_y config fail, loc=%u, ret=%d\n",
5976 rule->location, ret);
5980 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5983 dev_err(&hdev->pdev->dev,
5984 "fd key_x config fail, loc=%u, ret=%d\n",
5985 rule->location, ret);
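/* Editor's worked example (assuming a 400-bit key and 32-bit meta data):
 * max_key_length / 8 = 50 key bytes and MAX_META_DATA_LENGTH / 8 = 4, so
 * meta_data_region = 46 and the meta data lands in bytes 46..49 at the MSB
 * end of key_x/key_y, while the tuple bytes accumulate from byte 0.
 */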
5989 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5990 struct hclge_fd_rule *rule)
5992 struct hclge_vport *vport = hdev->vport;
5993 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5994 struct hclge_fd_ad_data ad_data;
5996 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5997 ad_data.ad_id = rule->location;
5999 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6000 ad_data.drop_packet = true;
6001 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6002 ad_data.override_tc = true;
6004 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6006 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6008 ad_data.forward_to_direct_queue = true;
6009 ad_data.queue_id = rule->queue_id;
6012 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6013 ad_data.use_counter = true;
6014 ad_data.counter_id = rule->vf_id %
6015 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6017 ad_data.use_counter = false;
6018 ad_data.counter_id = 0;
6021 ad_data.use_next_stage = false;
6022 ad_data.next_input_key = 0;
6024 ad_data.write_rule_id_to_bd = true;
6025 ad_data.rule_id = rule->location;
6027 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6030 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6033 if (!spec || !unused_tuple)
6036 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6039 *unused_tuple |= BIT(INNER_SRC_IP);
6042 *unused_tuple |= BIT(INNER_DST_IP);
6045 *unused_tuple |= BIT(INNER_SRC_PORT);
6048 *unused_tuple |= BIT(INNER_DST_PORT);
6051 *unused_tuple |= BIT(INNER_IP_TOS);
6056 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6059 if (!spec || !unused_tuple)
6062 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6063 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6066 *unused_tuple |= BIT(INNER_SRC_IP);
6069 *unused_tuple |= BIT(INNER_DST_IP);
6072 *unused_tuple |= BIT(INNER_IP_TOS);
6075 *unused_tuple |= BIT(INNER_IP_PROTO);
6077 if (spec->l4_4_bytes)
6080 if (spec->ip_ver != ETH_RX_NFC_IP4)
6086 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6089 if (!spec || !unused_tuple)
6092 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6094 /* check whether the src/dst ip addresses are used */
6095 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6096 *unused_tuple |= BIT(INNER_SRC_IP);
6098 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6099 *unused_tuple |= BIT(INNER_DST_IP);
6102 *unused_tuple |= BIT(INNER_SRC_PORT);
6105 *unused_tuple |= BIT(INNER_DST_PORT);
6108 *unused_tuple |= BIT(INNER_IP_TOS);
6113 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6116 if (!spec || !unused_tuple)
6119 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6120 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6122 /* check whether the src/dst ip addresses are used */
6123 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6124 *unused_tuple |= BIT(INNER_SRC_IP);
6126 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6127 *unused_tuple |= BIT(INNER_DST_IP);
6129 if (!spec->l4_proto)
6130 *unused_tuple |= BIT(INNER_IP_PROTO);
6133 *unused_tuple |= BIT(INNER_IP_TOS);
6135 if (spec->l4_4_bytes)
6141 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6143 if (!spec || !unused_tuple)
6146 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6147 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6148 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6150 if (is_zero_ether_addr(spec->h_source))
6151 *unused_tuple |= BIT(INNER_SRC_MAC);
6153 if (is_zero_ether_addr(spec->h_dest))
6154 *unused_tuple |= BIT(INNER_DST_MAC);
6157 *unused_tuple |= BIT(INNER_ETH_TYPE);
6162 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6163 struct ethtool_rx_flow_spec *fs,
6166 if (fs->flow_type & FLOW_EXT) {
6167 if (fs->h_ext.vlan_etype) {
6168 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6172 if (!fs->h_ext.vlan_tci)
6173 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6175 if (fs->m_ext.vlan_tci &&
6176 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6177 dev_err(&hdev->pdev->dev,
6178 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6179 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6183 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6186 if (fs->flow_type & FLOW_MAC_EXT) {
6187 if (hdev->fd_cfg.fd_mode !=
6188 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6189 dev_err(&hdev->pdev->dev,
6190 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6194 if (is_zero_ether_addr(fs->h_ext.h_dest))
6195 *unused_tuple |= BIT(INNER_DST_MAC);
6197 *unused_tuple &= ~BIT(INNER_DST_MAC);
6203 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6204 struct hclge_fd_user_def_info *info)
6206 switch (flow_type) {
6208 info->layer = HCLGE_FD_USER_DEF_L2;
6209 *unused_tuple &= ~BIT(INNER_L2_RSV);
6212 case IPV6_USER_FLOW:
6213 info->layer = HCLGE_FD_USER_DEF_L3;
6214 *unused_tuple &= ~BIT(INNER_L3_RSV);
6220 info->layer = HCLGE_FD_USER_DEF_L4;
6221 *unused_tuple &= ~BIT(INNER_L4_RSV);
6230 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6232 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6235 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6236 struct ethtool_rx_flow_spec *fs,
6238 struct hclge_fd_user_def_info *info)
6240 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6241 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6242 u16 data, offset, data_mask, offset_mask;
6245 info->layer = HCLGE_FD_USER_DEF_NONE;
6246 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6248 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6251 /* the user-def data from ethtool is a 64-bit value; bits 0~15 are used
6252 * for data, and bits 32~47 are used for the offset.
6254 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6255 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6256 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6257 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6259 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6260 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6264 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6265 dev_err(&hdev->pdev->dev,
6266 "user-def offset[%u] should be no more than %u\n",
6267 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6271 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6272 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6276 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6278 dev_err(&hdev->pdev->dev,
6279 "unsupported flow type for user-def bytes, ret = %d\n",
6285 info->data_mask = data_mask;
6286 info->offset = offset;
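/* Editor's usage sketch (hypothetical device name and values): since the
 * offset lives in bits 32~47 and the match data in bits 0~15, a command like
 *
 *   ethtool -N eth0 flow-type udp4 dst-port 4789 user-def 0x400001234 action 3
 *
 * asks the rule to also match the 16-bit value 0x1234 at byte offset 4 of
 * the layer chosen by hclge_fd_get_user_def_layer() (L4 for a udp4 flow).
 */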
6291 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6292 struct ethtool_rx_flow_spec *fs,
6294 struct hclge_fd_user_def_info *info)
6299 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6300 dev_err(&hdev->pdev->dev,
6301 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6303 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6307 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6311 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6312 switch (flow_type) {
6316 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6320 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6326 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6329 case IPV6_USER_FLOW:
6330 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6334 if (hdev->fd_cfg.fd_mode !=
6335 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6336 dev_err(&hdev->pdev->dev,
6337 "ETHER_FLOW is not supported in current fd mode!\n");
6341 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6345 dev_err(&hdev->pdev->dev,
6346 "unsupported protocol type, protocol type = %#x\n",
6352 dev_err(&hdev->pdev->dev,
6353 "failed to check flow union tuple, ret = %d\n",
6358 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6361 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6362 struct ethtool_rx_flow_spec *fs,
6363 struct hclge_fd_rule *rule, u8 ip_proto)
6365 rule->tuples.src_ip[IPV4_INDEX] =
6366 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6367 rule->tuples_mask.src_ip[IPV4_INDEX] =
6368 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6370 rule->tuples.dst_ip[IPV4_INDEX] =
6371 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6372 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6373 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6375 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6376 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6378 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6379 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6381 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6382 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6384 rule->tuples.ether_proto = ETH_P_IP;
6385 rule->tuples_mask.ether_proto = 0xFFFF;
6387 rule->tuples.ip_proto = ip_proto;
6388 rule->tuples_mask.ip_proto = 0xFF;
6391 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6392 struct ethtool_rx_flow_spec *fs,
6393 struct hclge_fd_rule *rule)
6395 rule->tuples.src_ip[IPV4_INDEX] =
6396 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6397 rule->tuples_mask.src_ip[IPV4_INDEX] =
6398 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6400 rule->tuples.dst_ip[IPV4_INDEX] =
6401 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6402 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6403 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6405 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6406 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6408 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6409 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6411 rule->tuples.ether_proto = ETH_P_IP;
6412 rule->tuples_mask.ether_proto = 0xFFFF;
6415 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6416 struct ethtool_rx_flow_spec *fs,
6417 struct hclge_fd_rule *rule, u8 ip_proto)
6419 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6421 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6424 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6426 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6429 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6430 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6432 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6433 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6435 rule->tuples.ether_proto = ETH_P_IPV6;
6436 rule->tuples_mask.ether_proto = 0xFFFF;
6438 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6439 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6441 rule->tuples.ip_proto = ip_proto;
6442 rule->tuples_mask.ip_proto = 0xFF;
6445 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6446 struct ethtool_rx_flow_spec *fs,
6447 struct hclge_fd_rule *rule)
6449 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6451 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6454 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6456 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6459 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6460 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6462 rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6463 rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6465 rule->tuples.ether_proto = ETH_P_IPV6;
6466 rule->tuples_mask.ether_proto = 0xFFFF;
6469 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6470 struct ethtool_rx_flow_spec *fs,
6471 struct hclge_fd_rule *rule)
6473 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6474 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6476 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6477 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6479 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6480 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6483 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6484 struct hclge_fd_rule *rule)
6486 switch (info->layer) {
6487 case HCLGE_FD_USER_DEF_L2:
6488 rule->tuples.l2_user_def = info->data;
6489 rule->tuples_mask.l2_user_def = info->data_mask;
6491 case HCLGE_FD_USER_DEF_L3:
6492 rule->tuples.l3_user_def = info->data;
6493 rule->tuples_mask.l3_user_def = info->data_mask;
6495 case HCLGE_FD_USER_DEF_L4:
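/* the 16-bit L4 user-def data is carried in the high 16 bits
 * of the 32-bit tuple field
 */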
6496 rule->tuples.l4_user_def = (u32)info->data << 16;
6497 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6503 rule->ep.user_def = *info;
6506 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6507 struct ethtool_rx_flow_spec *fs,
6508 struct hclge_fd_rule *rule,
6509 struct hclge_fd_user_def_info *info)
6511 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
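/* strip the FLOW_EXT/FLOW_MAC_EXT flag bits so the switch below
 * dispatches on the base flow type
 */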
6513 switch (flow_type) {
6515 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6518 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6521 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6524 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6527 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6530 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6533 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6535 case IPV6_USER_FLOW:
6536 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6539 hclge_fd_get_ether_tuple(hdev, fs, rule);
6545 if (fs->flow_type & FLOW_EXT) {
6546 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6547 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6548 hclge_fd_get_user_def_tuple(info, rule);
6551 if (fs->flow_type & FLOW_MAC_EXT) {
6552 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6553 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6559 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6560 struct hclge_fd_rule *rule)
6564 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6568 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6571 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6572 struct hclge_fd_rule *rule)
6576 spin_lock_bh(&hdev->fd_rule_lock);
6578 if (hdev->fd_active_type != rule->rule_type &&
6579 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6580 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6581 dev_err(&hdev->pdev->dev,
6582 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6583 rule->rule_type, hdev->fd_active_type);
6584 spin_unlock_bh(&hdev->fd_rule_lock);
6588 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6592 ret = hclge_clear_arfs_rules(hdev);
6596 ret = hclge_fd_config_rule(hdev, rule);
6600 rule->state = HCLGE_FD_ACTIVE;
6601 hdev->fd_active_type = rule->rule_type;
6602 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6605 spin_unlock_bh(&hdev->fd_rule_lock);
6609 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6611 struct hclge_vport *vport = hclge_get_vport(handle);
6612 struct hclge_dev *hdev = vport->back;
6614 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6617 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6618 u16 *vport_id, u8 *action, u16 *queue_id)
6620 struct hclge_vport *vport = hdev->vport;
6622 if (ring_cookie == RX_CLS_FLOW_DISC) {
6623 *action = HCLGE_FD_ACTION_DROP_PACKET;
6625 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6626 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6629 if (vf > hdev->num_req_vfs) {
6630 dev_err(&hdev->pdev->dev,
6631 "Error: vf id (%u) > max vf num (%u)\n",
6632 vf, hdev->num_req_vfs);
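/* vf index 0 selects the PF's own vport; a non-zero index
 * selects the corresponding VF's vport
 */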
6636 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6637 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6640 dev_err(&hdev->pdev->dev,
6641 "Error: queue id (%u) > max tqp num (%u)\n",
6646 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6653 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6654 struct ethtool_rxnfc *cmd)
6656 struct hclge_vport *vport = hclge_get_vport(handle);
6657 struct hclge_dev *hdev = vport->back;
6658 struct hclge_fd_user_def_info info;
6659 u16 dst_vport_id = 0, q_index = 0;
6660 struct ethtool_rx_flow_spec *fs;
6661 struct hclge_fd_rule *rule;
6666 if (!hnae3_dev_fd_supported(hdev)) {
6667 dev_err(&hdev->pdev->dev,
6668 "flow table director is not supported\n");
6673 dev_err(&hdev->pdev->dev,
6674 "please enable flow director first\n");
6678 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6680 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6684 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6689 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6693 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6699 rule->flow_type = fs->flow_type;
6700 rule->location = fs->location;
6701 rule->unused_tuple = unused;
6702 rule->vf_id = dst_vport_id;
6703 rule->queue_id = q_index;
6704 rule->action = action;
6705 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6707 ret = hclge_add_fd_entry_common(hdev, rule);
6714 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6715 struct ethtool_rxnfc *cmd)
6717 struct hclge_vport *vport = hclge_get_vport(handle);
6718 struct hclge_dev *hdev = vport->back;
6719 struct ethtool_rx_flow_spec *fs;
6722 if (!hnae3_dev_fd_supported(hdev))
6725 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6727 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6730 spin_lock_bh(&hdev->fd_rule_lock);
6731 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6732 !test_bit(fs->location, hdev->fd_bmap)) {
6733 dev_err(&hdev->pdev->dev,
6734 "Delete fail, rule %u is inexistent\n", fs->location);
6735 spin_unlock_bh(&hdev->fd_rule_lock);
6739 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6744 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6747 spin_unlock_bh(&hdev->fd_rule_lock);
6751 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6754 struct hclge_fd_rule *rule;
6755 struct hlist_node *node;
6758 if (!hnae3_dev_fd_supported(hdev))
6761 spin_lock_bh(&hdev->fd_rule_lock);
6763 for_each_set_bit(location, hdev->fd_bmap,
6764 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6765 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6769 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6771 hlist_del(&rule->rule_node);
6774 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6775 hdev->hclge_fd_rule_num = 0;
6776 bitmap_zero(hdev->fd_bmap,
6777 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6780 spin_unlock_bh(&hdev->fd_rule_lock);
6783 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6785 hclge_clear_fd_rules_in_list(hdev, true);
6786 hclge_fd_disable_user_def(hdev);
6789 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6791 struct hclge_vport *vport = hclge_get_vport(handle);
6792 struct hclge_dev *hdev = vport->back;
6793 struct hclge_fd_rule *rule;
6794 struct hlist_node *node;
6796 /* Return ok here, because reset error handling will check this
6797  * return value. If error is returned here, the reset process will
 * fail.
 */
6800 if (!hnae3_dev_fd_supported(hdev))
6803 /* if fd is disabled, it should not be restored during reset */
6807 spin_lock_bh(&hdev->fd_rule_lock);
6808 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6809 if (rule->state == HCLGE_FD_ACTIVE)
6810 rule->state = HCLGE_FD_TO_ADD;
6812 spin_unlock_bh(&hdev->fd_rule_lock);
6813 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6818 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6819 struct ethtool_rxnfc *cmd)
6821 struct hclge_vport *vport = hclge_get_vport(handle);
6822 struct hclge_dev *hdev = vport->back;
6824 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6827 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6828 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6833 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6834 struct ethtool_tcpip4_spec *spec,
6835 struct ethtool_tcpip4_spec *spec_mask)
6837 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6838 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6839 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6841 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6842 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6843 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6845 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6846 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6847 0 : cpu_to_be16(rule->tuples_mask.src_port);
6849 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6850 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6851 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6853 spec->tos = rule->tuples.ip_tos;
6854 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6855 0 : rule->tuples_mask.ip_tos;
6858 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6859 struct ethtool_usrip4_spec *spec,
6860 struct ethtool_usrip4_spec *spec_mask)
6862 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6863 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6864 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6866 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6867 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6868 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6870 spec->tos = rule->tuples.ip_tos;
6871 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6872 0 : rule->tuples_mask.ip_tos;
6874 spec->proto = rule->tuples.ip_proto;
6875 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6876 0 : rule->tuples_mask.ip_proto;
6878 spec->ip_ver = ETH_RX_NFC_IP4;
6881 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6882 struct ethtool_tcpip6_spec *spec,
6883 struct ethtool_tcpip6_spec *spec_mask)
6885 cpu_to_be32_array(spec->ip6src,
6886 rule->tuples.src_ip, IPV6_SIZE);
6887 cpu_to_be32_array(spec->ip6dst,
6888 rule->tuples.dst_ip, IPV6_SIZE);
6889 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6890 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6892 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6895 if (rule->unused_tuple & BIT(INNER_DST_IP))
6896 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6898 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6901 spec->tclass = rule->tuples.ip_tos;
6902 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6903 0 : rule->tuples_mask.ip_tos;
6905 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6906 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6907 0 : cpu_to_be16(rule->tuples_mask.src_port);
6909 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6910 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6911 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6914 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6915 struct ethtool_usrip6_spec *spec,
6916 struct ethtool_usrip6_spec *spec_mask)
6918 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6919 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6920 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6921 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6923 cpu_to_be32_array(spec_mask->ip6src,
6924 rule->tuples_mask.src_ip, IPV6_SIZE);
6926 if (rule->unused_tuple & BIT(INNER_DST_IP))
6927 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6929 cpu_to_be32_array(spec_mask->ip6dst,
6930 rule->tuples_mask.dst_ip, IPV6_SIZE);
6932 spec->tclass = rule->tuples.ip_tos;
6933 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6934 0 : rule->tuples_mask.ip_tos;
6936 spec->l4_proto = rule->tuples.ip_proto;
6937 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6938 0 : rule->tuples_mask.ip_proto;
6941 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6942 struct ethhdr *spec,
6943 struct ethhdr *spec_mask)
6945 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6946 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6948 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6949 eth_zero_addr(spec_mask->h_source);
6951 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6953 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6954 eth_zero_addr(spec_mask->h_dest);
6956 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6958 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6959 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6960 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6963 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6964 struct hclge_fd_rule *rule)
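/* when none of the user-def tuples is in use, report zeroed ext
 * data; otherwise report the stored offset/data and their masks
 */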
6966 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6967 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6968 fs->h_ext.data[0] = 0;
6969 fs->h_ext.data[1] = 0;
6970 fs->m_ext.data[0] = 0;
6971 fs->m_ext.data[1] = 0;
6973 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6974 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6976 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6977 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6981 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6982 struct hclge_fd_rule *rule)
6984 if (fs->flow_type & FLOW_EXT) {
6985 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6986 fs->m_ext.vlan_tci =
6987 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6988 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6990 hclge_fd_get_user_def_info(fs, rule);
6993 if (fs->flow_type & FLOW_MAC_EXT) {
6994 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6995 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6996 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6998 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6999 rule->tuples_mask.dst_mac);
7003 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7004 struct ethtool_rxnfc *cmd)
7006 struct hclge_vport *vport = hclge_get_vport(handle);
7007 struct hclge_fd_rule *rule = NULL;
7008 struct hclge_dev *hdev = vport->back;
7009 struct ethtool_rx_flow_spec *fs;
7010 struct hlist_node *node2;
7012 if (!hnae3_dev_fd_supported(hdev))
7015 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7017 spin_lock_bh(&hdev->fd_rule_lock);
7019 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7020 if (rule->location >= fs->location)
7024 if (!rule || fs->location != rule->location) {
7025 spin_unlock_bh(&hdev->fd_rule_lock);
7030 fs->flow_type = rule->flow_type;
7031 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7035 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7036 &fs->m_u.tcp_ip4_spec);
7039 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7040 &fs->m_u.usr_ip4_spec);
7045 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7046 &fs->m_u.tcp_ip6_spec);
7048 case IPV6_USER_FLOW:
7049 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7050 &fs->m_u.usr_ip6_spec);
7052 /* The flow type of the fd rule has been checked before it was added to
7053  * the rule list. As the other flow types have been handled above, it
7054  * must be ETHER_FLOW for the default case.
7057 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7058 &fs->m_u.ether_spec);
7062 hclge_fd_get_ext_info(fs, rule);
7064 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7065 fs->ring_cookie = RX_CLS_FLOW_DISC;
7069 fs->ring_cookie = rule->queue_id;
7070 vf_id = rule->vf_id;
7071 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7072 fs->ring_cookie |= vf_id;
7075 spin_unlock_bh(&hdev->fd_rule_lock);
7080 static int hclge_get_all_rules(struct hnae3_handle *handle,
7081 struct ethtool_rxnfc *cmd, u32 *rule_locs)
7083 struct hclge_vport *vport = hclge_get_vport(handle);
7084 struct hclge_dev *hdev = vport->back;
7085 struct hclge_fd_rule *rule;
7086 struct hlist_node *node2;
7089 if (!hnae3_dev_fd_supported(hdev))
7092 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7094 spin_lock_bh(&hdev->fd_rule_lock);
7095 hlist_for_each_entry_safe(rule, node2,
7096 &hdev->fd_rule_list, rule_node) {
7097 if (cnt == cmd->rule_cnt) {
7098 spin_unlock_bh(&hdev->fd_rule_lock);
7102 if (rule->state == HCLGE_FD_TO_DEL)
7105 rule_locs[cnt] = rule->location;
7109 spin_unlock_bh(&hdev->fd_rule_lock);
7111 cmd->rule_cnt = cnt;
7116 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7117 struct hclge_fd_rule_tuples *tuples)
7119 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7120 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7122 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7123 tuples->ip_proto = fkeys->basic.ip_proto;
7124 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7126 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7127 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7128 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7132 for (i = 0; i < IPV6_SIZE; i++) {
7133 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7134 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7139 /* traverse all rules, checking whether an existing rule has the same tuples */
7140 static struct hclge_fd_rule *
7141 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7142 const struct hclge_fd_rule_tuples *tuples)
7144 struct hclge_fd_rule *rule = NULL;
7145 struct hlist_node *node;
7147 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7148 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7155 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7156 struct hclge_fd_rule *rule)
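/* aRFS rules match on IP addresses, destination port and protocol;
 * the MAC, VLAN, TOS and source port tuples are marked unused
 */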
7158 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7159 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7160 BIT(INNER_SRC_PORT);
7163 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7164 rule->state = HCLGE_FD_TO_ADD;
7165 if (tuples->ether_proto == ETH_P_IP) {
7166 if (tuples->ip_proto == IPPROTO_TCP)
7167 rule->flow_type = TCP_V4_FLOW;
7169 rule->flow_type = UDP_V4_FLOW;
7171 if (tuples->ip_proto == IPPROTO_TCP)
7172 rule->flow_type = TCP_V6_FLOW;
7174 rule->flow_type = UDP_V6_FLOW;
7176 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7177 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7180 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7181 u16 flow_id, struct flow_keys *fkeys)
7183 struct hclge_vport *vport = hclge_get_vport(handle);
7184 struct hclge_fd_rule_tuples new_tuples = {};
7185 struct hclge_dev *hdev = vport->back;
7186 struct hclge_fd_rule *rule;
7189 if (!hnae3_dev_fd_supported(hdev))
7192 /* when an fd rule added by the user already exists,
7193  * arfs should not work
7195 spin_lock_bh(&hdev->fd_rule_lock);
7196 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7197 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7198 spin_unlock_bh(&hdev->fd_rule_lock);
7202 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7204 /* check whether a flow director filter already exists for this flow:
7205  * if not, create a new filter for it;
7206  * if a filter exists with a different queue id, modify the filter;
7207  * if a filter exists with the same queue id, do nothing
7209 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7211 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7212 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7213 spin_unlock_bh(&hdev->fd_rule_lock);
7217 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7219 spin_unlock_bh(&hdev->fd_rule_lock);
7223 rule->location = bit_id;
7224 rule->arfs.flow_id = flow_id;
7225 rule->queue_id = queue_id;
7226 hclge_fd_build_arfs_rule(&new_tuples, rule);
7227 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7228 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7229 } else if (rule->queue_id != queue_id) {
7230 rule->queue_id = queue_id;
7231 rule->state = HCLGE_FD_TO_ADD;
7232 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7233 hclge_task_schedule(hdev, 0);
7235 spin_unlock_bh(&hdev->fd_rule_lock);
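/* the rule location is returned as the aRFS filter id, which the
 * stack passes back via rps_may_expire_flow() for expiry checks
 */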
7236 return rule->location;
7239 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7241 #ifdef CONFIG_RFS_ACCEL
7242 struct hnae3_handle *handle = &hdev->vport[0].nic;
7243 struct hclge_fd_rule *rule;
7244 struct hlist_node *node;
7246 spin_lock_bh(&hdev->fd_rule_lock);
7247 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7248 spin_unlock_bh(&hdev->fd_rule_lock);
7251 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7252 if (rule->state != HCLGE_FD_ACTIVE)
7254 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7255 rule->arfs.flow_id, rule->location)) {
7256 rule->state = HCLGE_FD_TO_DEL;
7257 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7260 spin_unlock_bh(&hdev->fd_rule_lock);
7264 /* must be called with fd_rule_lock held */
7265 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7267 #ifdef CONFIG_RFS_ACCEL
7268 struct hclge_fd_rule *rule;
7269 struct hlist_node *node;
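/* rules already in hardware (ACTIVE or TO_DEL) must be removed from
 * the TCAM; rules still pending (TO_ADD) only need their software
 * bookkeeping unwound
 */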
7272 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7275 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7276 switch (rule->state) {
7277 case HCLGE_FD_TO_DEL:
7278 case HCLGE_FD_ACTIVE:
7279 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7280 rule->location, NULL, false);
7284 case HCLGE_FD_TO_ADD:
7285 hclge_fd_dec_rule_cnt(hdev, rule->location);
7286 hlist_del(&rule->rule_node);
7293 hclge_sync_fd_state(hdev);
7299 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7300 struct hclge_fd_rule *rule)
7302 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7303 struct flow_match_basic match;
7304 u16 ethtype_key, ethtype_mask;
7306 flow_rule_match_basic(flow, &match);
7307 ethtype_key = ntohs(match.key->n_proto);
7308 ethtype_mask = ntohs(match.mask->n_proto);
7310 if (ethtype_key == ETH_P_ALL) {
7314 rule->tuples.ether_proto = ethtype_key;
7315 rule->tuples_mask.ether_proto = ethtype_mask;
7316 rule->tuples.ip_proto = match.key->ip_proto;
7317 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7319 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7320 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7324 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7325 struct hclge_fd_rule *rule)
7327 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7328 struct flow_match_eth_addrs match;
7330 flow_rule_match_eth_addrs(flow, &match);
7331 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7332 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7333 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7334 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7336 rule->unused_tuple |= BIT(INNER_DST_MAC);
7337 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7341 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7342 struct hclge_fd_rule *rule)
7344 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7345 struct flow_match_vlan match;
7347 flow_rule_match_vlan(flow, &match);
7348 rule->tuples.vlan_tag1 = match.key->vlan_id |
7349 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7350 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7351 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7353 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7357 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7358 struct hclge_fd_rule *rule)
7362 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7363 struct flow_match_control match;
7365 flow_rule_match_control(flow, &match);
7366 addr_type = match.key->addr_type;
7369 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7370 struct flow_match_ipv4_addrs match;
7372 flow_rule_match_ipv4_addrs(flow, &match);
7373 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7374 rule->tuples_mask.src_ip[IPV4_INDEX] =
7375 be32_to_cpu(match.mask->src);
7376 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7377 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7378 be32_to_cpu(match.mask->dst);
7379 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7380 struct flow_match_ipv6_addrs match;
7382 flow_rule_match_ipv6_addrs(flow, &match);
7383 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7385 be32_to_cpu_array(rule->tuples_mask.src_ip,
7386 match.mask->src.s6_addr32, IPV6_SIZE);
7387 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7389 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7390 match.mask->dst.s6_addr32, IPV6_SIZE);
7392 rule->unused_tuple |= BIT(INNER_SRC_IP);
7393 rule->unused_tuple |= BIT(INNER_DST_IP);
7397 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7398 struct hclge_fd_rule *rule)
7400 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7401 struct flow_match_ports match;
7403 flow_rule_match_ports(flow, &match);
7405 rule->tuples.src_port = be16_to_cpu(match.key->src);
7406 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7407 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7408 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7410 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7411 rule->unused_tuple |= BIT(INNER_DST_PORT);
7415 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7416 struct flow_cls_offload *cls_flower,
7417 struct hclge_fd_rule *rule)
7419 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7420 struct flow_dissector *dissector = flow->match.dissector;
7422 if (dissector->used_keys &
7423 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7424 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7425 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7426 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7427 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7428 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7429 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7430 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7431 dissector->used_keys);
7435 hclge_get_cls_key_basic(flow, rule);
7436 hclge_get_cls_key_mac(flow, rule);
7437 hclge_get_cls_key_vlan(flow, rule);
7438 hclge_get_cls_key_ip(flow, rule);
7439 hclge_get_cls_key_port(flow, rule);
7444 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7445 struct flow_cls_offload *cls_flower, int tc)
7447 u32 prio = cls_flower->common.prio;
7449 if (tc < 0 || tc > hdev->tc_max) {
7450 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7455 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7456 dev_err(&hdev->pdev->dev,
7457 "prio %u should be in range[1, %u]\n",
7458 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7462 if (test_bit(prio - 1, hdev->fd_bmap)) {
7463 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7469 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7470 struct flow_cls_offload *cls_flower,
7473 struct hclge_vport *vport = hclge_get_vport(handle);
7474 struct hclge_dev *hdev = vport->back;
7475 struct hclge_fd_rule *rule;
7478 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7480 dev_err(&hdev->pdev->dev,
7481 "failed to check cls flower params, ret = %d\n", ret);
7485 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7489 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7495 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7496 rule->cls_flower.tc = tc;
7497 rule->location = cls_flower->common.prio - 1;
7499 rule->cls_flower.cookie = cls_flower->cookie;
7500 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7502 ret = hclge_add_fd_entry_common(hdev, rule);
7509 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7510 unsigned long cookie)
7512 struct hclge_fd_rule *rule;
7513 struct hlist_node *node;
7515 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7516 if (rule->cls_flower.cookie == cookie)
7523 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7524 struct flow_cls_offload *cls_flower)
7526 struct hclge_vport *vport = hclge_get_vport(handle);
7527 struct hclge_dev *hdev = vport->back;
7528 struct hclge_fd_rule *rule;
7531 spin_lock_bh(&hdev->fd_rule_lock);
7533 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7535 spin_unlock_bh(&hdev->fd_rule_lock);
7539 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7542 spin_unlock_bh(&hdev->fd_rule_lock);
7546 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7547 spin_unlock_bh(&hdev->fd_rule_lock);
7552 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7554 struct hclge_fd_rule *rule;
7555 struct hlist_node *node;
7558 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7561 spin_lock_bh(&hdev->fd_rule_lock);
7563 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7564 switch (rule->state) {
7565 case HCLGE_FD_TO_ADD:
7566 ret = hclge_fd_config_rule(hdev, rule);
7569 rule->state = HCLGE_FD_ACTIVE;
7571 case HCLGE_FD_TO_DEL:
7572 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7573 rule->location, NULL, false);
7576 hclge_fd_dec_rule_cnt(hdev, rule->location);
7577 hclge_fd_free_node(hdev, rule);
7586 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7588 spin_unlock_bh(&hdev->fd_rule_lock);
7591 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7593 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7594 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
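/* only aRFS rules are purged from the software list here;
 * user-configured rules stay in the list so they can be
 * restored later by the periodic sync task
 */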
7596 hclge_clear_fd_rules_in_list(hdev, clear_list);
7599 hclge_sync_fd_user_def_cfg(hdev, false);
7601 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7604 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7606 struct hclge_vport *vport = hclge_get_vport(handle);
7607 struct hclge_dev *hdev = vport->back;
7609 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7610 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7613 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7615 struct hclge_vport *vport = hclge_get_vport(handle);
7616 struct hclge_dev *hdev = vport->back;
7618 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7621 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7623 struct hclge_vport *vport = hclge_get_vport(handle);
7624 struct hclge_dev *hdev = vport->back;
7626 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7629 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7631 struct hclge_vport *vport = hclge_get_vport(handle);
7632 struct hclge_dev *hdev = vport->back;
7634 return hdev->rst_stats.hw_reset_done_cnt;
7637 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7639 struct hclge_vport *vport = hclge_get_vport(handle);
7640 struct hclge_dev *hdev = vport->back;
7642 hdev->fd_en = enable;
7645 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7647 hclge_restore_fd_entries(handle);
7649 hclge_task_schedule(hdev, 0);
7652 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7654 struct hclge_desc desc;
7655 struct hclge_config_mac_mode_cmd *req =
7656 (struct hclge_config_mac_mode_cmd *)desc.data;
7660 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7663 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7664 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7665 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7666 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7667 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7668 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7669 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7670 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7671 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7672 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7675 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7677 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7679 dev_err(&hdev->pdev->dev,
7680 "mac enable fail, ret =%d.\n", ret);
7683 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7684 u8 switch_param, u8 param_mask)
7686 struct hclge_mac_vlan_switch_cmd *req;
7687 struct hclge_desc desc;
7691 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7692 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7694 /* read current config parameter */
7695 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7697 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7698 req->func_id = cpu_to_le32(func_id);
7700 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7702 dev_err(&hdev->pdev->dev,
7703 "read mac vlan switch parameter fail, ret = %d\n", ret);
7707 /* modify and write new config parameter */
7708 hclge_cmd_reuse_desc(&desc, false);
7709 req->switch_param = (req->switch_param & param_mask) | switch_param;
7710 req->param_mask = param_mask;
7712 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7714 dev_err(&hdev->pdev->dev,
7715 "set mac vlan switch parameter fail, ret = %d\n", ret);
7719 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7722 #define HCLGE_PHY_LINK_STATUS_NUM 200
7724 struct phy_device *phydev = hdev->hw.mac.phydev;
7729 ret = phy_read_status(phydev);
7731 dev_err(&hdev->pdev->dev,
7732 "phy update link status fail, ret = %d\n", ret);
7736 if (phydev->link == link_ret)
7739 msleep(HCLGE_LINK_STATUS_MS);
7740 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7743 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7745 #define HCLGE_MAC_LINK_STATUS_NUM 100
7752 ret = hclge_get_mac_link_status(hdev, &link_status);
7755 if (link_status == link_ret)
7758 msleep(HCLGE_LINK_STATUS_MS);
7759 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7763 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7768 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7771 hclge_phy_link_status_wait(hdev, link_ret);
7773 return hclge_mac_link_status_wait(hdev, link_ret);
7776 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7778 struct hclge_config_mac_mode_cmd *req;
7779 struct hclge_desc desc;
7783 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7784 /* 1 Read out the MAC mode config first */
7785 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7786 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7788 dev_err(&hdev->pdev->dev,
7789 "mac loopback get fail, ret =%d.\n", ret);
7793 /* 2 Then setup the loopback flag */
7794 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7795 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7797 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7799 /* 3 Config mac work mode with loopback flag
7800 * and its original configuration parameters
7802 hclge_cmd_reuse_desc(&desc, false);
7803 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7805 dev_err(&hdev->pdev->dev,
7806 "mac loopback set fail, ret =%d.\n", ret);
7810 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7811 enum hnae3_loop loop_mode)
7813 #define HCLGE_COMMON_LB_RETRY_MS 10
7814 #define HCLGE_COMMON_LB_RETRY_NUM 100
7816 struct hclge_common_lb_cmd *req;
7817 struct hclge_desc desc;
7821 req = (struct hclge_common_lb_cmd *)desc.data;
7822 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7824 switch (loop_mode) {
7825 case HNAE3_LOOP_SERIAL_SERDES:
7826 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7828 case HNAE3_LOOP_PARALLEL_SERDES:
7829 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7831 case HNAE3_LOOP_PHY:
7832 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7835 dev_err(&hdev->pdev->dev,
7836 "unsupported common loopback mode %d\n", loop_mode);
7841 req->enable = loop_mode_b;
7842 req->mask = loop_mode_b;
7844 req->mask = loop_mode_b;
7847 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7849 dev_err(&hdev->pdev->dev,
7850 "common loopback set fail, ret = %d\n", ret);
7855 msleep(HCLGE_COMMON_LB_RETRY_MS);
7856 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7858 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7860 dev_err(&hdev->pdev->dev,
7861 "common loopback get, ret = %d\n", ret);
7864 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7865 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7867 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7868 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7870 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7871 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7877 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7878 enum hnae3_loop loop_mode)
7882 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7886 hclge_cfg_mac_mode(hdev, en);
7888 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7890 dev_err(&hdev->pdev->dev,
7891 "serdes loopback config mac mode timeout\n");
7896 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7897 struct phy_device *phydev)
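/* cycle the phy through suspend/resume so it starts from a clean
 * state before loopback is enabled
 */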
7901 if (!phydev->suspended) {
7902 ret = phy_suspend(phydev);
7907 ret = phy_resume(phydev);
7911 return phy_loopback(phydev, true);
7914 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7915 struct phy_device *phydev)
7919 ret = phy_loopback(phydev, false);
7923 return phy_suspend(phydev);
7926 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7928 struct phy_device *phydev = hdev->hw.mac.phydev;
7932 if (hnae3_dev_phy_imp_supported(hdev))
7933 return hclge_set_common_loopback(hdev, en,
7939 ret = hclge_enable_phy_loopback(hdev, phydev);
7941 ret = hclge_disable_phy_loopback(hdev, phydev);
7943 dev_err(&hdev->pdev->dev,
7944 "set phy loopback fail, ret = %d\n", ret);
7948 hclge_cfg_mac_mode(hdev, en);
7950 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7952 dev_err(&hdev->pdev->dev,
7953 "phy loopback config mac mode timeout\n");
7958 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7959 u16 stream_id, bool enable)
7961 struct hclge_desc desc;
7962 struct hclge_cfg_com_tqp_queue_cmd *req =
7963 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7965 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7966 req->tqp_id = cpu_to_le16(tqp_id);
7967 req->stream_id = cpu_to_le16(stream_id);
7969 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7971 return hclge_cmd_send(&hdev->hw, &desc, 1);
7974 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7976 struct hclge_vport *vport = hclge_get_vport(handle);
7977 struct hclge_dev *hdev = vport->back;
7981 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7982 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7989 static int hclge_set_loopback(struct hnae3_handle *handle,
7990 enum hnae3_loop loop_mode, bool en)
7992 struct hclge_vport *vport = hclge_get_vport(handle);
7993 struct hclge_dev *hdev = vport->back;
7996 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7997 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7998 * the same, the packets are looped back in the SSU. If SSU loopback
7999 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8001 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8002 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
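/* enabling a loopback test clears the allow-loopback bit so
 * packets are not short-circuited in the SSU
 */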
8004 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8005 HCLGE_SWITCH_ALW_LPBK_MASK);
8010 switch (loop_mode) {
8011 case HNAE3_LOOP_APP:
8012 ret = hclge_set_app_loopback(hdev, en);
8014 case HNAE3_LOOP_SERIAL_SERDES:
8015 case HNAE3_LOOP_PARALLEL_SERDES:
8016 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8018 case HNAE3_LOOP_PHY:
8019 ret = hclge_set_phy_loopback(hdev, en);
8023 dev_err(&hdev->pdev->dev,
8024 "loop_mode %d is not supported\n", loop_mode);
8031 ret = hclge_tqp_enable(handle, en);
8033 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8034 en ? "enable" : "disable", ret);
8039 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8043 ret = hclge_set_app_loopback(hdev, false);
8047 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8051 return hclge_cfg_common_loopback(hdev, false,
8052 HNAE3_LOOP_PARALLEL_SERDES);
8055 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8057 struct hclge_vport *vport = hclge_get_vport(handle);
8058 struct hnae3_knic_private_info *kinfo;
8059 struct hnae3_queue *queue;
8060 struct hclge_tqp *tqp;
8063 kinfo = &vport->nic.kinfo;
8064 for (i = 0; i < kinfo->num_tqps; i++) {
8065 queue = handle->kinfo.tqp[i];
8066 tqp = container_of(queue, struct hclge_tqp, q);
8067 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8071 static void hclge_flush_link_update(struct hclge_dev *hdev)
8073 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
8075 unsigned long last = hdev->serv_processed_cnt;
8078 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8079 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8080 last == hdev->serv_processed_cnt)
8084 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8086 struct hclge_vport *vport = hclge_get_vport(handle);
8087 struct hclge_dev *hdev = vport->back;
8090 hclge_task_schedule(hdev, 0);
8092 /* Set the DOWN flag here to disable link updating */
8093 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8095 /* flush memory to make sure DOWN is seen by service task */
8096 smp_mb__before_atomic();
8097 hclge_flush_link_update(hdev);
8101 static int hclge_ae_start(struct hnae3_handle *handle)
8103 struct hclge_vport *vport = hclge_get_vport(handle);
8104 struct hclge_dev *hdev = vport->back;
8107 hclge_cfg_mac_mode(hdev, true);
8108 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8109 hdev->hw.mac.link = 0;
8111 /* reset tqp stats */
8112 hclge_reset_tqp_stats(handle);
8114 hclge_mac_start_phy(hdev);
8119 static void hclge_ae_stop(struct hnae3_handle *handle)
8121 struct hclge_vport *vport = hclge_get_vport(handle);
8122 struct hclge_dev *hdev = vport->back;
8124 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8125 spin_lock_bh(&hdev->fd_rule_lock);
8126 hclge_clear_arfs_rules(hdev);
8127 spin_unlock_bh(&hdev->fd_rule_lock);
8129 /* If it is not PF reset, the firmware will disable the MAC,
8130 * so it only needs to stop the phy here.
8132 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8133 hdev->reset_type != HNAE3_FUNC_RESET) {
8134 hclge_mac_stop_phy(hdev);
8135 hclge_update_link_status(hdev);
8139 hclge_reset_tqp(handle);
8141 hclge_config_mac_tnl_int(hdev, false);
8144 hclge_cfg_mac_mode(hdev, false);
8146 hclge_mac_stop_phy(hdev);
8148 /* reset tqp stats */
8149 hclge_reset_tqp_stats(handle);
8150 hclge_update_link_status(hdev);
8153 int hclge_vport_start(struct hclge_vport *vport)
8155 struct hclge_dev *hdev = vport->back;
8157 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8158 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8159 vport->last_active_jiffies = jiffies;
8161 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8162 if (vport->vport_id) {
8163 hclge_restore_mac_table_common(vport);
8164 hclge_restore_vport_vlan_table(vport);
8166 hclge_restore_hw_table(hdev);
8170 clear_bit(vport->vport_id, hdev->vport_config_block);
8175 void hclge_vport_stop(struct hclge_vport *vport)
8177 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8180 static int hclge_client_start(struct hnae3_handle *handle)
8182 struct hclge_vport *vport = hclge_get_vport(handle);
8184 return hclge_vport_start(vport);
8187 static void hclge_client_stop(struct hnae3_handle *handle)
8189 struct hclge_vport *vport = hclge_get_vport(handle);
8191 hclge_vport_stop(vport);
8194 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8195 u16 cmdq_resp, u8 resp_code,
8196 enum hclge_mac_vlan_tbl_opcode op)
8198 struct hclge_dev *hdev = vport->back;
8201 dev_err(&hdev->pdev->dev,
8202 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8207 if (op == HCLGE_MAC_VLAN_ADD) {
8208 if (!resp_code || resp_code == 1)
8210 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8211 resp_code == HCLGE_ADD_MC_OVERFLOW)
8214 dev_err(&hdev->pdev->dev,
8215 "add mac addr failed for undefined, code=%u.\n",
8218 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8221 } else if (resp_code == 1) {
8222 dev_dbg(&hdev->pdev->dev,
8223 "remove mac addr failed for miss.\n");
8227 dev_err(&hdev->pdev->dev,
8228 "remove mac addr failed for undefined, code=%u.\n",
8231 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8234 } else if (resp_code == 1) {
8235 dev_dbg(&hdev->pdev->dev,
8236 "lookup mac addr failed for miss.\n");
8240 dev_err(&hdev->pdev->dev,
8241 "lookup mac addr failed for undefined, code=%u.\n",
8246 dev_err(&hdev->pdev->dev,
8247 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8252 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8254 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
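/* the per-function bitmap spans desc[1] and desc[2]: function ids
 * 0-191 map to the six 32-bit words of desc[1], the rest to desc[2]
 */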
8256 unsigned int word_num;
8257 unsigned int bit_num;
8259 if (vfid > 255 || vfid < 0)
8262 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8263 word_num = vfid / 32;
8264 bit_num = vfid % 32;
8266 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8268 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8270 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8271 bit_num = vfid % 32;
8273 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8275 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8281 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8283 #define HCLGE_DESC_NUMBER 3
8284 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8287 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8288 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8289 if (desc[i].data[j])
8295 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8296 const u8 *addr, bool is_mc)
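/* pack the 6-byte MAC into the table entry: bytes 0-3 form the high
 * 32-bit word (byte 0 in the lowest bits), bytes 4-5 the low 16-bit word
 */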
8298 const unsigned char *mac_addr = addr;
8299 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8300 (mac_addr[0]) | (mac_addr[1] << 8);
8301 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8303 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8305 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8306 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8309 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8310 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8313 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8314 struct hclge_mac_vlan_tbl_entry_cmd *req)
8316 struct hclge_dev *hdev = vport->back;
8317 struct hclge_desc desc;
8322 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8324 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8326 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8328 dev_err(&hdev->pdev->dev,
8329 "del mac addr failed for cmd_send, ret =%d.\n",
8333 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8334 retval = le16_to_cpu(desc.retval);
8336 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8337 HCLGE_MAC_VLAN_REMOVE);
8340 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8341 struct hclge_mac_vlan_tbl_entry_cmd *req,
8342 struct hclge_desc *desc,
8345 struct hclge_dev *hdev = vport->back;
8350 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8352 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8353 memcpy(desc[0].data,
8355 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8356 hclge_cmd_setup_basic_desc(&desc[1],
8357 HCLGE_OPC_MAC_VLAN_ADD,
8359 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8360 hclge_cmd_setup_basic_desc(&desc[2],
8361 HCLGE_OPC_MAC_VLAN_ADD,
8363 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8365 memcpy(desc[0].data,
8367 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8368 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8371 dev_err(&hdev->pdev->dev,
8372 "lookup mac addr failed for cmd_send, ret =%d.\n",
8376 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8377 retval = le16_to_cpu(desc[0].retval);
8379 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8380 HCLGE_MAC_VLAN_LKUP);
8383 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8384 struct hclge_mac_vlan_tbl_entry_cmd *req,
8385 struct hclge_desc *mc_desc)
8387 struct hclge_dev *hdev = vport->back;
8394 struct hclge_desc desc;
8396 hclge_cmd_setup_basic_desc(&desc,
8397 HCLGE_OPC_MAC_VLAN_ADD,
8399 memcpy(desc.data, req,
8400 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8401 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8402 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8403 retval = le16_to_cpu(desc.retval);
8405 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8407 HCLGE_MAC_VLAN_ADD);
8409 hclge_cmd_reuse_desc(&mc_desc[0], false);
8410 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8411 hclge_cmd_reuse_desc(&mc_desc[1], false);
8412 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8413 hclge_cmd_reuse_desc(&mc_desc[2], false);
8414 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8415 memcpy(mc_desc[0].data, req,
8416 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8417 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8418 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8419 retval = le16_to_cpu(mc_desc[0].retval);
8421 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8423 HCLGE_MAC_VLAN_ADD);
8427 dev_err(&hdev->pdev->dev,
8428 "add mac addr failed for cmd_send, ret =%d.\n",
8436 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8437 u16 *allocated_size)
8439 struct hclge_umv_spc_alc_cmd *req;
8440 struct hclge_desc desc;
8443 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8444 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8446 req->space_size = cpu_to_le32(space_size);
8448 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8450 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8455 *allocated_size = le32_to_cpu(desc.data[1]);
8460 static int hclge_init_umv_space(struct hclge_dev *hdev)
8462 u16 allocated_size = 0;
8465 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8469 if (allocated_size < hdev->wanted_umv_size)
8470 dev_warn(&hdev->pdev->dev,
8471 "failed to alloc umv space, want %u, get %u\n",
8472 hdev->wanted_umv_size, allocated_size);
8474 hdev->max_umv_size = allocated_size;
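/* split the UMV space into num_alloc_vport + 1 equal shares: each
 * vport keeps one share as private quota, and the leftover share
 * plus the division remainder form the shared pool
 */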
8475 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8476 hdev->share_umv_size = hdev->priv_umv_size +
8477 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8482 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8484 struct hclge_vport *vport;
8487 for (i = 0; i < hdev->num_alloc_vport; i++) {
8488 vport = &hdev->vport[i];
8489 vport->used_umv_num = 0;
8492 mutex_lock(&hdev->vport_lock);
8493 hdev->share_umv_size = hdev->priv_umv_size +
8494 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8495 mutex_unlock(&hdev->vport_lock);
8498 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8500 struct hclge_dev *hdev = vport->back;
8504 mutex_lock(&hdev->vport_lock);
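/* the space is full only when the vport has exhausted its private
 * quota and nothing is left in the shared pool
 */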
8506 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8507 hdev->share_umv_size == 0);
8510 mutex_unlock(&hdev->vport_lock);
8515 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8517 struct hclge_dev *hdev = vport->back;
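/* freeing an entry above the private quota returns it to the shared
 * pool; allocating beyond the private quota consumes from the pool
 */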
8520 if (vport->used_umv_num > hdev->priv_umv_size)
8521 hdev->share_umv_size++;
8523 if (vport->used_umv_num > 0)
8524 vport->used_umv_num--;
8526 if (vport->used_umv_num >= hdev->priv_umv_size &&
8527 hdev->share_umv_size > 0)
8528 hdev->share_umv_size--;
8529 vport->used_umv_num++;
8533 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8536 struct hclge_mac_node *mac_node, *tmp;
8538 list_for_each_entry_safe(mac_node, tmp, list, node)
8539 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8545 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8546 enum HCLGE_MAC_NODE_STATE state)
8549 /* from set_rx_mode or tmp_add_list */
8550 case HCLGE_MAC_TO_ADD:
8551 if (mac_node->state == HCLGE_MAC_TO_DEL)
8552 mac_node->state = HCLGE_MAC_ACTIVE;
8554 /* only from set_rx_mode */
8555 case HCLGE_MAC_TO_DEL:
8556 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8557 list_del(&mac_node->node);
8560 mac_node->state = HCLGE_MAC_TO_DEL;
8563 /* only from tmp_add_list, the mac_node->state won't be
 * ACTIVE
 */
8566 case HCLGE_MAC_ACTIVE:
8567 if (mac_node->state == HCLGE_MAC_TO_ADD)
8568 mac_node->state = HCLGE_MAC_ACTIVE;
8574 int hclge_update_mac_list(struct hclge_vport *vport,
8575 enum HCLGE_MAC_NODE_STATE state,
8576 enum HCLGE_MAC_ADDR_TYPE mac_type,
8577 const unsigned char *addr)
8579 struct hclge_dev *hdev = vport->back;
8580 struct hclge_mac_node *mac_node;
8581 struct list_head *list;
8583 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8584 &vport->uc_mac_list : &vport->mc_mac_list;
8586 spin_lock_bh(&vport->mac_list_lock);
8588 /* if the mac addr is already in the mac list, there is no need to add
8589  * a new one; just check its state and either convert it to a new
8590  * state, remove it, or do nothing.
8592 mac_node = hclge_find_mac_node(list, addr);
8594 hclge_update_mac_node(mac_node, state);
8595 spin_unlock_bh(&vport->mac_list_lock);
8596 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8600 /* if this address was never added, there is no need to delete it */
8601 if (state == HCLGE_MAC_TO_DEL) {
8602 spin_unlock_bh(&vport->mac_list_lock);
8603 dev_err(&hdev->pdev->dev,
8604 "failed to delete address %pM from mac list\n",
8609 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
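/* GFP_ATOMIC: allocated while holding the mac_list_lock bh spinlock */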
8611 spin_unlock_bh(&vport->mac_list_lock);
8615 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8617 mac_node->state = state;
8618 ether_addr_copy(mac_node->mac_addr, addr);
8619 list_add_tail(&mac_node->node, list);
8621 spin_unlock_bh(&vport->mac_list_lock);
8626 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8627 const unsigned char *addr)
8629 struct hclge_vport *vport = hclge_get_vport(handle);
8631 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8635 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8636 const unsigned char *addr)
8638 struct hclge_dev *hdev = vport->back;
8639 struct hclge_mac_vlan_tbl_entry_cmd req;
8640 struct hclge_desc desc;
8641 u16 egress_port = 0;
8644 /* mac addr check */
8645 if (is_zero_ether_addr(addr) ||
8646 is_broadcast_ether_addr(addr) ||
8647 is_multicast_ether_addr(addr)) {
8648 dev_err(&hdev->pdev->dev,
8649 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8650 addr, is_zero_ether_addr(addr),
8651 is_broadcast_ether_addr(addr),
8652 is_multicast_ether_addr(addr));
8656 memset(&req, 0, sizeof(req));
8658 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8659 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8661 req.egress_port = cpu_to_le16(egress_port);
8663 hclge_prepare_mac_addr(&req, addr, false);
8665 /* Look up the mac address in the mac_vlan table, and add
8666  * it if the entry does not exist. Duplicate unicast entries
8667  * are not allowed in the mac vlan table.
8669 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8670 if (ret == -ENOENT) {
8671 mutex_lock(&hdev->vport_lock);
8672 if (!hclge_is_umv_space_full(vport, false)) {
8673 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8675 hclge_update_umv_space(vport, false);
8676 mutex_unlock(&hdev->vport_lock);
8679 mutex_unlock(&hdev->vport_lock);
8681 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8682 dev_err(&hdev->pdev->dev, "UC MAC table full (%u)\n",
8683 hdev->priv_umv_size);
8688 /* check if we just hit a duplicate entry */
8690 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8691 vport->vport_id, addr);
8695 dev_err(&hdev->pdev->dev,
8696 "PF failed to add unicast entry(%pM) in the MAC table\n",
8702 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8703 const unsigned char *addr)
8705 struct hclge_vport *vport = hclge_get_vport(handle);
8707 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8711 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8712 const unsigned char *addr)
8714 struct hclge_dev *hdev = vport->back;
8715 struct hclge_mac_vlan_tbl_entry_cmd req;
8718 /* mac addr check */
8719 if (is_zero_ether_addr(addr) ||
8720 is_broadcast_ether_addr(addr) ||
8721 is_multicast_ether_addr(addr)) {
8722 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8727 memset(&req, 0, sizeof(req));
8728 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8729 hclge_prepare_mac_addr(&req, addr, false);
8730 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8732 mutex_lock(&hdev->vport_lock);
8733 hclge_update_umv_space(vport, true);
8734 mutex_unlock(&hdev->vport_lock);
8735 } else if (ret == -ENOENT) {
8742 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8743 const unsigned char *addr)
8745 struct hclge_vport *vport = hclge_get_vport(handle);
8747 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8751 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8752 const unsigned char *addr)
8754 struct hclge_dev *hdev = vport->back;
8755 struct hclge_mac_vlan_tbl_entry_cmd req;
8756 struct hclge_desc desc[3];
8759 /* mac addr check */
8760 if (!is_multicast_ether_addr(addr)) {
8761 dev_err(&hdev->pdev->dev,
8762 "Add mc mac err! invalid mac:%pM.\n",
8766 memset(&req, 0, sizeof(req));
8767 hclge_prepare_mac_addr(&req, addr, true);
8768 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8770 /* This mac addr does not exist, add a new entry for it */
8771 memset(desc[0].data, 0, sizeof(desc[0].data));
8772 memset(desc[1].data, 0, sizeof(desc[0].data));
8773 memset(desc[2].data, 0, sizeof(desc[0].data));
8775 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8778 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8779 /* if the table has already overflowed, do not print every time */
8780 if (status == -ENOSPC &&
8781 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8782 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
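/* A multicast MAC/VLAN entry carries a function (VFID) bitmap that spans
 * several command descriptors; hclge_update_desc_vfid() sets or clears
 * this vport's bit in that bitmap. The sketch below shows only the
 * indexing idea; the word and descriptor sizes are assumptions for
 * illustration, not the real command layout.
 */
#if 0	/* illustrative sketch of a bitmap spread across descriptors */
#define DEMO_WORDS_PER_DESC	6	/* assumed, for illustration */

static void demo_update_vfid(u32 bitmap[3][DEMO_WORDS_PER_DESC],
			     u16 vfid, bool clear)
{
	u16 word = vfid / 32;
	u32 mask = BIT(vfid % 32);

	if (clear)
		bitmap[word / DEMO_WORDS_PER_DESC][word % DEMO_WORDS_PER_DESC] &= ~mask;
	else
		bitmap[word / DEMO_WORDS_PER_DESC][word % DEMO_WORDS_PER_DESC] |= mask;
}
#endif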
8787 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8788 const unsigned char *addr)
8790 struct hclge_vport *vport = hclge_get_vport(handle);
8792 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8796 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8797 const unsigned char *addr)
8799 struct hclge_dev *hdev = vport->back;
8800 struct hclge_mac_vlan_tbl_entry_cmd req;
8801 enum hclge_cmd_status status;
8802 struct hclge_desc desc[3];
8804 /* mac addr check */
8805 if (!is_multicast_ether_addr(addr)) {
8806 dev_dbg(&hdev->pdev->dev,
8807 "Remove mc mac err! invalid mac:%pM.\n",
8812 memset(&req, 0, sizeof(req));
8813 hclge_prepare_mac_addr(&req, addr, true);
8814 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8816 /* This mac addr exists, remove this handle's VFID from it */
8817 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8821 if (hclge_is_all_function_id_zero(desc))
8822 /* All the vfids are zero, so delete this entry */
8823 status = hclge_remove_mac_vlan_tbl(vport, &req);
8825 /* Not all the vfids are zero, so just update the vfids */
8826 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8827 } else if (status == -ENOENT) {
8834 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8835 struct list_head *list,
8836 int (*sync)(struct hclge_vport *,
8837 const unsigned char *))
8839 struct hclge_mac_node *mac_node, *tmp;
8842 list_for_each_entry_safe(mac_node, tmp, list, node) {
8843 ret = sync(vport, mac_node->mac_addr);
8845 mac_node->state = HCLGE_MAC_ACTIVE;
8847 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8854 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8855 struct list_head *list,
8856 int (*unsync)(struct hclge_vport *,
8857 const unsigned char *))
8859 struct hclge_mac_node *mac_node, *tmp;
8862 list_for_each_entry_safe(mac_node, tmp, list, node) {
8863 ret = unsync(vport, mac_node->mac_addr);
8864 if (!ret || ret == -ENOENT) {
8865 list_del(&mac_node->node);
8868 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8875 static bool hclge_sync_from_add_list(struct list_head *add_list,
8876 struct list_head *mac_list)
8878 struct hclge_mac_node *mac_node, *tmp, *new_node;
8879 bool all_added = true;
8881 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8882 if (mac_node->state == HCLGE_MAC_TO_ADD)
8885 /* if the mac address from tmp_add_list is not in the
8886 * uc/mc_mac_list, it means a TO_DEL request was received
8887 * during the time window of adding the mac address into the
8888 * mac table. If the mac_node state is ACTIVE, change it to
8889 * TO_DEL, and it will be removed next time. Otherwise it must
8890 * be TO_ADD; this address has not been added into the mac
8891 * table yet, so just remove the mac node.
8893 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8895 hclge_update_mac_node(new_node, mac_node->state);
8896 list_del(&mac_node->node);
8898 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8899 mac_node->state = HCLGE_MAC_TO_DEL;
8900 list_move_tail(&mac_node->node, mac_list);
8902 list_del(&mac_node->node);
8910 static void hclge_sync_from_del_list(struct list_head *del_list,
8911 struct list_head *mac_list)
8913 struct hclge_mac_node *mac_node, *tmp, *new_node;
8915 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8916 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8918 /* If the mac addr exists in the mac list, it means
8919 * a new TO_ADD request was received during the time window
8920 * of configuring the mac address. Since the mac node
8921 * state is TO_ADD and the address is already in the
8922 * hardware (because the delete failed), we just need
8923 * to change the mac node state to ACTIVE.
8925 new_node->state = HCLGE_MAC_ACTIVE;
8926 list_del(&mac_node->node);
8929 list_move_tail(&mac_node->node, mac_list);
8934 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8935 enum HCLGE_MAC_ADDR_TYPE mac_type,
8938 if (mac_type == HCLGE_MAC_ADDR_UC) {
8940 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8942 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8945 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8947 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8951 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8952 enum HCLGE_MAC_ADDR_TYPE mac_type)
8954 struct hclge_mac_node *mac_node, *tmp, *new_node;
8955 struct list_head tmp_add_list, tmp_del_list;
8956 struct list_head *list;
8959 INIT_LIST_HEAD(&tmp_add_list);
8960 INIT_LIST_HEAD(&tmp_del_list);
8962 /* move the mac addrs to the tmp_add_list and tmp_del_list, so
8963 * we can add/delete these mac addrs outside the spin lock
8965 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8966 &vport->uc_mac_list : &vport->mc_mac_list;
8968 spin_lock_bh(&vport->mac_list_lock);
8970 list_for_each_entry_safe(mac_node, tmp, list, node) {
8971 switch (mac_node->state) {
8972 case HCLGE_MAC_TO_DEL:
8973 list_move_tail(&mac_node->node, &tmp_del_list);
8975 case HCLGE_MAC_TO_ADD:
8976 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8979 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8980 new_node->state = mac_node->state;
8981 list_add_tail(&new_node->node, &tmp_add_list);
8989 spin_unlock_bh(&vport->mac_list_lock);
8991 /* delete first, to free the maximum mac table space for adding */
8992 if (mac_type == HCLGE_MAC_ADDR_UC) {
8993 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8994 hclge_rm_uc_addr_common);
8995 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8996 hclge_add_uc_addr_common);
8998 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8999 hclge_rm_mc_addr_common);
9000 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9001 hclge_add_mc_addr_common);
9004 /* if adding/deleting some mac addresses failed, move them back to
9005 * the mac_list and retry next time.
9007 spin_lock_bh(&vport->mac_list_lock);
9009 hclge_sync_from_del_list(&tmp_del_list, list);
9010 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9012 spin_unlock_bh(&vport->mac_list_lock);
9014 hclge_update_overflow_flags(vport, mac_type, all_added);
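/* hclge_sync_vport_mac_table() above uses a common kernel pattern:
 * snapshot the pending work under a spinlock, perform the slow firmware
 * commands with the lock dropped, then merge the results back under the
 * lock. A generic sketch of the same pattern follows; the names are
 * illustrative, not driver API.
 */
#if 0	/* illustrative sketch of the snapshot/work/merge pattern */
static void demo_sync(spinlock_t *lock, struct list_head *live,
		      struct list_head *scratch)
{
	/* 1) snapshot the pending entries while holding the lock */
	spin_lock_bh(lock);
	list_splice_init(live, scratch);
	spin_unlock_bh(lock);

	/* 2) issue the blocking firmware commands without the lock */
	/* ... process each entry on @scratch ... */

	/* 3) merge failures back under the lock so they are retried */
	spin_lock_bh(lock);
	list_splice(scratch, live);
	spin_unlock_bh(lock);
}
#endif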
9017 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9019 struct hclge_dev *hdev = vport->back;
9021 if (test_bit(vport->vport_id, hdev->vport_config_block))
9024 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9030 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9034 for (i = 0; i < hdev->num_alloc_vport; i++) {
9035 struct hclge_vport *vport = &hdev->vport[i];
9037 if (!hclge_need_sync_mac_table(vport))
9040 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9041 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9045 static void hclge_build_del_list(struct list_head *list,
9047 struct list_head *tmp_del_list)
9049 struct hclge_mac_node *mac_cfg, *tmp;
9051 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9052 switch (mac_cfg->state) {
9053 case HCLGE_MAC_TO_DEL:
9054 case HCLGE_MAC_ACTIVE:
9055 list_move_tail(&mac_cfg->node, tmp_del_list);
9057 case HCLGE_MAC_TO_ADD:
9059 list_del(&mac_cfg->node);
9067 static void hclge_unsync_del_list(struct hclge_vport *vport,
9068 int (*unsync)(struct hclge_vport *vport,
9069 const unsigned char *addr),
9071 struct list_head *tmp_del_list)
9073 struct hclge_mac_node *mac_cfg, *tmp;
9076 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9077 ret = unsync(vport, mac_cfg->mac_addr);
9078 if (!ret || ret == -ENOENT) {
9079 /* clear all mac addrs from hardware, but keep these
9080 * mac addrs in the mac list, and restore them after
9081 * the vf reset finishes.
9084 mac_cfg->state == HCLGE_MAC_ACTIVE) {
9085 mac_cfg->state = HCLGE_MAC_TO_ADD;
9087 list_del(&mac_cfg->node);
9090 } else if (is_del_list) {
9091 mac_cfg->state = HCLGE_MAC_TO_DEL;
9096 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9097 enum HCLGE_MAC_ADDR_TYPE mac_type)
9099 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9100 struct hclge_dev *hdev = vport->back;
9101 struct list_head tmp_del_list, *list;
9103 if (mac_type == HCLGE_MAC_ADDR_UC) {
9104 list = &vport->uc_mac_list;
9105 unsync = hclge_rm_uc_addr_common;
9107 list = &vport->mc_mac_list;
9108 unsync = hclge_rm_mc_addr_common;
9111 INIT_LIST_HEAD(&tmp_del_list);
9114 set_bit(vport->vport_id, hdev->vport_config_block);
9116 spin_lock_bh(&vport->mac_list_lock);
9118 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9120 spin_unlock_bh(&vport->mac_list_lock);
9122 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9124 spin_lock_bh(&vport->mac_list_lock);
9126 hclge_sync_from_del_list(&tmp_del_list, list);
9128 spin_unlock_bh(&vport->mac_list_lock);
9131 /* remove all mac addresses when uninitializing */
9132 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9133 enum HCLGE_MAC_ADDR_TYPE mac_type)
9135 struct hclge_mac_node *mac_node, *tmp;
9136 struct hclge_dev *hdev = vport->back;
9137 struct list_head tmp_del_list, *list;
9139 INIT_LIST_HEAD(&tmp_del_list);
9141 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9142 &vport->uc_mac_list : &vport->mc_mac_list;
9144 spin_lock_bh(&vport->mac_list_lock);
9146 list_for_each_entry_safe(mac_node, tmp, list, node) {
9147 switch (mac_node->state) {
9148 case HCLGE_MAC_TO_DEL:
9149 case HCLGE_MAC_ACTIVE:
9150 list_move_tail(&mac_node->node, &tmp_del_list);
9152 case HCLGE_MAC_TO_ADD:
9153 list_del(&mac_node->node);
9159 spin_unlock_bh(&vport->mac_list_lock);
9161 if (mac_type == HCLGE_MAC_ADDR_UC)
9162 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9163 hclge_rm_uc_addr_common);
9165 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9166 hclge_rm_mc_addr_common);
9168 if (!list_empty(&tmp_del_list))
9169 dev_warn(&hdev->pdev->dev,
9170 "uninit %s mac list for vport %u not completely.\n",
9171 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9174 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9175 list_del(&mac_node->node);
9180 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9182 struct hclge_vport *vport;
9185 for (i = 0; i < hdev->num_alloc_vport; i++) {
9186 vport = &hdev->vport[i];
9187 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9188 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9192 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9193 u16 cmdq_resp, u8 resp_code)
9195 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9196 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9197 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9198 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9203 dev_err(&hdev->pdev->dev,
9204 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9209 switch (resp_code) {
9210 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9211 case HCLGE_ETHERTYPE_ALREADY_ADD:
9214 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9215 dev_err(&hdev->pdev->dev,
9216 "add mac ethertype failed for manager table overflow.\n");
9217 return_status = -EIO;
9219 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9220 dev_err(&hdev->pdev->dev,
9221 "add mac ethertype failed for key conflict.\n");
9222 return_status = -EIO;
9225 dev_err(&hdev->pdev->dev,
9226 "add mac ethertype failed for undefined, code=%u.\n",
9228 return_status = -EIO;
9231 return return_status;
9234 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9237 struct hclge_mac_vlan_tbl_entry_cmd req;
9238 struct hclge_dev *hdev = vport->back;
9239 struct hclge_desc desc;
9240 u16 egress_port = 0;
9243 if (is_zero_ether_addr(mac_addr))
9246 memset(&req, 0, sizeof(req));
9247 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9248 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9249 req.egress_port = cpu_to_le16(egress_port);
9250 hclge_prepare_mac_addr(&req, mac_addr, false);
9252 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9255 vf_idx += HCLGE_VF_VPORT_START_NUM;
9256 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9258 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9264 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9267 struct hclge_vport *vport = hclge_get_vport(handle);
9268 struct hclge_dev *hdev = vport->back;
9270 vport = hclge_get_vf_vport(hdev, vf);
9274 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9275 dev_info(&hdev->pdev->dev,
9276 "Specified MAC(=%pM) is same as before, no change committed!\n",
9281 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9282 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9287 ether_addr_copy(vport->vf_info.mac, mac_addr);
9289 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9290 dev_info(&hdev->pdev->dev,
9291 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9293 return hclge_inform_reset_assert_to_vf(vport);
9296 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9301 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9302 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9304 struct hclge_desc desc;
9309 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9310 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9312 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9314 dev_err(&hdev->pdev->dev,
9315 "add mac ethertype failed for cmd_send, ret =%d.\n",
9320 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9321 retval = le16_to_cpu(desc.retval);
9323 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
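/* The firmware status above is packed into the descriptor: the retval
 * field carries the command-queue status, while the response code sits in
 * byte 1 of the first little-endian data word, hence the shift and mask.
 * A worked example with an illustrative value:
 */
#if 0	/* illustrative: extracting resp_code from desc.data[0] */
__le32 data0 = cpu_to_le32(0x00000200);
u8 resp_code = (le32_to_cpu(data0) >> 8) & 0xff;	/* 0x02 */
#endif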
9326 static int init_mgr_tbl(struct hclge_dev *hdev)
9331 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9332 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9334 dev_err(&hdev->pdev->dev,
9335 "add mac ethertype failed, ret =%d.\n",
9344 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9346 struct hclge_vport *vport = hclge_get_vport(handle);
9347 struct hclge_dev *hdev = vport->back;
9349 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9352 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9353 const u8 *old_addr, const u8 *new_addr)
9355 struct list_head *list = &vport->uc_mac_list;
9356 struct hclge_mac_node *old_node, *new_node;
9358 new_node = hclge_find_mac_node(list, new_addr);
9360 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9364 new_node->state = HCLGE_MAC_TO_ADD;
9365 ether_addr_copy(new_node->mac_addr, new_addr);
9366 list_add(&new_node->node, list);
9368 if (new_node->state == HCLGE_MAC_TO_DEL)
9369 new_node->state = HCLGE_MAC_ACTIVE;
9371 /* make sure the new addr is at the list head, to avoid the dev
9372 * addr not being re-added into the mac table due to the umv space
9373 * limitation after a global/imp reset, which clears the mac
9374 * table in hardware.
9376 list_move(&new_node->node, list);
9379 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9380 old_node = hclge_find_mac_node(list, old_addr);
9382 if (old_node->state == HCLGE_MAC_TO_ADD) {
9383 list_del(&old_node->node);
9386 old_node->state = HCLGE_MAC_TO_DEL;
9391 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9396 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9399 const unsigned char *new_addr = (const unsigned char *)p;
9400 struct hclge_vport *vport = hclge_get_vport(handle);
9401 struct hclge_dev *hdev = vport->back;
9402 unsigned char *old_addr = NULL;
9405 /* mac addr check */
9406 if (is_zero_ether_addr(new_addr) ||
9407 is_broadcast_ether_addr(new_addr) ||
9408 is_multicast_ether_addr(new_addr)) {
9409 dev_err(&hdev->pdev->dev,
9410 "change uc mac err! invalid mac: %pM.\n",
9415 ret = hclge_pause_addr_cfg(hdev, new_addr);
9417 dev_err(&hdev->pdev->dev,
9418 "failed to configure mac pause address, ret = %d\n",
9424 old_addr = hdev->hw.mac.mac_addr;
9426 spin_lock_bh(&vport->mac_list_lock);
9427 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9429 dev_err(&hdev->pdev->dev,
9430 "failed to change the mac addr:%pM, ret = %d\n",
9432 spin_unlock_bh(&vport->mac_list_lock);
9435 hclge_pause_addr_cfg(hdev, old_addr);
9439 /* we must update the dev addr under spin lock protection, preventing the
9440 * dev addr from being removed by the set_rx_mode path.
9442 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9443 spin_unlock_bh(&vport->mac_list_lock);
9445 hclge_task_schedule(hdev, 0);
9450 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9452 struct mii_ioctl_data *data = if_mii(ifr);
9454 if (!hnae3_dev_phy_imp_supported(hdev))
9459 data->phy_id = hdev->hw.mac.phy_addr;
9460 /* this command reads phy id and register at the same time */
9463 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9467 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9473 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9476 struct hclge_vport *vport = hclge_get_vport(handle);
9477 struct hclge_dev *hdev = vport->back;
9481 return hclge_ptp_get_cfg(hdev, ifr);
9483 return hclge_ptp_set_cfg(hdev, ifr);
9485 if (!hdev->hw.mac.phydev)
9486 return hclge_mii_ioctl(hdev, ifr, cmd);
9489 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9492 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9495 struct hclge_port_vlan_filter_bypass_cmd *req;
9496 struct hclge_desc desc;
9499 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9500 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9502 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9505 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9507 dev_err(&hdev->pdev->dev,
9508 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9514 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9515 u8 fe_type, bool filter_en, u8 vf_id)
9517 struct hclge_vlan_filter_ctrl_cmd *req;
9518 struct hclge_desc desc;
9521 /* read current vlan filter parameter */
9522 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9523 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9524 req->vlan_type = vlan_type;
9527 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9529 dev_err(&hdev->pdev->dev,
9530 "failed to get vlan filter config, ret = %d.\n", ret);
9534 /* modify and write new config parameter */
9535 hclge_cmd_reuse_desc(&desc, false);
9536 req->vlan_fe = filter_en ?
9537 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9539 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9541 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9547 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9549 struct hclge_dev *hdev = vport->back;
9550 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9553 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9554 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9555 HCLGE_FILTER_FE_EGRESS_V1_B,
9556 enable, vport->vport_id);
9558 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9559 HCLGE_FILTER_FE_EGRESS, enable,
9564 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9565 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9567 } else if (!vport->vport_id) {
9568 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9571 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9572 HCLGE_FILTER_FE_INGRESS,
9579 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9581 struct hnae3_handle *handle = &vport->nic;
9582 struct hclge_vport_vlan_cfg *vlan, *tmp;
9583 struct hclge_dev *hdev = vport->back;
9585 if (vport->vport_id) {
9586 if (vport->port_base_vlan_cfg.state !=
9587 HNAE3_PORT_BASE_VLAN_DISABLE)
9590 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9592 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9596 if (!vport->req_vlan_fltr_en)
9599 /* for compatibility with older devices, always enable the vlan filter */
9600 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9603 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9604 if (vlan->vlan_id != 0)
9610 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9612 struct hclge_dev *hdev = vport->back;
9616 mutex_lock(&hdev->vport_lock);
9618 vport->req_vlan_fltr_en = request_en;
9620 need_en = hclge_need_enable_vport_vlan_filter(vport);
9621 if (need_en == vport->cur_vlan_fltr_en) {
9622 mutex_unlock(&hdev->vport_lock);
9626 ret = hclge_set_vport_vlan_filter(vport, need_en);
9628 mutex_unlock(&hdev->vport_lock);
9632 vport->cur_vlan_fltr_en = need_en;
9634 mutex_unlock(&hdev->vport_lock);
9639 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9641 struct hclge_vport *vport = hclge_get_vport(handle);
9643 return hclge_enable_vport_vlan_filter(vport, enable);
9646 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9647 bool is_kill, u16 vlan,
9648 struct hclge_desc *desc)
9650 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9651 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9656 hclge_cmd_setup_basic_desc(&desc[0],
9657 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9658 hclge_cmd_setup_basic_desc(&desc[1],
9659 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9661 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9663 vf_byte_off = vfid / 8;
9664 vf_byte_val = 1 << (vfid % 8);
9666 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9667 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9669 req0->vlan_id = cpu_to_le16(vlan);
9670 req0->vlan_cfg = is_kill;
9672 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9673 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9675 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9677 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9679 dev_err(&hdev->pdev->dev,
9680 "Send vf vlan command fail, ret =%d.\n",
9688 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9689 bool is_kill, struct hclge_desc *desc)
9691 struct hclge_vlan_filter_vf_cfg_cmd *req;
9693 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9696 #define HCLGE_VF_VLAN_NO_ENTRY 2
9697 if (!req->resp_code || req->resp_code == 1)
9700 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9701 set_bit(vfid, hdev->vf_vlan_full);
9702 dev_warn(&hdev->pdev->dev,
9703 "vf vlan table is full, vf vlan filter is disabled\n");
9707 dev_err(&hdev->pdev->dev,
9708 "Add vf vlan filter fail, ret =%u.\n",
9711 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9712 if (!req->resp_code)
9715 /* vf vlan filter is disabled when the vf vlan table is full,
9716 * so new vlan ids will not be added into the vf vlan table.
9717 * Just return 0 without warning, to avoid massive verbose
9718 * logs on unload.
9720 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9723 dev_err(&hdev->pdev->dev,
9724 "Kill vf vlan filter fail, ret =%u.\n",
9731 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9732 bool is_kill, u16 vlan)
9734 struct hclge_vport *vport = &hdev->vport[vfid];
9735 struct hclge_desc desc[2];
9738 /* if the vf vlan table is full, firmware will close the vf vlan
9739 * filter; it is then impossible and unnecessary to add new vlan ids.
9740 * If spoof check is enabled and the vf vlan table is full, adding a
9741 * new vlan must fail, because tx packets with that vlan id would be dropped.
9743 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9744 if (vport->vf_info.spoofchk && vlan) {
9745 dev_err(&hdev->pdev->dev,
9746 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9752 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9756 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9759 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9760 u16 vlan_id, bool is_kill)
9762 struct hclge_vlan_filter_pf_cfg_cmd *req;
9763 struct hclge_desc desc;
9764 u8 vlan_offset_byte_val;
9765 u8 vlan_offset_byte;
9769 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9771 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9772 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9773 HCLGE_VLAN_BYTE_SIZE;
9774 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9776 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9777 req->vlan_offset = vlan_offset_160;
9778 req->vlan_cfg = is_kill;
9779 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9781 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9783 dev_err(&hdev->pdev->dev,
9784 "port vlan command, send fail, ret =%d.\n", ret);
9788 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9789 u16 vport_id, u16 vlan_id,
9792 u16 vport_idx, vport_num = 0;
9795 if (is_kill && !vlan_id)
9798 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9800 dev_err(&hdev->pdev->dev,
9801 "Set %u vport vlan filter config fail, ret =%d.\n",
9806 /* vlan 0 may be added twice when 8021q module is enabled */
9807 if (!is_kill && !vlan_id &&
9808 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9811 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9812 dev_err(&hdev->pdev->dev,
9813 "Add port vlan failed, vport %u is already in vlan %u\n",
9819 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9820 dev_err(&hdev->pdev->dev,
9821 "Delete port vlan failed, vport %u is not in vlan %u\n",
9826 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9829 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9830 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
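/* The port-level filter above is effectively reference counted through the
 * per-VLAN vport bitmap: it is programmed only when the first vport joins
 * a VLAN (!is_kill && vport_num == 1) and cleared only when the last vport
 * leaves (is_kill && vport_num == 0). A sketch of the same idea with a
 * plain counter (illustrative only):
 */
#if 0	/* illustrative refcount view of the port vlan filter */
static void demo_port_vlan(u16 *refcnt, bool is_kill)
{
	if (!is_kill && ++(*refcnt) == 1)
		/* first user: program the port filter */ ;
	else if (is_kill && *refcnt && --(*refcnt) == 0)
		/* last user: remove the port filter */ ;
}
#endif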
9836 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9838 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9839 struct hclge_vport_vtag_tx_cfg_cmd *req;
9840 struct hclge_dev *hdev = vport->back;
9841 struct hclge_desc desc;
9845 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9847 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9848 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9849 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9850 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9851 vcfg->accept_tag1 ? 1 : 0);
9852 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9853 vcfg->accept_untag1 ? 1 : 0);
9854 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9855 vcfg->accept_tag2 ? 1 : 0);
9856 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9857 vcfg->accept_untag2 ? 1 : 0);
9858 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9859 vcfg->insert_tag1_en ? 1 : 0);
9860 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9861 vcfg->insert_tag2_en ? 1 : 0);
9862 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9863 vcfg->tag_shift_mode_en ? 1 : 0);
9864 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9866 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9867 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9868 HCLGE_VF_NUM_PER_BYTE;
9869 req->vf_bitmap[bmap_index] =
9870 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9872 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9874 dev_err(&hdev->pdev->dev,
9875 "Send port txvlan cfg command fail, ret =%d\n",
9881 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9883 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9884 struct hclge_vport_vtag_rx_cfg_cmd *req;
9885 struct hclge_dev *hdev = vport->back;
9886 struct hclge_desc desc;
9890 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9892 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9893 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9894 vcfg->strip_tag1_en ? 1 : 0);
9895 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9896 vcfg->strip_tag2_en ? 1 : 0);
9897 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9898 vcfg->vlan1_vlan_prionly ? 1 : 0);
9899 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9900 vcfg->vlan2_vlan_prionly ? 1 : 0);
9901 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9902 vcfg->strip_tag1_discard_en ? 1 : 0);
9903 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9904 vcfg->strip_tag2_discard_en ? 1 : 0);
9906 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9907 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9908 HCLGE_VF_NUM_PER_BYTE;
9909 req->vf_bitmap[bmap_index] =
9910 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9912 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9914 dev_err(&hdev->pdev->dev,
9915 "Send port rxvlan cfg command fail, ret =%d\n",
9921 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9922 u16 port_base_vlan_state,
9923 u16 vlan_tag, u8 qos)
9927 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9928 vport->txvlan_cfg.accept_tag1 = true;
9929 vport->txvlan_cfg.insert_tag1_en = false;
9930 vport->txvlan_cfg.default_tag1 = 0;
9932 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9934 vport->txvlan_cfg.accept_tag1 =
9935 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9936 vport->txvlan_cfg.insert_tag1_en = true;
9937 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9941 vport->txvlan_cfg.accept_untag1 = true;
9943 /* accept_tag2 and accept_untag2 are not supported on
9944 * pdev revision(0x20); newer revisions support them, but
9945 * these two fields cannot be configured by the user.
9947 vport->txvlan_cfg.accept_tag2 = true;
9948 vport->txvlan_cfg.accept_untag2 = true;
9949 vport->txvlan_cfg.insert_tag2_en = false;
9950 vport->txvlan_cfg.default_tag2 = 0;
9951 vport->txvlan_cfg.tag_shift_mode_en = true;
9953 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9954 vport->rxvlan_cfg.strip_tag1_en = false;
9955 vport->rxvlan_cfg.strip_tag2_en =
9956 vport->rxvlan_cfg.rx_vlan_offload_en;
9957 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9959 vport->rxvlan_cfg.strip_tag1_en =
9960 vport->rxvlan_cfg.rx_vlan_offload_en;
9961 vport->rxvlan_cfg.strip_tag2_en = true;
9962 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9965 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9966 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9967 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9969 ret = hclge_set_vlan_tx_offload_cfg(vport);
9973 return hclge_set_vlan_rx_offload_cfg(vport);
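/* The default tag programmed for port based VLAN above is a standard
 * 802.1Q TCI: the 3-bit priority in the top bits and the 12-bit VLAN id in
 * the low bits. A worked example with qos 5 and vlan_tag 100:
 */
#if 0	/* illustrative: (qos << VLAN_PRIO_SHIFT) | vlan_tag */
u16 tag = (5 << VLAN_PRIO_SHIFT) | 100;	/* 0xA000 | 0x0064 = 0xA064 */
#endif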
9976 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9978 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9979 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9980 struct hclge_desc desc;
9983 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9984 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9985 rx_req->ot_fst_vlan_type =
9986 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9987 rx_req->ot_sec_vlan_type =
9988 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9989 rx_req->in_fst_vlan_type =
9990 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9991 rx_req->in_sec_vlan_type =
9992 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9994 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9996 dev_err(&hdev->pdev->dev,
9997 "Send rxvlan protocol type command fail, ret =%d\n",
10002 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10004 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10005 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10006 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10008 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10010 dev_err(&hdev->pdev->dev,
10011 "Send txvlan protocol type command fail, ret =%d\n",
10017 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10019 #define HCLGE_DEF_VLAN_TYPE 0x8100
10021 struct hnae3_handle *handle = &hdev->vport[0].nic;
10022 struct hclge_vport *vport;
10026 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10027 /* for revision 0x21, vf vlan filter is per function */
10028 for (i = 0; i < hdev->num_alloc_vport; i++) {
10029 vport = &hdev->vport[i];
10030 ret = hclge_set_vlan_filter_ctrl(hdev,
10031 HCLGE_FILTER_TYPE_VF,
10032 HCLGE_FILTER_FE_EGRESS,
10037 vport->cur_vlan_fltr_en = true;
10040 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10041 HCLGE_FILTER_FE_INGRESS, true,
10046 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10047 HCLGE_FILTER_FE_EGRESS_V1_B,
10053 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10054 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10055 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10056 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10057 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10058 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10060 ret = hclge_set_vlan_protocol_type(hdev);
10064 for (i = 0; i < hdev->num_alloc_vport; i++) {
10068 vport = &hdev->vport[i];
10069 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10070 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10072 ret = hclge_vlan_offload_cfg(vport,
10073 vport->port_base_vlan_cfg.state,
10079 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10082 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10083 bool written_to_tbl)
10085 struct hclge_vport_vlan_cfg *vlan, *tmp;
10087 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10088 if (vlan->vlan_id == vlan_id)
10091 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10095 vlan->hd_tbl_status = written_to_tbl;
10096 vlan->vlan_id = vlan_id;
10098 list_add_tail(&vlan->node, &vport->vlan_list);
10101 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10103 struct hclge_vport_vlan_cfg *vlan, *tmp;
10104 struct hclge_dev *hdev = vport->back;
10107 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10108 if (!vlan->hd_tbl_status) {
10109 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10111 vlan->vlan_id, false);
10113 dev_err(&hdev->pdev->dev,
10114 "restore vport vlan list failed, ret=%d\n",
10119 vlan->hd_tbl_status = true;
10125 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10128 struct hclge_vport_vlan_cfg *vlan, *tmp;
10129 struct hclge_dev *hdev = vport->back;
10131 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10132 if (vlan->vlan_id == vlan_id) {
10133 if (is_write_tbl && vlan->hd_tbl_status)
10134 hclge_set_vlan_filter_hw(hdev,
10135 htons(ETH_P_8021Q),
10140 list_del(&vlan->node);
10147 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10149 struct hclge_vport_vlan_cfg *vlan, *tmp;
10150 struct hclge_dev *hdev = vport->back;
10152 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10153 if (vlan->hd_tbl_status)
10154 hclge_set_vlan_filter_hw(hdev,
10155 htons(ETH_P_8021Q),
10160 vlan->hd_tbl_status = false;
10162 list_del(&vlan->node);
10166 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10169 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10171 struct hclge_vport_vlan_cfg *vlan, *tmp;
10172 struct hclge_vport *vport;
10175 for (i = 0; i < hdev->num_alloc_vport; i++) {
10176 vport = &hdev->vport[i];
10177 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10178 list_del(&vlan->node);
10184 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10186 struct hclge_vport_vlan_cfg *vlan, *tmp;
10187 struct hclge_dev *hdev = vport->back;
10193 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10194 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10195 state = vport->port_base_vlan_cfg.state;
10197 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10198 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10199 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10200 vport->vport_id, vlan_id,
10205 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10206 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10208 vlan->vlan_id, false);
10211 vlan->hd_tbl_status = true;
10215 /* For global reset and imp reset, hardware will clear the mac table,
10216 * so we change the mac address state from ACTIVE to TO_ADD; then they
10217 * can be restored in the service task after the reset completes. Furthermore,
10218 * the mac addresses with state TO_DEL or DEL_FAIL do not need to
10219 * be restored after reset, so just remove these mac nodes from mac_list.
10221 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10223 struct hclge_mac_node *mac_node, *tmp;
10225 list_for_each_entry_safe(mac_node, tmp, list, node) {
10226 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10227 mac_node->state = HCLGE_MAC_TO_ADD;
10228 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10229 list_del(&mac_node->node);
10235 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10237 spin_lock_bh(&vport->mac_list_lock);
10239 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10240 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10241 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10243 spin_unlock_bh(&vport->mac_list_lock);
10246 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10248 struct hclge_vport *vport = &hdev->vport[0];
10249 struct hnae3_handle *handle = &vport->nic;
10251 hclge_restore_mac_table_common(vport);
10252 hclge_restore_vport_vlan_table(vport);
10253 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10254 hclge_restore_fd_entries(handle);
10257 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10259 struct hclge_vport *vport = hclge_get_vport(handle);
10261 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10262 vport->rxvlan_cfg.strip_tag1_en = false;
10263 vport->rxvlan_cfg.strip_tag2_en = enable;
10264 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10266 vport->rxvlan_cfg.strip_tag1_en = enable;
10267 vport->rxvlan_cfg.strip_tag2_en = true;
10268 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10271 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10272 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10273 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10274 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10276 return hclge_set_vlan_rx_offload_cfg(vport);
10279 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10281 struct hclge_dev *hdev = vport->back;
10283 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10284 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10287 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10288 u16 port_base_vlan_state,
10289 struct hclge_vlan_info *new_info,
10290 struct hclge_vlan_info *old_info)
10292 struct hclge_dev *hdev = vport->back;
10295 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10296 hclge_rm_vport_all_vlan_table(vport, false);
10297 /* force clear VLAN 0 */
10298 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10301 return hclge_set_vlan_filter_hw(hdev,
10302 htons(new_info->vlan_proto),
10304 new_info->vlan_tag,
10308 /* force add VLAN 0 */
10309 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10313 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10314 vport->vport_id, old_info->vlan_tag,
10319 return hclge_add_vport_all_vlan_table(vport);
10322 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10323 const struct hclge_vlan_info *old_cfg)
10325 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10328 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10334 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10335 struct hclge_vlan_info *vlan_info)
10337 struct hnae3_handle *nic = &vport->nic;
10338 struct hclge_vlan_info *old_vlan_info;
10339 struct hclge_dev *hdev = vport->back;
10342 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10344 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10349 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10352 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10353 /* add new VLAN tag */
10354 ret = hclge_set_vlan_filter_hw(hdev,
10355 htons(vlan_info->vlan_proto),
10357 vlan_info->vlan_tag,
10362 /* remove old VLAN tag */
10363 if (old_vlan_info->vlan_tag == 0)
10364 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10367 ret = hclge_set_vlan_filter_hw(hdev,
10368 htons(ETH_P_8021Q),
10370 old_vlan_info->vlan_tag,
10373 dev_err(&hdev->pdev->dev,
10374 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10375 vport->vport_id, old_vlan_info->vlan_tag, ret);
10382 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10388 vport->port_base_vlan_cfg.state = state;
10389 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10390 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10392 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10394 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10395 hclge_set_vport_vlan_fltr_change(vport);
10400 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10401 enum hnae3_port_base_vlan_state state,
10404 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10406 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10408 return HNAE3_PORT_BASE_VLAN_ENABLE;
10412 return HNAE3_PORT_BASE_VLAN_DISABLE;
10414 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10415 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10416 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10418 return HNAE3_PORT_BASE_VLAN_MODIFY;
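/* The helper above reduces to a small decision: with port base vlan
 * disabled, a non-zero vlan enables it; with it enabled, vlan 0 disables
 * it, an identical vlan/qos pair is a no-op, and anything else is a
 * modify. A sketch of that decision (illustrative, and assuming the
 * elided checks test whether the requested vlan is 0):
 */
#if 0
static u16 demo_pvid_decision(bool enabled, u16 cur_vlan, u8 cur_qos,
			      u16 vlan, u8 qos)
{
	if (!enabled)
		return vlan ? HNAE3_PORT_BASE_VLAN_ENABLE :
			      HNAE3_PORT_BASE_VLAN_NOCHANGE;
	if (!vlan)
		return HNAE3_PORT_BASE_VLAN_DISABLE;
	if (cur_vlan == vlan && cur_qos == qos)
		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
	return HNAE3_PORT_BASE_VLAN_MODIFY;
}
#endif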
10421 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10422 u16 vlan, u8 qos, __be16 proto)
10424 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10425 struct hclge_vport *vport = hclge_get_vport(handle);
10426 struct hclge_dev *hdev = vport->back;
10427 struct hclge_vlan_info vlan_info;
10431 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10432 return -EOPNOTSUPP;
10434 vport = hclge_get_vf_vport(hdev, vfid);
10438 /* qos is a 3-bit value, so it cannot be bigger than 7 */
10439 if (vlan > VLAN_N_VID - 1 || qos > 7)
10441 if (proto != htons(ETH_P_8021Q))
10442 return -EPROTONOSUPPORT;
10444 state = hclge_get_port_base_vlan_state(vport,
10445 vport->port_base_vlan_cfg.state,
10447 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10450 vlan_info.vlan_tag = vlan;
10451 vlan_info.qos = qos;
10452 vlan_info.vlan_proto = ntohs(proto);
10454 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10456 dev_err(&hdev->pdev->dev,
10457 "failed to update port base vlan for vf %d, ret = %d\n",
10462 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10465 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10466 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10467 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10468 vport->vport_id, state,
10474 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10476 struct hclge_vlan_info *vlan_info;
10477 struct hclge_vport *vport;
10481 /* clear port base vlan for all vfs */
10482 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10483 vport = &hdev->vport[vf];
10484 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10486 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10488 vlan_info->vlan_tag, true);
10490 dev_err(&hdev->pdev->dev,
10491 "failed to clear vf vlan for vf%d, ret = %d\n",
10492 vf - HCLGE_VF_VPORT_START_NUM, ret);
10496 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10497 u16 vlan_id, bool is_kill)
10499 struct hclge_vport *vport = hclge_get_vport(handle);
10500 struct hclge_dev *hdev = vport->back;
10501 bool written_to_tbl = false;
10504 /* When the device is resetting or reset has failed, firmware is unable
10505 * to handle mailbox. Just record the vlan id, and remove it after
10506 * the reset finishes.
10507 */
10508 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10509 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10510 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10514 /* when port base vlan is enabled, we use the port base vlan as the
10515 * vlan filter entry. In this case, we don't update the vlan filter table
10516 * when the user adds a new vlan or removes an existing vlan; we just
10517 * update the vport vlan list. The vlan ids in the vlan list will not be
10518 * written to the vlan filter table until port base vlan is disabled
10520 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10521 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10523 written_to_tbl = true;
10528 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10530 hclge_add_vport_vlan_table(vport, vlan_id,
10532 } else if (is_kill) {
10533 /* when removing the hw vlan filter failed, record the vlan id,
10534 * and try to remove it from hw later, to stay consistent
10535 * with the user configuration.
10536 */
10537 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10540 hclge_set_vport_vlan_fltr_change(vport);
10545 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10547 struct hclge_vport *vport;
10551 for (i = 0; i < hdev->num_alloc_vport; i++) {
10552 vport = &hdev->vport[i];
10553 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10557 ret = hclge_enable_vport_vlan_filter(vport,
10558 vport->req_vlan_fltr_en);
10560 dev_err(&hdev->pdev->dev,
10561 "failed to sync vlan filter state for vport%u, ret = %d\n",
10562 vport->vport_id, ret);
10563 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10570 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10572 #define HCLGE_MAX_SYNC_COUNT 60
10574 int i, ret, sync_cnt = 0;
10577 /* start from vport 1, since the PF is always alive */
10578 for (i = 0; i < hdev->num_alloc_vport; i++) {
10579 struct hclge_vport *vport = &hdev->vport[i];
10581 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10583 while (vlan_id != VLAN_N_VID) {
10584 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10585 vport->vport_id, vlan_id,
10587 if (ret && ret != -EINVAL)
10590 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10591 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10592 hclge_set_vport_vlan_fltr_change(vport);
10595 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10598 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10603 hclge_sync_vlan_fltr_state(hdev);
10606 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10608 struct hclge_config_max_frm_size_cmd *req;
10609 struct hclge_desc desc;
10611 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10613 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10614 req->max_frm_size = cpu_to_le16(new_mps);
10615 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10617 return hclge_cmd_send(&hdev->hw, &desc, 1);
10620 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10622 struct hclge_vport *vport = hclge_get_vport(handle);
10624 return hclge_set_vport_mtu(vport, new_mtu);
10627 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10629 struct hclge_dev *hdev = vport->back;
10630 int i, max_frm_size, ret;
10632 /* HW supports 2 layers of vlan */
10633 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10634 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10635 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10638 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10639 mutex_lock(&hdev->vport_lock);
10640 /* VF's mps must fit within hdev->mps */
10641 if (vport->vport_id && max_frm_size > hdev->mps) {
10642 mutex_unlock(&hdev->vport_lock);
10644 } else if (vport->vport_id) {
10645 vport->mps = max_frm_size;
10646 mutex_unlock(&hdev->vport_lock);
10650 /* PF's mps must be no smaller than every VF's mps */
10651 for (i = 1; i < hdev->num_alloc_vport; i++)
10652 if (max_frm_size < hdev->vport[i].mps) {
10653 mutex_unlock(&hdev->vport_lock);
10657 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10659 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10661 dev_err(&hdev->pdev->dev,
10662 "Change mtu fail, ret =%d\n", ret);
10666 hdev->mps = max_frm_size;
10667 vport->mps = max_frm_size;
10669 ret = hclge_buffer_alloc(hdev);
10671 dev_err(&hdev->pdev->dev,
10672 "Allocate buffer fail, ret =%d\n", ret);
10675 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10676 mutex_unlock(&hdev->vport_lock);
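/* The MTU to frame size conversion above adds the L2 overhead: 14 bytes
 * of Ethernet header, 4 bytes of FCS and two 4-byte VLAN tags, i.e.
 * max_frm_size = new_mtu + 26. A worked example:
 */
#if 0	/* illustrative: MTU 1500 -> 1526-byte max frame */
int max_frm_size = 1500 + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
/* = 1500 + 14 + 4 + 8 = 1526, then raised to at least
 * HCLGE_MAC_DEFAULT_FRAME by the max() above
 */
#endif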
10680 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10683 struct hclge_reset_tqp_queue_cmd *req;
10684 struct hclge_desc desc;
10687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10689 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10690 req->tqp_id = cpu_to_le16(queue_id);
10692 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10694 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10696 dev_err(&hdev->pdev->dev,
10697 "Send tqp reset cmd error, status =%d\n", ret);
10704 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10706 struct hclge_reset_tqp_queue_cmd *req;
10707 struct hclge_desc desc;
10710 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10712 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10713 req->tqp_id = cpu_to_le16(queue_id);
10715 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10717 dev_err(&hdev->pdev->dev,
10718 "Get reset status error, status =%d\n", ret);
10722 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10725 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10727 struct hnae3_queue *queue;
10728 struct hclge_tqp *tqp;
10730 queue = handle->kinfo.tqp[queue_id];
10731 tqp = container_of(queue, struct hclge_tqp, q);
10736 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10738 struct hclge_vport *vport = hclge_get_vport(handle);
10739 struct hclge_dev *hdev = vport->back;
10740 u16 reset_try_times = 0;
10746 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10747 queue_gid = hclge_covert_handle_qid_global(handle, i);
10748 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10750 dev_err(&hdev->pdev->dev,
10751 "failed to send reset tqp cmd, ret = %d\n",
10756 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10757 reset_status = hclge_get_reset_status(hdev, queue_gid);
10761 /* Wait for tqp hw reset */
10762 usleep_range(1000, 1200);
10765 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10766 dev_err(&hdev->pdev->dev,
10767 "wait for tqp hw reset timeout\n");
10771 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10773 dev_err(&hdev->pdev->dev,
10774 "failed to deassert soft reset, ret = %d\n",
10778 reset_try_times = 0;
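/* The busy-wait above polls hclge_get_reset_status() up to
 * HCLGE_TQP_RESET_TRY_TIMES with a ~1ms sleep per try. A sketch of the
 * same wait using the generic poll helper from <linux/iopoll.h>; this is
 * a possible alternative, not what the driver does, and error handling
 * is elided:
 */
#if 0	/* illustrative use of read_poll_timeout() */
int reset_status;
int ret = read_poll_timeout(hclge_get_reset_status, reset_status,
			    reset_status, 1000,
			    1000 * HCLGE_TQP_RESET_TRY_TIMES, false,
			    hdev, queue_gid);
#endif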
10783 static int hclge_reset_rcb(struct hnae3_handle *handle)
10785 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10786 #define HCLGE_RESET_RCB_SUCCESS 1U
10788 struct hclge_vport *vport = hclge_get_vport(handle);
10789 struct hclge_dev *hdev = vport->back;
10790 struct hclge_reset_cmd *req;
10791 struct hclge_desc desc;
10796 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10798 req = (struct hclge_reset_cmd *)desc.data;
10799 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10800 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10801 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10802 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10804 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10806 dev_err(&hdev->pdev->dev,
10807 "failed to send rcb reset cmd, ret = %d\n", ret);
10811 return_status = req->fun_reset_rcb_return_status;
10812 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10815 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10816 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10821 /* if the reset rcb cmd is unsupported, fall back to sending the
10822 * reset tqp cmd to reset all tqps
10824 return hclge_reset_tqp_cmd(handle);
10827 int hclge_reset_tqp(struct hnae3_handle *handle)
10829 struct hclge_vport *vport = hclge_get_vport(handle);
10830 struct hclge_dev *hdev = vport->back;
10833 /* only need to disable PF's tqp */
10834 if (!vport->vport_id) {
10835 ret = hclge_tqp_enable(handle, false);
10837 dev_err(&hdev->pdev->dev,
10838 "failed to disable tqp, ret = %d\n", ret);
10843 return hclge_reset_rcb(handle);
10846 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10848 struct hclge_vport *vport = hclge_get_vport(handle);
10849 struct hclge_dev *hdev = vport->back;
10851 return hdev->fw_version;
10854 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10856 struct phy_device *phydev = hdev->hw.mac.phydev;
10861 phy_set_asym_pause(phydev, rx_en, tx_en);
10864 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10868 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10871 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10873 dev_err(&hdev->pdev->dev,
10874 "configure pauseparam error, ret = %d.\n", ret);
10879 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10881 struct phy_device *phydev = hdev->hw.mac.phydev;
10882 u16 remote_advertising = 0;
10883 u16 local_advertising;
10884 u32 rx_pause, tx_pause;
10887 if (!phydev->link || !phydev->autoneg)
10890 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10893 remote_advertising = LPA_PAUSE_CAP;
10895 if (phydev->asym_pause)
10896 remote_advertising |= LPA_PAUSE_ASYM;
10898 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10899 remote_advertising);
10900 tx_pause = flowctl & FLOW_CTRL_TX;
10901 rx_pause = flowctl & FLOW_CTRL_RX;
10903 if (phydev->duplex == HCLGE_MAC_HALF) {
10908 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
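/* The pause resolution above follows IEEE 802.3 clause 28B: each side
 * advertises symmetric and/or asymmetric pause, and the common helper
 * derives the tx/rx directions (per Table 28B-3). An illustrative call,
 * using constants from <linux/mii.h>:
 */
#if 0	/* illustrative: both ends advertise symmetric pause */
u8 flowctl = mii_resolve_flowctrl_fdx(ADVERTISE_PAUSE_CAP, LPA_PAUSE_CAP);
/* flowctl == (FLOW_CTRL_TX | FLOW_CTRL_RX) */
#endif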
10911 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10912 u32 *rx_en, u32 *tx_en)
10914 struct hclge_vport *vport = hclge_get_vport(handle);
10915 struct hclge_dev *hdev = vport->back;
10916 u8 media_type = hdev->hw.mac.media_type;
10918 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10919 hclge_get_autoneg(handle) : 0;
10921 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10927 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10930 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10933 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10942 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10943 u32 rx_en, u32 tx_en)
10945 if (rx_en && tx_en)
10946 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10947 else if (rx_en && !tx_en)
10948 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10949 else if (!rx_en && tx_en)
10950 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10952 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10954 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10957 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10958 u32 rx_en, u32 tx_en)
10960 struct hclge_vport *vport = hclge_get_vport(handle);
10961 struct hclge_dev *hdev = vport->back;
10962 struct phy_device *phydev = hdev->hw.mac.phydev;
10965 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10966 fc_autoneg = hclge_get_autoneg(handle);
10967 if (auto_neg != fc_autoneg) {
10968 dev_info(&hdev->pdev->dev,
10969 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10970 return -EOPNOTSUPP;
10974 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10975 dev_info(&hdev->pdev->dev,
10976 "Priority flow control enabled. Cannot set link flow control.\n");
10977 return -EOPNOTSUPP;
10980 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10982 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10984 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10985 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10988 return phy_start_aneg(phydev);
10990 return -EOPNOTSUPP;
10993 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10994 u8 *auto_neg, u32 *speed, u8 *duplex)
10996 struct hclge_vport *vport = hclge_get_vport(handle);
10997 struct hclge_dev *hdev = vport->back;
11000 *speed = hdev->hw.mac.speed;
11002 *duplex = hdev->hw.mac.duplex;
11004 *auto_neg = hdev->hw.mac.autoneg;
11007 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11010 struct hclge_vport *vport = hclge_get_vport(handle);
11011 struct hclge_dev *hdev = vport->back;
11013	/* When the nic is down, the service task does not run and the port
11014	 * information is not refreshed every second. Query the port info before
11015	 * returning the media type so the media information is up to date.
11017 hclge_update_port_info(hdev);
11020 *media_type = hdev->hw.mac.media_type;
11023 *module_type = hdev->hw.mac.module_type;
11026 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11027 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11029 struct hclge_vport *vport = hclge_get_vport(handle);
11030 struct hclge_dev *hdev = vport->back;
11031 struct phy_device *phydev = hdev->hw.mac.phydev;
11032 int mdix_ctrl, mdix, is_resolved;
11033 unsigned int retval;
11036 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11037 *tp_mdix = ETH_TP_MDI_INVALID;
11041 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11043 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11044 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11045 HCLGE_PHY_MDIX_CTRL_S);
11047 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11048 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11049 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11051 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11053 switch (mdix_ctrl) {
11055 *tp_mdix_ctrl = ETH_TP_MDI;
11058 *tp_mdix_ctrl = ETH_TP_MDI_X;
11061 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11064 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11069 *tp_mdix = ETH_TP_MDI_INVALID;
11071 *tp_mdix = ETH_TP_MDI_X;
11073 *tp_mdix = ETH_TP_MDI;
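
/*
 * Illustration (compiled out): hnae3_get_field()/hnae3_get_bit() above are
 * the usual mask-and-shift field extraction. A standalone sketch of that
 * pattern follows; the register value and field layout here are invented
 * purely for the example.
 */
#if 0	/* build standalone to run */
#include <stdio.h>

/* Generic mask-and-shift extraction, the pattern behind
 * hnae3_get_field()/hnae3_get_bit().
 */
#define GET_FIELD(val, mask, shift)	(((val) & (mask)) >> (shift))
#define GET_BIT(val, bit)		(((val) >> (bit)) & 0x1)

int main(void)
{
	unsigned int reg = 0x0000a5c0;	/* made-up register value */

	/* hypothetical 4-bit field at bits [9:6], single bit 3 */
	printf("field=0x%lx bit3=%lu\n",
	       (unsigned long)GET_FIELD(reg, 0x3c0u, 6),
	       (unsigned long)GET_BIT(reg, 3));
	return 0;
}
#endif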
11076 static void hclge_info_show(struct hclge_dev *hdev)
11078 struct device *dev = &hdev->pdev->dev;
11080 dev_info(dev, "PF info begin:\n");
11082 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11083 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11084 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11085 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11086 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11087 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11088 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11089 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11090 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11091 dev_info(dev, "This is %s PF\n",
11092 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11093 dev_info(dev, "DCB %s\n",
11094 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11095 dev_info(dev, "MQPRIO %s\n",
11096 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11097 dev_info(dev, "Default tx spare buffer size: %u\n",
11098 hdev->tx_spare_buf_size);
11100 dev_info(dev, "PF info end.\n");
11103 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11104 struct hclge_vport *vport)
11106 struct hnae3_client *client = vport->nic.client;
11107 struct hclge_dev *hdev = ae_dev->priv;
11108 int rst_cnt = hdev->rst_stats.reset_cnt;
11111 ret = client->ops->init_instance(&vport->nic);
11115 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11116 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11117 rst_cnt != hdev->rst_stats.reset_cnt) {
11122 /* Enable nic hw error interrupts */
11123 ret = hclge_config_nic_hw_error(hdev, true);
11125 dev_err(&ae_dev->pdev->dev,
11126 "fail(%d) to enable hw error interrupts\n", ret);
11130 hnae3_set_client_init_flag(client, ae_dev, 1);
11132 if (netif_msg_drv(&hdev->vport->nic))
11133 hclge_info_show(hdev);
11138 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11139 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11140 msleep(HCLGE_WAIT_RESET_DONE);
11142 client->ops->uninit_instance(&vport->nic, 0);
11147 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11148 struct hclge_vport *vport)
11150 struct hclge_dev *hdev = ae_dev->priv;
11151 struct hnae3_client *client;
11155 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11159 client = hdev->roce_client;
11160 ret = hclge_init_roce_base_info(vport);
11164 rst_cnt = hdev->rst_stats.reset_cnt;
11165 ret = client->ops->init_instance(&vport->roce);
11169 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11170 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11171 rst_cnt != hdev->rst_stats.reset_cnt) {
11173 goto init_roce_err;
11176 /* Enable roce ras interrupts */
11177 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11179 dev_err(&ae_dev->pdev->dev,
11180 "fail(%d) to enable roce ras interrupts\n", ret);
11181 goto init_roce_err;
11184 hnae3_set_client_init_flag(client, ae_dev, 1);
11189 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11190 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11191 msleep(HCLGE_WAIT_RESET_DONE);
11193 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11198 static int hclge_init_client_instance(struct hnae3_client *client,
11199 struct hnae3_ae_dev *ae_dev)
11201 struct hclge_dev *hdev = ae_dev->priv;
11202 struct hclge_vport *vport = &hdev->vport[0];
11205 switch (client->type) {
11206 case HNAE3_CLIENT_KNIC:
11207 hdev->nic_client = client;
11208 vport->nic.client = client;
11209 ret = hclge_init_nic_client_instance(ae_dev, vport);
11213 ret = hclge_init_roce_client_instance(ae_dev, vport);
11218 case HNAE3_CLIENT_ROCE:
11219 if (hnae3_dev_roce_supported(hdev)) {
11220 hdev->roce_client = client;
11221 vport->roce.client = client;
11224 ret = hclge_init_roce_client_instance(ae_dev, vport);
11236 hdev->nic_client = NULL;
11237 vport->nic.client = NULL;
11240 hdev->roce_client = NULL;
11241 vport->roce.client = NULL;
11245 static void hclge_uninit_client_instance(struct hnae3_client *client,
11246 struct hnae3_ae_dev *ae_dev)
11248 struct hclge_dev *hdev = ae_dev->priv;
11249 struct hclge_vport *vport = &hdev->vport[0];
11251 if (hdev->roce_client) {
11252 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11253 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11254 msleep(HCLGE_WAIT_RESET_DONE);
11256 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11257 hdev->roce_client = NULL;
11258 vport->roce.client = NULL;
11260 if (client->type == HNAE3_CLIENT_ROCE)
11262 if (hdev->nic_client && client->ops->uninit_instance) {
11263 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11264 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11265 msleep(HCLGE_WAIT_RESET_DONE);
11267 client->ops->uninit_instance(&vport->nic, 0);
11268 hdev->nic_client = NULL;
11269 vport->nic.client = NULL;
11273 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11275 #define HCLGE_MEM_BAR 4
11277 struct pci_dev *pdev = hdev->pdev;
11278 struct hclge_hw *hw = &hdev->hw;
11280	/* if the device has no device memory, return directly */
11281 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11284 hw->mem_base = devm_ioremap_wc(&pdev->dev,
11285 pci_resource_start(pdev, HCLGE_MEM_BAR),
11286 pci_resource_len(pdev, HCLGE_MEM_BAR));
11287 if (!hw->mem_base) {
11288 dev_err(&pdev->dev, "failed to map device memory\n");
11295 static int hclge_pci_init(struct hclge_dev *hdev)
11297 struct pci_dev *pdev = hdev->pdev;
11298 struct hclge_hw *hw;
11301 ret = pci_enable_device(pdev);
11303 dev_err(&pdev->dev, "failed to enable PCI device\n");
11307 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11309 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11311 dev_err(&pdev->dev,
11312 "can't set consistent PCI DMA");
11313 goto err_disable_device;
11315 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11318 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11320 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11321 goto err_disable_device;
11324 pci_set_master(pdev);
11326 hw->io_base = pcim_iomap(pdev, 2, 0);
11327 if (!hw->io_base) {
11328 dev_err(&pdev->dev, "Can't map configuration register space\n");
11330 goto err_clr_master;
11333 ret = hclge_dev_mem_map(hdev);
11335 goto err_unmap_io_base;
11337 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11342 pcim_iounmap(pdev, hdev->hw.io_base);
11344 pci_clear_master(pdev);
11345 pci_release_regions(pdev);
11346 err_disable_device:
11347 pci_disable_device(pdev);
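
/*
 * Illustration (compiled out): hclge_pci_init() asks for a 64-bit DMA mask
 * first and falls back to 32 bits. A standalone sketch of that fallback
 * shape, with a stand-in for dma_set_mask_and_coherent():
 */
#if 0	/* build standalone to run */
#include <stdio.h>

/* Stand-in for dma_set_mask_and_coherent(): pretend the platform
 * only supports 32-bit DMA, so the 64-bit request fails.
 */
static int set_dma_mask(int bits)
{
	return bits > 32 ? -5 /* -EIO */ : 0;
}

/* Widest-mask-first fallback, as in hclge_pci_init(). */
static int init_dma(void)
{
	if (set_dma_mask(64) == 0) {
		printf("using 64-bit DMA mask\n");
		return 0;
	}
	if (set_dma_mask(32) == 0) {
		printf("fell back to 32-bit DMA mask\n");
		return 0;
	}
	fprintf(stderr, "can't set a usable DMA mask\n");
	return -1;
}

int main(void)
{
	return init_dma() ? 1 : 0;
}
#endif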
11352 static void hclge_pci_uninit(struct hclge_dev *hdev)
11354 struct pci_dev *pdev = hdev->pdev;
11356 if (hdev->hw.mem_base)
11357 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11359 pcim_iounmap(pdev, hdev->hw.io_base);
11360 pci_free_irq_vectors(pdev);
11361 pci_clear_master(pdev);
11362 pci_release_mem_regions(pdev);
11363 pci_disable_device(pdev);
11366 static void hclge_state_init(struct hclge_dev *hdev)
11368 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11369 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11370 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11371 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11372 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11373 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11374 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11377 static void hclge_state_uninit(struct hclge_dev *hdev)
11379 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11380 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11382 if (hdev->reset_timer.function)
11383 del_timer_sync(&hdev->reset_timer);
11384 if (hdev->service_task.work.func)
11385 cancel_delayed_work_sync(&hdev->service_task);
11388 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11389 enum hnae3_reset_type rst_type)
11391 #define HCLGE_RESET_RETRY_WAIT_MS 500
11392 #define HCLGE_RESET_RETRY_CNT 5
11394 struct hclge_dev *hdev = ae_dev->priv;
11399 down(&hdev->reset_sem);
11400 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11401 hdev->reset_type = rst_type;
11402 ret = hclge_reset_prepare(hdev);
11403 if (ret || hdev->reset_pending) {
11404 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11406 if (hdev->reset_pending ||
11407 retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11408 dev_err(&hdev->pdev->dev,
11409 "reset_pending:0x%lx, retry_cnt:%d\n",
11410 hdev->reset_pending, retry_cnt);
11411 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11412 up(&hdev->reset_sem);
11413 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11418	/* disable the misc vector before the reset is done */
11419 hclge_enable_vector(&hdev->misc_vector, false);
11420 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11422 if (hdev->reset_type == HNAE3_FLR_RESET)
11423 hdev->rst_stats.flr_rst_cnt++;
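
/*
 * Illustration (compiled out): hclge_reset_prepare_general() retries the
 * prepare step up to HCLGE_RESET_RETRY_CNT times, releasing the reset
 * semaphore and sleeping between attempts. A condensed sketch of that
 * retry shape, with stand-ins for the locking and the prepare step:
 */
#if 0	/* build standalone to run */
#include <stdio.h>

#define RETRY_CNT	5

/* Stand-in prepare step: succeeds on the third attempt. */
static int prepare(int attempt)
{
	return attempt < 2 ? -1 : 0;
}

int main(void)
{
	int retry_cnt = 0;

	while (retry_cnt < RETRY_CNT) {
		/* down(&reset_sem) would go here */
		if (!prepare(retry_cnt)) {
			printf("prepared after %d retries\n", retry_cnt);
			return 0;
		}
		retry_cnt++;
		/* up(&reset_sem); msleep(500) would go here */
	}
	fprintf(stderr, "prepare failed after %d retries\n", retry_cnt);
	return 1;
}
#endif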
11426 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11428 struct hclge_dev *hdev = ae_dev->priv;
11431 hclge_enable_vector(&hdev->misc_vector, true);
11433 ret = hclge_reset_rebuild(hdev);
11435 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11437 hdev->reset_type = HNAE3_NONE_RESET;
11438 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11439 up(&hdev->reset_sem);
11442 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11446 for (i = 0; i < hdev->num_alloc_vport; i++) {
11447 struct hclge_vport *vport = &hdev->vport[i];
11450 /* Send cmd to clear VF's FUNC_RST_ING */
11451 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11453 dev_warn(&hdev->pdev->dev,
11454 "clear vf(%u) rst failed %d!\n",
11455 vport->vport_id, ret);
11459 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11461 struct hclge_desc desc;
11464 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11466 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11467	/* This new command is only supported by new firmware; it fails on
11468	 * older firmware. -EOPNOTSUPP can only be returned when older
11469	 * firmware runs this command, so to keep the code backward
11470	 * compatible we override that value and return success.
11473 if (ret && ret != -EOPNOTSUPP) {
11474 dev_err(&hdev->pdev->dev,
11475 "failed to clear hw resource, ret = %d\n", ret);
11481 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11483 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11484 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11487 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11489 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11490 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11493 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11495 struct pci_dev *pdev = ae_dev->pdev;
11496 struct hclge_dev *hdev;
11499 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11504 hdev->ae_dev = ae_dev;
11505 hdev->reset_type = HNAE3_NONE_RESET;
11506 hdev->reset_level = HNAE3_FUNC_RESET;
11507 ae_dev->priv = hdev;
11509	/* HW supports two layers of VLAN tags */
11510 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11512 mutex_init(&hdev->vport_lock);
11513 spin_lock_init(&hdev->fd_rule_lock);
11514 sema_init(&hdev->reset_sem, 1);
11516 ret = hclge_pci_init(hdev);
11520 ret = hclge_devlink_init(hdev);
11522 goto err_pci_uninit;
11524	/* Initialize the firmware command queue */
11525 ret = hclge_cmd_queue_init(hdev);
11527 goto err_devlink_uninit;
11529	/* Initialize firmware commands */
11530 ret = hclge_cmd_init(hdev);
11532 goto err_cmd_uninit;
11534 ret = hclge_clear_hw_resource(hdev);
11536 goto err_cmd_uninit;
11538 ret = hclge_get_cap(hdev);
11540 goto err_cmd_uninit;
11542 ret = hclge_query_dev_specs(hdev);
11544 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11546 goto err_cmd_uninit;
11549 ret = hclge_configure(hdev);
11551 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11552 goto err_cmd_uninit;
11555 ret = hclge_init_msi(hdev);
11557 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11558 goto err_cmd_uninit;
11561 ret = hclge_misc_irq_init(hdev);
11563 goto err_msi_uninit;
11565 ret = hclge_alloc_tqps(hdev);
11567 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11568 goto err_msi_irq_uninit;
11571 ret = hclge_alloc_vport(hdev);
11573 goto err_msi_irq_uninit;
11575 ret = hclge_map_tqp(hdev);
11577 goto err_msi_irq_uninit;
11579 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11580 !hnae3_dev_phy_imp_supported(hdev)) {
11581 ret = hclge_mac_mdio_config(hdev);
11583 goto err_msi_irq_uninit;
11586 ret = hclge_init_umv_space(hdev);
11588 goto err_mdiobus_unreg;
11590 ret = hclge_mac_init(hdev);
11592 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11593 goto err_mdiobus_unreg;
11596 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11598 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11599 goto err_mdiobus_unreg;
11602 ret = hclge_config_gro(hdev);
11604 goto err_mdiobus_unreg;
11606 ret = hclge_init_vlan_config(hdev);
11608 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11609 goto err_mdiobus_unreg;
11612 ret = hclge_tm_schd_init(hdev);
11614 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11615 goto err_mdiobus_unreg;
11618 ret = hclge_rss_init_cfg(hdev);
11620 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11621 goto err_mdiobus_unreg;
11624 ret = hclge_rss_init_hw(hdev);
11626 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11627 goto err_mdiobus_unreg;
11630 ret = init_mgr_tbl(hdev);
11632 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11633 goto err_mdiobus_unreg;
11636 ret = hclge_init_fd_config(hdev);
11638 dev_err(&pdev->dev,
11639 "fd table init fail, ret=%d\n", ret);
11640 goto err_mdiobus_unreg;
11643 ret = hclge_ptp_init(hdev);
11645 goto err_mdiobus_unreg;
11647 INIT_KFIFO(hdev->mac_tnl_log);
11649 hclge_dcb_ops_set(hdev);
11651 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11652 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11654	/* Set up affinity after the service timer is set up, because
11655	 * add_timer_on is called from the affinity notify callback.
11657 hclge_misc_affinity_setup(hdev);
11659 hclge_clear_all_event_cause(hdev);
11660 hclge_clear_resetting_state(hdev);
11662	/* Log and clear the hw errors that have already occurred */
11663 if (hnae3_dev_ras_imp_supported(hdev))
11664 hclge_handle_occurred_error(hdev);
11666 hclge_handle_all_hns_hw_errors(ae_dev);
11668	/* Request a delayed reset for error recovery: an immediate global
11669	 * reset on one PF would disrupt the pending initialization of other PFs.
11671 if (ae_dev->hw_err_reset_req) {
11672 enum hnae3_reset_type reset_level;
11674 reset_level = hclge_get_reset_level(ae_dev,
11675 &ae_dev->hw_err_reset_req);
11676 hclge_set_def_reset_request(ae_dev, reset_level);
11677 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11680 hclge_init_rxd_adv_layout(hdev);
11682	/* Enable the MISC vector (vector0) */
11683 hclge_enable_vector(&hdev->misc_vector, true);
11685 hclge_state_init(hdev);
11686 hdev->last_reset_time = jiffies;
11688 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11689 HCLGE_DRIVER_NAME);
11691 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11696 if (hdev->hw.mac.phydev)
11697 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11698 err_msi_irq_uninit:
11699 hclge_misc_irq_uninit(hdev);
11701 pci_free_irq_vectors(pdev);
11703 hclge_cmd_uninit(hdev);
11704 err_devlink_uninit:
11705 hclge_devlink_uninit(hdev);
11707 pcim_iounmap(pdev, hdev->hw.io_base);
11708 pci_clear_master(pdev);
11709 pci_release_regions(pdev);
11710 pci_disable_device(pdev);
11712 mutex_destroy(&hdev->vport_lock);
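
/*
 * Illustration (compiled out): hclge_init_ae_dev() unwinds failures with
 * the kernel's goto-ladder idiom, releasing resources in reverse order of
 * acquisition. The same shape in miniature, with printf stand-ins:
 */
#if 0	/* build standalone to run */
#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("acquire %s\n", name);
	return fail ? -1 : 0;
}

/* Reverse-order unwind via goto labels, mirroring the error path of
 * hclge_init_ae_dev() in miniature.
 */
static int init_all(void)
{
	int ret;

	ret = step("pci", 0);
	if (ret)
		goto out;
	ret = step("cmd queue", 0);
	if (ret)
		goto err_pci;
	ret = step("irq", 1);	/* force a failure at the last step */
	if (ret)
		goto err_cmd;
	return 0;

err_cmd:
	printf("release cmd queue\n");
err_pci:
	printf("release pci\n");
out:
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
#endif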
11716 static void hclge_stats_clear(struct hclge_dev *hdev)
11718 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11721 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11723 return hclge_config_switch_param(hdev, vf, enable,
11724 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11727 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11729 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11730 HCLGE_FILTER_FE_NIC_INGRESS_B,
11734 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11738 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11740 dev_err(&hdev->pdev->dev,
11741 "Set vf %d mac spoof check %s failed, ret=%d\n",
11742 vf, enable ? "on" : "off", ret);
11746 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11748 dev_err(&hdev->pdev->dev,
11749 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11750 vf, enable ? "on" : "off", ret);
11755 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11758 struct hclge_vport *vport = hclge_get_vport(handle);
11759 struct hclge_dev *hdev = vport->back;
11760 u32 new_spoofchk = enable ? 1 : 0;
11763 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11764 return -EOPNOTSUPP;
11766 vport = hclge_get_vf_vport(hdev, vf);
11770 if (vport->vf_info.spoofchk == new_spoofchk)
11773 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11774 dev_warn(&hdev->pdev->dev,
11775 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11777 else if (enable && hclge_is_umv_space_full(vport, true))
11778 dev_warn(&hdev->pdev->dev,
11779 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11782 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11786 vport->vf_info.spoofchk = new_spoofchk;
11790 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11792 struct hclge_vport *vport = hdev->vport;
11796 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11799 /* resume the vf spoof check state after reset */
11800 for (i = 0; i < hdev->num_alloc_vport; i++) {
11801 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11802 vport->vf_info.spoofchk);
11812 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11814 struct hclge_vport *vport = hclge_get_vport(handle);
11815 struct hclge_dev *hdev = vport->back;
11816 u32 new_trusted = enable ? 1 : 0;
11818 vport = hclge_get_vf_vport(hdev, vf);
11822 if (vport->vf_info.trusted == new_trusted)
11825 vport->vf_info.trusted = new_trusted;
11826 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11827 hclge_task_schedule(hdev, 0);
11832 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11837 /* reset vf rate to default value */
11838 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11839 struct hclge_vport *vport = &hdev->vport[vf];
11841 vport->vf_info.max_tx_rate = 0;
11842 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11844 dev_err(&hdev->pdev->dev,
11845 "vf%d failed to reset to default, ret=%d\n",
11846 vf - HCLGE_VF_VPORT_START_NUM, ret);
11850 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11851 int min_tx_rate, int max_tx_rate)
11853 if (min_tx_rate != 0 ||
11854 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11855 dev_err(&hdev->pdev->dev,
11856 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11857 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11864 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11865 int min_tx_rate, int max_tx_rate, bool force)
11867 struct hclge_vport *vport = hclge_get_vport(handle);
11868 struct hclge_dev *hdev = vport->back;
11871 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11875 vport = hclge_get_vf_vport(hdev, vf);
11879 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11882 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11886 vport->vf_info.max_tx_rate = max_tx_rate;
11891 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11893 struct hnae3_handle *handle = &hdev->vport->nic;
11894 struct hclge_vport *vport;
11898 /* resume the vf max_tx_rate after reset */
11899 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11900 vport = hclge_get_vf_vport(hdev, vf);
11904		/* Zero means max rate; after a reset the firmware has already
11905		 * set it to max rate, so just continue.
11907 if (!vport->vf_info.max_tx_rate)
11910 ret = hclge_set_vf_rate(handle, vf, 0,
11911 vport->vf_info.max_tx_rate, true);
11913 dev_err(&hdev->pdev->dev,
11914 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11915 vf, vport->vf_info.max_tx_rate, ret);
11923 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11925 struct hclge_vport *vport = hdev->vport;
11928 for (i = 0; i < hdev->num_alloc_vport; i++) {
11929 hclge_vport_stop(vport);
11934 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11936 struct hclge_dev *hdev = ae_dev->priv;
11937 struct pci_dev *pdev = ae_dev->pdev;
11940 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11942 hclge_stats_clear(hdev);
11943	/* NOTE: a PF reset does not need to clear or restore the PF and VF
11944	 * table entries, so do not clean the tables in memory here.
11946 if (hdev->reset_type == HNAE3_IMP_RESET ||
11947 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11948 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11949 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11950 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11951 hclge_reset_umv_space(hdev);
11954 ret = hclge_cmd_init(hdev);
11956 dev_err(&pdev->dev, "Cmd queue init failed\n");
11960 ret = hclge_map_tqp(hdev);
11962 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11966 ret = hclge_mac_init(hdev);
11968 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11972 ret = hclge_tp_port_init(hdev);
11974 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11979 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11981 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11985 ret = hclge_config_gro(hdev);
11989 ret = hclge_init_vlan_config(hdev);
11991 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11995 ret = hclge_tm_init_hw(hdev, true);
11997 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12001 ret = hclge_rss_init_hw(hdev);
12003 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12007 ret = init_mgr_tbl(hdev);
12009 dev_err(&pdev->dev,
12010 "failed to reinit manager table, ret = %d\n", ret);
12014 ret = hclge_init_fd_config(hdev);
12016 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12020 ret = hclge_ptp_init(hdev);
12024	/* Log and clear the hw errors that have already occurred */
12025 if (hnae3_dev_ras_imp_supported(hdev))
12026 hclge_handle_occurred_error(hdev);
12028 hclge_handle_all_hns_hw_errors(ae_dev);
12030 /* Re-enable the hw error interrupts because
12031 * the interrupts get disabled on global reset.
12033 ret = hclge_config_nic_hw_error(hdev, true);
12035 dev_err(&pdev->dev,
12036 "fail(%d) to re-enable NIC hw error interrupts\n",
12041 if (hdev->roce_client) {
12042 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12044 dev_err(&pdev->dev,
12045 "fail(%d) to re-enable roce ras interrupts\n",
12051 hclge_reset_vport_state(hdev);
12052 ret = hclge_reset_vport_spoofchk(hdev);
12056 ret = hclge_resume_vf_rate(hdev);
12060 hclge_init_rxd_adv_layout(hdev);
12062 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12063 HCLGE_DRIVER_NAME);
12068 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12070 struct hclge_dev *hdev = ae_dev->priv;
12071 struct hclge_mac *mac = &hdev->hw.mac;
12073 hclge_reset_vf_rate(hdev);
12074 hclge_clear_vf_vlan(hdev);
12075 hclge_misc_affinity_teardown(hdev);
12076 hclge_state_uninit(hdev);
12077 hclge_ptp_uninit(hdev);
12078 hclge_uninit_rxd_adv_layout(hdev);
12079 hclge_uninit_mac_table(hdev);
12080 hclge_del_all_fd_entries(hdev);
12083 mdiobus_unregister(mac->mdio_bus);
12085	/* Disable the MISC vector (vector0) */
12086 hclge_enable_vector(&hdev->misc_vector, false);
12087 synchronize_irq(hdev->misc_vector.vector_irq);
12089 /* Disable all hw interrupts */
12090 hclge_config_mac_tnl_int(hdev, false);
12091 hclge_config_nic_hw_error(hdev, false);
12092 hclge_config_rocee_ras_interrupt(hdev, false);
12094 hclge_cmd_uninit(hdev);
12095 hclge_misc_irq_uninit(hdev);
12096 hclge_devlink_uninit(hdev);
12097 hclge_pci_uninit(hdev);
12098 mutex_destroy(&hdev->vport_lock);
12099 hclge_uninit_vport_vlan_table(hdev);
12100 ae_dev->priv = NULL;
12103 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12105 struct hclge_vport *vport = hclge_get_vport(handle);
12106 struct hclge_dev *hdev = vport->back;
12108 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12111 static void hclge_get_channels(struct hnae3_handle *handle,
12112 struct ethtool_channels *ch)
12114 ch->max_combined = hclge_get_max_channels(handle);
12115 ch->other_count = 1;
12117 ch->combined_count = handle->kinfo.rss_size;
12120 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12121 u16 *alloc_tqps, u16 *max_rss_size)
12123 struct hclge_vport *vport = hclge_get_vport(handle);
12124 struct hclge_dev *hdev = vport->back;
12126 *alloc_tqps = vport->alloc_tqps;
12127 *max_rss_size = hdev->pf_rss_size_max;
12130 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12131 bool rxfh_configured)
12133 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12134 struct hclge_vport *vport = hclge_get_vport(handle);
12135 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12136 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12137 struct hclge_dev *hdev = vport->back;
12138 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12139 u16 cur_rss_size = kinfo->rss_size;
12140 u16 cur_tqps = kinfo->num_tqps;
12141 u16 tc_valid[HCLGE_MAX_TC_NUM];
12147 kinfo->req_rss_size = new_tqps_num;
12149 ret = hclge_tm_vport_map_update(hdev);
12151 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12155 roundup_size = roundup_pow_of_two(kinfo->rss_size);
12156 roundup_size = ilog2(roundup_size);
12157 /* Set the RSS TC mode according to the new RSS size */
12158 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12161 if (!(hdev->hw_tc_map & BIT(i)))
12165 tc_size[i] = roundup_size;
12166 tc_offset[i] = kinfo->rss_size * i;
12168 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12172	/* the RSS indirection table has been configured by the user */
12173 if (rxfh_configured)
12176	/* Reinitialize the RSS indirection table according to the new RSS size */
12177 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12182 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12183 rss_indir[i] = i % kinfo->rss_size;
12185 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12187 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12194 dev_info(&hdev->pdev->dev,
12195 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12196 cur_rss_size, kinfo->rss_size,
12197 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
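
/*
 * Illustration (compiled out): hclge_set_channels() programs tc_size as
 * ilog2(roundup_pow_of_two(rss_size)) and refills the indirection table
 * round-robin. A standalone sketch of that arithmetic; the queue count
 * and table size below are invented for the example:
 */
#if 0	/* build standalone to run */
#include <stdio.h>

/* Smallest power of two >= v, then its log2: the tc_size value
 * programmed per TC.
 */
static unsigned int rss_size_log2(unsigned int v)
{
	unsigned int p = 1, lg = 0;

	while (p < v) {
		p <<= 1;
		lg++;
	}
	return lg;
}

int main(void)
{
	unsigned int rss_size = 6;	/* example queue count */
	unsigned int ind_tbl_size = 16;	/* made-up indirection table size */
	unsigned int i;

	printf("tc_size (log2) = %u\n", rss_size_log2(rss_size));

	/* default indirection: spread entries round-robin */
	for (i = 0; i < ind_tbl_size; i++)
		printf("rss_indir[%u] = %u\n", i, i % rss_size);
	return 0;
}
#endif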
12202 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12203 u32 *regs_num_64_bit)
12205 struct hclge_desc desc;
12209 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12210 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12212 dev_err(&hdev->pdev->dev,
12213 "Query register number cmd failed, ret = %d.\n", ret);
12217 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12218 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12220 total_num = *regs_num_32_bit + *regs_num_64_bit;
12227 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12230 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12231 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12233 struct hclge_desc *desc;
12234 u32 *reg_val = data;
12244 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12245 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12246 HCLGE_32_BIT_REG_RTN_DATANUM);
12247 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12251 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12252 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12254 dev_err(&hdev->pdev->dev,
12255 "Query 32 bit register cmd failed, ret = %d.\n", ret);
12260 for (i = 0; i < cmd_num; i++) {
12262 desc_data = (__le32 *)(&desc[i].data[0]);
12263 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12265 desc_data = (__le32 *)(&desc[i]);
12266 n = HCLGE_32_BIT_REG_RTN_DATANUM;
12268 for (k = 0; k < n; k++) {
12269 *reg_val++ = le32_to_cpu(*desc_data++);
12281 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12284 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12285 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12287 struct hclge_desc *desc;
12288 u64 *reg_val = data;
12298 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12299 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12300 HCLGE_64_BIT_REG_RTN_DATANUM);
12301 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12305 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12306 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12308 dev_err(&hdev->pdev->dev,
12309 "Query 64 bit register cmd failed, ret = %d.\n", ret);
12314 for (i = 0; i < cmd_num; i++) {
12316 desc_data = (__le64 *)(&desc[i].data[0]);
12317 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12319 desc_data = (__le64 *)(&desc[i]);
12320 n = HCLGE_64_BIT_REG_RTN_DATANUM;
12322 for (k = 0; k < n; k++) {
12323 *reg_val++ = le64_to_cpu(*desc_data++);
12335 #define MAX_SEPARATE_NUM 4
12336 #define SEPARATOR_VALUE 0xFDFCFBFA
12337 #define REG_NUM_PER_LINE 4
12338 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
12339 #define REG_SEPARATOR_LINE 1
12340 #define REG_NUM_REMAIN_MASK 3
12342 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12346	/* initialize all command BDs except the last one */
12347 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12348 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12350 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12353 /* initialize the last command BD */
12354 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12356 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12359 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12363 u32 entries_per_desc, desc_index, index, offset, i;
12364 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12367 ret = hclge_query_bd_num_cmd_send(hdev, desc);
12369 dev_err(&hdev->pdev->dev,
12370 "Get dfx bd num fail, status is %d.\n", ret);
12374 entries_per_desc = ARRAY_SIZE(desc[0].data);
12375 for (i = 0; i < type_num; i++) {
12376 offset = hclge_dfx_bd_offset_list[i];
12377 index = offset % entries_per_desc;
12378 desc_index = offset / entries_per_desc;
12379 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
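
/*
 * Illustration (compiled out): hclge_get_dfx_reg_bd_num() turns each flat
 * BD-number offset into a (descriptor, entry) pair with div/mod on the
 * number of data words per descriptor. A sketch of that mapping, assuming
 * the 6 data words of struct hclge_desc:
 */
#if 0	/* build standalone to run */
#include <stdio.h>

int main(void)
{
	/* hclge_desc carries 6 data words (ARRAY_SIZE(desc->data)) */
	const unsigned int entries_per_desc = 6;
	unsigned int offset;

	/* map flat offsets to (descriptor, entry) slots, the same
	 * div/mod split the driver uses
	 */
	for (offset = 1; offset <= 12; offset++)
		printf("offset %2u -> desc %u entry %u\n", offset,
		       offset / entries_per_desc,
		       offset % entries_per_desc);
	return 0;
}
#endif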
12385 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12386 struct hclge_desc *desc_src, int bd_num,
12387 enum hclge_opcode_type cmd)
12389 struct hclge_desc *desc = desc_src;
12392 hclge_cmd_setup_basic_desc(desc, cmd, true);
12393 for (i = 0; i < bd_num - 1; i++) {
12394 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12396 hclge_cmd_setup_basic_desc(desc, cmd, true);
12400 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12402 dev_err(&hdev->pdev->dev,
12403 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12409 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12412 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12413 struct hclge_desc *desc = desc_src;
12416 entries_per_desc = ARRAY_SIZE(desc->data);
12417 reg_num = entries_per_desc * bd_num;
12418 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12419 for (i = 0; i < reg_num; i++) {
12420 index = i % entries_per_desc;
12421 desc_index = i / entries_per_desc;
12422 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12424 for (i = 0; i < separator_num; i++)
12425 *reg++ = SEPARATOR_VALUE;
12427 return reg_num + separator_num;
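
/*
 * Illustration (compiled out): hclge_dfx_reg_fetch_data() pads every
 * register block with SEPARATOR_VALUE words so it ends on a 4-word line
 * boundary. A quick standalone check of the padding arithmetic:
 */
#if 0	/* build standalone to run */
#include <stdio.h>

#define REG_NUM_PER_LINE	4
#define REG_NUM_REMAIN_MASK	3

int main(void)
{
	unsigned int reg_num;

	/* each block gets 1..4 separator words, so reg_num + padding
	 * is always a multiple of REG_NUM_PER_LINE
	 */
	for (reg_num = 5; reg_num <= 9; reg_num++)
		printf("reg_num %u -> %u separator words\n", reg_num,
		       REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK));
	return 0;
}
#endif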
12430 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12432 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12433 int data_len_per_desc, bd_num, i;
12438 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12442 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12444 dev_err(&hdev->pdev->dev,
12445 "Get dfx reg bd num fail, status is %d.\n", ret);
12449 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12451 for (i = 0; i < dfx_reg_type_num; i++) {
12452 bd_num = bd_num_list[i];
12453 data_len = data_len_per_desc * bd_num;
12454 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12458 kfree(bd_num_list);
12462 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12464 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12465 int bd_num, bd_num_max, buf_len, i;
12466 struct hclge_desc *desc_src;
12471 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12475 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12477 dev_err(&hdev->pdev->dev,
12478 "Get dfx reg bd num fail, status is %d.\n", ret);
12482 bd_num_max = bd_num_list[0];
12483 for (i = 1; i < dfx_reg_type_num; i++)
12484 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12486 buf_len = sizeof(*desc_src) * bd_num_max;
12487 desc_src = kzalloc(buf_len, GFP_KERNEL);
12493 for (i = 0; i < dfx_reg_type_num; i++) {
12494 bd_num = bd_num_list[i];
12495 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12496 hclge_dfx_reg_opcode_list[i]);
12498 dev_err(&hdev->pdev->dev,
12499 "Get dfx reg fail, status is %d.\n", ret);
12503 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12508 kfree(bd_num_list);
12512 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12513 struct hnae3_knic_private_info *kinfo)
12515 #define HCLGE_RING_REG_OFFSET 0x200
12516 #define HCLGE_RING_INT_REG_OFFSET 0x4
12518 int i, j, reg_num, separator_num;
12522	/* fetch per-PF register values from the PF PCIe register space */
12523 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12524 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12525 for (i = 0; i < reg_num; i++)
12526 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12527 for (i = 0; i < separator_num; i++)
12528 *reg++ = SEPARATOR_VALUE;
12529 data_num_sum = reg_num + separator_num;
12531 reg_num = ARRAY_SIZE(common_reg_addr_list);
12532 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12533 for (i = 0; i < reg_num; i++)
12534 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12535 for (i = 0; i < separator_num; i++)
12536 *reg++ = SEPARATOR_VALUE;
12537 data_num_sum += reg_num + separator_num;
12539 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12540 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12541 for (j = 0; j < kinfo->num_tqps; j++) {
12542 for (i = 0; i < reg_num; i++)
12543 *reg++ = hclge_read_dev(&hdev->hw,
12544 ring_reg_addr_list[i] +
12545 HCLGE_RING_REG_OFFSET * j);
12546 for (i = 0; i < separator_num; i++)
12547 *reg++ = SEPARATOR_VALUE;
12549 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12551 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12552 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12553 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12554 for (i = 0; i < reg_num; i++)
12555 *reg++ = hclge_read_dev(&hdev->hw,
12556 tqp_intr_reg_addr_list[i] +
12557 HCLGE_RING_INT_REG_OFFSET * j);
12558 for (i = 0; i < separator_num; i++)
12559 *reg++ = SEPARATOR_VALUE;
12561 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12563 return data_num_sum;
12566 static int hclge_get_regs_len(struct hnae3_handle *handle)
12568 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12569 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12570 struct hclge_vport *vport = hclge_get_vport(handle);
12571 struct hclge_dev *hdev = vport->back;
12572 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12573 int regs_lines_32_bit, regs_lines_64_bit;
12576 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
12578 dev_err(&hdev->pdev->dev,
12579 "Get register number failed, ret = %d.\n", ret);
12583 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12585 dev_err(&hdev->pdev->dev,
12586 "Get dfx reg len failed, ret = %d.\n", ret);
12590 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12591 REG_SEPARATOR_LINE;
12592 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12593 REG_SEPARATOR_LINE;
12594 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12595 REG_SEPARATOR_LINE;
12596 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12597 REG_SEPARATOR_LINE;
12598 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12599 REG_SEPARATOR_LINE;
12600 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12601 REG_SEPARATOR_LINE;
12603 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12604 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12605 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12608 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12611 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12612 struct hclge_vport *vport = hclge_get_vport(handle);
12613 struct hclge_dev *hdev = vport->back;
12614 u32 regs_num_32_bit, regs_num_64_bit;
12615 int i, reg_num, separator_num, ret;
12618 *version = hdev->fw_version;
12620 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
12622 dev_err(&hdev->pdev->dev,
12623 "Get register number failed, ret = %d.\n", ret);
12627 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12629 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12631 dev_err(&hdev->pdev->dev,
12632 "Get 32 bit register failed, ret = %d.\n", ret);
12635 reg_num = regs_num_32_bit;
12637 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12638 for (i = 0; i < separator_num; i++)
12639 *reg++ = SEPARATOR_VALUE;
12641 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12643 dev_err(&hdev->pdev->dev,
12644 "Get 64 bit register failed, ret = %d.\n", ret);
12647 reg_num = regs_num_64_bit * 2;
12649 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12650 for (i = 0; i < separator_num; i++)
12651 *reg++ = SEPARATOR_VALUE;
12653 ret = hclge_get_dfx_reg(hdev, reg);
12655 dev_err(&hdev->pdev->dev,
12656 "Get dfx register failed, ret = %d.\n", ret);
12659 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12661 struct hclge_set_led_state_cmd *req;
12662 struct hclge_desc desc;
12665 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12667 req = (struct hclge_set_led_state_cmd *)desc.data;
12668 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12669 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12671 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12673 dev_err(&hdev->pdev->dev,
12674 "Send set led state cmd error, ret =%d\n", ret);
12679 enum hclge_led_status {
12682 HCLGE_LED_NO_CHANGE = 0xFF,
12685 static int hclge_set_led_id(struct hnae3_handle *handle,
12686 enum ethtool_phys_id_state status)
12688 struct hclge_vport *vport = hclge_get_vport(handle);
12689 struct hclge_dev *hdev = vport->back;
12692 case ETHTOOL_ID_ACTIVE:
12693 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12694 case ETHTOOL_ID_INACTIVE:
12695 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12701 static void hclge_get_link_mode(struct hnae3_handle *handle,
12702 unsigned long *supported,
12703 unsigned long *advertising)
12705 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12706 struct hclge_vport *vport = hclge_get_vport(handle);
12707 struct hclge_dev *hdev = vport->back;
12708 unsigned int idx = 0;
12710 for (; idx < size; idx++) {
12711 supported[idx] = hdev->hw.mac.supported[idx];
12712 advertising[idx] = hdev->hw.mac.advertising[idx];
12716 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12718 struct hclge_vport *vport = hclge_get_vport(handle);
12719 struct hclge_dev *hdev = vport->back;
12720 bool gro_en_old = hdev->gro_en;
12723 hdev->gro_en = enable;
12724 ret = hclge_config_gro(hdev);
12726 hdev->gro_en = gro_en_old;
12731 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12733 struct hclge_vport *vport = &hdev->vport[0];
12734 struct hnae3_handle *handle = &vport->nic;
12739 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12740 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12741 vport->last_promisc_flags = vport->overflow_promisc_flags;
12744 if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12745 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12746 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12747 tmp_flags & HNAE3_MPE);
12749 clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12751 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12756 for (i = 1; i < hdev->num_alloc_vport; i++) {
12757 bool uc_en = false;
12758 bool mc_en = false;
12761 vport = &hdev->vport[i];
12763 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12767 if (vport->vf_info.trusted) {
12768 uc_en = vport->vf_info.request_uc_en > 0;
12769 mc_en = vport->vf_info.request_mc_en > 0;
12771 bc_en = vport->vf_info.request_bc_en > 0;
12773 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12776 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12780 hclge_set_vport_vlan_fltr_change(vport);
12784 static bool hclge_module_existed(struct hclge_dev *hdev)
12786 struct hclge_desc desc;
12790 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12791 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12793 dev_err(&hdev->pdev->dev,
12794 "failed to get SFP exist state, ret = %d\n", ret);
12798 existed = le32_to_cpu(desc.data[0]);
12800 return existed != 0;
12803 /* One read needs 6 BDs (140 bytes in total).
12804  * Returns the number of bytes actually read; 0 means the read failed.
12806 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12809 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12810 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12816 /* setup all 6 bds to read module eeprom info. */
12817 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12818 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12821 /* bd0~bd4 need next flag */
12822 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12823 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12826 /* setup bd0, this bd contains offset and read length. */
12827 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12828 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12829 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12830 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12832 ret = hclge_cmd_send(&hdev->hw, desc, i);
12834 dev_err(&hdev->pdev->dev,
12835 "failed to get SFP eeprom info, ret = %d\n", ret);
12839 /* copy sfp info from bd0 to out buffer. */
12840 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12841 memcpy(data, sfp_info_bd0->data, copy_len);
12842 read_len = copy_len;
12844 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12845 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12846 if (read_len >= len)
12849 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12850 memcpy(data + read_len, desc[i].data, copy_len);
12851 read_len += copy_len;
12857 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12860 struct hclge_vport *vport = hclge_get_vport(handle);
12861 struct hclge_dev *hdev = vport->back;
12865 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12866 return -EOPNOTSUPP;
12868 if (!hclge_module_existed(hdev))
12871 while (read_len < len) {
12872 data_len = hclge_get_sfp_eeprom_info(hdev,
12879 read_len += data_len;
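
/*
 * Illustration (compiled out): hclge_get_module_eeprom() reads the module
 * EEPROM in chunks, advancing by whatever each firmware command actually
 * returned and stopping on a zero-length read. The same loop shape with a
 * fake EEPROM backend; the 140-byte chunk limit mirrors the 6-BD read:
 */
#if 0	/* build standalone to run */
#include <stdio.h>
#include <string.h>

#define CHUNK_MAX	140	/* per-command limit, cf. the 6-BD read */

/* Stand-in for the firmware read: copies from a fake EEPROM image,
 * returning the number of bytes actually read (0 on failure).
 */
static unsigned short read_chunk(const unsigned char *eeprom,
				 unsigned int eeprom_len, unsigned int offset,
				 unsigned int len, unsigned char *out)
{
	unsigned int n = len < CHUNK_MAX ? len : CHUNK_MAX;

	if (offset >= eeprom_len)
		return 0;
	if (n > eeprom_len - offset)
		n = eeprom_len - offset;
	memcpy(out, eeprom + offset, n);
	return (unsigned short)n;
}

int main(void)
{
	unsigned char eeprom[512], buf[300];
	unsigned int read_len = 0, len = sizeof(buf);
	unsigned short got;

	memset(eeprom, 0xab, sizeof(eeprom));

	/* advance by whatever each command returned; stop on error */
	while (read_len < len) {
		got = read_chunk(eeprom, sizeof(eeprom), read_len,
				 len - read_len, buf + read_len);
		if (!got)
			return 1;
		read_len += got;
	}
	printf("read %u bytes\n", read_len);
	return 0;
}
#endif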
12885 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12888 struct hclge_vport *vport = hclge_get_vport(handle);
12889 struct hclge_dev *hdev = vport->back;
12890 struct hclge_desc desc;
12893 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12894 return -EOPNOTSUPP;
12896 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12897 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12899 dev_err(&hdev->pdev->dev,
12900 "failed to query link diagnosis info, ret = %d\n", ret);
12904 *status_code = le32_to_cpu(desc.data[0]);
12908 static const struct hnae3_ae_ops hclge_ops = {
12909 .init_ae_dev = hclge_init_ae_dev,
12910 .uninit_ae_dev = hclge_uninit_ae_dev,
12911 .reset_prepare = hclge_reset_prepare_general,
12912 .reset_done = hclge_reset_done,
12913 .init_client_instance = hclge_init_client_instance,
12914 .uninit_client_instance = hclge_uninit_client_instance,
12915 .map_ring_to_vector = hclge_map_ring_to_vector,
12916 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12917 .get_vector = hclge_get_vector,
12918 .put_vector = hclge_put_vector,
12919 .set_promisc_mode = hclge_set_promisc_mode,
12920 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12921 .set_loopback = hclge_set_loopback,
12922 .start = hclge_ae_start,
12923 .stop = hclge_ae_stop,
12924 .client_start = hclge_client_start,
12925 .client_stop = hclge_client_stop,
12926 .get_status = hclge_get_status,
12927 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12928 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12929 .get_media_type = hclge_get_media_type,
12930 .check_port_speed = hclge_check_port_speed,
12931 .get_fec = hclge_get_fec,
12932 .set_fec = hclge_set_fec,
12933 .get_rss_key_size = hclge_get_rss_key_size,
12934 .get_rss = hclge_get_rss,
12935 .set_rss = hclge_set_rss,
12936 .set_rss_tuple = hclge_set_rss_tuple,
12937 .get_rss_tuple = hclge_get_rss_tuple,
12938 .get_tc_size = hclge_get_tc_size,
12939 .get_mac_addr = hclge_get_mac_addr,
12940 .set_mac_addr = hclge_set_mac_addr,
12941 .do_ioctl = hclge_do_ioctl,
12942 .add_uc_addr = hclge_add_uc_addr,
12943 .rm_uc_addr = hclge_rm_uc_addr,
12944 .add_mc_addr = hclge_add_mc_addr,
12945 .rm_mc_addr = hclge_rm_mc_addr,
12946 .set_autoneg = hclge_set_autoneg,
12947 .get_autoneg = hclge_get_autoneg,
12948 .restart_autoneg = hclge_restart_autoneg,
12949 .halt_autoneg = hclge_halt_autoneg,
12950 .get_pauseparam = hclge_get_pauseparam,
12951 .set_pauseparam = hclge_set_pauseparam,
12952 .set_mtu = hclge_set_mtu,
12953 .reset_queue = hclge_reset_tqp,
12954 .get_stats = hclge_get_stats,
12955 .get_mac_stats = hclge_get_mac_stat,
12956 .update_stats = hclge_update_stats,
12957 .get_strings = hclge_get_strings,
12958 .get_sset_count = hclge_get_sset_count,
12959 .get_fw_version = hclge_get_fw_version,
12960 .get_mdix_mode = hclge_get_mdix_mode,
12961 .enable_vlan_filter = hclge_enable_vlan_filter,
12962 .set_vlan_filter = hclge_set_vlan_filter,
12963 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12964 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12965 .reset_event = hclge_reset_event,
12966 .get_reset_level = hclge_get_reset_level,
12967 .set_default_reset_request = hclge_set_def_reset_request,
12968 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12969 .set_channels = hclge_set_channels,
12970 .get_channels = hclge_get_channels,
12971 .get_regs_len = hclge_get_regs_len,
12972 .get_regs = hclge_get_regs,
12973 .set_led_id = hclge_set_led_id,
12974 .get_link_mode = hclge_get_link_mode,
12975 .add_fd_entry = hclge_add_fd_entry,
12976 .del_fd_entry = hclge_del_fd_entry,
12977 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12978 .get_fd_rule_info = hclge_get_fd_rule_info,
12979 .get_fd_all_rules = hclge_get_all_rules,
12980 .enable_fd = hclge_enable_fd,
12981 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12982 .dbg_read_cmd = hclge_dbg_read_cmd,
12983 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12984 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12985 .ae_dev_resetting = hclge_ae_dev_resetting,
12986 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12987 .set_gro_en = hclge_gro_en,
12988 .get_global_queue_id = hclge_covert_handle_qid_global,
12989 .set_timer_task = hclge_set_timer_task,
12990 .mac_connect_phy = hclge_mac_connect_phy,
12991 .mac_disconnect_phy = hclge_mac_disconnect_phy,
12992 .get_vf_config = hclge_get_vf_config,
12993 .set_vf_link_state = hclge_set_vf_link_state,
12994 .set_vf_spoofchk = hclge_set_vf_spoofchk,
12995 .set_vf_trust = hclge_set_vf_trust,
12996 .set_vf_rate = hclge_set_vf_rate,
12997 .set_vf_mac = hclge_set_vf_mac,
12998 .get_module_eeprom = hclge_get_module_eeprom,
12999 .get_cmdq_stat = hclge_get_cmdq_stat,
13000 .add_cls_flower = hclge_add_cls_flower,
13001 .del_cls_flower = hclge_del_cls_flower,
13002 .cls_flower_active = hclge_is_cls_flower_active,
13003 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13004 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13005 .set_tx_hwts_info = hclge_ptp_set_tx_info,
13006 .get_rx_hwts = hclge_ptp_get_rx_hwts,
13007 .get_ts_info = hclge_ptp_get_ts_info,
13008 .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13011 static struct hnae3_ae_algo ae_algo = {
13013 .pdev_id_table = ae_algo_pci_tbl,
13016 static int hclge_init(void)
13018 pr_info("%s is initializing\n", HCLGE_NAME);
13020 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13022 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13026 hnae3_register_ae_algo(&ae_algo);
13031 static void hclge_exit(void)
13033 hnae3_unregister_ae_algo(&ae_algo);
13034 destroy_workqueue(hclge_wq);
13036 module_init(hclge_init);
13037 module_exit(hclge_exit);
13039 MODULE_LICENSE("GPL");
13040 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13041 MODULE_DESCRIPTION("HCLGE Driver");
13042 MODULE_VERSION(HCLGE_MOD_VERSION);